refactor: accept partial machine configuration
This refactors the code to handle partial machine configuration: a multi-doc config without the v1alpha1 document. It uses improvements from https://github.com/cosi-project/runtime/pull/300:

* where possible, use `TransformController`
* use the integrated tracker to reduce boilerplate

Fix/rewrite tests where applicable.

Signed-off-by: Andrey Smirnov <andrey.smirnov@talos-systems.com>
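For context, this is the generic transform-controller shape the diff migrates to — a minimal sketch distilled from the cluster.ConfigController changes below; the concrete resource types, guard conditions, and spec fields vary per controller, and the body here is trimmed for illustration:

type ConfigController = transform.Controller[*config.MachineConfig, *cluster.Config]

func NewConfigController() *ConfigController {
	return transform.NewController(
		transform.Settings[*config.MachineConfig, *cluster.Config]{
			Name: "cluster.ConfigController",
			// MapMetadataOptionalFunc decides whether the output should exist at all;
			// returning optional.None makes the runtime skip (and clean up) the output,
			// which is how a partial machine config without the relevant section is handled.
			MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*cluster.Config] {
				if cfg.Metadata().ID() != config.V1Alpha1ID || cfg.Config().Cluster() == nil {
					return optional.None[*cluster.Config]()
				}

				return optional.Some(cluster.NewConfig(config.NamespaceName, cluster.ConfigID))
			},
			// TransformFunc fills the output spec from the input resource.
			TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *cluster.Config) error {
				res.TypedSpec().DiscoveryEnabled = cfg.Config().Cluster().Discovery().Enabled()

				return nil
			},
		},
	)
}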
parent 9b0bc3e931
commit 544cb4fe7d

go.mod
@@ -37,7 +37,7 @@ require (
github.com/containernetworking/plugins v1.3.0
github.com/coreos/go-iptables v0.6.0
github.com/coreos/go-semver v0.3.1
github.com/cosi-project/runtime v0.3.1-alpha.8
github.com/cosi-project/runtime v0.3.1-beta.0
github.com/docker/distribution v2.8.2+incompatible
github.com/docker/docker v24.0.4+incompatible
github.com/docker/go-connections v0.4.0

go.sum
@@ -457,8 +457,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cosi-project/runtime v0.3.1-alpha.8 h1:7OTWghF4Og3Uixwzuw2RWGRO+UjuPFFlRLiMgNeqNl8=
github.com/cosi-project/runtime v0.3.1-alpha.8/go.mod h1:n6rQ/b9GkrniSslnrFId6dzWDG+htbcC9fW+f3f1K94=
github.com/cosi-project/runtime v0.3.1-beta.0 h1:LKiflYmX6/dEX84geYPOo/ApPb+4ikr7Tok8X83HelY=
github.com/cosi-project/runtime v0.3.1-beta.0/go.mod h1:n6rQ/b9GkrniSslnrFId6dzWDG+htbcC9fW+f3f1K94=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
@@ -163,8 +163,11 @@ func apidMain() error {
}

injector := &authz.Injector{
Mode: mode,
Logger: log.New(log.Writer(), "apid/authz/injector/http ", log.Flags()).Printf,
Mode: mode,
}

if debug.Enabled {
injector.Logger = log.New(log.Writer(), "apid/authz/injector/http ", log.Flags()).Printf
}

return factory.NewServer(

@@ -190,8 +193,11 @@ func apidMain() error {

socketServer := func() *grpc.Server {
injector := &authz.Injector{
Mode: authz.MetadataOnly,
Logger: log.New(log.Writer(), "apid/authz/injector/unix ", log.Flags()).Printf,
Mode: authz.MetadataOnly,
}

if debug.Enabled {
injector.Logger = log.New(log.Writer(), "apid/authz/injector/unix ", log.Flags()).Printf
}

return factory.NewServer(
@@ -1125,7 +1125,7 @@ func (s *Server) Version(ctx context.Context, in *emptypb.Empty) (reply *machine
var features *machine.FeaturesInfo

config := s.Controller.Runtime().Config()
if config != nil {
if config != nil && config.Machine() != nil {
features = &machine.FeaturesInfo{
Rbac: config.Machine().Features().RBACEnabled(),
}
@@ -126,5 +126,7 @@ func (suite *AffiliateMergeSuite) TestReconcileDefault() {
}

func TestAffiliateMergeSuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(AffiliateMergeSuite))
}
@@ -7,15 +7,12 @@ package cluster
import (
"context"
"encoding/base64"
"fmt"
"net"
"net/url"

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/go-pointer"
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
"github.com/siderolabs/gen/optional"
"go.uber.org/zap"

"github.com/siderolabs/talos/pkg/machinery/resources/cluster"

@@ -23,129 +20,74 @@ import (
)

// ConfigController watches v1alpha1.Config, updates discovery config.
type ConfigController struct{}
type ConfigController = transform.Controller[*config.MachineConfig, *cluster.Config]

// Name implements controller.Controller interface.
func (ctrl *ConfigController) Name() string {
return "cluster.ConfigController"
}

// Inputs implements controller.Controller interface.
func (ctrl *ConfigController) Inputs() []controller.Input {
return []controller.Input{
{
Namespace: config.NamespaceName,
Type: config.MachineConfigType,
ID: pointer.To(config.V1Alpha1ID),
Kind: controller.InputWeak,
},
}
}

// Outputs implements controller.Controller interface.
func (ctrl *ConfigController) Outputs() []controller.Output {
return []controller.Output{
{
Type: cluster.ConfigType,
Kind: controller.OutputExclusive,
},
}
}

// Run implements controller.Controller interface.
//
//nolint:gocyclo
func (ctrl *ConfigController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
select {
case <-ctx.Done():
return nil
case <-r.EventCh():
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
if err != nil {
if !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
// NewConfigController instanciates the config controller.
func NewConfigController() *ConfigController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *cluster.Config]{
Name: "cluster.ConfigController",
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*cluster.Config] {
if cfg.Metadata().ID() != config.V1Alpha1ID {
return optional.None[*cluster.Config]()
}
}

touchedIDs := map[resource.ID]struct{}{}
if cfg.Config().Cluster() == nil {
return optional.None[*cluster.Config]()
}

if cfg != nil {
c := cfg.(*config.MachineConfig).Config()
return optional.Some(cluster.NewConfig(config.NamespaceName, cluster.ConfigID))
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *cluster.Config) error {
c := cfg.Config()

if err = safe.WriterModify(ctx, r, cluster.NewConfig(config.NamespaceName, cluster.ConfigID), func(res *cluster.Config) error {
res.TypedSpec().DiscoveryEnabled = c.Cluster().Discovery().Enabled()
res.TypedSpec().DiscoveryEnabled = c.Cluster().Discovery().Enabled()

if c.Cluster().Discovery().Enabled() {
res.TypedSpec().RegistryKubernetesEnabled = c.Cluster().Discovery().Registries().Kubernetes().Enabled()
res.TypedSpec().RegistryServiceEnabled = c.Cluster().Discovery().Registries().Service().Enabled()
if c.Cluster().Discovery().Enabled() {
res.TypedSpec().RegistryKubernetesEnabled = c.Cluster().Discovery().Registries().Kubernetes().Enabled()
res.TypedSpec().RegistryServiceEnabled = c.Cluster().Discovery().Registries().Service().Enabled()

if c.Cluster().Discovery().Registries().Service().Enabled() {
var u *url.URL
if c.Cluster().Discovery().Registries().Service().Enabled() {
var u *url.URL

u, err = url.ParseRequestURI(c.Cluster().Discovery().Registries().Service().Endpoint())
if err != nil {
return err
}

host := u.Hostname()
port := u.Port()

if port == "" {
if u.Scheme == "http" {
port = "80"
} else {
port = "443" // use default https port for everything else
}
}

res.TypedSpec().ServiceEndpoint = net.JoinHostPort(host, port)
res.TypedSpec().ServiceEndpointInsecure = u.Scheme == "http"

res.TypedSpec().ServiceEncryptionKey, err = base64.StdEncoding.DecodeString(c.Cluster().Secret())
if err != nil {
return err
}

res.TypedSpec().ServiceClusterID = c.Cluster().ID()
} else {
res.TypedSpec().ServiceEndpoint = ""
res.TypedSpec().ServiceEndpointInsecure = false
res.TypedSpec().ServiceEncryptionKey = nil
res.TypedSpec().ServiceClusterID = ""
u, err := url.ParseRequestURI(c.Cluster().Discovery().Registries().Service().Endpoint())
if err != nil {
return err
}

host := u.Hostname()
port := u.Port()

if port == "" {
if u.Scheme == "http" {
port = "80"
} else {
port = "443" // use default https port for everything else
}
}

res.TypedSpec().ServiceEndpoint = net.JoinHostPort(host, port)
res.TypedSpec().ServiceEndpointInsecure = u.Scheme == "http"

res.TypedSpec().ServiceEncryptionKey, err = base64.StdEncoding.DecodeString(c.Cluster().Secret())
if err != nil {
return err
}

res.TypedSpec().ServiceClusterID = c.Cluster().ID()
} else {
res.TypedSpec().RegistryKubernetesEnabled = false
res.TypedSpec().RegistryServiceEnabled = false
res.TypedSpec().ServiceEndpoint = ""
res.TypedSpec().ServiceEndpointInsecure = false
res.TypedSpec().ServiceEncryptionKey = nil
res.TypedSpec().ServiceClusterID = ""
}

return nil
}); err != nil {
return err
} else {
res.TypedSpec().RegistryKubernetesEnabled = false
res.TypedSpec().RegistryServiceEnabled = false
}

touchedIDs[cluster.ConfigID] = struct{}{}
}

// list keys for cleanup
list, err := r.List(ctx, resource.NewMetadata(config.NamespaceName, cluster.ConfigType, "", resource.VersionUndefined))
if err != nil {
return fmt.Errorf("error listing resources: %w", err)
}

for _, res := range list.Items {
if res.Metadata().Owner() != ctrl.Name() {
continue
}

if _, ok := touchedIDs[res.Metadata().ID()]; !ok {
if err = r.Destroy(ctx, res.Metadata()); err != nil {
return fmt.Errorf("error cleaning up specs: %w", err)
}
}
}
}

r.ResetRestartBackoff()
}
return nil
},
},
)
}
@@ -10,11 +10,13 @@ import (
"time"

"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/resource/rtestutils"
"github.com/siderolabs/go-pointer"
"github.com/siderolabs/go-retry/retry"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"

clusterctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/cluster"
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
"github.com/siderolabs/talos/pkg/machinery/config/container"
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
"github.com/siderolabs/talos/pkg/machinery/resources/cluster"

@@ -22,14 +24,10 @@ import (
)

type ConfigSuite struct {
ClusterSuite
ctest.DefaultSuite
}

func (suite *ConfigSuite) TestReconcileConfig() {
suite.Require().NoError(suite.runtime.RegisterController(&clusterctrl.ConfigController{}))

suite.startRuntime()

cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
ConfigVersion: "v1alpha1",
ClusterConfig: &v1alpha1.ClusterConfig{

@@ -41,37 +39,30 @@ func (suite *ConfigSuite) TestReconcileConfig() {
},
}))

suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))

specMD := resource.NewMetadata(config.NamespaceName, cluster.ConfigType, cluster.ConfigID, resource.VersionUndefined)
rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{cluster.ConfigID},
func(res *cluster.Config, asrt *assert.Assertions) {
spec := res.TypedSpec()

suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
suite.assertResource(
specMD,
func(res resource.Resource) error {
spec := res.(*cluster.Config).TypedSpec()
asrt.True(spec.DiscoveryEnabled)
asrt.True(spec.RegistryKubernetesEnabled)
asrt.True(spec.RegistryServiceEnabled)
asrt.Equal("discovery.talos.dev:443", spec.ServiceEndpoint)
asrt.False(spec.ServiceEndpointInsecure)
asrt.Equal("cluster1", spec.ServiceClusterID)
asrt.Equal(
[]byte("\x90\x24\x2c\x2a\xbe\x01\xdb\xc5\x54\x97\xba\xb0\xd6\xc5\x64\x4c\x33\x45\xf5\xf1\x47\xfb\xe5\x62\x22\xe2\xac\xb2\xcf\x82\xea\x47"),
spec.ServiceEncryptionKey,
)
})

suite.Assert().True(spec.DiscoveryEnabled)
suite.Assert().True(spec.RegistryKubernetesEnabled)
suite.Assert().True(spec.RegistryServiceEnabled)
suite.Assert().Equal("discovery.talos.dev:443", spec.ServiceEndpoint)
suite.Assert().False(spec.ServiceEndpointInsecure)
suite.Assert().Equal("cluster1", spec.ServiceClusterID)
suite.Assert().Equal(
[]byte("\x90\x24\x2c\x2a\xbe\x01\xdb\xc5\x54\x97\xba\xb0\xd6\xc5\x64\x4c\x33\x45\xf5\xf1\x47\xfb\xe5\x62\x22\xe2\xac\xb2\xcf\x82\xea\x47"),
spec.ServiceEncryptionKey)
suite.Require().NoError(suite.State().Destroy(suite.Ctx(), cfg.Metadata()))

return nil
},
),
))
rtestutils.AssertNoResource[*cluster.Config](suite.Ctx(), suite.T(), suite.State(), cluster.ConfigID)
}

func (suite *ConfigSuite) TestReconcileConfigCustom() {
suite.Require().NoError(suite.runtime.RegisterController(&clusterctrl.ConfigController{}))

suite.startRuntime()

cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
ConfigVersion: "v1alpha1",
ClusterConfig: &v1alpha1.ClusterConfig{

@@ -91,33 +82,22 @@ func (suite *ConfigSuite) TestReconcileConfigCustom() {
},
}))

suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))

specMD := resource.NewMetadata(config.NamespaceName, cluster.ConfigType, cluster.ConfigID, resource.VersionUndefined)
rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{cluster.ConfigID},
func(res *cluster.Config, asrt *assert.Assertions) {
spec := res.TypedSpec()

suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
suite.assertResource(
specMD,
func(res resource.Resource) error {
spec := res.(*cluster.Config).TypedSpec()

suite.Assert().True(spec.DiscoveryEnabled)
suite.Assert().False(spec.RegistryKubernetesEnabled)
suite.Assert().True(spec.RegistryServiceEnabled)
suite.Assert().Equal("[2001:470:6d:30e:565d:e162:e2a0:cf5a]:3456", spec.ServiceEndpoint)
suite.Assert().False(spec.ServiceEndpointInsecure)

return nil
},
),
))
asrt.True(spec.DiscoveryEnabled)
asrt.False(spec.RegistryKubernetesEnabled)
asrt.True(spec.RegistryServiceEnabled)
asrt.Equal("[2001:470:6d:30e:565d:e162:e2a0:cf5a]:3456", spec.ServiceEndpoint)
asrt.False(spec.ServiceEndpointInsecure)
},
)
}

func (suite *ConfigSuite) TestReconcileConfigCustomInsecure() {
suite.Require().NoError(suite.runtime.RegisterController(&clusterctrl.ConfigController{}))

suite.startRuntime()

cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
ConfigVersion: "v1alpha1",
ClusterConfig: &v1alpha1.ClusterConfig{

@@ -137,58 +117,83 @@ func (suite *ConfigSuite) TestReconcileConfigCustomInsecure() {
},
}))

suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))

specMD := resource.NewMetadata(config.NamespaceName, cluster.ConfigType, cluster.ConfigID, resource.VersionUndefined)
rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{cluster.ConfigID},
func(res *cluster.Config, asrt *assert.Assertions) {
spec := res.TypedSpec()

suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
suite.assertResource(
specMD,
func(res resource.Resource) error {
spec := res.(*cluster.Config).TypedSpec()

suite.Assert().True(spec.DiscoveryEnabled)
suite.Assert().False(spec.RegistryKubernetesEnabled)
suite.Assert().True(spec.RegistryServiceEnabled)
suite.Assert().Equal("localhost:3000", spec.ServiceEndpoint)
suite.Assert().True(spec.ServiceEndpointInsecure)

return nil
},
),
))
asrt.True(spec.DiscoveryEnabled)
asrt.False(spec.RegistryKubernetesEnabled)
asrt.True(spec.RegistryServiceEnabled)
asrt.Equal("localhost:3000", spec.ServiceEndpoint)
asrt.True(spec.ServiceEndpointInsecure)
},
)
}

func (suite *ConfigSuite) TestReconcileDisabled() {
suite.Require().NoError(suite.runtime.RegisterController(&clusterctrl.ConfigController{}))

suite.startRuntime()

cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
ClusterConfig: &v1alpha1.ClusterConfig{},
}))

suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))

specMD := resource.NewMetadata(config.NamespaceName, cluster.ConfigType, cluster.ConfigID, resource.VersionUndefined)
rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{cluster.ConfigID},
func(res *cluster.Config, asrt *assert.Assertions) {
spec := res.TypedSpec()

suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
suite.assertResource(
specMD,
func(res resource.Resource) error {
spec := res.(*cluster.Config).TypedSpec()
asrt.False(spec.DiscoveryEnabled)
asrt.False(spec.RegistryKubernetesEnabled)
},
)
}

suite.Assert().False(spec.DiscoveryEnabled)
suite.Assert().False(spec.RegistryKubernetesEnabled)
func (suite *ConfigSuite) TestReconcilePartial() {
cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
ClusterConfig: &v1alpha1.ClusterConfig{},
}))

return nil
},
),
))
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))

rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{cluster.ConfigID},
func(res *cluster.Config, asrt *assert.Assertions) {
spec := res.TypedSpec()

asrt.False(spec.DiscoveryEnabled)
asrt.False(spec.RegistryKubernetesEnabled)
},
)

newCfg := config.NewMachineConfig(must(container.New()))
newCfg.Metadata().SetVersion(cfg.Metadata().Version())

suite.Require().NoError(suite.State().Update(suite.Ctx(), newCfg))

rtestutils.AssertNoResource[*cluster.Config](suite.Ctx(), suite.T(), suite.State(), cluster.ConfigID)
}

func TestConfigSuite(t *testing.T) {
suite.Run(t, new(ConfigSuite))
t.Parallel()

suite.Run(t, &ConfigSuite{
DefaultSuite: ctest.DefaultSuite{
Timeout: 5 * time.Second,
AfterSetup: func(suite *ctest.DefaultSuite) {
suite.Require().NoError(suite.Runtime().RegisterController(clusterctrl.NewConfigController()))
},
},
})
}

func must[T any](t T, err error) T {
if err != nil {
panic(err)
}

return t
}
@@ -366,5 +366,7 @@ func (suite *DiscoveryServiceSuite) TestDisable() {
}

func TestDiscoveryServiceSuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(DiscoveryServiceSuite))
}

@@ -77,5 +77,7 @@ func (suite *EndpointSuite) TestReconcileDefault() {
}

func TestEndpointSuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(EndpointSuite))
}
@@ -6,12 +6,10 @@ package cluster

import (
"context"
"fmt"

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/go-pointer"
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
"github.com/siderolabs/gen/optional"
"go.uber.org/zap"

"github.com/siderolabs/talos/pkg/machinery/resources/cluster"

@@ -19,63 +17,30 @@ import (
)

// InfoController looks up control plane infos.
type InfoController struct{}
type InfoController = transform.Controller[*config.MachineConfig, *cluster.Info]

// Name implements controller.Controller interface.
func (ctrl *InfoController) Name() string {
return "cluster.InfoController"
}
// NewInfoController instanciates the cluster info controller.
func NewInfoController() *InfoController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *cluster.Info]{
Name: "cluster.InfoController",
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*cluster.Info] {
if cfg.Metadata().ID() != config.V1Alpha1ID {
return optional.None[*cluster.Info]()
}

// Inputs implements controller.Controller interface.
func (ctrl *InfoController) Inputs() []controller.Input {
return []controller.Input{
{
Namespace: config.NamespaceName,
Type: config.MachineConfigType,
ID: pointer.To(config.V1Alpha1ID),
Kind: controller.InputWeak,
if cfg.Config().Cluster() == nil {
return optional.None[*cluster.Info]()
}

return optional.Some(cluster.NewInfo())
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, info *cluster.Info) error {
info.TypedSpec().ClusterID = cfg.Config().Cluster().ID()
info.TypedSpec().ClusterName = cfg.Config().Cluster().Name()

return nil
},
},
}
}

// Outputs implements controller.Controller interface.
func (ctrl *InfoController) Outputs() []controller.Output {
return []controller.Output{
{
Type: cluster.InfoType,
Kind: controller.OutputShared,
},
}
}

// Run implements controller.Controller interface.
func (ctrl *InfoController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
select {
case <-ctx.Done():
return nil
case <-r.EventCh():
}

cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if state.IsNotFoundError(err) {
continue
}

return fmt.Errorf("error getting config: %w", err)
}

err = safe.WriterModify(ctx, r, cluster.NewInfo(), func(info *cluster.Info) error {
info.TypedSpec().ClusterID = cfg.Config().Cluster().ID()
info.TypedSpec().ClusterName = cfg.Config().Cluster().Name()

return nil
})
if err != nil {
return fmt.Errorf("error updating objects: %w", err)
}

r.ResetRestartBackoff()
}
)
}
internal/app/machined/pkg/controllers/cluster/info_test.go (new file)

@@ -0,0 +1,63 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package cluster_test

import (
"testing"
"time"

"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/resource/rtestutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"

clusterctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/cluster"
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
"github.com/siderolabs/talos/pkg/machinery/config/container"
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
"github.com/siderolabs/talos/pkg/machinery/resources/cluster"
"github.com/siderolabs/talos/pkg/machinery/resources/config"
)

type InfoSuite struct {
ctest.DefaultSuite
}

func (suite *InfoSuite) TestReconcile() {
cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
ConfigVersion: "v1alpha1",
ClusterConfig: &v1alpha1.ClusterConfig{
ClusterID: "cluster1",
ClusterName: "foo",
},
}))

suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))

rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{cluster.InfoID},
func(res *cluster.Info, asrt *assert.Assertions) {
spec := res.TypedSpec()

asrt.Equal("cluster1", spec.ClusterID)
asrt.Equal("foo", spec.ClusterName)
})

suite.Require().NoError(suite.State().Destroy(suite.Ctx(), cfg.Metadata()))

rtestutils.AssertNoResource[*cluster.Config](suite.Ctx(), suite.T(), suite.State(), cluster.ConfigID)
}

func TestInfoSuite(t *testing.T) {
t.Parallel()

suite.Run(t, &InfoSuite{
DefaultSuite: ctest.DefaultSuite{
Timeout: 5 * time.Second,
AfterSetup: func(suite *ctest.DefaultSuite) {
suite.Require().NoError(suite.Runtime().RegisterController(clusterctrl.NewInfoController()))
},
},
})
}
@@ -92,8 +92,9 @@ func (ctrl *LocalAffiliateController) Inputs() []controller.Input {
Kind: controller.InputWeak,
},
{
Namespace: config.NamespaceName,
Type: config.MachineConfigType,
Namespace: k8s.ControlPlaneNamespaceName,
Type: k8s.APIServerConfigType,
ID: pointer.To(k8s.APIServerConfigID),
Kind: controller.InputWeak,
},
}

@@ -182,11 +183,6 @@ func (ctrl *LocalAffiliateController) Run(ctx context.Context, r controller.Runt
continue
}

machineConfig, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting machine config: %w", err)
}

// optional resources (kubespan)
kubespanIdentity, err := safe.ReaderGetByID[*kubespan.Identity](ctx, r, kubespan.LocalIdentity)
if err != nil && !state.IsNotFoundError(err) {

@@ -208,6 +204,12 @@ func (ctrl *LocalAffiliateController) Run(ctx context.Context, r controller.Runt
return fmt.Errorf("error getting discovered public IP: %w", err)
}

// optional resources (kubernetes)
apiServerConfig, err := safe.ReaderGetByID[*k8s.APIServerConfig](ctx, r, k8s.APIServerConfigID)
if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting API server config: %w", err)
}

localID := identity.TypedSpec().NodeID

touchedIDs := map[resource.ID]struct{}{}

@@ -222,9 +224,9 @@ func (ctrl *LocalAffiliateController) Run(ctx context.Context, r controller.Runt
spec.MachineType = machineType.MachineType()
spec.OperatingSystem = fmt.Sprintf("%s (%s)", version.Name, version.Tag)

if machineType.MachineType().IsControlPlane() && machineConfig != nil {
if machineType.MachineType().IsControlPlane() && apiServerConfig != nil {
spec.ControlPlane = &cluster.ControlPlane{
APIServerPort: machineConfig.Config().Cluster().LocalAPIServerPort(),
APIServerPort: apiServerConfig.TypedSpec().LocalPort,
}
} else {
spec.ControlPlane = nil
@@ -7,7 +7,6 @@ package cluster_test
import (
"net"
"net/netip"
"net/url"
"testing"

"github.com/siderolabs/gen/slices"

@@ -18,9 +17,7 @@ import (
kubespanadapter "github.com/siderolabs/talos/internal/app/machined/pkg/adapters/kubespan"
clusterctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/cluster"
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
"github.com/siderolabs/talos/pkg/machinery/config/container"
"github.com/siderolabs/talos/pkg/machinery/config/machine"
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
"github.com/siderolabs/talos/pkg/machinery/resources/cluster"
"github.com/siderolabs/talos/pkg/machinery/resources/config"
"github.com/siderolabs/talos/pkg/machinery/resources/k8s"

@@ -153,26 +150,9 @@ func (suite *LocalAffiliateSuite) TestCPGeneration() {
machineType.SetMachineType(machine.TypeControlPlane)
suite.Require().NoError(suite.state.Create(suite.ctx, machineType))

u, err := url.Parse("https://foo:6443")
suite.Require().NoError(err)

mc := config.NewMachineConfig(
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
URL: u,
},
LocalAPIServerPort: 6445,
},
},
},
),
)
suite.Require().NoError(suite.state.Create(suite.ctx, mc))
apiServerConfig := k8s.NewAPIServerConfig()
apiServerConfig.TypedSpec().LocalPort = 6445
suite.Require().NoError(suite.state.Create(suite.ctx, apiServerConfig))

ctest.AssertResource(suite, nodeIdentity.TypedSpec().NodeID, func(r *cluster.Affiliate, asrt *assert.Assertions) {
spec := r.TypedSpec()

@@ -241,5 +221,7 @@ func (suite *LocalAffiliateSuite) createResources() (*cluster.Identity, *network
}

func TestLocalAffiliateSuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(LocalAffiliateSuite))
}

@@ -111,5 +111,7 @@ func (suite *NodeIdentitySuite) TestLoad() {
}

func TestNodeIdentitySuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(NodeIdentitySuite))
}
@@ -10,6 +10,7 @@ import (

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/go-pointer"
"go.uber.org/zap"

@@ -59,13 +60,13 @@ func (ctrl *MachineTypeController) Run(ctx context.Context, r controller.Runtime

var machineType machine.Type

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}
} else {
machineType = cfg.(*config.MachineConfig).Config().Machine().Type()
} else if cfg.Config().Machine() != nil {
machineType = cfg.Config().Machine().Type()
}

if err = r.Modify(ctx, config.NewMachineType(), func(r resource.Resource) error {
@@ -68,37 +68,23 @@ func (ctrl *SeccompProfileController) Run(ctx context.Context, r controller.Runt
return fmt.Errorf("error getting config: %w", err)
}

touchedIDs := make(map[string]struct{}, len(cfg.Config().Machine().SeccompProfiles()))
r.StartTrackingOutputs()

for _, profile := range cfg.Config().Machine().SeccompProfiles() {
if err = safe.WriterModify(ctx, r, cri.NewSeccompProfile(profile.Name()), func(cri *cri.SeccompProfile) error {
cri.TypedSpec().Name = profile.Name()
cri.TypedSpec().Value = profile.Value()
if cfg.Config().Machine() != nil {
for _, profile := range cfg.Config().Machine().SeccompProfiles() {
if err = safe.WriterModify(ctx, r, cri.NewSeccompProfile(profile.Name()), func(cri *cri.SeccompProfile) error {
cri.TypedSpec().Name = profile.Name()
cri.TypedSpec().Value = profile.Value()

return nil
}); err != nil {
return err
}

touchedIDs[profile.Name()] = struct{}{}
}

// list keys for cleanup
list, err := safe.ReaderListAll[*cri.SeccompProfile](ctx, r)
if err != nil {
return fmt.Errorf("error listing seccomp profiles: %w", err)
}

for iter := safe.IteratorFromList(list); iter.Next(); {
profile := iter.Value()

if _, ok := touchedIDs[profile.Metadata().ID()]; !ok {
if err := r.Destroy(ctx, profile.Metadata()); err != nil {
return fmt.Errorf("error deleting seccomp profile: %w", err)
return nil
}); err != nil {
return err
}
}
}

r.ResetRestartBackoff()
if err = safe.CleanupOutputs[*cri.SeccompProfile](ctx, r); err != nil {
return err
}
}
}
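The hunk above also shows the second improvement from the commit message: the integrated output tracker. As far as can be inferred from this diff (a hedged reading, not a definitive description of the runtime API), the controller brackets its writes with r.StartTrackingOutputs() and safe.CleanupOutputs, and the runtime then destroys any of its outputs that were not re-written in this pass — replacing the hand-rolled touchedIDs map and the list/destroy loop. A minimal sketch of the shape, mirroring the seccomp example above:

	// inside the controller's reconcile pass, after reading inputs:
	r.StartTrackingOutputs()

	// ... safe.WriterModify(ctx, r, cri.NewSeccompProfile(name), ...) for each desired profile ...

	// destroys previously created outputs that were not touched above
	if err = safe.CleanupOutputs[*cri.SeccompProfile](ctx, r); err != nil {
		return err
	}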
@@ -6,134 +6,64 @@ package etcd

import (
"context"
"fmt"

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/go-pointer"
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
"github.com/siderolabs/gen/optional"
"go.uber.org/zap"

"github.com/siderolabs/talos/pkg/machinery/resources/config"
"github.com/siderolabs/talos/pkg/machinery/resources/etcd"
)

// ConfigController renders manifests based on templates and config/secrets.
type ConfigController struct{}
// ConfigController watches v1alpha1.Config, updates etcd config.
type ConfigController = transform.Controller[*config.MachineConfig, *etcd.Config]

// Name implements controller.Controller interface.
func (ctrl *ConfigController) Name() string {
return "etcd.ConfigController"
}

// Inputs implements controller.Controller interface.
func (ctrl *ConfigController) Inputs() []controller.Input {
return []controller.Input{
{
Namespace: config.NamespaceName,
Type: config.MachineConfigType,
ID: pointer.To(config.V1Alpha1ID),
Kind: controller.InputWeak,
},
{
Namespace: config.NamespaceName,
Type: config.MachineTypeType,
ID: pointer.To(config.MachineTypeID),
Kind: controller.InputWeak,
},
}
}

// Outputs implements controller.Controller interface.
func (ctrl *ConfigController) Outputs() []controller.Output {
return []controller.Output{
{
Type: etcd.ConfigType,
Kind: controller.OutputExclusive,
},
}
}

// Run implements controller.Controller interface.
//
//nolint:gocyclo
func (ctrl *ConfigController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
select {
case <-ctx.Done():
return nil
case <-r.EventCh():
}

machineType, err := safe.ReaderGet[*config.MachineType](ctx, r, resource.NewMetadata(config.NamespaceName, config.MachineTypeType, config.MachineTypeID, resource.VersionUndefined))
if err != nil {
if state.IsNotFoundError(err) {
continue
}

return fmt.Errorf("error getting machine config: %w", err)
}

if !machineType.MachineType().IsControlPlane() {
if err = ctrl.teardownAll(ctx, r); err != nil {
return err
}

continue
}

machineConfig, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if state.IsNotFoundError(err) {
continue
}

return fmt.Errorf("error getting machine config: %w", err)
}

if err = safe.WriterModify(ctx, r, etcd.NewConfig(etcd.NamespaceName, etcd.ConfigID), func(cfg *etcd.Config) error {
cfg.TypedSpec().AdvertiseValidSubnets = machineConfig.Config().Cluster().Etcd().AdvertisedSubnets()
cfg.TypedSpec().AdvertiseExcludeSubnets = nil
cfg.TypedSpec().ListenValidSubnets = machineConfig.Config().Cluster().Etcd().ListenSubnets()
cfg.TypedSpec().ListenExcludeSubnets = nil

// filter out any virtual IPs, they can't be node IPs either
for _, device := range machineConfig.Config().Machine().Network().Devices() {
if device.VIPConfig() != nil {
cfg.TypedSpec().AdvertiseExcludeSubnets = append(cfg.TypedSpec().AdvertiseExcludeSubnets, device.VIPConfig().IP())
// NewConfigController instanciates the config controller.
func NewConfigController() *ConfigController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *etcd.Config]{
Name: "etcd.ConfigController",
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*etcd.Config] {
if cfg.Metadata().ID() != config.V1Alpha1ID {
return optional.None[*etcd.Config]()
}

for _, vlan := range device.Vlans() {
if vlan.VIPConfig() != nil {
cfg.TypedSpec().AdvertiseExcludeSubnets = append(cfg.TypedSpec().AdvertiseExcludeSubnets, vlan.VIPConfig().IP())
if cfg.Config().Machine() == nil || cfg.Config().Cluster() == nil {
return optional.None[*etcd.Config]()
}

if !cfg.Config().Machine().Type().IsControlPlane() {
// etcd only runs on controlplane nodes
return optional.None[*etcd.Config]()
}

return optional.Some(etcd.NewConfig(etcd.NamespaceName, etcd.ConfigID))
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, cfg *etcd.Config) error {
cfg.TypedSpec().AdvertiseValidSubnets = machineConfig.Config().Cluster().Etcd().AdvertisedSubnets()
cfg.TypedSpec().AdvertiseExcludeSubnets = nil
cfg.TypedSpec().ListenValidSubnets = machineConfig.Config().Cluster().Etcd().ListenSubnets()
cfg.TypedSpec().ListenExcludeSubnets = nil

// filter out any virtual IPs, they can't be node IPs either
for _, device := range machineConfig.Config().Machine().Network().Devices() {
if device.VIPConfig() != nil {
cfg.TypedSpec().AdvertiseExcludeSubnets = append(cfg.TypedSpec().AdvertiseExcludeSubnets, device.VIPConfig().IP())
}

for _, vlan := range device.Vlans() {
if vlan.VIPConfig() != nil {
cfg.TypedSpec().AdvertiseExcludeSubnets = append(cfg.TypedSpec().AdvertiseExcludeSubnets, vlan.VIPConfig().IP())
}
}
}

cfg.TypedSpec().Image = machineConfig.Config().Cluster().Etcd().Image()
cfg.TypedSpec().ExtraArgs = machineConfig.Config().Cluster().Etcd().ExtraArgs()
cfg.TypedSpec().Image = machineConfig.Config().Cluster().Etcd().Image()
cfg.TypedSpec().ExtraArgs = machineConfig.Config().Cluster().Etcd().ExtraArgs()

return nil
}); err != nil {
return fmt.Errorf("error updating Config status: %w", err)
}

r.ResetRestartBackoff()
}
}

func (ctrl *ConfigController) teardownAll(ctx context.Context, r controller.Runtime) error {
list, err := r.List(ctx, resource.NewMetadata(etcd.NamespaceName, etcd.ConfigType, "", resource.VersionUndefined))
if err != nil {
return err
}

for _, res := range list.Items {
if err = r.Destroy(ctx, res.Metadata()); err != nil {
return err
}
}

return nil
return nil
},
},
)
}
@@ -16,17 +16,18 @@ import (
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
etcdctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/etcd"
"github.com/siderolabs/talos/pkg/machinery/config/container"
"github.com/siderolabs/talos/pkg/machinery/config/machine"
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
"github.com/siderolabs/talos/pkg/machinery/resources/config"
"github.com/siderolabs/talos/pkg/machinery/resources/etcd"
)

func TestConfigSuite(t *testing.T) {
t.Parallel()

suite.Run(t, &ConfigSuite{
DefaultSuite: ctest.DefaultSuite{
AfterSetup: func(suite *ctest.DefaultSuite) {
suite.Require().NoError(suite.Runtime().RegisterController(&etcdctrl.ConfigController{}))
suite.Require().NoError(suite.Runtime().RegisterController(etcdctrl.NewConfigController()))
},
},
})

@@ -37,10 +38,6 @@ type ConfigSuite struct {
}

func (suite *ConfigSuite) TestReconcile() {
machineType := config.NewMachineType()
machineType.SetMachineType(machine.TypeControlPlane)
suite.Require().NoError(suite.State().Create(suite.Ctx(), machineType))

for _, tt := range []struct {
name string
etcdConfig *v1alpha1.EtcdConfig

@@ -154,6 +151,7 @@ func (suite *ConfigSuite) TestReconcile() {
EtcdConfig: tt.etcdConfig,
},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
MachineNetwork: &v1alpha1.NetworkConfig{
NetworkInterfaces: tt.networkConfig,
},

@@ -21,6 +21,8 @@ import (
)

func TestMemberSuite(t *testing.T) {
t.Parallel()

ctrl := &etcdctrl.MemberController{}

suite.Run(t, &MemberSuite{

@@ -22,6 +22,8 @@ import (
)

func TestSpecSuite(t *testing.T) {
t.Parallel()

suite.Run(t, &SpecSuite{
DefaultSuite: ctest.DefaultSuite{
AfterSetup: func(suite *ctest.DefaultSuite) {
@@ -14,6 +14,7 @@ import (

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/gen/slices"
"github.com/siderolabs/go-pointer"

@@ -88,7 +89,7 @@ func (ctrl *CRIRegistryConfigController) Run(ctx context.Context, r controller.R
case <-r.EventCh():
}

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}

@@ -98,13 +99,13 @@ func (ctrl *CRIRegistryConfigController) Run(ctx context.Context, r controller.R
criHosts *containerd.HostsConfig
)

if cfg != nil {
criRegistryContents, err = containerd.GenerateCRIConfig(cfg.(*config.MachineConfig).Config().Machine().Registries())
if cfg != nil && cfg.Config().Machine() != nil {
criRegistryContents, err = containerd.GenerateCRIConfig(cfg.Config().Machine().Registries())
if err != nil {
return err
}

criHosts, err = containerd.GenerateHosts(cfg.(*config.MachineConfig).Config().Machine().Registries(), basePath)
criHosts, err = containerd.GenerateHosts(cfg.Config().Machine().Registries(), basePath)
if err != nil {
return err
}
@@ -11,6 +11,7 @@ import (

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/gen/slices"
"github.com/siderolabs/go-pointer"

@@ -62,15 +63,15 @@ func (ctrl *AddressFilterController) Run(ctx context.Context, r controller.Runti
case <-r.EventCh():
}

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}

touchedIDs := make(map[resource.ID]struct{})
r.StartTrackingOutputs()

if cfg != nil {
cfgProvider := cfg.(*config.MachineConfig).Config()
if cfg != nil && cfg.Config().Cluster() != nil {
cfgProvider := cfg.Config()

var podCIDRs, serviceCIDRs []netip.Prefix

@@ -106,8 +107,6 @@ func (ctrl *AddressFilterController) Run(ctx context.Context, r controller.Runti
return fmt.Errorf("error updating output resource: %w", err)
}

touchedIDs[k8s.NodeAddressFilterNoK8s] = struct{}{}

if err = r.Modify(ctx, network.NewNodeAddressFilter(network.NamespaceName, k8s.NodeAddressFilterOnlyK8s), func(r resource.Resource) error {
spec := r.(*network.NodeAddressFilter).TypedSpec()

@@ -117,28 +116,10 @@ func (ctrl *AddressFilterController) Run(ctx context.Context, r controller.Runti
}); err != nil {
return fmt.Errorf("error updating output resource: %w", err)
}

touchedIDs[k8s.NodeAddressFilterOnlyK8s] = struct{}{}
}

// list keys for cleanup
list, err := r.List(ctx, resource.NewMetadata(network.NamespaceName, network.NodeAddressFilterType, "", resource.VersionUndefined))
if err != nil {
return fmt.Errorf("error listing resources: %w", err)
if err = safe.CleanupOutputs[*network.NodeAddressFilter](ctx, r); err != nil {
return err
}

for _, res := range list.Items {
if res.Metadata().Owner() != ctrl.Name() {
continue
}

if _, ok := touchedIDs[res.Metadata().ID()]; !ok {
if err = r.Destroy(ctx, res.Metadata()); err != nil {
return fmt.Errorf("error cleaning up specs: %w", err)
}
}
}

r.ResetRestartBackoff()
}
}
@ -10,11 +10,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/safe"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/cosi-project/runtime/pkg/controller/generic"
|
||||
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
|
||||
"github.com/siderolabs/gen/optional"
|
||||
"github.com/siderolabs/gen/slices"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"go.uber.org/zap"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
@ -27,123 +26,309 @@ import (
|
||||
"github.com/siderolabs/talos/pkg/machinery/resources/k8s"
|
||||
)
|
||||
|
||||
// ControlPlaneController manages Kubernetes control plane resources based on configuration.
|
||||
type ControlPlaneController struct{}
|
||||
|
||||
// Name implements controller.Controller interface.
|
||||
func (ctrl *ControlPlaneController) Name() string {
|
||||
return "k8s.ControlPlaneController"
|
||||
}
|
||||
|
||||
// Inputs implements controller.Controller interface.
|
||||
func (ctrl *ControlPlaneController) Inputs() []controller.Input {
|
||||
return []controller.Input{
|
||||
{
|
||||
Namespace: config.NamespaceName,
|
||||
Type: config.MachineConfigType,
|
||||
ID: pointer.To(config.V1Alpha1ID),
|
||||
Kind: controller.InputWeak,
|
||||
},
|
||||
{
|
||||
Namespace: config.NamespaceName,
|
||||
Type: config.MachineTypeType,
|
||||
ID: pointer.To(config.MachineTypeID),
|
||||
Kind: controller.InputWeak,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Outputs implements controller.Controller interface.
|
||||
func (ctrl *ControlPlaneController) Outputs() []controller.Output {
|
||||
return []controller.Output{
|
||||
{
|
||||
Type: k8s.AdmissionControlConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
{
|
||||
Type: k8s.AuditPolicyConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
{
|
||||
Type: k8s.APIServerConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
{
|
||||
Type: k8s.ControllerManagerConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
{
|
||||
Type: k8s.ExtraManifestsConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
{
|
||||
Type: k8s.BootstrapManifestsConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
{
|
||||
Type: k8s.SchedulerConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements controller.Controller interface.
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func (ctrl *ControlPlaneController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-r.EventCh():
|
||||
// controlplaneMapFunc is a shared "map" func for transform controller which guards on:
|
||||
// * machine config is there
|
||||
// * it has cluster & machine parts
|
||||
// * machine is controlplane one.
|
||||
func controlplaneMapFunc[Output generic.ResourceWithRD](output Output) func(cfg *config.MachineConfig) optional.Optional[Output] {
|
||||
return func(cfg *config.MachineConfig) optional.Optional[Output] {
|
||||
if cfg.Metadata().ID() != config.V1Alpha1ID {
|
||||
return optional.None[Output]()
|
||||
}
|
||||
|
||||
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
|
||||
if err != nil {
|
||||
if state.IsNotFoundError(err) {
|
||||
if err = ctrl.teardownAll(ctx, r); err != nil {
|
||||
return fmt.Errorf("error destroying resources: %w", err)
|
||||
if cfg.Config().Cluster() == nil || cfg.Config().Machine() == nil {
|
||||
return optional.None[Output]()
|
||||
}
|
||||
|
||||
if !cfg.Config().Machine().Type().IsControlPlane() {
|
||||
return optional.None[Output]()
|
||||
}
|
||||
|
||||
return optional.Some(output)
|
||||
}
|
||||
}
|
||||
|
||||
// ControlPlaneAdmissionControlController manages k8s.AdmissionControlConfig based on configuration.
|
||||
type ControlPlaneAdmissionControlController = transform.Controller[*config.MachineConfig, *k8s.AdmissionControlConfig]
|
||||
|
||||
// NewControlPlaneAdmissionControlController instanciates the controller.
|
||||
func NewControlPlaneAdmissionControlController() *ControlPlaneAdmissionControlController {
|
||||
return transform.NewController(
|
||||
transform.Settings[*config.MachineConfig, *k8s.AdmissionControlConfig]{
|
||||
Name: "k8s.ControlPlaneAdmissionControlController",
|
||||
MapMetadataOptionalFunc: controlplaneMapFunc(k8s.NewAdmissionControlConfig()),
|
||||
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.AdmissionControlConfig) error {
|
||||
cfgProvider := machineConfig.Config()
|
||||
|
||||
res.TypedSpec().Config = nil
|
||||
|
||||
for _, cfg := range cfgProvider.Cluster().APIServer().AdmissionControl() {
|
||||
res.TypedSpec().Config = append(res.TypedSpec().Config,
|
||||
k8s.AdmissionPluginSpec{
|
||||
Name: cfg.Name(),
|
||||
Configuration: cfg.Configuration(),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
// ControlPlaneAuditPolicyController manages k8s.AuditPolicyConfig based on configuration.
|
||||
type ControlPlaneAuditPolicyController = transform.Controller[*config.MachineConfig, *k8s.AuditPolicyConfig]
|
||||
|
||||
machineType, err := safe.ReaderGet[*config.MachineType](ctx, r, resource.NewMetadata(config.NamespaceName, config.MachineTypeType, config.MachineTypeID, resource.VersionUndefined))
|
||||
if err != nil {
|
||||
if state.IsNotFoundError(err) {
|
||||
continue
|
||||
}
|
||||
// NewControlPlaneAuditPolicyController instanciates the controller.
|
||||
func NewControlPlaneAuditPolicyController() *ControlPlaneAuditPolicyController {
|
||||
return transform.NewController(
|
||||
transform.Settings[*config.MachineConfig, *k8s.AuditPolicyConfig]{
|
||||
Name: "k8s.ControlPlaneAuditPolicyController",
|
||||
MapMetadataOptionalFunc: controlplaneMapFunc(k8s.NewAuditPolicyConfig()),
|
||||
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.AuditPolicyConfig) error {
|
||||
cfgProvider := machineConfig.Config()
|
||||
|
||||
return fmt.Errorf("error getting machine type: %w", err)
|
||||
}
|
||||
res.TypedSpec().Config = cfgProvider.Cluster().APIServer().AuditPolicy()
|
||||
|
||||
if !machineType.MachineType().IsControlPlane() {
|
||||
if err = ctrl.teardownAll(ctx, r); err != nil {
|
||||
return fmt.Errorf("error destroying resources: %w", err)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
// ControlPlaneAPIServerController manages k8s.APIServerConfig based on configuration.
type ControlPlaneAPIServerController = transform.Controller[*config.MachineConfig, *k8s.APIServerConfig]

for _, f := range []func(context.Context, controller.Runtime, *zap.Logger, talosconfig.Config) error{
ctrl.manageAPIServerConfig,
ctrl.manageAdmissionControlConfig,
ctrl.manageAuditPolicyConfig,
ctrl.manageControllerManagerConfig,
ctrl.manageSchedulerConfig,
ctrl.manageManifestsConfig,
ctrl.manageExtraManifestsConfig,
} {
if err = f(ctx, r, logger, cfg.Config()); err != nil {
return fmt.Errorf("error updating objects: %w", err)
}
}
// NewControlPlaneAPIServerController instanciates the controller.
func NewControlPlaneAPIServerController() *ControlPlaneAPIServerController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.APIServerConfig]{
Name: "k8s.ControlPlaneAPIServerController",
MapMetadataOptionalFunc: controlplaneMapFunc(k8s.NewAPIServerConfig()),
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.APIServerConfig) error {
cfgProvider := machineConfig.Config()

r.ResetRestartBackoff()
}
var cloudProvider string
if cfgProvider.Cluster().ExternalCloudProvider().Enabled() {
cloudProvider = "external" //nolint:goconst
}

advertisedAddress := "$(POD_IP)"
if cfgProvider.Machine().Kubelet().SkipNodeRegistration() {
advertisedAddress = ""
}

*res.TypedSpec() = k8s.APIServerConfigSpec{
Image: cfgProvider.Cluster().APIServer().Image(),
CloudProvider: cloudProvider,
ControlPlaneEndpoint: cfgProvider.Cluster().Endpoint().String(),
EtcdServers: []string{fmt.Sprintf("https://%s", nethelpers.JoinHostPort("localhost", constants.EtcdClientPort))},
LocalPort: cfgProvider.Cluster().LocalAPIServerPort(),
ServiceCIDRs: cfgProvider.Cluster().Network().ServiceCIDRs(),
ExtraArgs: cfgProvider.Cluster().APIServer().ExtraArgs(),
ExtraVolumes: convertVolumes(cfgProvider.Cluster().APIServer().ExtraVolumes()),
EnvironmentVariables: cfgProvider.Cluster().APIServer().Env(),
PodSecurityPolicyEnabled: !cfgProvider.Cluster().APIServer().DisablePodSecurityPolicy(),
AdvertisedAddress: advertisedAddress,
Resources: convertResources(cfgProvider.Cluster().APIServer().Resources()),
}

return nil
},
},
)
}
// ControlPlaneControllerManagerController manages k8s.ControllerManagerConfig based on configuration.
type ControlPlaneControllerManagerController = transform.Controller[*config.MachineConfig, *k8s.ControllerManagerConfig]

// NewControlPlaneControllerManagerController instanciates the controller.
func NewControlPlaneControllerManagerController() *ControlPlaneControllerManagerController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.ControllerManagerConfig]{
Name: "k8s.ControlPlaneControllerManagerController",
MapMetadataOptionalFunc: controlplaneMapFunc(k8s.NewControllerManagerConfig()),
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.ControllerManagerConfig) error {
cfgProvider := machineConfig.Config()

var cloudProvider string

if cfgProvider.Cluster().ExternalCloudProvider().Enabled() {
cloudProvider = "external"
}

*res.TypedSpec() = k8s.ControllerManagerConfigSpec{
Enabled: !cfgProvider.Machine().Controlplane().ControllerManager().Disabled(),
Image: cfgProvider.Cluster().ControllerManager().Image(),
CloudProvider: cloudProvider,
PodCIDRs: cfgProvider.Cluster().Network().PodCIDRs(),
ServiceCIDRs: cfgProvider.Cluster().Network().ServiceCIDRs(),
ExtraArgs: cfgProvider.Cluster().ControllerManager().ExtraArgs(),
ExtraVolumes: convertVolumes(cfgProvider.Cluster().ControllerManager().ExtraVolumes()),
EnvironmentVariables: cfgProvider.Cluster().ControllerManager().Env(),
}

return nil
},
},
)
}
// ControlPlaneSchedulerController manages k8s.SchedulerConfig based on configuration.
type ControlPlaneSchedulerController = transform.Controller[*config.MachineConfig, *k8s.SchedulerConfig]

// NewControlPlaneSchedulerController instanciates the controller.
func NewControlPlaneSchedulerController() *ControlPlaneSchedulerController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.SchedulerConfig]{
Name: "k8s.ControlPlaneSchedulerController",
MapMetadataOptionalFunc: controlplaneMapFunc(k8s.NewSchedulerConfig()),
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.SchedulerConfig) error {
cfgProvider := machineConfig.Config()

*res.TypedSpec() = k8s.SchedulerConfigSpec{
Enabled: !cfgProvider.Machine().Controlplane().Scheduler().Disabled(),
Image: cfgProvider.Cluster().Scheduler().Image(),
ExtraArgs: cfgProvider.Cluster().Scheduler().ExtraArgs(),
ExtraVolumes: convertVolumes(cfgProvider.Cluster().Scheduler().ExtraVolumes()),
EnvironmentVariables: cfgProvider.Cluster().Scheduler().Env(),
}

return nil
},
},
)
}
// ControlPlaneBootstrapManifestsController manages k8s.BootstrapManifestsConfig based on configuration.
type ControlPlaneBootstrapManifestsController = transform.Controller[*config.MachineConfig, *k8s.BootstrapManifestsConfig]

// NewControlPlaneBootstrapManifestsController instanciates the controller.
func NewControlPlaneBootstrapManifestsController() *ControlPlaneBootstrapManifestsController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.BootstrapManifestsConfig]{
Name: "k8s.ControlPlaneBootstrapManifestsController",
MapMetadataOptionalFunc: controlplaneMapFunc(k8s.NewBootstrapManifestsConfig()),
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.BootstrapManifestsConfig) error {
cfgProvider := machineConfig.Config()

dnsServiceIPs, err := cfgProvider.Cluster().Network().DNSServiceIPs()
if err != nil {
return fmt.Errorf("error calculating DNS service IPs: %w", err)
}

dnsServiceIP := ""
dnsServiceIPv6 := ""

for _, ip := range dnsServiceIPs {
if dnsServiceIP == "" && ip.Is4() {
dnsServiceIP = ip.String()
}

if dnsServiceIPv6 == "" && ip.Is6() {
dnsServiceIPv6 = ip.String()
}
}

images := images.List(cfgProvider)

proxyArgs, err := getProxyArgs(cfgProvider)
if err != nil {
return err
}

var server string
if cfgProvider.Machine().Features().KubePrism().Enabled() {
server = fmt.Sprintf("https://localhost:%d", cfgProvider.Machine().Features().KubePrism().Port())
} else {
server = cfgProvider.Cluster().Endpoint().String()
}

*res.TypedSpec() = k8s.BootstrapManifestsConfigSpec{
Server: server,
ClusterDomain: cfgProvider.Cluster().Network().DNSDomain(),

PodCIDRs: cfgProvider.Cluster().Network().PodCIDRs(),

ProxyEnabled: cfgProvider.Cluster().Proxy().Enabled(),
ProxyImage: cfgProvider.Cluster().Proxy().Image(),
ProxyArgs: proxyArgs,

CoreDNSEnabled: cfgProvider.Cluster().CoreDNS().Enabled(),
CoreDNSImage: cfgProvider.Cluster().CoreDNS().Image(),

DNSServiceIP: dnsServiceIP,
DNSServiceIPv6: dnsServiceIPv6,

FlannelEnabled: cfgProvider.Cluster().Network().CNI().Name() == constants.FlannelCNI,
FlannelImage: images.Flannel,
FlannelCNIImage: images.FlannelCNI,

PodSecurityPolicyEnabled: !cfgProvider.Cluster().APIServer().DisablePodSecurityPolicy(),

TalosAPIServiceEnabled: cfgProvider.Machine().Features().KubernetesTalosAPIAccess().Enabled(),
}

return nil
},
},
)
}
// ControlPlaneExtraManifestsController manages k8s.ExtraManifestsConfig based on configuration.
type ControlPlaneExtraManifestsController = transform.Controller[*config.MachineConfig, *k8s.ExtraManifestsConfig]

// NewControlPlaneExtraManifestsController instanciates the controller.
func NewControlPlaneExtraManifestsController() *ControlPlaneExtraManifestsController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.ExtraManifestsConfig]{
Name: "k8s.ControlPlaneExtraManifestsController",
MapMetadataOptionalFunc: controlplaneMapFunc(k8s.NewExtraManifestsConfig()),
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.ExtraManifestsConfig) error {
cfgProvider := machineConfig.Config()

spec := k8s.ExtraManifestsConfigSpec{}

for _, url := range cfgProvider.Cluster().Network().CNI().URLs() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: url,
URL: url,
Priority: "05", // push CNI to the top
})
}

for _, url := range cfgProvider.Cluster().ExternalCloudProvider().ManifestURLs() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: url,
URL: url,
Priority: "30", // after default manifests
})
}

for _, url := range cfgProvider.Cluster().ExtraManifestURLs() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: url,
URL: url,
Priority: "99", // make sure extra manifests come last, when PSP is already created
ExtraHeaders: cfgProvider.Cluster().ExtraManifestHeaderMap(),
})
}

for _, manifest := range cfgProvider.Cluster().InlineManifests() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: manifest.Name(),
Priority: "99", // make sure extra manifests come last, when PSP is already created
InlineManifest: manifest.Contents(),
})
}

*res.TypedSpec() = spec

return nil
},
},
)
}
func convertVolumes(volumes []talosconfig.VolumeMount) []k8s.ExtraVolume {
@@ -184,228 +369,6 @@ func convertResources(resources talosconfig.Resources) k8s.Resources {
}
}
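The struct-based ControlPlaneController methods below are what the transform-based constructors above replace; a rough registration sketch (assuming a COSI controller runtime value named rt, mirroring the test suite registration later in this change):

for _, c := range []controller.Controller{
	k8sctrl.NewControlPlaneAPIServerController(),
	k8sctrl.NewControlPlaneAdmissionControlController(),
	k8sctrl.NewControlPlaneAuditPolicyController(),
	k8sctrl.NewControlPlaneBootstrapManifestsController(),
	k8sctrl.NewControlPlaneControllerManagerController(),
	k8sctrl.NewControlPlaneExtraManifestsController(),
	k8sctrl.NewControlPlaneSchedulerController(),
} {
	// each transform-based controller is registered via its constructor
	if err := rt.RegisterController(c); err != nil {
		return fmt.Errorf("error registering %s: %w", c.Name(), err)
	}
}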
func (ctrl *ControlPlaneController) manageAPIServerConfig(ctx context.Context, r controller.Runtime, logger *zap.Logger, cfgProvider talosconfig.Config) error {
var cloudProvider string
if cfgProvider.Cluster().ExternalCloudProvider().Enabled() {
cloudProvider = "external" //nolint:goconst
}

advertisedAddress := "$(POD_IP)"
if cfgProvider.Machine().Kubelet().SkipNodeRegistration() {
advertisedAddress = ""
}

return safe.WriterModify(ctx, r, k8s.NewAPIServerConfig(), func(r *k8s.APIServerConfig) error {
*r.TypedSpec() = k8s.APIServerConfigSpec{
Image: cfgProvider.Cluster().APIServer().Image(),
CloudProvider: cloudProvider,
ControlPlaneEndpoint: cfgProvider.Cluster().Endpoint().String(),
EtcdServers: []string{fmt.Sprintf("https://%s", nethelpers.JoinHostPort("localhost", constants.EtcdClientPort))},
LocalPort: cfgProvider.Cluster().LocalAPIServerPort(),
ServiceCIDRs: cfgProvider.Cluster().Network().ServiceCIDRs(),
ExtraArgs: cfgProvider.Cluster().APIServer().ExtraArgs(),
ExtraVolumes: convertVolumes(cfgProvider.Cluster().APIServer().ExtraVolumes()),
EnvironmentVariables: cfgProvider.Cluster().APIServer().Env(),
PodSecurityPolicyEnabled: !cfgProvider.Cluster().APIServer().DisablePodSecurityPolicy(),
AdvertisedAddress: advertisedAddress,
Resources: convertResources(cfgProvider.Cluster().APIServer().Resources()),
}

return nil
})
}

func (ctrl *ControlPlaneController) manageAdmissionControlConfig(ctx context.Context, r controller.Runtime, _ *zap.Logger, cfgProvider talosconfig.Config) error {
spec := k8s.AdmissionControlConfigSpec{}

for _, cfg := range cfgProvider.Cluster().APIServer().AdmissionControl() {
spec.Config = append(spec.Config,
k8s.AdmissionPluginSpec{
Name: cfg.Name(),
Configuration: cfg.Configuration(),
},
)
}

return safe.WriterModify(ctx, r, k8s.NewAdmissionControlConfig(), func(r *k8s.AdmissionControlConfig) error {
*r.TypedSpec() = spec

return nil
})
}

func (ctrl *ControlPlaneController) manageAuditPolicyConfig(ctx context.Context, r controller.Runtime, _ *zap.Logger, cfgProvider talosconfig.Config) error {
spec := k8s.AuditPolicyConfigSpec{}

spec.Config = cfgProvider.Cluster().APIServer().AuditPolicy()

return safe.WriterModify(ctx, r, k8s.NewAuditPolicyConfig(), func(r *k8s.AuditPolicyConfig) error {
*r.TypedSpec() = spec

return nil
})
}

func (ctrl *ControlPlaneController) manageControllerManagerConfig(ctx context.Context, r controller.Runtime, _ *zap.Logger, cfgProvider talosconfig.Config) error {
var cloudProvider string
if cfgProvider.Cluster().ExternalCloudProvider().Enabled() {
cloudProvider = "external"
}

return safe.WriterModify(ctx, r, k8s.NewControllerManagerConfig(), func(r *k8s.ControllerManagerConfig) error {
*r.TypedSpec() = k8s.ControllerManagerConfigSpec{
Enabled: !cfgProvider.Machine().Controlplane().ControllerManager().Disabled(),
Image: cfgProvider.Cluster().ControllerManager().Image(),
CloudProvider: cloudProvider,
PodCIDRs: cfgProvider.Cluster().Network().PodCIDRs(),
ServiceCIDRs: cfgProvider.Cluster().Network().ServiceCIDRs(),
ExtraArgs: cfgProvider.Cluster().ControllerManager().ExtraArgs(),
ExtraVolumes: convertVolumes(cfgProvider.Cluster().ControllerManager().ExtraVolumes()),
EnvironmentVariables: cfgProvider.Cluster().ControllerManager().Env(),
}

return nil
})
}

func (ctrl *ControlPlaneController) manageSchedulerConfig(ctx context.Context, r controller.Runtime, _ *zap.Logger, cfgProvider talosconfig.Config) error {
return safe.WriterModify(ctx, r, k8s.NewSchedulerConfig(), func(r *k8s.SchedulerConfig) error {
*r.TypedSpec() = k8s.SchedulerConfigSpec{
Enabled: !cfgProvider.Machine().Controlplane().Scheduler().Disabled(),
Image: cfgProvider.Cluster().Scheduler().Image(),
ExtraArgs: cfgProvider.Cluster().Scheduler().ExtraArgs(),
ExtraVolumes: convertVolumes(cfgProvider.Cluster().Scheduler().ExtraVolumes()),
EnvironmentVariables: cfgProvider.Cluster().Scheduler().Env(),
}

return nil
})
}

func (ctrl *ControlPlaneController) manageManifestsConfig(ctx context.Context, r controller.Runtime, _ *zap.Logger, cfgProvider talosconfig.Config) error {
dnsServiceIPs, err := cfgProvider.Cluster().Network().DNSServiceIPs()
if err != nil {
return fmt.Errorf("error calculating DNS service IPs: %w", err)
}

dnsServiceIP := ""
dnsServiceIPv6 := ""

for _, ip := range dnsServiceIPs {
if dnsServiceIP == "" && ip.Is4() {
dnsServiceIP = ip.String()
}

if dnsServiceIPv6 == "" && ip.Is6() {
dnsServiceIPv6 = ip.String()
}
}

return safe.WriterModify(ctx, r, k8s.NewBootstrapManifestsConfig(), func(r *k8s.BootstrapManifestsConfig) error {
images := images.List(cfgProvider)

proxyArgs, err := getProxyArgs(cfgProvider)
if err != nil {
return err
}

var server string
if cfgProvider.Machine().Features().KubePrism().Enabled() {
server = fmt.Sprintf("https://localhost:%d", cfgProvider.Machine().Features().KubePrism().Port())
} else {
server = cfgProvider.Cluster().Endpoint().String()
}

*r.TypedSpec() = k8s.BootstrapManifestsConfigSpec{
Server: server,
ClusterDomain: cfgProvider.Cluster().Network().DNSDomain(),

PodCIDRs: cfgProvider.Cluster().Network().PodCIDRs(),

ProxyEnabled: cfgProvider.Cluster().Proxy().Enabled(),
ProxyImage: cfgProvider.Cluster().Proxy().Image(),
ProxyArgs: proxyArgs,

CoreDNSEnabled: cfgProvider.Cluster().CoreDNS().Enabled(),
CoreDNSImage: cfgProvider.Cluster().CoreDNS().Image(),

DNSServiceIP: dnsServiceIP,
DNSServiceIPv6: dnsServiceIPv6,

FlannelEnabled: cfgProvider.Cluster().Network().CNI().Name() == constants.FlannelCNI,
FlannelImage: images.Flannel,
FlannelCNIImage: images.FlannelCNI,

PodSecurityPolicyEnabled: !cfgProvider.Cluster().APIServer().DisablePodSecurityPolicy(),

TalosAPIServiceEnabled: cfgProvider.Machine().Features().KubernetesTalosAPIAccess().Enabled(),
}

return nil
})
}

func (ctrl *ControlPlaneController) manageExtraManifestsConfig(ctx context.Context, r controller.Runtime, _ *zap.Logger, cfgProvider talosconfig.Config) error {
return safe.WriterModify(ctx, r, k8s.NewExtraManifestsConfig(), func(r *k8s.ExtraManifestsConfig) error {
spec := k8s.ExtraManifestsConfigSpec{}

for _, url := range cfgProvider.Cluster().Network().CNI().URLs() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: url,
URL: url,
Priority: "05", // push CNI to the top
})
}

for _, url := range cfgProvider.Cluster().ExternalCloudProvider().ManifestURLs() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: url,
URL: url,
Priority: "30", // after default manifests
})
}

for _, url := range cfgProvider.Cluster().ExtraManifestURLs() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: url,
URL: url,
Priority: "99", // make sure extra manifests come last, when PSP is already created
ExtraHeaders: cfgProvider.Cluster().ExtraManifestHeaderMap(),
})
}

for _, manifest := range cfgProvider.Cluster().InlineManifests() {
spec.ExtraManifests = append(spec.ExtraManifests, k8s.ExtraManifest{
Name: manifest.Name(),
Priority: "99", // make sure extra manifests come last, when PSP is already created
InlineManifest: manifest.Contents(),
})
}

*r.TypedSpec() = spec

return nil
})
}

func (ctrl *ControlPlaneController) teardownAll(ctx context.Context, r controller.Runtime) error {
for _, md := range []*resource.Metadata{
k8s.NewAPIServerConfig().Metadata(),
k8s.NewAdmissionControlConfig().Metadata(),
k8s.NewAuditPolicyConfig().Metadata(),
k8s.NewControllerManagerConfig().Metadata(),
k8s.NewSchedulerConfig().Metadata(),
k8s.NewBootstrapManifestsConfig().Metadata(),
k8s.NewExtraManifestsConfig().Metadata(),
} {
if err := r.Destroy(ctx, md); err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error destroying resources: %w", err)
}
}

return nil
}

func getProxyArgs(cfgProvider talosconfig.Config) ([]string, error) {
clusterCidr := strings.Join(cfgProvider.Cluster().Network().PodCIDRs(), ",")
@@ -13,7 +13,6 @@ import (

"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/resource/rtestutils"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/siderolabs/go-pointer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
@@ -21,7 +20,6 @@ import (
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
k8sctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/k8s"
"github.com/siderolabs/talos/pkg/machinery/config/container"
"github.com/siderolabs/talos/pkg/machinery/config/machine"
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
"github.com/siderolabs/talos/pkg/machinery/constants"
"github.com/siderolabs/talos/pkg/machinery/resources/config"
@@ -35,10 +33,6 @@ type K8sControlPlaneSuite struct {
// setupMachine creates a machine with given configuration, waits for it to become ready,
// and returns API server's spec.
func (suite *K8sControlPlaneSuite) setupMachine(cfg *config.MachineConfig) {
machineType := config.NewMachineType()
machineType.SetMachineType(machine.TypeControlPlane)

suite.Require().NoError(suite.State().Create(suite.Ctx(), machineType))
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))

rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{k8s.AdmissionControlConfigID}, func(*k8s.AdmissionControlConfig, *assert.Assertions) {})
@@ -58,7 +52,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileDefaults() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -102,7 +98,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileTransitionWorker() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -116,11 +114,8 @@ func (suite *K8sControlPlaneSuite) TestReconcileTransitionWorker() {

suite.setupMachine(cfg)

machineType, err := safe.StateGet[*config.MachineType](suite.Ctx(), suite.State(), resource.NewMetadata(config.NamespaceName, config.MachineTypeType, config.MachineTypeID, resource.VersionUndefined))
suite.Require().NoError(err)

machineType.SetMachineType(machine.TypeWorker)
suite.Require().NoError(suite.State().Update(suite.Ctx(), machineType))
cfg.Container().RawV1Alpha1().MachineConfig.MachineType = "worker"
suite.Require().NoError(suite.State().Update(suite.Ctx(), cfg))

rtestutils.AssertNoResource[*k8s.AdmissionControlConfig](suite.Ctx(), suite.T(), suite.State(), k8s.AdmissionControlConfigID)
rtestutils.AssertNoResource[*k8s.AuditPolicyConfig](suite.Ctx(), suite.T(), suite.State(), k8s.AuditPolicyConfigID)
@@ -139,7 +134,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileIPv6() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -173,7 +170,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileDualStack() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -207,7 +206,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileExtraVolumes() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -265,7 +266,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileEnvironment() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -305,7 +308,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileResources() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -363,7 +368,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileExternalCloudProvider() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -427,7 +434,9 @@ func (suite *K8sControlPlaneSuite) TestReconcileInlineManifests() {
container.NewV1Alpha1(
&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -478,7 +487,13 @@ func TestK8sControlPlaneSuite(t *testing.T) {
DefaultSuite: ctest.DefaultSuite{
Timeout: 10 * time.Second,
AfterSetup: func(suite *ctest.DefaultSuite) {
suite.Require().NoError(suite.Runtime().RegisterController(&k8sctrl.ControlPlaneController{}))
suite.Require().NoError(suite.Runtime().RegisterController(k8sctrl.NewControlPlaneAPIServerController()))
suite.Require().NoError(suite.Runtime().RegisterController(k8sctrl.NewControlPlaneAdmissionControlController()))
suite.Require().NoError(suite.Runtime().RegisterController(k8sctrl.NewControlPlaneAuditPolicyController()))
suite.Require().NoError(suite.Runtime().RegisterController(k8sctrl.NewControlPlaneBootstrapManifestsController()))
suite.Require().NoError(suite.Runtime().RegisterController(k8sctrl.NewControlPlaneControllerManagerController()))
suite.Require().NoError(suite.Runtime().RegisterController(k8sctrl.NewControlPlaneExtraManifestsController()))
suite.Require().NoError(suite.Runtime().RegisterController(k8sctrl.NewControlPlaneSchedulerController()))
},
},
})
@@ -10,142 +10,85 @@ import (
"net/netip"

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/gen/optional"
"github.com/siderolabs/gen/slices"
"github.com/siderolabs/gen/xerrors"
"github.com/siderolabs/go-pointer"
"go.uber.org/zap"

talosconfig "github.com/siderolabs/talos/pkg/machinery/config"
"github.com/siderolabs/talos/pkg/machinery/resources/config"
"github.com/siderolabs/talos/pkg/machinery/resources/k8s"
)

// KubeletConfigController renders manifests based on templates and config/secrets.
type KubeletConfigController struct{}
// KubeletConfigController renders kubelet configuration based on machine config.
type KubeletConfigController = transform.Controller[*config.MachineConfig, *k8s.KubeletConfig]

// Name implements controller.Controller interface.
func (ctrl *KubeletConfigController) Name() string {
return "k8s.KubeletConfigController"
}
// NewKubeletConfigController instanciates the config controller.
func NewKubeletConfigController() *KubeletConfigController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.KubeletConfig]{
Name: "k8s.KubeletConfigController",
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*k8s.KubeletConfig] {
if cfg.Metadata().ID() != config.V1Alpha1ID {
return optional.None[*k8s.KubeletConfig]()
}

// Inputs implements controller.Controller interface.
func (ctrl *KubeletConfigController) Inputs() []controller.Input {
return []controller.Input{
{
Namespace: config.NamespaceName,
Type: config.MachineConfigType,
ID: pointer.To(config.V1Alpha1ID),
Kind: controller.InputWeak,
if cfg.Config().Cluster() == nil || cfg.Config().Machine() == nil {
return optional.None[*k8s.KubeletConfig]()
}

return optional.Some(k8s.NewKubeletConfig(k8s.NamespaceName, k8s.KubeletID))
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *k8s.KubeletConfig) error {
staticPodURL, err := safe.ReaderGetByID[*k8s.StaticPodServerStatus](ctx, r, k8s.StaticPodServerStatusResourceID)
if err != nil {
if state.IsNotFoundError(err) {
return xerrors.NewTaggedf[transform.SkipReconcileTag]("static pod server status resource not found; not creating kubelet config")
}

return err
}

kubeletConfig := res.TypedSpec()
cfgProvider := cfg.Config()

kubeletConfig.Image = cfgProvider.Machine().Kubelet().Image()

kubeletConfig.ClusterDNS = cfgProvider.Machine().Kubelet().ClusterDNS()

if len(kubeletConfig.ClusterDNS) == 0 {
addrs, err := cfgProvider.Cluster().Network().DNSServiceIPs()
if err != nil {
return fmt.Errorf("error building DNS service IPs: %w", err)
}

kubeletConfig.ClusterDNS = slices.Map(addrs, netip.Addr.String)
}

kubeletConfig.ClusterDomain = cfgProvider.Cluster().Network().DNSDomain()
kubeletConfig.ExtraArgs = cfgProvider.Machine().Kubelet().ExtraArgs()
kubeletConfig.ExtraMounts = cfgProvider.Machine().Kubelet().ExtraMounts()
kubeletConfig.ExtraConfig = cfgProvider.Machine().Kubelet().ExtraConfig()
kubeletConfig.CloudProviderExternal = cfgProvider.Cluster().ExternalCloudProvider().Enabled()
kubeletConfig.DefaultRuntimeSeccompEnabled = cfgProvider.Machine().Kubelet().DefaultRuntimeSeccompProfileEnabled()
kubeletConfig.SkipNodeRegistration = cfgProvider.Machine().Kubelet().SkipNodeRegistration()
kubeletConfig.StaticPodListURL = staticPodURL.TypedSpec().URL
kubeletConfig.DisableManifestsDirectory = cfgProvider.Machine().Kubelet().DisableManifestsDirectory()
kubeletConfig.EnableFSQuotaMonitoring = cfgProvider.Machine().Features().DiskQuotaSupportEnabled()

return nil
},
},
{
Namespace: k8s.NamespaceName,
Type: k8s.StaticPodServerStatusType,
ID: pointer.To(k8s.StaticPodServerStatusResourceID),
Kind: controller.InputWeak,
},
}
}

// Outputs implements controller.Controller interface.
func (ctrl *KubeletConfigController) Outputs() []controller.Output {
return []controller.Output{
{
Type: k8s.KubeletConfigType,
Kind: controller.OutputExclusive,
},
}
}

// Run implements controller.Controller interface.
func (ctrl *KubeletConfigController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
select {
case <-ctx.Done():
return nil
case <-r.EventCh():
}

staticPodListURL, err := getStaticPodListURL(ctx, r)
if err != nil {
if state.IsNotFoundError(err) {
logger.Warn("static pod list url is not available yet; not creating kubelet config", zap.Error(err))

continue
}

return fmt.Errorf("error accessing static pod server status resource: %w", err)
}

cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if state.IsNotFoundError(err) {
continue
}

return fmt.Errorf("error getting config: %w", err)
}

cfgProvider := cfg.Config()

if err = r.Modify(
ctx,
k8s.NewKubeletConfig(k8s.NamespaceName, k8s.KubeletID),
modifyKubeletConfig(cfgProvider, staticPodListURL),
); err != nil {
return fmt.Errorf("error modifying KubeletConfig resource: %w", err)
}

r.ResetRestartBackoff()
}
}

func getStaticPodListURL(ctx context.Context, r controller.Runtime) (string, error) {
staticPodURLRes, err := safe.ReaderGet[*k8s.StaticPodServerStatus](
ctx,
r,
resource.NewMetadata(
k8s.NamespaceName,
k8s.StaticPodServerStatusType,
k8s.StaticPodServerStatusResourceID,
resource.VersionUndefined,
))
if err != nil {
return "", err
}

return staticPodURLRes.TypedSpec().URL, nil
}

func modifyKubeletConfig(cfgProvider talosconfig.Config, staticPodListURL string) func(resource.Resource) error {
return func(r resource.Resource) error {
kubeletConfig := r.(*k8s.KubeletConfig).TypedSpec()

kubeletConfig.Image = cfgProvider.Machine().Kubelet().Image()

kubeletConfig.ClusterDNS = cfgProvider.Machine().Kubelet().ClusterDNS()

if len(kubeletConfig.ClusterDNS) == 0 {
addrs, err := cfgProvider.Cluster().Network().DNSServiceIPs()
if err != nil {
return fmt.Errorf("error building DNS service IPs: %w", err)
}

kubeletConfig.ClusterDNS = slices.Map(addrs, netip.Addr.String)
}

kubeletConfig.ClusterDomain = cfgProvider.Cluster().Network().DNSDomain()
kubeletConfig.ExtraArgs = cfgProvider.Machine().Kubelet().ExtraArgs()
kubeletConfig.ExtraMounts = cfgProvider.Machine().Kubelet().ExtraMounts()
kubeletConfig.ExtraConfig = cfgProvider.Machine().Kubelet().ExtraConfig()
kubeletConfig.CloudProviderExternal = cfgProvider.Cluster().ExternalCloudProvider().Enabled()
kubeletConfig.DefaultRuntimeSeccompEnabled = cfgProvider.Machine().Kubelet().DefaultRuntimeSeccompProfileEnabled()
kubeletConfig.SkipNodeRegistration = cfgProvider.Machine().Kubelet().SkipNodeRegistration()
kubeletConfig.StaticPodListURL = staticPodListURL
kubeletConfig.DisableManifestsDirectory = cfgProvider.Machine().Kubelet().DisableManifestsDirectory()
kubeletConfig.EnableFSQuotaMonitoring = cfgProvider.Machine().Features().DiskQuotaSupportEnabled()

return nil
}
transform.WithExtraInputs(
controller.Input{
Namespace: k8s.NamespaceName,
Type: k8s.StaticPodServerStatusType,
ID: pointer.To(k8s.StaticPodServerStatusResourceID),
Kind: controller.InputWeak,
},
),
)
}
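The SkipReconcileTag used above is how a TransformFunc declines to produce output without failing the controller; a short sketch of the pattern (the error text here is illustrative, not from this commit):

dep, err := safe.ReaderGetByID[*k8s.StaticPodServerStatus](ctx, r, k8s.StaticPodServerStatusResourceID)
if err != nil {
	if state.IsNotFoundError(err) {
		// a tagged error makes the transform controller skip this reconcile pass
		return xerrors.NewTaggedf[transform.SkipReconcileTag]("dependency not ready: %w", err)
	}

	return err
}

res.TypedSpec().StaticPodListURL = dep.TypedSpec().URL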
@@ -54,7 +54,7 @@ func (suite *KubeletConfigSuite) SetupTest() {
suite.runtime, err = runtime.NewRuntime(suite.state, logging.Wrap(log.Writer()))
suite.Require().NoError(err)

suite.Require().NoError(suite.runtime.RegisterController(&k8sctrl.KubeletConfigController{}))
suite.Require().NoError(suite.runtime.RegisterController(k8sctrl.NewKubeletConfigController()))

suite.startRuntime()
}
@@ -6,14 +6,14 @@ package k8s

import (
"context"
"fmt"
"strconv"

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/gen/channel"
"github.com/siderolabs/go-pointer"
"github.com/siderolabs/gen/optional"
"github.com/siderolabs/gen/xerrors"
"go.uber.org/zap"

"github.com/siderolabs/talos/pkg/machinery/resources/config"
@@ -21,118 +21,50 @@ import (
)

// KubePrismConfigController creates config for KubePrism.
type KubePrismConfigController struct{}
type KubePrismConfigController = transform.Controller[*config.MachineConfig, *k8s.KubePrismConfig]

// Name implements controller.Controller interface.
func (ctrl *KubePrismConfigController) Name() string {
return "k8s.KubePrismConfigController"
}

// Inputs implements controller.Controller interface.
func (ctrl *KubePrismConfigController) Inputs() []controller.Input {
return []controller.Input{
{
Namespace: k8s.NamespaceName,
Type: k8s.KubePrismEndpointsType,
ID: pointer.To(k8s.KubePrismEndpointsID),
Kind: controller.InputWeak,
},
{
Namespace: config.NamespaceName,
Type: config.MachineConfigType,
ID: pointer.To(config.V1Alpha1ID),
Kind: controller.InputWeak,
},
}
}

// Outputs implements controller.Controller interface.
func (ctrl *KubePrismConfigController) Outputs() []controller.Output {
return []controller.Output{
{
Type: k8s.KubePrismConfigType,
Kind: controller.OutputExclusive,
},
}
}

// Run implements controller.Controller interface.
//
//nolint:gocyclo
func (ctrl *KubePrismConfigController) Run(ctx context.Context, r controller.Runtime, _ *zap.Logger) error {
for {
if _, ok := channel.RecvWithContext(ctx, r.EventCh()); !ok && ctx.Err() != nil {
return nil //nolint:nilerr
}

endpt, err := safe.ReaderGetByID[*k8s.KubePrismEndpoints](ctx, r, k8s.KubePrismEndpointsID)
if err != nil && !state.IsNotFoundError(err) {
return err
}

mc, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil && !state.IsNotFoundError(err) {
return err
}

wroteConfig, err := ctrl.writeConfig(ctx, r, endpt, mc)
if err != nil {
return err
}

// list keys for cleanup
lbCfgList, err := safe.ReaderListAll[*k8s.KubePrismConfig](ctx, r)
if err != nil {
return fmt.Errorf("error listing KubePrism resources: %w", err)
}

for it := safe.IteratorFromList(lbCfgList); it.Next(); {
res := it.Value()

if !wroteConfig || res.Metadata().ID() != k8s.KubePrismConfigID {
if err = r.Destroy(ctx, res.Metadata()); err != nil {
return fmt.Errorf("error cleaning up KubePrism config: %w", err)
// NewKubePrismConfigController instanciates the controller.
func NewKubePrismConfigController() *KubePrismConfigController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.KubePrismConfig]{
Name: "k8s.KubePrismConfigController",
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*k8s.KubePrismConfig] {
if cfg.Metadata().ID() != config.V1Alpha1ID {
return optional.None[*k8s.KubePrismConfig]()
}
}
}

r.ResetRestartBackoff()
}
}
if cfg.Config().Machine() == nil {
return optional.None[*k8s.KubePrismConfig]()
}

func (ctrl *KubePrismConfigController) writeConfig(ctx context.Context, r controller.Runtime, endpt *k8s.KubePrismEndpoints, mc *config.MachineConfig) (bool, error) {
if endpt == nil || mc == nil {
return false, nil
}
if !cfg.Config().Machine().Features().KubePrism().Enabled() {
return optional.None[*k8s.KubePrismConfig]()
}

endpoints := endpt.TypedSpec().Endpoints
if len(endpoints) == 0 {
return false, nil
}
return optional.Some(k8s.NewKubePrismConfig(k8s.NamespaceName, k8s.KubePrismConfigID))
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *k8s.KubePrismConfig) error {
endpt, err := safe.ReaderGetByID[*k8s.KubePrismEndpoints](ctx, r, k8s.KubePrismEndpointsID)
if err != nil {
if state.IsNotFoundError(err) {
return xerrors.NewTaggedf[transform.SkipReconcileTag]("KubePrism endpoints resource not found; not creating KubePrism config")
}

balancerCfg := mc.Config().Machine().Features().KubePrism()
if !balancerCfg.Enabled() {
return false, nil
}
return err
}

err := safe.WriterModify(
ctx,
r,
k8s.NewKubePrismConfig(k8s.NamespaceName, k8s.KubePrismConfigID),
func(res *k8s.KubePrismConfig) error {
spec := res.TypedSpec()
spec.Endpoints = endpoints
spec.Host = "localhost"
spec.Port = balancerCfg.Port()
spec := res.TypedSpec()
spec.Endpoints = endpt.TypedSpec().Endpoints
spec.Host = "localhost"
spec.Port = cfg.Config().Machine().Features().KubePrism().Port()

return nil
return nil
},
},
transform.WithExtraInputs(
safe.Input[*k8s.KubePrismEndpoints](controller.InputWeak),
),
)
if err != nil {
return false, fmt.Errorf("failed to KubePrism balancer config: %w", err)
}

return true, nil
}

func toPort(port string) uint32 {
@@ -106,7 +106,6 @@ func (suite *KubePrismConfigControllerSuite) TestGeneration() {
e.TypedSpec(),
)
})

suite.Require().NoError(suite.State().Destroy(suite.Ctx(), mc.Metadata()))

ctest.AssertNoResource[*k8s.KubePrismConfig](suite, k8s.KubePrismConfigID)
@@ -128,10 +127,6 @@ func (suite *KubePrismConfigControllerSuite) TestGeneration() {
e.TypedSpec(),
)
})

suite.Require().NoError(suite.State().Destroy(suite.Ctx(), endpoints.Metadata()))

ctest.AssertNoResource[*k8s.KubePrismConfig](suite, k8s.KubePrismConfigID)
}

func TestEndpointsBalancerConfigControllerSuite(t *testing.T) {
@@ -140,7 +135,7 @@ func TestEndpointsBalancerConfigControllerSuite(t *testing.T) {
suite.Run(t, &KubePrismConfigControllerSuite{
DefaultSuite: ctest.DefaultSuite{
AfterSetup: func(suite *ctest.DefaultSuite) {
suite.Require().NoError(suite.Runtime().RegisterController(&clusterctrl.KubePrismConfigController{}))
suite.Require().NoError(suite.Runtime().RegisterController(clusterctrl.NewKubePrismConfigController()))
},
},
})
@@ -9,148 +9,80 @@ import (
"fmt"

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/gen/channel"
"github.com/siderolabs/go-pointer"
"github.com/siderolabs/gen/optional"
"go.uber.org/zap"

"github.com/siderolabs/talos/pkg/machinery/config/machine"
"github.com/siderolabs/talos/pkg/machinery/resources/cluster"
"github.com/siderolabs/talos/pkg/machinery/resources/config"
"github.com/siderolabs/talos/pkg/machinery/resources/k8s"
)

// KubePrismEndpointsController creates a list of API server endpoints.
type KubePrismEndpointsController struct{}
type KubePrismEndpointsController = transform.Controller[*config.MachineConfig, *k8s.KubePrismEndpoints]

// Name implements controller.Controller interface.
func (ctrl *KubePrismEndpointsController) Name() string {
return "cluster.KubePrismEndpointsController"
}

// Inputs implements controller.Controller interface.
func (ctrl *KubePrismEndpointsController) Inputs() []controller.Input {
return []controller.Input{
{
Namespace: config.NamespaceName,
Type: config.MachineTypeType,
ID: pointer.To(config.MachineTypeID),
Kind: controller.InputWeak,
},
safe.Input[*cluster.Member](controller.InputWeak),
safe.Input[*config.MachineConfig](controller.InputWeak),
}
}

// Outputs implements controller.Controller interface.
func (ctrl *KubePrismEndpointsController) Outputs() []controller.Output {
return []controller.Output{
{
Type: k8s.KubePrismEndpointsType,
Kind: controller.OutputExclusive,
},
}
}

// Run implements controller.Controller interface.
// NewKubePrismEndpointsController instanciates the controller.
//
//nolint:gocyclo,cyclop
func (ctrl *KubePrismEndpointsController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
if _, ok := channel.RecvWithContext(ctx, r.EventCh()); !ok && ctx.Err() != nil {
return nil //nolint:nilerr
}
//nolint:gocyclo
func NewKubePrismEndpointsController() *KubePrismEndpointsController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.KubePrismEndpoints]{
Name: "k8s.KubePrismEndpointsController",
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*k8s.KubePrismEndpoints] {
if cfg.Metadata().ID() != config.V1Alpha1ID {
return optional.None[*k8s.KubePrismEndpoints]()
}

machineConfig, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if !state.IsNotFoundError(err) {
return fmt.Errorf("error getting machine config: %w", err)
}
if cfg.Config().Cluster() == nil || cfg.Config().Machine() == nil {
return optional.None[*k8s.KubePrismEndpoints]()
}

continue
}
return optional.Some(k8s.NewKubePrismEndpoints(k8s.NamespaceName, k8s.KubePrismEndpointsID))
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, machineConfig *config.MachineConfig, res *k8s.KubePrismEndpoints) error {
members, err := safe.ReaderListAll[*cluster.Member](ctx, r)
if err != nil {
return fmt.Errorf("error listing affiliates: %w", err)
}

machineType, err := safe.ReaderGetByID[*config.MachineType](ctx, r, config.MachineTypeID)
if err != nil {
if !state.IsNotFoundError(err) {
return fmt.Errorf("error getting machine type: %w", err)
}
var endpoints []k8s.KubePrismEndpoint

continue
}

members, err := safe.ReaderListAll[*cluster.Member](ctx, r)
if err != nil {
return fmt.Errorf("error listing affiliates: %w", err)
}

var endpoints []k8s.KubePrismEndpoint

ce := machineConfig.Config().Cluster().Endpoint()
if ce != nil {
endpoints = append(endpoints, k8s.KubePrismEndpoint{
Host: ce.Hostname(),
Port: toPort(ce.Port()),
})
}

if machineType.MachineType() == machine.TypeControlPlane {
endpoints = append(endpoints, k8s.KubePrismEndpoint{
Host: "localhost",
Port: uint32(machineConfig.Config().Cluster().LocalAPIServerPort()),
})
}

for it := safe.IteratorFromList(members); it.Next(); {
memberSpec := it.Value().TypedSpec()

if len(memberSpec.Addresses) > 0 && memberSpec.ControlPlane != nil {
for _, addr := range memberSpec.Addresses {
ce := machineConfig.Config().Cluster().Endpoint()
if ce != nil {
endpoints = append(endpoints, k8s.KubePrismEndpoint{
Host: addr.String(),
Port: uint32(memberSpec.ControlPlane.APIServerPort),
Host: ce.Hostname(),
Port: toPort(ce.Port()),
})
}
}
}

err = safe.WriterModify[*k8s.KubePrismEndpoints](
ctx,
r,
k8s.NewKubePrismEndpoints(k8s.NamespaceName, k8s.KubePrismEndpointsID),
func(res *k8s.KubePrismEndpoints) error {
if machineConfig.Config().Machine().Type().IsControlPlane() {
endpoints = append(endpoints, k8s.KubePrismEndpoint{
Host: "localhost",
Port: uint32(machineConfig.Config().Cluster().LocalAPIServerPort()),
})
}

for it := safe.IteratorFromList(members); it.Next(); {
memberSpec := it.Value().TypedSpec()

if len(memberSpec.Addresses) > 0 && memberSpec.ControlPlane != nil {
for _, addr := range memberSpec.Addresses {
endpoints = append(endpoints, k8s.KubePrismEndpoint{
Host: addr.String(),
Port: uint32(memberSpec.ControlPlane.APIServerPort),
})
}
}
}

res.TypedSpec().Endpoints = endpoints

return nil
},
)
if err != nil {
return fmt.Errorf("error updating KubePrism endpoints: %w", err)
}

// list keys for cleanup
list, err := safe.ReaderListAll[*k8s.KubePrismEndpoints](ctx, r)
if err != nil {
return fmt.Errorf("error listing KubePrism resources: %w", err)
}

for it := safe.IteratorFromList(list); it.Next(); {
res := it.Value()

if res.Metadata().Owner() != ctrl.Name() {
continue
}

if res.Metadata().ID() != k8s.KubePrismEndpointsID {
if err = r.Destroy(ctx, res.Metadata()); err != nil {
return fmt.Errorf("error cleaning up KubePrism specs: %w", err)
}

logger.Info("removed KubePrism endpoints resource", zap.String("id", res.Metadata().ID()))
}
}

r.ResetRestartBackoff()
}
},
transform.WithExtraInputs(
safe.Input[*cluster.Member](controller.InputWeak),
),
)
}

@@ -35,7 +35,9 @@ func (suite *KubePrismControllerSuite) TestGeneration() {

mc := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
ConfigVersion: "v1alpha1",
MachineConfig: &v1alpha1.MachineConfig{},
MachineConfig: &v1alpha1.MachineConfig{
MachineType: "controlplane",
},
ClusterConfig: &v1alpha1.ClusterConfig{
ControlPlane: &v1alpha1.ControlPlaneConfig{
Endpoint: &v1alpha1.Endpoint{
@@ -48,10 +50,6 @@ func (suite *KubePrismControllerSuite) TestGeneration() {

suite.Create(mc)

machineType := config.NewMachineType()
machineType.SetMachineType(machine.TypeControlPlane)
suite.Create(machineType)

member1 := cluster.NewMember(cluster.NamespaceName, "service/7x1SuC8Ege5BGXdAfTEff5iQnlWZLfv9h1LGMxA2pYkC")
*member1.TypedSpec() = cluster.MemberSpec{
NodeID: "7x1SuC8Ege5BGXdAfTEff5iQnlWZLfv9h1LGMxA2pYkC",
@@ -125,7 +123,7 @@ func TestEndpointsBalancerControllerSuite(t *testing.T) {
suite.Run(t, &KubePrismControllerSuite{
DefaultSuite: ctest.DefaultSuite{
AfterSetup: func(suite *ctest.DefaultSuite) {
suite.Require().NoError(suite.Runtime().RegisterController(&clusterctrl.KubePrismEndpointsController{}))
suite.Require().NoError(suite.Runtime().RegisterController(clusterctrl.NewKubePrismEndpointsController()))
},
},
})
@@ -61,22 +61,24 @@ func (ctrl *NodeLabelSpecController) Run(ctx context.Context, r controller.Runti
}

cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if state.IsNotFoundError(err) {
continue
}

if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}

nodeLabels := cfg.Config().Machine().NodeLabels()
r.StartTrackingOutputs()

if nodeLabels == nil {
nodeLabels = map[string]string{}
}
var nodeLabels map[string]string

if cfg.Config().Machine().Type().IsControlPlane() {
nodeLabels[constants.LabelNodeRoleControlPlane] = ""
if cfg != nil && cfg.Config().Machine() != nil {
nodeLabels = cfg.Config().Machine().NodeLabels()

if cfg.Config().Machine().Type().IsControlPlane() {
if nodeLabels == nil {
nodeLabels = map[string]string{}
}

nodeLabels[constants.LabelNodeRoleControlPlane] = ""
}
}

for key, value := range nodeLabels {
@@ -90,24 +92,8 @@ func (ctrl *NodeLabelSpecController) Run(ctx context.Context, r controller.Runti
}
}

labelSpecs, err := safe.ReaderListAll[*k8s.NodeLabelSpec](ctx, r)
if err != nil {
return fmt.Errorf("error getting node label specs: %w", err)
if err = safe.CleanupOutputs[*k8s.NodeLabelSpec](ctx, r); err != nil {
return err
}

for iter := safe.IteratorFromList(labelSpecs); iter.Next(); {
labelSpec := iter.Value()

_, touched := nodeLabels[labelSpec.TypedSpec().Key]
if touched {
continue
}

if err = r.Destroy(ctx, labelSpec.Metadata()); err != nil {
return fmt.Errorf("error destroying node label spec: %w", err)
}
}

r.ResetRestartBackoff()
}
}
@@ -62,45 +62,28 @@ func (ctrl *NodeTaintSpecController) Run(ctx context.Context, r controller.Runti
}

cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if state.IsNotFoundError(err) {
continue
}

if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}

touched := map[string]struct{}{}
r.StartTrackingOutputs()

if cfg.Config().Machine().Type().IsControlPlane() && !cfg.Config().Cluster().ScheduleOnControlPlanes() {
touched[constants.LabelNodeRoleControlPlane] = struct{}{}
if cfg != nil && cfg.Config().Machine() != nil && cfg.Config().Cluster() != nil {
if cfg.Config().Machine().Type().IsControlPlane() && !cfg.Config().Cluster().ScheduleOnControlPlanes() {
if err = safe.WriterModify(ctx, r, k8s.NewNodeTaintSpec(constants.LabelNodeRoleControlPlane), func(k *k8s.NodeTaintSpec) error {
k.TypedSpec().Key = constants.LabelNodeRoleControlPlane
k.TypedSpec().Value = ""
k.TypedSpec().Effect = string(v1.TaintEffectNoSchedule)

if err = safe.WriterModify(ctx, r, k8s.NewNodeTaintSpec(constants.LabelNodeRoleControlPlane), func(k *k8s.NodeTaintSpec) error {
k.TypedSpec().Key = constants.LabelNodeRoleControlPlane
k.TypedSpec().Value = ""
k.TypedSpec().Effect = string(v1.TaintEffectNoSchedule)

return nil
}); err != nil {
return fmt.Errorf("error updating node taint spec: %w", err)
return nil
}); err != nil {
return fmt.Errorf("error updating node taint spec: %w", err)
}
}
}

taintSpecs, err := safe.ReaderListAll[*k8s.NodeTaintSpec](ctx, r)
if err != nil {
return fmt.Errorf("error getting node taint specs: %w", err)
if err = safe.CleanupOutputs[*k8s.NodeTaintSpec](ctx, r); err != nil {
return err
}

for iter := safe.IteratorFromList(taintSpecs); iter.Next(); {
if _, touched := touched[iter.Value().Metadata().ID()]; touched {
continue
}

if err = r.Destroy(ctx, iter.Value().Metadata()); err != nil {
return fmt.Errorf("error destroying node taint spec: %w", err)
}
}

r.ResetRestartBackoff()
}
}
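Both controllers above switch from hand-written list-and-destroy cleanup to the runtime's integrated output tracker; a condensed sketch of that flow (the "example" key/value are placeholders, and the single-argument k8s.NewNodeLabelSpec constructor is assumed by analogy with k8s.NewNodeTaintSpec):

r.StartTrackingOutputs()

// every WriterModify below is recorded as a live output for this pass
if err := safe.WriterModify(ctx, r, k8s.NewNodeLabelSpec("example-key"), func(spec *k8s.NodeLabelSpec) error {
	spec.TypedSpec().Key = "example-key"
	spec.TypedSpec().Value = "example-value"

	return nil
}); err != nil {
	return fmt.Errorf("error updating node label spec: %w", err)
}

// outputs produced earlier but not touched in this pass are destroyed here
if err := safe.CleanupOutputs[*k8s.NodeLabelSpec](ctx, r); err != nil {
	return err
}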
@@ -10,77 +10,43 @@ import (
"net/netip"

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/go-pointer"
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
"github.com/siderolabs/gen/optional"
"go.uber.org/zap"

"github.com/siderolabs/talos/pkg/machinery/resources/config"
"github.com/siderolabs/talos/pkg/machinery/resources/k8s"
)

// NodeIPConfigController renders manifests based on templates and config/secrets.
type NodeIPConfigController struct{}
// NodeIPConfigController configures k8s.NodeIP based on machine config.
type NodeIPConfigController = transform.Controller[*config.MachineConfig, *k8s.NodeIPConfig]

// Name implements controller.Controller interface.
func (ctrl *NodeIPConfigController) Name() string {
return "k8s.NodeIPConfigController"
}
// NewNodeIPConfigController instanciates the controller.
func NewNodeIPConfigController() *NodeIPConfigController {
return transform.NewController(
transform.Settings[*config.MachineConfig, *k8s.NodeIPConfig]{
Name: "k8s.NodeIPConfigController",
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*k8s.NodeIPConfig] {
if cfg.Metadata().ID() != config.V1Alpha1ID {
return optional.None[*k8s.NodeIPConfig]()
}

// Inputs implements controller.Controller interface.
func (ctrl *NodeIPConfigController) Inputs() []controller.Input {
return []controller.Input{
{
Namespace: config.NamespaceName,
Type: config.MachineConfigType,
ID: pointer.To(config.V1Alpha1ID),
Kind: controller.InputWeak,
},
}
}
if cfg.Config().Machine() == nil || cfg.Config().Cluster() == nil {
return optional.None[*k8s.NodeIPConfig]()
}

// Outputs implements controller.Controller interface.
func (ctrl *NodeIPConfigController) Outputs() []controller.Output {
return []controller.Output{
{
Type: k8s.NodeIPConfigType,
Kind: controller.OutputExclusive,
},
}
}

// Run implements controller.Controller interface.
//
//nolint:gocyclo
func (ctrl *NodeIPConfigController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
select {
case <-ctx.Done():
return nil
case <-r.EventCh():
}

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
if err != nil {
if state.IsNotFoundError(err) {
continue
}

return fmt.Errorf("error getting config: %w", err)
}

cfgProvider := cfg.(*config.MachineConfig).Config()

if err = r.Modify(
ctx,
k8s.NewNodeIPConfig(k8s.NamespaceName, k8s.KubeletID),
func(r resource.Resource) error {
spec := r.(*k8s.NodeIPConfig).TypedSpec()
return optional.Some(k8s.NewNodeIPConfig(k8s.NamespaceName, k8s.KubeletID))
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *k8s.NodeIPConfig) error {
spec := res.TypedSpec()
cfgProvider := cfg.Config()

spec.ValidSubnets = cfgProvider.Machine().Kubelet().NodeIP().ValidSubnets()

if len(spec.ValidSubnets) == 0 {
// automatically deduce validsubnets from ServiceCIDRs
var err error

spec.ValidSubnets, err = ipSubnetsFromServiceCIDRs(cfgProvider.Cluster().Network().ServiceCIDRs())
if err != nil {
return fmt.Errorf("error building valid subnets: %w", err)
|
||||
@ -113,12 +79,8 @@ func (ctrl *NodeIPConfigController) Run(ctx context.Context, r controller.Runtim
|
||||
|
||||
return nil
|
||||
},
|
||||
); err != nil {
|
||||
return fmt.Errorf("error modifying NodeIPConfig resource: %w", err)
|
||||
}
|
||||
|
||||
r.ResetRestartBackoff()
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func ipSubnetsFromServiceCIDRs(serviceCIDRs []string) ([]string, error) {
|
||||
|
@ -52,7 +52,7 @@ func (suite *NodeIPConfigSuite) SetupTest() {
suite.runtime, err = runtime.NewRuntime(suite.state, logging.Wrap(log.Writer()))
suite.Require().NoError(err)

suite.Require().NoError(suite.runtime.RegisterController(&k8sctrl.NodeIPConfigController{}))
suite.Require().NoError(suite.runtime.RegisterController(k8sctrl.NewNodeIPConfigController()))

suite.startRuntime()
}

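NewNodeIPConfigController, registered in the test above, is one of several constructors this commit builds on transform.Controller. A sketch of that construction, not part of the commit (the controller name is illustrative and the TransformFunc body is elided): MapMetadataOptionalFunc decides whether the output should exist at all, skipping non-v1alpha1 documents and partial configs, while TransformFunc fills in the spec.

package sketch

import (
	"context"

	"github.com/cosi-project/runtime/pkg/controller"
	"github.com/cosi-project/runtime/pkg/controller/generic/transform"
	"github.com/siderolabs/gen/optional"
	"go.uber.org/zap"

	"github.com/siderolabs/talos/pkg/machinery/resources/config"
	"github.com/siderolabs/talos/pkg/machinery/resources/k8s"
)

// NewSketchController mirrors the transform-based constructors in this commit.
func NewSketchController() *transform.Controller[*config.MachineConfig, *k8s.NodeIPConfig] {
	return transform.NewController(
		transform.Settings[*config.MachineConfig, *k8s.NodeIPConfig]{
			Name: "k8s.SketchController", // illustrative name, not part of the diff
			MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*k8s.NodeIPConfig] {
				// only the v1alpha1 document produces an output
				if cfg.Metadata().ID() != config.V1Alpha1ID {
					return optional.None[*k8s.NodeIPConfig]()
				}

				// partial machine config: no machine or cluster section yet
				if cfg.Config().Machine() == nil || cfg.Config().Cluster() == nil {
					return optional.None[*k8s.NodeIPConfig]()
				}

				return optional.Some(k8s.NewNodeIPConfig(k8s.NamespaceName, k8s.KubeletID))
			},
			TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *k8s.NodeIPConfig) error {
				// fill res.TypedSpec() from cfg.Config() here

				return nil
			},
		},
	)
}
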
@ -11,6 +11,7 @@ import (

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/go-pointer"
"go.uber.org/zap"
@ -57,6 +58,8 @@ func (ctrl *NodenameController) Outputs() []controller.Output {
}

// Run implements controller.Controller interface.
//
//nolint:gocyclo
func (ctrl *NodenameController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
select {
@ -65,7 +68,7 @@ func (ctrl *NodenameController) Run(ctx context.Context, r controller.Runtime, l
case <-r.EventCh():
}

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if state.IsNotFoundError(err) {
continue
@ -74,7 +77,11 @@ func (ctrl *NodenameController) Run(ctx context.Context, r controller.Runtime, l
return fmt.Errorf("error getting config: %w", err)
}

cfgProvider := cfg.(*config.MachineConfig).Config()
cfgProvider := cfg.Config()

if cfgProvider.Machine() == nil {
continue
}

hostnameResource, err := r.Get(ctx, resource.NewMetadata(network.NamespaceName, network.HostnameStatusType, network.HostnameID, resource.VersionUndefined))
if err != nil {

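The `cfgProvider.Machine() == nil` guard above is the other half of accepting partial machine configuration: a multi-doc config can exist without the v1alpha1 machine section. A small helper sketch, not from the commit, that bundles the lookup and the guard:

package sketch

import (
	"context"
	"fmt"

	"github.com/cosi-project/runtime/pkg/controller"
	"github.com/cosi-project/runtime/pkg/safe"
	"github.com/cosi-project/runtime/pkg/state"

	talosconfig "github.com/siderolabs/talos/pkg/machinery/config"
	"github.com/siderolabs/talos/pkg/machinery/resources/config"
)

// machineConfigIfComplete returns the config provider only when the machine
// section is present; a missing resource or a partial config yields nil.
func machineConfigIfComplete(ctx context.Context, r controller.Runtime) (talosconfig.Config, error) {
	cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
	if err != nil {
		if state.IsNotFoundError(err) {
			return nil, nil // no machine config at all yet
		}

		return nil, fmt.Errorf("error getting config: %w", err)
	}

	if cfg.Config().Machine() == nil {
		return nil, nil // partial config without the v1alpha1 machine section
	}

	return cfg.Config(), nil
}
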
@ -52,6 +52,8 @@ func (ctrl *StaticEndpointController) Outputs() []controller.Output {
}

// Run implements controller.Controller interface.
//
//nolint:gocyclo
func (ctrl *StaticEndpointController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
for {
select {
@ -61,31 +63,38 @@ func (ctrl *StaticEndpointController) Run(ctx context.Context, r controller.Runt
}

machineConfig, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if state.IsNotFoundError(err) {
continue
}

if err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error getting machine config: %w", err)
}

cpHostname := machineConfig.Config().Cluster().Endpoint().Hostname()
r.StartTrackingOutputs()

var resolver net.Resolver
if machineConfig != nil && machineConfig.Config().Cluster() != nil {
cpHostname := machineConfig.Config().Cluster().Endpoint().Hostname()

addrs, err := resolver.LookupNetIP(ctx, "ip", cpHostname)
if err != nil {
return fmt.Errorf("error resolving %q: %w", cpHostname, err)
var (
resolver net.Resolver
addrs []netip.Addr
)

addrs, err = resolver.LookupNetIP(ctx, "ip", cpHostname)
if err != nil {
return fmt.Errorf("error resolving %q: %w", cpHostname, err)
}

addrs = slices.Map(addrs, netip.Addr.Unmap)

if err = safe.WriterModify(ctx, r, k8s.NewEndpoint(k8s.ControlPlaneNamespaceName, k8s.ControlPlaneKubernetesEndpointsID), func(endpoint *k8s.Endpoint) error {
endpoint.TypedSpec().Addresses = addrs

return nil
}); err != nil {
return fmt.Errorf("error modifying endpoint: %w", err)
}
}

addrs = slices.Map(addrs, netip.Addr.Unmap)

if err = safe.WriterModify(ctx, r, k8s.NewEndpoint(k8s.ControlPlaneNamespaceName, k8s.ControlPlaneKubernetesEndpointsID), func(endpoint *k8s.Endpoint) error {
endpoint.TypedSpec().Addresses = addrs

return nil
}); err != nil {
return fmt.Errorf("error modifying endpoint: %w", err)
if err = safe.CleanupOutputs[*k8s.Endpoint](ctx, r); err != nil {
return err
}
}
}

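The resolution step above pairs net.Resolver.LookupNetIP with slices.Map and netip.Addr.Unmap to normalize IPv4-mapped IPv6 answers. A standalone example, not part of the commit, with "localhost" standing in for the control-plane endpoint hostname:

package main

import (
	"context"
	"fmt"
	"net"
	"net/netip"

	"github.com/siderolabs/gen/slices"
)

func main() {
	var resolver net.Resolver

	// resolve both IPv4 and IPv6 addresses for the host
	addrs, err := resolver.LookupNetIP(context.Background(), "ip", "localhost")
	if err != nil {
		panic(err)
	}

	// LookupNetIP can return ::ffff:a.b.c.d forms; Unmap turns them back into plain IPv4
	addrs = slices.Map(addrs, netip.Addr.Unmap)

	fmt.Println(addrs)
}
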
@ -52,6 +52,10 @@ func (suite *StaticEndpointControllerSuite) TestReconcile() {
func(endpoint *k8s.Endpoint, assert *assert.Assertions) {
assert.Equal([]netip.Addr{netip.MustParseAddr("2001:db8::1")}, endpoint.TypedSpec().Addresses)
})

suite.Require().NoError(suite.State().Destroy(suite.Ctx(), cfg.Metadata()))

rtestutils.AssertNoResource[*k8s.Endpoint](suite.Ctx(), suite.T(), suite.State(), k8s.ControlPlaneKubernetesEndpointsID)
}

func TestStaticEndpointControllerSuite(t *testing.T) {

@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/safe"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"go.uber.org/zap"
|
||||
@ -61,20 +62,25 @@ func (ctrl *StaticPodConfigController) Run(ctx context.Context, r controller.Run
|
||||
case <-r.EventCh():
|
||||
}
|
||||
|
||||
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
|
||||
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
touchedIDs := map[string]struct{}{}
|
||||
r.StartTrackingOutputs()
|
||||
|
||||
if cfg != nil {
|
||||
cfgProvider := cfg.(*config.MachineConfig).Config()
|
||||
if cfg != nil && cfg.Config().Machine() != nil {
|
||||
cfgProvider := cfg.Config()
|
||||
|
||||
for _, pod := range cfgProvider.Machine().Pods() {
|
||||
name, ok, err := unstructured.NestedString(pod, "metadata", "name")
|
||||
var (
|
||||
name, namespace string
|
||||
ok bool
|
||||
)
|
||||
|
||||
name, ok, err = unstructured.NestedString(pod, "metadata", "name")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting name from static pod: %w", err)
|
||||
}
|
||||
@ -83,7 +89,7 @@ func (ctrl *StaticPodConfigController) Run(ctx context.Context, r controller.Run
|
||||
return fmt.Errorf("name is missing in static pod metadata")
|
||||
}
|
||||
|
||||
namespace, ok, err := unstructured.NestedString(pod, "metadata", "namespace")
|
||||
namespace, ok, err = unstructured.NestedString(pod, "metadata", "namespace")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting namespace from static pod: %w", err)
|
||||
}
|
||||
@ -101,33 +107,12 @@ func (ctrl *StaticPodConfigController) Run(ctx context.Context, r controller.Run
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error modifying resource: %w", err)
|
||||
}
|
||||
|
||||
touchedIDs[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// clean up static pods which haven't been touched
|
||||
{
|
||||
list, err := r.List(ctx, resource.NewMetadata(k8s.NamespaceName, k8s.StaticPodType, "", resource.VersionUndefined))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, res := range list.Items {
|
||||
if _, ok := touchedIDs[res.Metadata().ID()]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if res.Metadata().Owner() != ctrl.Name() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err = r.Destroy(ctx, res.Metadata()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err = safe.CleanupOutputs[*k8s.StaticPod](ctx, r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.ResetRestartBackoff()
|
||||
}
|
||||
}
|
||||
|
@ -6,12 +6,10 @@ package kubeaccess
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
|
||||
"github.com/siderolabs/gen/optional"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/siderolabs/talos/pkg/machinery/resources/config"
|
||||
@ -19,97 +17,43 @@ import (
|
||||
)
|
||||
|
||||
// ConfigController watches v1alpha1.Config, updates Talos API access config.
|
||||
type ConfigController struct{}
|
||||
type ConfigController = transform.Controller[*config.MachineConfig, *kubeaccess.Config]
|
||||
|
||||
// Name implements controller.Controller interface.
|
||||
func (ctrl *ConfigController) Name() string {
|
||||
return "kubeaccess.ConfigController"
|
||||
}
|
||||
|
||||
// Inputs implements controller.Controller interface.
|
||||
func (ctrl *ConfigController) Inputs() []controller.Input {
|
||||
return []controller.Input{
|
||||
{
|
||||
Namespace: config.NamespaceName,
|
||||
Type: config.MachineTypeType,
|
||||
ID: pointer.To(config.MachineTypeID),
|
||||
Kind: controller.InputWeak,
|
||||
},
|
||||
{
|
||||
Namespace: config.NamespaceName,
|
||||
Type: config.MachineConfigType,
|
||||
ID: pointer.To(config.V1Alpha1ID),
|
||||
Kind: controller.InputWeak,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Outputs implements controller.Controller interface.
|
||||
func (ctrl *ConfigController) Outputs() []controller.Output {
|
||||
return []controller.Output{
|
||||
{
|
||||
Type: kubeaccess.ConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements controller.Controller interface.
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func (ctrl *ConfigController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-r.EventCh():
|
||||
}
|
||||
|
||||
machineType, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineTypeType, config.MachineTypeID, resource.VersionUndefined))
|
||||
if err != nil {
|
||||
if state.IsNotFoundError(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
return fmt.Errorf("error getting machine type: %w", err)
|
||||
}
|
||||
|
||||
if !machineType.(*config.MachineType).MachineType().IsControlPlane() {
|
||||
if err = r.Destroy(ctx, kubeaccess.NewConfig(config.NamespaceName, kubeaccess.ConfigID).Metadata()); err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error destroying kubeaccess config: %w", err)
|
||||
// NewConfigController instanciates the config controller.
|
||||
func NewConfigController() *ConfigController {
|
||||
return transform.NewController(
|
||||
transform.Settings[*config.MachineConfig, *kubeaccess.Config]{
|
||||
Name: "kubeaccess.ConfigController",
|
||||
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*kubeaccess.Config] {
|
||||
if cfg.Metadata().ID() != config.V1Alpha1ID {
|
||||
return optional.None[*kubeaccess.Config]()
|
||||
}
|
||||
}
|
||||
|
||||
// not a control plane node, nothing to do
|
||||
continue
|
||||
}
|
||||
if cfg.Config().Machine() == nil {
|
||||
return optional.None[*kubeaccess.Config]()
|
||||
}
|
||||
|
||||
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
}
|
||||
if !cfg.Config().Machine().Type().IsControlPlane() {
|
||||
return optional.None[*kubeaccess.Config]()
|
||||
}
|
||||
|
||||
if err = r.Modify(ctx, kubeaccess.NewConfig(config.NamespaceName, kubeaccess.ConfigID), func(res resource.Resource) error {
|
||||
spec := res.(*kubeaccess.Config).TypedSpec()
|
||||
return optional.Some(kubeaccess.NewConfig(config.NamespaceName, kubeaccess.ConfigID))
|
||||
},
|
||||
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *kubeaccess.Config) error {
|
||||
spec := res.TypedSpec()
|
||||
|
||||
*spec = kubeaccess.ConfigSpec{}
|
||||
*spec = kubeaccess.ConfigSpec{}
|
||||
|
||||
if cfg != nil {
|
||||
c := cfg.(*config.MachineConfig).Config()
|
||||
if cfg != nil && cfg.Config().Machine() != nil {
|
||||
c := cfg.Config()
|
||||
|
||||
spec.Enabled = c.Machine().Features().KubernetesTalosAPIAccess().Enabled()
|
||||
spec.AllowedAPIRoles = c.Machine().Features().KubernetesTalosAPIAccess().AllowedRoles()
|
||||
spec.AllowedKubernetesNamespaces = c.Machine().Features().KubernetesTalosAPIAccess().AllowedKubernetesNamespaces()
|
||||
}
|
||||
spec.Enabled = c.Machine().Features().KubernetesTalosAPIAccess().Enabled()
|
||||
spec.AllowedAPIRoles = c.Machine().Features().KubernetesTalosAPIAccess().AllowedRoles()
|
||||
spec.AllowedKubernetesNamespaces = c.Machine().Features().KubernetesTalosAPIAccess().AllowedKubernetesNamespaces()
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.ResetRestartBackoff()
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
@ -9,35 +9,28 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/resource/rtestutils"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"github.com/siderolabs/go-retry/retry"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
|
||||
kubeaccessctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/kubeaccess"
|
||||
"github.com/siderolabs/talos/pkg/machinery/config/container"
|
||||
"github.com/siderolabs/talos/pkg/machinery/config/machine"
|
||||
"github.com/siderolabs/talos/pkg/machinery/config/types/v1alpha1"
|
||||
"github.com/siderolabs/talos/pkg/machinery/resources/config"
|
||||
"github.com/siderolabs/talos/pkg/machinery/resources/kubeaccess"
|
||||
)
|
||||
|
||||
type ConfigSuite struct {
|
||||
KubeaccessSuite
|
||||
ctest.DefaultSuite
|
||||
}
|
||||
|
||||
func (suite *ConfigSuite) TestReconcileConfig() {
|
||||
suite.Require().NoError(suite.runtime.RegisterController(&kubeaccessctrl.ConfigController{}))
|
||||
|
||||
suite.startRuntime()
|
||||
|
||||
machineType := config.NewMachineType()
|
||||
machineType.SetMachineType(machine.TypeControlPlane)
|
||||
|
||||
suite.Require().NoError(suite.state.Create(suite.ctx, machineType))
|
||||
|
||||
cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
|
||||
ConfigVersion: "v1alpha1",
|
||||
MachineConfig: &v1alpha1.MachineConfig{
|
||||
MachineType: "controlplane",
|
||||
MachineFeatures: &v1alpha1.FeaturesConfig{
|
||||
KubernetesTalosAPIAccessConfig: &v1alpha1.KubernetesTalosAPIAccessConfig{
|
||||
AccessEnabled: pointer.To(true),
|
||||
@ -48,74 +41,41 @@ func (suite *ConfigSuite) TestReconcileConfig() {
|
||||
},
|
||||
}))
|
||||
|
||||
suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
|
||||
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))
|
||||
|
||||
specMD := resource.NewMetadata(config.NamespaceName, kubeaccess.ConfigType, kubeaccess.ConfigID, resource.VersionUndefined)
|
||||
rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{kubeaccess.ConfigID}, func(r *kubeaccess.Config, asrt *assert.Assertions) {
|
||||
spec := r.TypedSpec()
|
||||
|
||||
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
|
||||
suite.assertResource(
|
||||
specMD,
|
||||
func(res resource.Resource) error {
|
||||
spec := res.(*kubeaccess.Config).TypedSpec()
|
||||
|
||||
suite.Assert().True(spec.Enabled)
|
||||
suite.Assert().Equal([]string{"os:admin"}, spec.AllowedAPIRoles)
|
||||
suite.Assert().Equal([]string{"kube-system"}, spec.AllowedKubernetesNamespaces)
|
||||
|
||||
return nil
|
||||
},
|
||||
),
|
||||
))
|
||||
asrt.True(spec.Enabled)
|
||||
asrt.Equal([]string{"os:admin"}, spec.AllowedAPIRoles)
|
||||
asrt.Equal([]string{"kube-system"}, spec.AllowedKubernetesNamespaces)
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *ConfigSuite) TestReconcileDisabled() {
|
||||
suite.Require().NoError(suite.runtime.RegisterController(&kubeaccessctrl.ConfigController{}))
|
||||
|
||||
suite.startRuntime()
|
||||
|
||||
machineType := config.NewMachineType()
|
||||
machineType.SetMachineType(machine.TypeInit)
|
||||
|
||||
suite.Require().NoError(suite.state.Create(suite.ctx, machineType))
|
||||
|
||||
cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
|
||||
ConfigVersion: "v1alpha1",
|
||||
MachineConfig: &v1alpha1.MachineConfig{},
|
||||
}))
|
||||
|
||||
suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
|
||||
|
||||
specMD := resource.NewMetadata(config.NamespaceName, kubeaccess.ConfigType, kubeaccess.ConfigID, resource.VersionUndefined)
|
||||
|
||||
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
|
||||
suite.assertResource(
|
||||
specMD,
|
||||
func(res resource.Resource) error {
|
||||
spec := res.(*kubeaccess.Config).TypedSpec()
|
||||
|
||||
suite.Assert().False(spec.Enabled)
|
||||
suite.Assert().Empty(spec.AllowedAPIRoles)
|
||||
suite.Assert().Empty(spec.AllowedKubernetesNamespaces)
|
||||
|
||||
return nil
|
||||
},
|
||||
),
|
||||
))
|
||||
}
|
||||
|
||||
func (suite *ConfigSuite) TestReconcileWorker() {
|
||||
suite.Require().NoError(suite.runtime.RegisterController(&kubeaccessctrl.ConfigController{}))
|
||||
|
||||
suite.startRuntime()
|
||||
|
||||
machineType := config.NewMachineType()
|
||||
machineType.SetMachineType(machine.TypeWorker)
|
||||
|
||||
suite.Require().NoError(suite.state.Create(suite.ctx, machineType))
|
||||
|
||||
cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
|
||||
ConfigVersion: "v1alpha1",
|
||||
MachineConfig: &v1alpha1.MachineConfig{
|
||||
MachineType: "init",
|
||||
},
|
||||
}))
|
||||
|
||||
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))
|
||||
|
||||
rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{kubeaccess.ConfigID}, func(r *kubeaccess.Config, asrt *assert.Assertions) {
|
||||
spec := r.TypedSpec()
|
||||
|
||||
asrt.False(spec.Enabled)
|
||||
asrt.Empty(spec.AllowedAPIRoles)
|
||||
asrt.Empty(spec.AllowedKubernetesNamespaces)
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *ConfigSuite) TestReconcileWorker() {
|
||||
cfg := config.NewMachineConfig(container.NewV1Alpha1(&v1alpha1.Config{
|
||||
ConfigVersion: "v1alpha1",
|
||||
MachineConfig: &v1alpha1.MachineConfig{
|
||||
MachineType: "worker",
|
||||
MachineFeatures: &v1alpha1.FeaturesConfig{
|
||||
KubernetesTalosAPIAccessConfig: &v1alpha1.KubernetesTalosAPIAccessConfig{
|
||||
AccessEnabled: pointer.To(true),
|
||||
@ -126,15 +86,21 @@ func (suite *ConfigSuite) TestReconcileWorker() {
|
||||
},
|
||||
}))
|
||||
|
||||
suite.Require().NoError(suite.state.Create(suite.ctx, cfg))
|
||||
suite.Require().NoError(suite.State().Create(suite.Ctx(), cfg))
|
||||
|
||||
// worker should have feature disabled even if it is enabled in the config
|
||||
specMD := resource.NewMetadata(config.NamespaceName, kubeaccess.ConfigType, kubeaccess.ConfigID, resource.VersionUndefined)
|
||||
|
||||
suite.Assert().NoError(retry.Constant(3*time.Second, retry.WithUnits(100*time.Millisecond)).Retry(
|
||||
suite.assertNoResource(specMD)))
|
||||
rtestutils.AssertNoResource[*kubeaccess.Config](suite.Ctx(), suite.T(), suite.State(), kubeaccess.ConfigID)
|
||||
}
|
||||
|
||||
func TestConfigSuite(t *testing.T) {
|
||||
suite.Run(t, new(ConfigSuite))
|
||||
t.Parallel()
|
||||
|
||||
suite.Run(t, &ConfigSuite{
|
||||
DefaultSuite: ctest.DefaultSuite{
|
||||
Timeout: 5 * time.Second,
|
||||
AfterSetup: func(s *ctest.DefaultSuite) {
|
||||
s.Require().NoError(s.Runtime().RegisterController(kubeaccessctrl.NewConfigController()))
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -1,95 +0,0 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package kubeaccess_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller/runtime"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/cosi-project/runtime/pkg/state/impl/inmem"
|
||||
"github.com/cosi-project/runtime/pkg/state/impl/namespaced"
|
||||
"github.com/siderolabs/go-retry/retry"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/siderolabs/talos/pkg/logging"
|
||||
)
|
||||
|
||||
type KubeaccessSuite struct {
|
||||
suite.Suite
|
||||
|
||||
state state.State
|
||||
|
||||
runtime *runtime.Runtime
|
||||
wg sync.WaitGroup
|
||||
|
||||
ctx context.Context //nolint:containedctx
|
||||
ctxCancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (suite *KubeaccessSuite) SetupTest() {
|
||||
suite.ctx, suite.ctxCancel = context.WithTimeout(context.Background(), 3*time.Minute)
|
||||
|
||||
suite.state = state.WrapCore(namespaced.NewState(inmem.Build))
|
||||
|
||||
var err error
|
||||
|
||||
logger := logging.Wrap(log.Writer())
|
||||
|
||||
suite.runtime, err = runtime.NewRuntime(suite.state, logger)
|
||||
suite.Require().NoError(err)
|
||||
}
|
||||
|
||||
func (suite *KubeaccessSuite) startRuntime() {
|
||||
suite.wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer suite.wg.Done()
|
||||
|
||||
suite.Assert().NoError(suite.runtime.Run(suite.ctx))
|
||||
}()
|
||||
}
|
||||
|
||||
func (suite *KubeaccessSuite) assertResource(md resource.Metadata, check func(res resource.Resource) error) func() error {
|
||||
return func() error {
|
||||
r, err := suite.state.Get(suite.ctx, md)
|
||||
if err != nil {
|
||||
if state.IsNotFoundError(err) {
|
||||
return retry.ExpectedError(err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return check(r)
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *KubeaccessSuite) assertNoResource(md resource.Metadata) func() error {
|
||||
return func() error {
|
||||
_, err := suite.state.Get(suite.ctx, md)
|
||||
if err == nil {
|
||||
return retry.ExpectedErrorf("resource %s still exists", md)
|
||||
}
|
||||
|
||||
if state.IsNotFoundError(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *KubeaccessSuite) TearDownTest() {
|
||||
suite.T().Log("tear down")
|
||||
|
||||
suite.ctxCancel()
|
||||
|
||||
suite.wg.Wait()
|
||||
}
|
@ -6,12 +6,10 @@ package kubespan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
|
||||
"github.com/siderolabs/gen/optional"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/siderolabs/talos/pkg/machinery/resources/config"
|
||||
@ -19,92 +17,43 @@ import (
|
||||
)
|
||||
|
||||
// ConfigController watches v1alpha1.Config, updates KubeSpan config.
|
||||
type ConfigController struct{}
|
||||
type ConfigController = transform.Controller[*config.MachineConfig, *kubespan.Config]
|
||||
|
||||
// Name implements controller.Controller interface.
|
||||
func (ctrl *ConfigController) Name() string {
|
||||
return "kubespan.ConfigController"
|
||||
}
|
||||
// NewConfigController instanciates the config controller.
|
||||
func NewConfigController() *ConfigController {
|
||||
return transform.NewController(
|
||||
transform.Settings[*config.MachineConfig, *kubespan.Config]{
|
||||
Name: "kubespan.ConfigController",
|
||||
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*kubespan.Config] {
|
||||
if cfg.Metadata().ID() != config.V1Alpha1ID {
|
||||
return optional.None[*kubespan.Config]()
|
||||
}
|
||||
|
||||
// Inputs implements controller.Controller interface.
|
||||
func (ctrl *ConfigController) Inputs() []controller.Input {
|
||||
return []controller.Input{
|
||||
{
|
||||
Namespace: config.NamespaceName,
|
||||
Type: config.MachineConfigType,
|
||||
ID: pointer.To(config.V1Alpha1ID),
|
||||
Kind: controller.InputWeak,
|
||||
if cfg.Config().Machine() == nil || cfg.Config().Cluster() == nil {
|
||||
return optional.None[*kubespan.Config]()
|
||||
}
|
||||
|
||||
return optional.Some(kubespan.NewConfig(config.NamespaceName, kubespan.ConfigID))
|
||||
},
|
||||
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *kubespan.Config) error {
|
||||
spec := res.TypedSpec()
|
||||
|
||||
*spec = kubespan.ConfigSpec{}
|
||||
|
||||
if cfg != nil && cfg.Config().Machine() != nil {
|
||||
c := cfg.Config()
|
||||
|
||||
res.TypedSpec().Enabled = c.Machine().Network().KubeSpan().Enabled()
|
||||
res.TypedSpec().ClusterID = c.Cluster().ID()
|
||||
res.TypedSpec().SharedSecret = c.Cluster().Secret()
|
||||
res.TypedSpec().ForceRouting = c.Machine().Network().KubeSpan().ForceRouting()
|
||||
res.TypedSpec().AdvertiseKubernetesNetworks = c.Machine().Network().KubeSpan().AdvertiseKubernetesNetworks()
|
||||
res.TypedSpec().MTU = c.Machine().Network().KubeSpan().MTU()
|
||||
res.TypedSpec().EndpointFilters = c.Machine().Network().KubeSpan().Filters().Endpoints()
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Outputs implements controller.Controller interface.
|
||||
func (ctrl *ConfigController) Outputs() []controller.Output {
|
||||
return []controller.Output{
|
||||
{
|
||||
Type: kubespan.ConfigType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements controller.Controller interface.
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func (ctrl *ConfigController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-r.EventCh():
|
||||
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
touchedIDs := make(map[resource.ID]struct{})
|
||||
|
||||
if cfg != nil {
|
||||
c := cfg.(*config.MachineConfig).Config()
|
||||
|
||||
if err = r.Modify(ctx, kubespan.NewConfig(config.NamespaceName, kubespan.ConfigID), func(res resource.Resource) error {
|
||||
res.(*kubespan.Config).TypedSpec().Enabled = c.Machine().Network().KubeSpan().Enabled()
|
||||
res.(*kubespan.Config).TypedSpec().ClusterID = c.Cluster().ID()
|
||||
res.(*kubespan.Config).TypedSpec().SharedSecret = c.Cluster().Secret()
|
||||
res.(*kubespan.Config).TypedSpec().ForceRouting = c.Machine().Network().KubeSpan().ForceRouting()
|
||||
res.(*kubespan.Config).TypedSpec().AdvertiseKubernetesNetworks = c.Machine().Network().KubeSpan().AdvertiseKubernetesNetworks()
|
||||
res.(*kubespan.Config).TypedSpec().MTU = c.Machine().Network().KubeSpan().MTU()
|
||||
res.(*kubespan.Config).TypedSpec().EndpointFilters = c.Machine().Network().KubeSpan().Filters().Endpoints()
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
touchedIDs[kubespan.ConfigID] = struct{}{}
|
||||
}
|
||||
|
||||
// list keys for cleanup
|
||||
list, err := r.List(ctx, resource.NewMetadata(config.NamespaceName, kubespan.ConfigType, "", resource.VersionUndefined))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing resources: %w", err)
|
||||
}
|
||||
|
||||
for _, res := range list.Items {
|
||||
if res.Metadata().Owner() != ctrl.Name() {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := touchedIDs[res.Metadata().ID()]; !ok {
|
||||
if err = r.Destroy(ctx, res.Metadata()); err != nil {
|
||||
return fmt.Errorf("error cleaning up specs: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.ResetRestartBackoff()
|
||||
}
|
||||
)
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ type ConfigSuite struct {
}

func (suite *ConfigSuite) TestReconcileConfig() {
suite.Require().NoError(suite.runtime.RegisterController(&kubespanctrl.ConfigController{}))
suite.Require().NoError(suite.runtime.RegisterController(kubespanctrl.NewConfigController()))

suite.startRuntime()

@ -68,7 +68,7 @@ func (suite *ConfigSuite) TestReconcileConfig() {
}

func (suite *ConfigSuite) TestReconcileDisabled() {
suite.Require().NoError(suite.runtime.RegisterController(&kubespanctrl.ConfigController{}))
suite.Require().NoError(suite.runtime.RegisterController(kubespanctrl.NewConfigController()))

suite.startRuntime()

@ -99,5 +99,7 @@ func (suite *ConfigSuite) TestReconcileDisabled() {
}

func TestConfigSuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(ConfigSuite))
}

@ -125,5 +125,7 @@ func (suite *EndpointSuite) TestReconcile() {
}

func TestEndpointSuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(EndpointSuite))
}

@ -137,5 +137,7 @@ publicKey: Oak2fBEWngBhwslBxDVgnRNHXs88OAp4kjroSX0uqUE=
}

func TestIdentitySuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(IdentitySuite))
}

@ -251,5 +251,7 @@ func (suite *PeerSpecSuite) TestIPOverlap() {
}

func TestPeerSpecSuite(t *testing.T) {
t.Parallel()

suite.Run(t, new(PeerSpecSuite))
}

@ -86,8 +86,6 @@ func (ctrl *DeviceConfigController) Run(ctx context.Context, r controller.Runtim
|
||||
return err
|
||||
}
|
||||
|
||||
touchedIDs := make(map[resource.ID]struct{})
|
||||
|
||||
var cfgProvider talosconfig.Config
|
||||
|
||||
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
|
||||
@ -99,7 +97,9 @@ func (ctrl *DeviceConfigController) Run(ctx context.Context, r controller.Runtim
|
||||
cfgProvider = cfg.Config()
|
||||
}
|
||||
|
||||
if cfgProvider != nil {
|
||||
r.StartTrackingOutputs()
|
||||
|
||||
if cfgProvider != nil && cfgProvider.Machine() != nil {
|
||||
selectedInterfaces := map[string]struct{}{}
|
||||
|
||||
for index, device := range cfgProvider.Machine().Network().Devices() {
|
||||
@ -135,8 +135,6 @@ func (ctrl *DeviceConfigController) Run(ctx context.Context, r controller.Runtim
|
||||
|
||||
id := fmt.Sprintf("%s/%03d", device.Interface(), index)
|
||||
|
||||
touchedIDs[id] = struct{}{}
|
||||
|
||||
config := network.NewDeviceConfig(id, device)
|
||||
|
||||
if err = r.Modify(
|
||||
@ -153,21 +151,9 @@ func (ctrl *DeviceConfigController) Run(ctx context.Context, r controller.Runtim
|
||||
}
|
||||
}
|
||||
|
||||
// list network devices for cleanup
|
||||
list, err := r.List(ctx, resource.NewMetadata(network.NamespaceName, network.DeviceConfigSpecType, "", resource.VersionUndefined))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing resources: %w", err)
|
||||
if err = safe.CleanupOutputs[*network.DeviceConfigSpec](ctx, r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, res := range list.Items {
|
||||
if _, ok := touchedIDs[res.Metadata().ID()]; !ok {
|
||||
if err = r.Destroy(ctx, res.Metadata()); err != nil {
|
||||
return fmt.Errorf("error cleaning up routes: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.ResetRestartBackoff()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/safe"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"go.uber.org/zap"
|
||||
@ -84,13 +85,13 @@ func (ctrl *EtcFileController) Run(ctx context.Context, r controller.Runtime, lo
|
||||
|
||||
var cfgProvider talosconfig.Config
|
||||
|
||||
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
|
||||
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
} else {
|
||||
cfgProvider = cfg.(*config.MachineConfig).Config()
|
||||
cfgProvider = cfg.Config()
|
||||
}
|
||||
|
||||
var resolverStatus *network.ResolverStatusSpec
|
||||
@ -167,7 +168,7 @@ func (ctrl *EtcFileController) renderResolvConf(resolverStatus *network.Resolver
|
||||
}
|
||||
|
||||
var disableSearchDomain bool
|
||||
if cfgProvider != nil {
|
||||
if cfgProvider != nil && cfgProvider.Machine() != nil {
|
||||
disableSearchDomain = cfgProvider.Machine().Network().DisableSearchDomain()
|
||||
}
|
||||
|
||||
@ -201,7 +202,7 @@ func (ctrl *EtcFileController) renderHosts(hostnameStatus *network.HostnameStatu
|
||||
write("ff02::1\tip6-allnodes\n")
|
||||
write("ff02::2\tip6-allrouters\n")
|
||||
|
||||
if cfgProvider != nil {
|
||||
if cfgProvider != nil && cfgProvider.Machine() != nil {
|
||||
for _, extraHost := range cfgProvider.Machine().Network().ExtraHosts() {
|
||||
write(fmt.Sprintf("%s\t%s\n", extraHost.IP(), strings.Join(extraHost.Aliases(), " ")))
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ import (

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/martinlindhe/base36"
"github.com/siderolabs/go-pointer"
@ -83,13 +84,13 @@ func (ctrl *HostnameConfigController) Run(ctx context.Context, r controller.Runt

var cfgProvider talosconfig.Config

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}
} else {
cfgProvider = cfg.(*config.MachineConfig).Config()
} else if cfg.Config().Machine() != nil {
cfgProvider = cfg.Config()
}

var specs []network.HostnameSpecSpec

@ -11,6 +11,7 @@ import (

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/go-pointer"
"github.com/siderolabs/go-procfs/procfs"
@ -69,13 +70,13 @@ func (ctrl *ResolverConfigController) Run(ctx context.Context, r controller.Runt

var cfgProvider talosconfig.Config

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}
} else {
cfgProvider = cfg.(*config.MachineConfig).Config()
} else if cfg.Config().Machine() != nil {
cfgProvider = cfg.Config()
}

var specs []network.ResolverSpecSpec

@ -10,6 +10,7 @@ import (

"github.com/cosi-project/runtime/pkg/controller"
"github.com/cosi-project/runtime/pkg/resource"
"github.com/cosi-project/runtime/pkg/safe"
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/gen/slices"
"github.com/siderolabs/go-pointer"
@ -69,13 +70,13 @@ func (ctrl *TimeServerConfigController) Run(ctx context.Context, r controller.Ru

var cfgProvider talosconfig.Config

cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil {
if !state.IsNotFoundError(err) {
return fmt.Errorf("error getting config: %w", err)
}
} else {
cfgProvider = cfg.(*config.MachineConfig).Config()
} else if cfg.Config().Machine() != nil {
cfgProvider = cfg.Config()
}

var specs []network.TimeServerSpecSpec

@ -80,12 +80,10 @@ func (ctrl *EventsSinkConfigController) Run(ctx context.Context, r controller.Ru
endpoint = *cfg.Config().Runtime().EventsEndpoint()
}

if endpoint == "" {
if err := r.Destroy(ctx, runtime.NewEventSinkConfig().Metadata()); err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error destroying event sink config: %w", err)
}
} else {
if err := safe.WriterModify(ctx, r, runtime.NewEventSinkConfig(), func(cfg *runtime.EventSinkConfig) error {
r.StartTrackingOutputs()

if endpoint != "" {
if err = safe.WriterModify(ctx, r, runtime.NewEventSinkConfig(), func(cfg *runtime.EventSinkConfig) error {
cfg.TypedSpec().Endpoint = endpoint

return nil
@ -94,6 +92,8 @@ func (ctrl *EventsSinkConfigController) Run(ctx context.Context, r controller.Ru
}
}

r.ResetRestartBackoff()
if err = safe.CleanupOutputs[*runtime.EventSinkConfig](ctx, r); err != nil {
return err
}
}
}

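The EventSinkConfig hunk above is the singleton variant of the same tracking pattern: nothing is written when no endpoint is configured, and CleanupOutputs removes the previously created resource, replacing the explicit Destroy branch. A condensed sketch, not part of the commit; the runtime resources import path is assumed to follow the same machinery/resources layout as the other packages in this diff:

package sketch

import (
	"context"

	"github.com/cosi-project/runtime/pkg/controller"
	"github.com/cosi-project/runtime/pkg/safe"

	"github.com/siderolabs/talos/pkg/machinery/resources/runtime" // assumed path
)

// reconcileEventSink writes the singleton EventSinkConfig only when an endpoint
// is configured; CleanupOutputs tears it down otherwise.
func reconcileEventSink(ctx context.Context, r controller.Runtime, endpoint string) error {
	r.StartTrackingOutputs()

	if endpoint != "" {
		if err := safe.WriterModify(ctx, r, runtime.NewEventSinkConfig(), func(cfg *runtime.EventSinkConfig) error {
			cfg.TypedSpec().Endpoint = endpoint

			return nil
		}); err != nil {
			return err
		}
	}

	return safe.CleanupOutputs[*runtime.EventSinkConfig](ctx, r)
}
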
@ -9,7 +9,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/safe"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"go.uber.org/zap"
|
||||
@ -56,52 +56,34 @@ func (ctrl *KernelModuleConfigController) Run(ctx context.Context, r controller.
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-r.EventCh():
|
||||
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
touchedIDs := make(map[resource.ID]struct{})
|
||||
r.StartTrackingOutputs()
|
||||
|
||||
if cfg != nil {
|
||||
c, _ := cfg.(*config.MachineConfig) //nolint:errcheck
|
||||
for _, module := range c.Config().Machine().Kernel().Modules() {
|
||||
touchedIDs[module.Name()] = struct{}{}
|
||||
if cfg != nil && cfg.Config().Machine() != nil {
|
||||
for _, module := range cfg.Config().Machine().Kernel().Modules() {
|
||||
item := runtime.NewKernelModuleSpec(runtime.NamespaceName, module.Name())
|
||||
|
||||
item := runtime.NewKernelModuleSpec(runtime.NamespaceName, module.Name())
|
||||
if err = safe.WriterModify(ctx, r, item, func(res *runtime.KernelModuleSpec) error {
|
||||
res.TypedSpec().Name = module.Name()
|
||||
res.TypedSpec().Parameters = module.Parameters()
|
||||
|
||||
if err = r.Modify(ctx, item, func(res resource.Resource) error {
|
||||
res.(*runtime.KernelModuleSpec).TypedSpec().Name = module.Name()
|
||||
res.(*runtime.KernelModuleSpec).TypedSpec().Parameters = module.Parameters()
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// list keys for cleanup
|
||||
list, err := r.List(ctx, resource.NewMetadata(runtime.NamespaceName, runtime.KernelModuleSpecType, "", resource.VersionUndefined))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing resources: %w", err)
|
||||
}
|
||||
|
||||
for _, res := range list.Items {
|
||||
if res.Metadata().Owner() != ctrl.Name() {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := touchedIDs[res.Metadata().ID()]; !ok {
|
||||
if err = r.Destroy(ctx, res.Metadata()); err != nil {
|
||||
return fmt.Errorf("error cleaning up specs: %w", err)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.ResetRestartBackoff()
|
||||
if err = safe.CleanupOutputs[*runtime.KernelModuleSpec](ctx, r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/safe"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"go.uber.org/zap"
|
||||
@ -58,61 +59,43 @@ func (ctrl *KernelParamConfigController) Run(ctx context.Context, r controller.R
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-r.EventCh():
|
||||
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
|
||||
cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
|
||||
if err != nil {
|
||||
if !state.IsNotFoundError(err) {
|
||||
return fmt.Errorf("error getting config: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
r.StartTrackingOutputs()
|
||||
|
||||
setKernelParam := func(kind, key, value string) error {
|
||||
item := runtime.NewKernelParamSpec(runtime.NamespaceName, strings.Join([]string{kind, key}, "."))
|
||||
|
||||
return r.Modify(ctx, item, func(res resource.Resource) error {
|
||||
res.(*runtime.KernelParamSpec).TypedSpec().Value = value
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if cfg != nil && cfg.Config().Machine() != nil {
|
||||
for key, value := range cfg.Config().Machine().Sysctls() {
|
||||
if err = setKernelParam(kernel.Sysctl, key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
touchedIDs := make(map[resource.ID]struct{})
|
||||
|
||||
setKernelParam := func(kind, key, value string) error {
|
||||
item := runtime.NewKernelParamSpec(runtime.NamespaceName, strings.Join([]string{kind, key}, "."))
|
||||
|
||||
touchedIDs[item.Metadata().ID()] = struct{}{}
|
||||
|
||||
return r.Modify(ctx, item, func(res resource.Resource) error {
|
||||
res.(*runtime.KernelParamSpec).TypedSpec().Value = value
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if cfg != nil {
|
||||
c, _ := cfg.(*config.MachineConfig) //nolint:errcheck
|
||||
for key, value := range c.Config().Machine().Sysctls() {
|
||||
if err = setKernelParam(kernel.Sysctl, key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for key, value := range c.Config().Machine().Sysfs() {
|
||||
if err = setKernelParam(kernel.Sysfs, key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// list keys for cleanup
|
||||
list, err := r.List(ctx, resource.NewMetadata(runtime.NamespaceName, runtime.KernelParamSpecType, "", resource.VersionUndefined))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing resources: %w", err)
|
||||
}
|
||||
|
||||
for _, res := range list.Items {
|
||||
if res.Metadata().Owner() != ctrl.Name() {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := touchedIDs[res.Metadata().ID()]; !ok {
|
||||
if err = r.Destroy(ctx, res.Metadata()); err != nil {
|
||||
return fmt.Errorf("error cleaning up specs: %w", err)
|
||||
}
|
||||
for key, value := range cfg.Config().Machine().Sysfs() {
|
||||
if err = setKernelParam(kernel.Sysfs, key, value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.ResetRestartBackoff()
|
||||
if err = safe.CleanupOutputs[*runtime.KernelParamSpec](ctx, r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -93,12 +93,10 @@ func (ctrl *KmsgLogConfigController) Run(ctx context.Context, r controller.Runti
})...)
}

if len(destinations) == 0 {
if err := r.Destroy(ctx, runtime.NewKmsgLogConfig().Metadata()); err != nil && !state.IsNotFoundError(err) {
return fmt.Errorf("error destroying kmsg log config: %w", err)
}
} else {
if err := safe.WriterModify(ctx, r, runtime.NewKmsgLogConfig(), func(cfg *runtime.KmsgLogConfig) error {
r.StartTrackingOutputs()

if len(destinations) > 0 {
if err = safe.WriterModify(ctx, r, runtime.NewKmsgLogConfig(), func(cfg *runtime.KmsgLogConfig) error {
cfg.TypedSpec().Destinations = destinations

return nil
@ -107,6 +105,8 @@ func (ctrl *KmsgLogConfigController) Run(ctx context.Context, r controller.Runti
}
}

r.ResetRestartBackoff()
if err = safe.CleanupOutputs[*runtime.KmsgLogConfig](ctx, r); err != nil {
return err
}
}
}

@ -19,6 +19,7 @@ import (
"github.com/cosi-project/runtime/pkg/state"
"github.com/siderolabs/crypto/x509"
"github.com/siderolabs/gen/slices"
"github.com/siderolabs/go-debug"
"github.com/siderolabs/go-pointer"
"go.uber.org/zap"
"google.golang.org/grpc"
@ -112,10 +113,15 @@ func (ctrl *MaintenanceServiceController) Run(ctx context.Context, r controller.

cfgCh := make(chan machineryconfig.Provider)
srv := maintenance.New(cfgCh)

injector := &authz.Injector{
Mode: authz.ReadOnly,
Logger: logger.Sugar().Debugf,
Mode: authz.ReadOnly,
}

if debug.Enabled {
injector.Logger = logger.Sugar().Infof
}

tlsProvider := maintenance.NewTLSProvider()

for {

@ -10,128 +10,70 @@ import (
|
||||
"net/url"
|
||||
|
||||
"github.com/cosi-project/runtime/pkg/controller"
|
||||
"github.com/cosi-project/runtime/pkg/resource"
|
||||
"github.com/cosi-project/runtime/pkg/state"
|
||||
"github.com/siderolabs/go-pointer"
|
||||
"github.com/cosi-project/runtime/pkg/controller/generic/transform"
|
||||
"github.com/siderolabs/gen/optional"
|
||||
"go.uber.org/zap"
|
||||
|
||||
talosconfig "github.com/siderolabs/talos/pkg/machinery/config"
|
||||
"github.com/siderolabs/talos/pkg/machinery/resources/config"
|
||||
"github.com/siderolabs/talos/pkg/machinery/resources/secrets"
|
||||
)
|
||||
|
||||
// KubeletController manages secrets.Kubelet based on configuration.
|
||||
type KubeletController struct{}
|
||||
type KubeletController = transform.Controller[*config.MachineConfig, *secrets.Kubelet]
|
||||
|
||||
// Name implements controller.Controller interface.
|
||||
func (ctrl *KubeletController) Name() string {
|
||||
return "secrets.KubeletController"
|
||||
}
|
||||
|
||||
// Inputs implements controller.Controller interface.
|
||||
func (ctrl *KubeletController) Inputs() []controller.Input {
|
||||
return []controller.Input{
|
||||
{
|
||||
Namespace: config.NamespaceName,
|
||||
Type: config.MachineConfigType,
|
||||
ID: pointer.To(config.V1Alpha1ID),
|
||||
Kind: controller.InputWeak,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Outputs implements controller.Controller interface.
|
||||
func (ctrl *KubeletController) Outputs() []controller.Output {
|
||||
return []controller.Output{
|
||||
{
|
||||
Type: secrets.KubeletType,
|
||||
Kind: controller.OutputExclusive,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements controller.Controller interface.
|
||||
func (ctrl *KubeletController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-r.EventCh():
|
||||
}
|
||||
|
||||
cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
|
||||
if err != nil {
|
||||
if state.IsNotFoundError(err) {
|
||||
if err = ctrl.teardown(ctx, r, secrets.KubeletType); err != nil {
|
||||
return fmt.Errorf("error destroying secrets: %w", err)
|
||||
// NewKubeletController instanciates the controller.
|
||||
func NewKubeletController() *KubeletController {
|
||||
return transform.NewController(
|
||||
transform.Settings[*config.MachineConfig, *secrets.Kubelet]{
|
||||
Name: "secrets.KubeletController",
|
||||
MapMetadataOptionalFunc: func(cfg *config.MachineConfig) optional.Optional[*secrets.Kubelet] {
    if cfg.Metadata().ID() != config.V1Alpha1ID {
        return optional.None[*secrets.Kubelet]()
    }

        continue
    }

    if cfg.Config().Cluster() == nil || cfg.Config().Machine() == nil {
        return optional.None[*secrets.Kubelet]()
    }

        return fmt.Errorf("error getting config: %w", err)
    }

    return optional.Some(secrets.NewKubelet(secrets.KubeletID))
},
TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *secrets.Kubelet) error {
    cfgProvider := cfg.Config()
    kubeletSecrets := res.TypedSpec()

    cfgProvider := cfg.(*config.MachineConfig).Config()

    switch {
    case cfgProvider.Machine().Features().KubePrism().Enabled():
        // use cluster endpoint for controlplane nodes with loadbalancer support
        localEndpoint, err := url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Machine().Features().KubePrism().Port()))
        if err != nil {
            return err
        }

        if err = r.Modify(ctx, secrets.NewKubelet(secrets.KubeletID), func(r resource.Resource) error {
            return ctrl.updateKubeletSecrets(cfgProvider, r.(*secrets.Kubelet).TypedSpec())
        }); err != nil {
            return err
        }

        kubeletSecrets.Endpoint = localEndpoint
    case cfgProvider.Machine().Type().IsControlPlane():
        // use localhost endpoint for controlplane nodes
        localEndpoint, err := url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Cluster().LocalAPIServerPort()))
        if err != nil {
            return err
        }

        r.ResetRestartBackoff()
    }
}

func (ctrl *KubeletController) updateKubeletSecrets(cfgProvider talosconfig.Config, kubeletSecrets *secrets.KubeletSpec) error {
    switch {
    case cfgProvider.Machine().Features().KubePrism().Enabled():
        // use cluster endpoint for controlplane nodes with loadbalancer support
        localEndpoint, err := url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Machine().Features().KubePrism().Port()))
        if err != nil {
            return err
        }

        kubeletSecrets.Endpoint = localEndpoint
    case cfgProvider.Machine().Type().IsControlPlane():
        // use localhost endpoint for controlplane nodes
        localEndpoint, err := url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Cluster().LocalAPIServerPort()))
        if err != nil {
            return err
        }

        kubeletSecrets.Endpoint = localEndpoint
    default:
        // use cluster endpoint for workers
        kubeletSecrets.Endpoint = cfgProvider.Cluster().Endpoint()
    }

    kubeletSecrets.CA = cfgProvider.Cluster().CA()

    if kubeletSecrets.CA == nil {
        return fmt.Errorf("missing cluster.CA secret")
    }

    kubeletSecrets.BootstrapTokenID = cfgProvider.Cluster().Token().ID()
    kubeletSecrets.BootstrapTokenSecret = cfgProvider.Cluster().Token().Secret()

    return nil
}

func (ctrl *KubeletController) teardown(ctx context.Context, r controller.Runtime, types ...resource.Type) error {
    for _, resourceType := range types {
        items, err := r.List(ctx, resource.NewMetadata(secrets.NamespaceName, resourceType, "", resource.VersionUndefined))
        if err != nil {
            return err
        }

        for _, item := range items.Items {
            if err := r.Destroy(ctx, item.Metadata()); err != nil {
                return err
            }
        }
    }

    return nil

        kubeletSecrets.Endpoint = localEndpoint
    default:
        // use cluster endpoint for workers
        kubeletSecrets.Endpoint = cfgProvider.Cluster().Endpoint()
    }

    kubeletSecrets.CA = cfgProvider.Cluster().CA()

    if kubeletSecrets.CA == nil {
        return fmt.Errorf("missing cluster.CA secret")
    }

    kubeletSecrets.BootstrapTokenID = cfgProvider.Cluster().Token().ID()
    kubeletSecrets.BootstrapTokenSecret = cfgProvider.Cluster().Token().Secret()

    return nil
},
},
)
}
@ -28,7 +28,7 @@ func TestKubeletSuite(t *testing.T) {
    suite.Run(t, &KubeletSuite{
        DefaultSuite: ctest.DefaultSuite{
            AfterSetup: func(suite *ctest.DefaultSuite) {
                suite.Require().NoError(suite.Runtime().RegisterController(&secretsctrl.KubeletController{}))
                suite.Require().NoError(suite.Runtime().RegisterController(secretsctrl.NewKubeletController()))
            },
        },
    })
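The mapping function above is what makes partial machine configuration safe to handle: a multi-doc config without the v1alpha1 document has no Machine()/Cluster() sections, and mapping it to optional.None tells the transform controller to produce no output rather than dereference nil. A minimal sketch of that contract, using only the optional package already imported above (mapPartialConfig is a hypothetical helper name, not part of this commit):

func mapPartialConfig[T any](out T, cfg *config.MachineConfig) optional.Optional[T] {
    // A multi-doc machine config may omit the v1alpha1 document entirely,
    // in which case Machine()/Cluster() are nil — return None so the
    // transform controller skips this input instead of crashing.
    if cfg.Config().Machine() == nil || cfg.Config().Cluster() == nil {
        return optional.None[T]()
    }

    return optional.Some(out)
}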
@ -11,233 +11,166 @@ import (
    "net/url"

    "github.com/cosi-project/runtime/pkg/controller"
    "github.com/cosi-project/runtime/pkg/resource"
    "github.com/cosi-project/runtime/pkg/state"
    "github.com/siderolabs/go-pointer"
    "github.com/cosi-project/runtime/pkg/controller/generic"
    "github.com/cosi-project/runtime/pkg/controller/generic/transform"
    "github.com/siderolabs/gen/optional"
    "go.uber.org/zap"

    talosconfig "github.com/siderolabs/talos/pkg/machinery/config"
    "github.com/siderolabs/talos/pkg/machinery/config/machine"
    "github.com/siderolabs/talos/pkg/machinery/constants"
    "github.com/siderolabs/talos/pkg/machinery/resources/config"
    "github.com/siderolabs/talos/pkg/machinery/resources/secrets"
)
// RootController manages secrets.Root based on configuration.
type RootController struct{}

// Name implements controller.Controller interface.
func (ctrl *RootController) Name() string {
    return "secrets.RootController"
}

// Inputs implements controller.Controller interface.
func (ctrl *RootController) Inputs() []controller.Input {
    return []controller.Input{
        {
            Namespace: config.NamespaceName,
            Type:      config.MachineConfigType,
            ID:        pointer.To(config.V1Alpha1ID),
            Kind:      controller.InputWeak,
        },
        {
            Namespace: config.NamespaceName,
            Type:      config.MachineTypeType,
            ID:        pointer.To(config.MachineTypeID),
            Kind:      controller.InputWeak,
        },
    }
}

// Outputs implements controller.Controller interface.
func (ctrl *RootController) Outputs() []controller.Output {
    return []controller.Output{
        {
            Type: secrets.EtcdRootType,
            Kind: controller.OutputExclusive,
        },
        {
            Type: secrets.KubernetesRootType,
            Kind: controller.OutputExclusive,
        },
        {
            Type: secrets.OSRootType,
            Kind: controller.OutputExclusive,
        },
    }
}

// Run implements controller.Controller interface.
//
//nolint:gocyclo
func (ctrl *RootController) Run(ctx context.Context, r controller.Runtime, logger *zap.Logger) error {
    for {
        select {
        case <-ctx.Done():
            return nil
        case <-r.EventCh():
func rootMapFunc[Output generic.ResourceWithRD](output Output, requireControlPlane bool) func(cfg *config.MachineConfig) optional.Optional[Output] {
    return func(cfg *config.MachineConfig) optional.Optional[Output] {
        if cfg.Metadata().ID() != config.V1Alpha1ID {
            return optional.None[Output]()
        }

        cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
        if err != nil {
            if state.IsNotFoundError(err) {
                if err = ctrl.teardown(ctx, r, secrets.OSRootType, secrets.EtcdRootType, secrets.KubernetesRootType); err != nil {
                    return fmt.Errorf("error destroying secrets: %w", err)
        if cfg.Config().Cluster() == nil || cfg.Config().Machine() == nil {
            return optional.None[Output]()
        }

        if requireControlPlane && !cfg.Config().Machine().Type().IsControlPlane() {
            return optional.None[Output]()
        }

        return optional.Some(output)
    }
}

// RootEtcdController manages secrets.EtcdRoot based on configuration.
type RootEtcdController = transform.Controller[*config.MachineConfig, *secrets.EtcdRoot]

// NewRootEtcdController instanciates the controller.
func NewRootEtcdController() *RootEtcdController {
    return transform.NewController(
        transform.Settings[*config.MachineConfig, *secrets.EtcdRoot]{
            Name:                    "secrets.RootEtcdController",
            MapMetadataOptionalFunc: rootMapFunc(secrets.NewEtcdRoot(secrets.EtcdRootID), true),
            TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *secrets.EtcdRoot) error {
                cfgProvider := cfg.Config()
                etcdSecrets := res.TypedSpec()

                etcdSecrets.EtcdCA = cfgProvider.Cluster().Etcd().CA()

                if etcdSecrets.EtcdCA == nil {
                    return fmt.Errorf("missing cluster.etcdCA secret")
                }

                    continue
                }

                return fmt.Errorf("error getting config: %w", err)
            }

            cfgProvider := cfg.(*config.MachineConfig).Config()

            machineTypeRes, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineTypeType, config.MachineTypeID, resource.VersionUndefined))
            if err != nil {
                if state.IsNotFoundError(err) {
                    continue
                }

                return fmt.Errorf("error getting machine type: %w", err)
            }

            machineType := machineTypeRes.(*config.MachineType).MachineType()

            if err = r.Modify(ctx, secrets.NewOSRoot(secrets.OSRootID), func(r resource.Resource) error {
                return ctrl.updateOSSecrets(cfgProvider, r.(*secrets.OSRoot).TypedSpec())
            }); err != nil {
                return err
            }

            // TODO: k8s secrets (partial) should be valid for the worker nodes as well, worker node should have machine (OS) CA cert (?)
            if machineType == machine.TypeWorker {
                if err = ctrl.teardown(ctx, r, secrets.EtcdRootType, secrets.KubernetesRootType); err != nil {
                    return fmt.Errorf("error destroying secrets: %w", err)
                }

                continue
            }

            if err = r.Modify(ctx, secrets.NewEtcdRoot(secrets.EtcdRootID), func(r resource.Resource) error {
                return ctrl.updateEtcdSecrets(cfgProvider, r.(*secrets.EtcdRoot).TypedSpec())
            }); err != nil {
                return err
            }

            if err = r.Modify(ctx, secrets.NewKubernetesRoot(secrets.KubernetesRootID), func(r resource.Resource) error {
                return ctrl.updateK8sSecrets(cfgProvider, r.(*secrets.KubernetesRoot).TypedSpec())
            }); err != nil {
                return err
            }

            r.ResetRestartBackoff()
        }
    }

func (ctrl *RootController) updateOSSecrets(cfgProvider talosconfig.Config, osSecrets *secrets.OSRootSpec) error {
    osSecrets.CA = cfgProvider.Machine().Security().CA()

    osSecrets.CertSANIPs = nil
    osSecrets.CertSANDNSNames = nil

    for _, san := range cfgProvider.Machine().Security().CertSANs() {
        if ip, err := netip.ParseAddr(san); err == nil {
            osSecrets.CertSANIPs = append(osSecrets.CertSANIPs, ip)
        } else {
            osSecrets.CertSANDNSNames = append(osSecrets.CertSANDNSNames, san)
        }
    }

    if cfgProvider.Machine().Features().KubernetesTalosAPIAccess().Enabled() {
        // add Kubernetes Talos service name to the list of SANs
        osSecrets.CertSANDNSNames = append(osSecrets.CertSANDNSNames,
            constants.KubernetesTalosAPIServiceName,
            constants.KubernetesTalosAPIServiceName+"."+constants.KubernetesTalosAPIServiceNamespace,
        )
    }

    osSecrets.Token = cfgProvider.Machine().Security().Token()

    return nil
}

func (ctrl *RootController) updateEtcdSecrets(cfgProvider talosconfig.Config, etcdSecrets *secrets.EtcdRootSpec) error {
    etcdSecrets.EtcdCA = cfgProvider.Cluster().Etcd().CA()

    if etcdSecrets.EtcdCA == nil {
        return fmt.Errorf("missing cluster.etcdCA secret")
    }

    return nil
}

func (ctrl *RootController) updateK8sSecrets(cfgProvider talosconfig.Config, k8sSecrets *secrets.KubernetesRootSpec) error {
    var (
        err           error
        localEndpoint *url.URL
                return nil
            },
        },
    )
    if cfgProvider.Machine().Features().KubePrism().Enabled() {
        localEndpoint, err = url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Machine().Features().KubePrism().Port()))
        if err != nil {
            return err
        }
    } else {
        localEndpoint, err = url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Cluster().LocalAPIServerPort()))
        if err != nil {
            return err
        }
    }

    k8sSecrets.Name = cfgProvider.Cluster().Name()
    k8sSecrets.Endpoint = cfgProvider.Cluster().Endpoint()
    k8sSecrets.LocalEndpoint = localEndpoint
    k8sSecrets.CertSANs = cfgProvider.Cluster().CertSANs()
    k8sSecrets.DNSDomain = cfgProvider.Cluster().Network().DNSDomain()

    k8sSecrets.APIServerIPs, err = cfgProvider.Cluster().Network().APIServerIPs()
    if err != nil {
        return fmt.Errorf("error building API service IPs: %w", err)
    }

    k8sSecrets.AggregatorCA = cfgProvider.Cluster().AggregatorCA()

    if k8sSecrets.AggregatorCA == nil {
        return fmt.Errorf("missing cluster.aggregatorCA secret")
    }

    k8sSecrets.CA = cfgProvider.Cluster().CA()

    if k8sSecrets.CA == nil {
        return fmt.Errorf("missing cluster.CA secret")
    }

    k8sSecrets.ServiceAccount = cfgProvider.Cluster().ServiceAccount()

    k8sSecrets.AESCBCEncryptionSecret = cfgProvider.Cluster().AESCBCEncryptionSecret()
    k8sSecrets.SecretboxEncryptionSecret = cfgProvider.Cluster().SecretboxEncryptionSecret()

    k8sSecrets.BootstrapTokenID = cfgProvider.Cluster().Token().ID()
    k8sSecrets.BootstrapTokenSecret = cfgProvider.Cluster().Token().Secret()

    return nil
}

func (ctrl *RootController) teardown(ctx context.Context, r controller.Runtime, types ...resource.Type) error {
    // TODO: change this to proper teardown sequence
    for _, resourceType := range types {
        items, err := r.List(ctx, resource.NewMetadata(secrets.NamespaceName, resourceType, "", resource.VersionUndefined))
        if err != nil {
            return err
        }
// RootKubernetesController manages secrets.KubernetesRoot based on configuration.
type RootKubernetesController = transform.Controller[*config.MachineConfig, *secrets.KubernetesRoot]

        for _, item := range items.Items {
            if err := r.Destroy(ctx, item.Metadata()); err != nil {
                return err
            }
        }
    }
// NewRootKubernetesController instanciates the controller.
//
//nolint:gocyclo
func NewRootKubernetesController() *RootKubernetesController {
    return transform.NewController(
        transform.Settings[*config.MachineConfig, *secrets.KubernetesRoot]{
            Name:                    "secrets.RootKubernetesController",
            MapMetadataOptionalFunc: rootMapFunc(secrets.NewKubernetesRoot(secrets.KubernetesRootID), true),
            TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *secrets.KubernetesRoot) error {
                cfgProvider := cfg.Config()
                k8sSecrets := res.TypedSpec()

    return nil
                var (
                    err           error
                    localEndpoint *url.URL
                )

                if cfgProvider.Machine().Features().KubePrism().Enabled() {
                    localEndpoint, err = url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Machine().Features().KubePrism().Port()))
                    if err != nil {
                        return err
                    }
                } else {
                    localEndpoint, err = url.Parse(fmt.Sprintf("https://localhost:%d", cfgProvider.Cluster().LocalAPIServerPort()))
                    if err != nil {
                        return err
                    }
                }

                k8sSecrets.Name = cfgProvider.Cluster().Name()
                k8sSecrets.Endpoint = cfgProvider.Cluster().Endpoint()
                k8sSecrets.LocalEndpoint = localEndpoint
                k8sSecrets.CertSANs = cfgProvider.Cluster().CertSANs()
                k8sSecrets.DNSDomain = cfgProvider.Cluster().Network().DNSDomain()

                k8sSecrets.APIServerIPs, err = cfgProvider.Cluster().Network().APIServerIPs()
                if err != nil {
                    return fmt.Errorf("error building API service IPs: %w", err)
                }

                k8sSecrets.AggregatorCA = cfgProvider.Cluster().AggregatorCA()

                if k8sSecrets.AggregatorCA == nil {
                    return fmt.Errorf("missing cluster.aggregatorCA secret")
                }

                k8sSecrets.CA = cfgProvider.Cluster().CA()

                if k8sSecrets.CA == nil {
                    return fmt.Errorf("missing cluster.CA secret")
                }

                k8sSecrets.ServiceAccount = cfgProvider.Cluster().ServiceAccount()

                k8sSecrets.AESCBCEncryptionSecret = cfgProvider.Cluster().AESCBCEncryptionSecret()
                k8sSecrets.SecretboxEncryptionSecret = cfgProvider.Cluster().SecretboxEncryptionSecret()

                k8sSecrets.BootstrapTokenID = cfgProvider.Cluster().Token().ID()
                k8sSecrets.BootstrapTokenSecret = cfgProvider.Cluster().Token().Secret()

                return nil
            },
        },
    )
}

// RootOSController manages secrets.OSRoot based on configuration.
type RootOSController = transform.Controller[*config.MachineConfig, *secrets.OSRoot]

// NewRootOSController instanciates the controller.
func NewRootOSController() *RootOSController {
    return transform.NewController(
        transform.Settings[*config.MachineConfig, *secrets.OSRoot]{
            Name:                    "secrets.RootOSController",
            MapMetadataOptionalFunc: rootMapFunc(secrets.NewOSRoot(secrets.OSRootID), false),
            TransformFunc: func(ctx context.Context, r controller.Reader, logger *zap.Logger, cfg *config.MachineConfig, res *secrets.OSRoot) error {
                cfgProvider := cfg.Config()
                osSecrets := res.TypedSpec()

                osSecrets.CA = cfgProvider.Machine().Security().CA()

                osSecrets.CertSANIPs = nil
                osSecrets.CertSANDNSNames = nil

                for _, san := range cfgProvider.Machine().Security().CertSANs() {
                    if ip, err := netip.ParseAddr(san); err == nil {
                        osSecrets.CertSANIPs = append(osSecrets.CertSANIPs, ip)
                    } else {
                        osSecrets.CertSANDNSNames = append(osSecrets.CertSANDNSNames, san)
                    }
                }

                if cfgProvider.Machine().Features().KubernetesTalosAPIAccess().Enabled() {
                    // add Kubernetes Talos service name to the list of SANs
                    osSecrets.CertSANDNSNames = append(osSecrets.CertSANDNSNames,
                        constants.KubernetesTalosAPIServiceName,
                        constants.KubernetesTalosAPIServiceName+"."+constants.KubernetesTalosAPIServiceNamespace,
                    )
                }

                osSecrets.Token = cfgProvider.Machine().Security().Token()

                return nil
            },
        },
    )
}
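A note on the shared mapping helper above: rootMapFunc(output, requireControlPlane) is what scopes each secret root to the right machines. With requireControlPlane set to true (etcd and Kubernetes roots), a worker's config maps to optional.None and the resource is never produced; with false (OS root), every node with a complete, non-partial machine config gets the output. A hedged usage sketch, using only names from the code above:

// etcd/Kubernetes roots exist only on control plane nodes;
// the OS root exists on every node with a full (non-partial) config.
rootMapFunc(secrets.NewEtcdRoot(secrets.EtcdRootID), true)
rootMapFunc(secrets.NewKubernetesRoot(secrets.KubernetesRootID), true)
rootMapFunc(secrets.NewOSRoot(secrets.OSRootID), false)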
97
internal/app/machined/pkg/controllers/secrets/root_test.go
Normal file
@ -0,0 +1,97 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//nolint:dupl
package secrets_test

import (
    "testing"
    "time"

    "github.com/cosi-project/runtime/pkg/resource"
    "github.com/cosi-project/runtime/pkg/resource/rtestutils"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"

    "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/ctest"
    secretsctrl "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/secrets"
    talosconfig "github.com/siderolabs/talos/pkg/machinery/config"
    "github.com/siderolabs/talos/pkg/machinery/config/generate"
    "github.com/siderolabs/talos/pkg/machinery/config/machine"
    "github.com/siderolabs/talos/pkg/machinery/resources/config"
    "github.com/siderolabs/talos/pkg/machinery/resources/secrets"
)

func TestRootSuite(t *testing.T) {
    t.Parallel()

    suite.Run(t, &RootSuite{
        DefaultSuite: ctest.DefaultSuite{
            Timeout: 5 * time.Second,
            AfterSetup: func(suite *ctest.DefaultSuite) {
                suite.Require().NoError(suite.Runtime().RegisterController(secretsctrl.NewRootEtcdController()))
                suite.Require().NoError(suite.Runtime().RegisterController(secretsctrl.NewRootKubernetesController()))
                suite.Require().NoError(suite.Runtime().RegisterController(secretsctrl.NewRootOSController()))
            },
        },
    })
}

type RootSuite struct {
    ctest.DefaultSuite
}

func (suite *RootSuite) genConfig(controlplane bool) talosconfig.Config {
    input, err := generate.NewInput("test-cluster", "http://localhost:6443", "")
    suite.Require().NoError(err)

    var cfg talosconfig.Provider

    if controlplane {
        cfg, err = input.Config(machine.TypeControlPlane)
    } else {
        cfg, err = input.Config(machine.TypeWorker)
    }

    suite.Require().NoError(err)

    machineCfg := config.NewMachineConfig(cfg)
    suite.Require().NoError(suite.State().Create(suite.Ctx(), machineCfg))

    return cfg
}

func (suite *RootSuite) TestReconcileControlPlane() {
    cfg := suite.genConfig(true)

    rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{secrets.EtcdRootID},
        func(res *secrets.EtcdRoot, asrt *assert.Assertions) {
            asrt.Equal(res.TypedSpec().EtcdCA, cfg.Cluster().Etcd().CA())
        },
    )
    rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{secrets.KubernetesRootID},
        func(res *secrets.KubernetesRoot, asrt *assert.Assertions) {
            asrt.Equal(res.TypedSpec().CA, cfg.Cluster().CA())
        },
    )

    rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{secrets.OSRootID},
        func(res *secrets.OSRoot, asrt *assert.Assertions) {
            asrt.Equal(res.TypedSpec().CA, cfg.Machine().Security().CA())
        },
    )
}

func (suite *RootSuite) TestReconcileWorker() {
    cfg := suite.genConfig(false)

    rtestutils.AssertResources(suite.Ctx(), suite.T(), suite.State(), []resource.ID{secrets.OSRootID},
        func(res *secrets.OSRoot, asrt *assert.Assertions) {
            asrt.Equal(res.TypedSpec().CA, cfg.Machine().Security().CA())
        },
    )

    rtestutils.AssertNoResource[*secrets.Etcd](suite.Ctx(), suite.T(), suite.State(), secrets.EtcdRootID)
    rtestutils.AssertNoResource[*secrets.Kubernetes](suite.Ctx(), suite.T(), suite.State(), secrets.KubernetesRootID)
}
@ -27,6 +27,8 @@ import (
)

func TestTrustdSuite(t *testing.T) {
    t.Parallel()

    suite.Run(t, &TrustdSuite{
        DefaultSuite: ctest.DefaultSuite{
            AfterSetup: func(suite *ctest.DefaultSuite) {
@ -1,69 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package secrets

import (
    "context"
    "time"

    "github.com/cosi-project/runtime/pkg/controller"
    "golang.org/x/time/rate"
)

// RateLimitEvents to reduce the rate of reconcile events.
//
// RateLimitEvents makes sure that reconcile events are not coming faster than interval.
// Any reconcile events which come during the waiting delay are coalesced with the original events.
//
//nolint:gocyclo
func RateLimitEvents(ctx context.Context, in <-chan controller.ReconcileEvent, interval time.Duration) <-chan controller.ReconcileEvent {
    limiter := rate.NewLimiter(rate.Every(interval), 1)
    ch := make(chan controller.ReconcileEvent)

    go func() {
        for {
            var event controller.ReconcileEvent

            // wait for an actual reconcile event
            select {
            case <-ctx.Done():
                return
            case event = <-in:
            }

            // figure out if the event can be delivered immediately
            reservation := limiter.Reserve()
            delay := reservation.Delay()

            if delay != 0 {
                timer := time.NewTimer(delay)
                defer timer.Stop()

            WAIT:
                for {
                    select {
                    case <-ctx.Done():
                        reservation.Cancel()

                        return
                    case <-in:
                        // coalesce extra events while waiting
                    case <-timer.C:
                        break WAIT
                    }
                }
            }

            // deliver rate-limited coalesced event
            select {
            case <-ctx.Done():
                return
            case ch <- event:
            }
        }
    }()

    return ch
}
@ -1,45 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package secrets_test

import (
    "context"
    "testing"
    "time"

    "github.com/cosi-project/runtime/pkg/controller"
    "github.com/stretchr/testify/assert"

    "github.com/siderolabs/talos/internal/app/machined/pkg/controllers/secrets"
)

func TestRateLimitEvents(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    inCh := make(chan controller.ReconcileEvent)
    outCh := secrets.RateLimitEvents(ctx, inCh, time.Second)

    inputs := 0
    outputs := 0

    timer := time.NewTimer(3 * time.Second)
    defer timer.Stop()

LOOP:
    for {
        select {
        case <-timer.C:
            break LOOP
        case <-outCh:
            outputs++
        case inCh <- controller.ReconcileEvent{}:
            inputs++
        }
    }

    assert.Less(t, outputs, 5)
    assert.Greater(t, inputs, 15)
}
@ -66,30 +66,22 @@ func (ctrl *ConfigController) Run(ctx context.Context, r controller.Runtime, _ *
            return err
        }

        if err := ctrl.updateConfig(ctx, r, cfg); err != nil {
            return fmt.Errorf("failed to update config: %w", err)
        r.StartTrackingOutputs()

        if endpoint := ctrl.apiEndpoint(cfg); endpoint != "" {
            if err = safe.WriterModify(ctx, r, siderolink.NewConfig(config.NamespaceName, siderolink.ConfigID), func(c *siderolink.Config) error {
                c.TypedSpec().APIEndpoint = endpoint

                return nil
            }); err != nil {
                return fmt.Errorf("failed to update config: %w", err)
            }
        }
    }
}

func (ctrl *ConfigController) updateConfig(ctx context.Context, r controller.Runtime, machineConfig *config.MachineConfig) error {
    cfg := siderolink.NewConfig(config.NamespaceName, siderolink.ConfigID)

    endpoint := ctrl.apiEndpoint(machineConfig)
    if endpoint == "" {
        err := r.Destroy(ctx, cfg.Metadata())
        if err != nil && !state.IsNotFoundError(err) {
        if err = safe.CleanupOutputs[*siderolink.Config](ctx, r); err != nil {
            return err
        }

        return nil
    }

    return safe.WriterModify(ctx, r, cfg, func(c *siderolink.Config) error {
        c.TypedSpec().APIEndpoint = endpoint

        return nil
    })
}

func (ctrl *ConfigController) apiEndpoint(machineConfig *config.MachineConfig) string {
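The hunk above swaps the hand-written destroy-on-empty logic for the integrated output tracker from cosi-project/runtime v0.3.1-beta.0. A hedged sketch of the pattern as used here, condensed from the added lines above (not a complete controller):

// Outputs written between StartTrackingOutputs and CleanupOutputs are kept;
// anything the controller did not re-write in this pass is destroyed for us,
// which previously required the explicit r.Destroy branch in updateConfig.
r.StartTrackingOutputs()

if endpoint := ctrl.apiEndpoint(cfg); endpoint != "" {
    if err := safe.WriterModify(ctx, r, siderolink.NewConfig(config.NamespaceName, siderolink.ConfigID), func(c *siderolink.Config) error {
        c.TypedSpec().APIEndpoint = endpoint

        return nil
    }); err != nil {
        return fmt.Errorf("failed to update config: %w", err)
    }
}

if err := safe.CleanupOutputs[*siderolink.Config](ctx, r); err != nil {
    return err
}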
@ -12,6 +12,7 @@ import (

    "github.com/cosi-project/runtime/pkg/controller"
    "github.com/cosi-project/runtime/pkg/resource"
    "github.com/cosi-project/runtime/pkg/safe"
    "github.com/cosi-project/runtime/pkg/state"
    "github.com/siderolabs/go-pointer"
    "go.uber.org/zap"
@ -143,7 +144,7 @@ func (ctrl *SyncController) Run(ctx context.Context, r controller.Runtime, logge

    timeServers := timeServersStatus.(*network.TimeServerStatus).TypedSpec().NTPServers

    cfg, err := r.Get(ctx, resource.NewMetadata(config.NamespaceName, config.MachineConfigType, config.V1Alpha1ID, resource.VersionUndefined))
    cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
    if err != nil {
        if !state.IsNotFoundError(err) {
            return fmt.Errorf("error getting config: %w", err)
@ -158,12 +159,12 @@ func (ctrl *SyncController) Run(ctx context.Context, r controller.Runtime, logge
        syncDisabled = true
    }

    if cfg != nil && cfg.(*config.MachineConfig).Config().Machine().Time().Disabled() {
        syncDisabled = true
    }
    if cfg != nil && cfg.Config().Machine() != nil {
        if cfg.Config().Machine().Time().Disabled() {
            syncDisabled = true
        }

    if cfg != nil {
        syncTimeout = cfg.(*config.MachineConfig).Config().Machine().Time().BootTimeout()
        syncTimeout = cfg.Config().Machine().Time().BootTimeout()
    }

    if !timeSynced {
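The same partial-config guard shows up here: the typed safe.ReaderGetByID accessor removes the cfg.(*config.MachineConfig) assertions, and Machine() must be nil-checked before use because a multi-doc config may carry no v1alpha1 machine section. A condensed, hedged sketch of the resulting flow (same variables as the hunk above; syncDisabled/syncTimeout assumed already declared):

cfg, err := safe.ReaderGetByID[*config.MachineConfig](ctx, r, config.V1Alpha1ID)
if err != nil && !state.IsNotFoundError(err) {
    return fmt.Errorf("error getting config: %w", err)
}

if cfg != nil && cfg.Config().Machine() != nil {
    if cfg.Config().Machine().Time().Disabled() {
        syncDisabled = true
    }

    syncTimeout = cfg.Config().Machine().Time().BootTimeout()
}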
@ -40,7 +40,7 @@ import (

    runtimelogging "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/logging"
    "github.com/siderolabs/talos/internal/app/machined/pkg/system"
    "github.com/siderolabs/talos/pkg/logging"
    talosconfig "github.com/siderolabs/talos/pkg/machinery/config"
    talosconfig "github.com/siderolabs/talos/pkg/machinery/config/config"
    "github.com/siderolabs/talos/pkg/machinery/constants"
    configresource "github.com/siderolabs/talos/pkg/machinery/resources/config"
)
@ -86,10 +86,10 @@ func (ctrl *Controller) Run(ctx context.Context, drainer *runtime.Drainer) error

    for _, c := range []controller.Controller{
        &cluster.AffiliateMergeController{},
        &cluster.ConfigController{},
        cluster.NewConfigController(),
        &cluster.DiscoveryServiceController{},
        &cluster.EndpointController{},
        &cluster.InfoController{},
        cluster.NewInfoController(),
        &cluster.KubernetesPullController{},
        &cluster.KubernetesPushController{},
        &cluster.LocalAffiliateController{},
@ -116,7 +116,7 @@ func (ctrl *Controller) Run(ctx context.Context, drainer *runtime.Drainer) error
            SeccompProfilesDirectory: constants.SeccompProfilesDirectory,
        },
        &etcd.AdvertisedPeerController{},
        &etcd.ConfigController{},
        etcd.NewConfigController(),
        &etcd.PKIController{},
        &etcd.SpecController{},
        &etcd.MemberController{},
@ -130,11 +130,17 @@ func (ctrl *Controller) Run(ctx context.Context, drainer *runtime.Drainer) error
            V1Alpha1Mode: ctrl.v1alpha1Runtime.State().Platform().Mode(),
        },
        &k8s.AddressFilterController{},
        &k8s.ControlPlaneController{},
        k8s.NewControlPlaneAPIServerController(),
        k8s.NewControlPlaneAdmissionControlController(),
        k8s.NewControlPlaneAuditPolicyController(),
        k8s.NewControlPlaneBootstrapManifestsController(),
        k8s.NewControlPlaneControllerManagerController(),
        k8s.NewControlPlaneExtraManifestsController(),
        k8s.NewControlPlaneSchedulerController(),
        &k8s.ControlPlaneStaticPodController{},
        &k8s.EndpointController{},
        &k8s.ExtraManifestController{},
        &k8s.KubeletConfigController{},
        k8s.NewKubeletConfigController(),
        &k8s.KubeletServiceController{
            V1Alpha1Services: system.Services(ctrl.v1alpha1Runtime),
            V1Alpha1Mode:     ctrl.v1alpha1Runtime.State().Platform().Mode(),
@ -143,12 +149,12 @@ func (ctrl *Controller) Run(ctx context.Context, drainer *runtime.Drainer) error
            V1Alpha1Mode: ctrl.v1alpha1Runtime.State().Platform().Mode(),
        },
        &k8s.KubeletStaticPodController{},
        &k8s.KubePrismEndpointsController{},
        &k8s.KubePrismConfigController{},
        k8s.NewKubePrismEndpointsController(),
        k8s.NewKubePrismConfigController(),
        &k8s.KubePrismController{},
        &k8s.ManifestApplyController{},
        &k8s.ManifestController{},
        &k8s.NodeIPConfigController{},
        k8s.NewNodeIPConfigController(),
        &k8s.NodeIPController{},
        &k8s.NodeApplyController{},
        &k8s.NodeCordonedSpecController{},
@ -161,10 +167,10 @@ func (ctrl *Controller) Run(ctx context.Context, drainer *runtime.Drainer) error
        &k8s.StaticEndpointController{},
        &k8s.StaticPodConfigController{},
        &k8s.StaticPodServerController{},
        &kubeaccess.ConfigController{},
        kubeaccess.NewConfigController(),
        &kubeaccess.CRDController{},
        &kubeaccess.EndpointController{},
        &kubespan.ConfigController{},
        kubespan.NewConfigController(),
        &kubespan.EndpointController{},
        &kubespan.IdentityController{},
        &kubespan.ManagerController{},
@ -275,14 +281,16 @@ func (ctrl *Controller) Run(ctx context.Context, drainer *runtime.Drainer) error
        &secrets.APICertSANsController{},
        &secrets.APIController{},
        &secrets.EtcdController{},
        &secrets.KubeletController{},
        secrets.NewKubeletController(),
        &secrets.KubernetesCertSANsController{},
        &secrets.KubernetesDynamicCertsController{},
        &secrets.KubernetesController{},
        &secrets.MaintenanceController{},
        &secrets.MaintenanceCertSANsController{},
        &secrets.MaintenanceRootController{},
        &secrets.RootController{},
        secrets.NewRootEtcdController(),
        secrets.NewRootKubernetesController(),
        secrets.NewRootOSController(),
        &secrets.TrustdController{},
        &siderolink.ConfigController{
            Cmdline: procfs.ProcCmdline(),
@ -337,14 +345,19 @@ func (ctrl *Controller) watchMachineConfig(ctx context.Context) {
            return
        }

        ctrl.updateConsoleLoggingConfig(cfg)
        ctrl.updateLoggingConfig(ctx, cfg, &loggingEndpoints)
        ctrl.updateConsoleLoggingConfig(cfg.Debug())

        if cfg.Machine() == nil {
            ctrl.updateLoggingConfig(ctx, nil, &loggingEndpoints)
        } else {
            ctrl.updateLoggingConfig(ctx, cfg.Machine().Logging().Destinations(), &loggingEndpoints)
        }
    }
}

func (ctrl *Controller) updateConsoleLoggingConfig(cfg talosconfig.Config) {
func (ctrl *Controller) updateConsoleLoggingConfig(debug bool) {
    newLogLevel := zapcore.InfoLevel
    if cfg.Debug() {
    if debug {
        newLogLevel = zapcore.DebugLevel
    }

@ -354,8 +367,7 @@ func (ctrl *Controller) updateConsoleLoggingConfig(cfg talosconfig.Config) {
    }
}

func (ctrl *Controller) updateLoggingConfig(ctx context.Context, cfg talosconfig.Config, prevLoggingEndpoints *[]*url.URL) {
    dests := cfg.Machine().Logging().Destinations()
func (ctrl *Controller) updateLoggingConfig(ctx context.Context, dests []talosconfig.LoggingDestination, prevLoggingEndpoints *[]*url.URL) {
    loggingEndpoints := make([]*url.URL, len(dests))

    for i, dest := range dests {
@ -13,6 +13,7 @@ import (
    "path/filepath"
    "time"

    "github.com/siderolabs/go-debug"
    "google.golang.org/grpc"

    v1alpha1server "github.com/siderolabs/talos/internal/app/machined/internal/server/v1alpha1"
@ -109,8 +110,11 @@ type machinedService struct {
// Main is an entrypoint to the API service.
func (s *machinedService) Main(ctx context.Context, r runtime.Runtime, logWriter io.Writer) error {
    injector := &authz.Injector{
        Mode:   authz.MetadataOnly,
        Logger: log.New(logWriter, "machined/authz/injector ", log.Flags()).Printf,
        Mode: authz.MetadataOnly,
    }

    if debug.Enabled {
        injector.Logger = log.New(logWriter, "machined/authz/injector ", log.Flags()).Printf
    }

    authorizer := &authz.Authorizer{
@ -34,7 +34,7 @@ func GetCmdline(cmdline *procfs.Cmdline, cfg config.Config) []string {
        result = append(result, *val)
    }

    if cfg != nil {
    if cfg != nil && cfg.Machine() != nil {
        for k, v := range cfg.Machine().Env() {
            result = append(result, k+"="+v)
        }
@ -8,7 +8,7 @@ replace gopkg.in/yaml.v3 => github.com/unix4ever/yaml v0.0.0-20220527175918-f17b

require (
    github.com/containerd/go-cni v1.1.9
    github.com/cosi-project/runtime v0.3.1-alpha.8
    github.com/cosi-project/runtime v0.3.1-beta.0
    github.com/dustin/go-humanize v1.0.1
    github.com/evanphx/json-patch v5.6.0+incompatible
    github.com/ghodss/yaml v1.0.0

@ -19,8 +19,8 @@ github.com/containerd/go-cni v1.1.9 h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9
github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/cosi-project/runtime v0.3.1-alpha.8 h1:7OTWghF4Og3Uixwzuw2RWGRO+UjuPFFlRLiMgNeqNl8=
github.com/cosi-project/runtime v0.3.1-alpha.8/go.mod h1:n6rQ/b9GkrniSslnrFId6dzWDG+htbcC9fW+f3f1K94=
github.com/cosi-project/runtime v0.3.1-beta.0 h1:LKiflYmX6/dEX84geYPOo/ApPb+4ikr7Tok8X83HelY=
github.com/cosi-project/runtime v0.3.1-beta.0/go.mod h1:n6rQ/b9GkrniSslnrFId6dzWDG+htbcC9fW+f3f1K94=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=