refactor: extract Talos cluster provisioner as common code
This extracts the Docker Talos cluster provisioner as common code that can be shared between `osctl cluster` and the integration tests. There should be almost no functional changes.

As a proof of concept, abstract cluster readiness checks were implemented on top of the provisioned cluster state. They perform the same checks as `basic-integration.sh`, in pure Go, via the Talos and Kubernetes clients. The `conditions` package was promoted from machined-internal code to `internal/pkg`, as it is used to run the checks.

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
Commit: 0081ac5fac (parent: f1a7f86703)
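For orientation, here is a minimal sketch (not part of the commit) of how the new pieces fit together — roughly what `osctl cluster create --wait` does after this change. It only uses APIs introduced in this diff (`docker.NewProvisioner`, `provision.ClusterRequest`, `access.NewAdapter`, `check.Wait`, `check.DefaultClusterChecks`, `check.StderrReporter`); the cluster name, CIDR, MTU and image reference are illustrative assumptions taken from the command's flag defaults or invented.

```go
// Sketch only: how the new provisioner and readiness checks are wired together.
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/talos-systems/talos/internal/pkg/provision"
	"github.com/talos-systems/talos/internal/pkg/provision/access"
	"github.com/talos-systems/talos/internal/pkg/provision/check"
	"github.com/talos-systems/talos/internal/pkg/provision/providers/docker"
	"github.com/talos-systems/talos/pkg/constants"
)

func createAndWait(ctx context.Context) error {
	provisioner, err := docker.NewProvisioner(ctx)
	if err != nil {
		return err
	}
	defer provisioner.Close() //nolint: errcheck

	_, cidr, err := net.ParseCIDR("10.5.0.0/24") // assumed CIDR, matches the --cidr flag default
	if err != nil {
		return err
	}

	request := provision.ClusterRequest{
		Name: "talos-default",
		Network: provision.NetworkRequest{
			Name: "talos-default",
			CIDR: *cidr,
			MTU:  1500,
		},
		Image:             "docker.io/autonomy/talos:latest", // hypothetical image reference
		KubernetesVersion: constants.DefaultKubernetesVersion,
	}

	// request.Nodes would be filled with provision.NodeRequest entries
	// (init, control-plane and join nodes) exactly as the command does below.

	cluster, err := provisioner.Create(ctx, request)
	if err != nil {
		return err
	}

	// Readiness checks ported from basic-integration.sh, run via Talos/K8s clients.
	checkCtx, checkCtxCancel := context.WithTimeout(ctx, 20*time.Minute)
	defer checkCtxCancel()

	clusterAccess := access.NewAdapter(cluster)
	defer clusterAccess.Close() //nolint: errcheck

	return check.Wait(checkCtx, clusterAccess, check.DefaultClusterChecks(), check.StderrReporter())
}

func main() {
	if err := createAndWait(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```

In the command itself the readiness checks only run when `--wait` is passed; without the flag the function returns right after `saveConfig`.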
@ -6,26 +6,20 @@ package cmd
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"net"
"sync"
"time"

"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/hashicorp/go-multierror"
"github.com/spf13/cobra"

"github.com/talos-systems/talos/cmd/osctl/cmd/cluster/pkg/node"
"github.com/talos-systems/talos/cmd/osctl/pkg/client/config"
"github.com/talos-systems/talos/cmd/osctl/pkg/helpers"
"github.com/talos-systems/talos/internal/pkg/provision"
"github.com/talos-systems/talos/internal/pkg/provision/access"
"github.com/talos-systems/talos/internal/pkg/provision/check"
"github.com/talos-systems/talos/internal/pkg/provision/providers/docker"
"github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
"github.com/talos-systems/talos/pkg/constants"
talosnet "github.com/talos-systems/talos/pkg/net"
@ -35,11 +29,12 @@ var (
|
||||
clusterName string
|
||||
nodeImage string
|
||||
networkCIDR string
|
||||
networkMTU string
|
||||
networkMTU int
|
||||
workers int
|
||||
masters int
|
||||
clusterCpus string
|
||||
clusterMemory int
|
||||
clusterWait bool
|
||||
)
|
||||
|
||||
// clusterCmd represents the cluster command
|
||||
@ -56,7 +51,7 @@ var clusterUpCmd = &cobra.Command{
|
||||
Long: ``,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return create()
|
||||
return helpers.WithCLIContext(context.Background(), create)
|
||||
},
|
||||
}
|
||||
|
||||
@ -67,19 +62,12 @@ var clusterDownCmd = &cobra.Command{
|
||||
Long: ``,
|
||||
Args: cobra.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return destroy()
|
||||
return helpers.WithCLIContext(context.Background(), destroy)
|
||||
},
|
||||
}
|
||||
|
||||
// nolint: gocyclo
|
||||
func create() (err error) {
|
||||
ctx := context.Background()
|
||||
|
||||
cli, err := client.NewEnvClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//nolint: gocyclo
|
||||
func create(ctx context.Context) (err error) {
|
||||
if masters < 1 {
|
||||
return fmt.Errorf("number of masters can't be less than 1")
|
||||
}
|
||||
@ -91,12 +79,6 @@ func create() (err error) {
|
||||
|
||||
memory := int64(clusterMemory) * 1024 * 1024
|
||||
|
||||
// Ensure the image is present.
|
||||
|
||||
if err = ensureImageExists(ctx, cli, nodeImage); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate CIDR range and allocate IPs
|
||||
fmt.Println("validating CIDR and reserving master IPs")
|
||||
|
||||
@ -118,238 +100,97 @@ func create() (err error) {
|
||||
ips[i] = masterIP.String()
|
||||
}
|
||||
|
||||
// Generate all PKI and tokens required by Talos.
|
||||
fmt.Println("generating PKI and tokens")
|
||||
|
||||
input, err := generate.NewInput(clusterName, "https://"+ips[0]+":6443", kubernetesVersion)
|
||||
provisioner, err := docker.NewProvisioner(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Setup the network.
|
||||
defer provisioner.Close() //nolint: errcheck
|
||||
|
||||
fmt.Println("creating network", clusterName)
|
||||
request := provision.ClusterRequest{
|
||||
Name: clusterName,
|
||||
|
||||
if _, err = createNetwork(cli); err != nil {
|
||||
return fmt.Errorf("a cluster might already exist, run \"osctl cluster destroy\" to permanently delete the existing cluster, and try again: %w", err)
|
||||
Network: provision.NetworkRequest{
|
||||
Name: clusterName,
|
||||
CIDR: *cidr,
|
||||
MTU: networkMTU,
|
||||
},
|
||||
|
||||
Image: nodeImage,
|
||||
KubernetesVersion: kubernetesVersion,
|
||||
}
|
||||
|
||||
// Create the master nodes.
|
||||
|
||||
requests := make([]*node.Request, masters)
|
||||
for i := range requests {
|
||||
requests[i] = &node.Request{
|
||||
Input: *input,
|
||||
Image: nodeImage,
|
||||
Name: fmt.Sprintf("%s-master-%d", clusterName, i+1),
|
||||
IP: net.ParseIP(ips[i]),
|
||||
Memory: memory,
|
||||
NanoCPUs: nanoCPUs,
|
||||
}
|
||||
|
||||
for i := 0; i < masters; i++ {
|
||||
var typ generate.Type
|
||||
if i == 0 {
|
||||
requests[i].Type = generate.TypeInit
|
||||
typ = generate.TypeInit
|
||||
} else {
|
||||
requests[i].Type = generate.TypeControlPlane
|
||||
typ = generate.TypeControlPlane
|
||||
}
|
||||
|
||||
request.Nodes = append(request.Nodes,
|
||||
provision.NodeRequest{
|
||||
Type: typ,
|
||||
Name: fmt.Sprintf("%s-master-%d", clusterName, i+1),
|
||||
IP: net.ParseIP(ips[i]),
|
||||
Memory: memory,
|
||||
NanoCPUs: nanoCPUs,
|
||||
})
|
||||
}
|
||||
|
||||
if err := createNodes(requests); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the worker nodes.
|
||||
|
||||
requests = []*node.Request{}
|
||||
|
||||
for i := 1; i <= workers; i++ {
|
||||
r := &node.Request{
|
||||
Type: generate.TypeJoin,
|
||||
Input: *input,
|
||||
Image: nodeImage,
|
||||
Name: fmt.Sprintf("%s-worker-%d", clusterName, i),
|
||||
Memory: memory,
|
||||
NanoCPUs: nanoCPUs,
|
||||
}
|
||||
requests = append(requests, r)
|
||||
request.Nodes = append(request.Nodes,
|
||||
provision.NodeRequest{
|
||||
Type: generate.TypeJoin,
|
||||
Name: fmt.Sprintf("%s-worker-%d", clusterName, i),
|
||||
Memory: memory,
|
||||
NanoCPUs: nanoCPUs,
|
||||
})
|
||||
}
|
||||
|
||||
if err := createNodes(requests); err != nil {
|
||||
cluster, err := provisioner.Create(ctx, request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create and save the osctl configuration file.
|
||||
|
||||
return saveConfig(input)
|
||||
}
|
||||
|
||||
// nolint: gocyclo
|
||||
func createNodes(requests []*node.Request) (err error) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
wg.Add(len(requests))
|
||||
|
||||
for _, req := range requests {
|
||||
go func(req *node.Request) {
|
||||
fmt.Println("creating node", req.Name)
|
||||
|
||||
if err = node.NewNode(clusterName, req); err != nil {
|
||||
helpers.Fatalf("failed to create node: %w", err)
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}(req)
|
||||
if err = saveConfig(cluster); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
if !clusterWait {
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
// Run cluster readiness checks
|
||||
checkCtx, checkCtxCancel := context.WithTimeout(ctx, 20*time.Minute)
|
||||
defer checkCtxCancel()
|
||||
|
||||
clusterAccess := access.NewAdapter(cluster)
|
||||
defer clusterAccess.Close() //nolint: errcheck
|
||||
|
||||
return check.Wait(checkCtx, clusterAccess, check.DefaultClusterChecks(), check.StderrReporter())
|
||||
}
|
||||
|
||||
func destroy() error {
|
||||
cli, err := client.NewEnvClient()
|
||||
func destroy(ctx context.Context) error {
|
||||
provisioner, err := docker.NewProvisioner(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "talos.owned=true")
|
||||
filters.Add("label", "talos.cluster.name="+clusterName)
|
||||
defer provisioner.Close() //nolint: errcheck
|
||||
|
||||
containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true, Filters: filters})
|
||||
cluster, err := provisioner.(provision.ClusterNameReflector).Reflect(ctx, clusterName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
wg.Add(len(containers))
|
||||
|
||||
for _, container := range containers {
|
||||
go func(container types.Container) {
|
||||
fmt.Println("destroying node", container.Names[0][1:])
|
||||
|
||||
err := cli.ContainerRemove(context.Background(), container.ID, types.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
|
||||
if err != nil {
|
||||
helpers.Fatalf("%+v", err)
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}(container)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
fmt.Println("destroying network", clusterName)
|
||||
|
||||
return destroyNetwork(cli)
|
||||
return provisioner.Destroy(ctx, cluster)
|
||||
}
|
||||
|
||||
func ensureImageExists(ctx context.Context, cli *client.Client, image string) error {
|
||||
// In order to pull an image, the reference must be in canonical
|
||||
// format (e.g. domain/repo/image:tag).
|
||||
ref, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
image = ref.String()
|
||||
|
||||
// To filter the images, we need a familiar name and a tag
|
||||
// (e.g. domain/repo/image:tag => repo/image:tag).
|
||||
familiarName := reference.FamiliarName(ref)
|
||||
tag := ""
|
||||
|
||||
if tagged, isTagged := ref.(reference.Tagged); isTagged {
|
||||
tag = tagged.Tag()
|
||||
}
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("reference", familiarName+":"+tag)
|
||||
|
||||
images, err := cli.ImageList(ctx, types.ImageListOptions{Filters: filters})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(images) == 0 {
|
||||
fmt.Println("downloading", image)
|
||||
|
||||
var reader io.ReadCloser
|
||||
|
||||
if reader, err = cli.ImagePull(ctx, image, types.ImagePullOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// nolint: errcheck
|
||||
defer reader.Close()
|
||||
|
||||
if _, err = io.Copy(ioutil.Discard, reader); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createNetwork(cli *client.Client) (types.NetworkCreateResponse, error) {
|
||||
options := types.NetworkCreate{
|
||||
Labels: map[string]string{
|
||||
"talos.owned": "true",
|
||||
"talos.cluster.name": clusterName,
|
||||
},
|
||||
IPAM: &network.IPAM{
|
||||
Config: []network.IPAMConfig{
|
||||
{
|
||||
Subnet: networkCIDR,
|
||||
},
|
||||
},
|
||||
},
|
||||
Options: map[string]string{
|
||||
"com.docker.network.driver.mtu": networkMTU,
|
||||
},
|
||||
}
|
||||
|
||||
return cli.NetworkCreate(context.Background(), clusterName, options)
|
||||
}
|
||||
|
||||
func destroyNetwork(cli *client.Client) error {
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("label", "talos.owned=true")
|
||||
filters.Add("label", "talos.cluster.name="+clusterName)
|
||||
|
||||
options := types.NetworkListOptions{
|
||||
Filters: filters,
|
||||
}
|
||||
|
||||
networks, err := cli.NetworkList(context.Background(), options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var result *multierror.Error
|
||||
|
||||
for _, network := range networks {
|
||||
if err := cli.NetworkRemove(context.Background(), network.ID); err != nil {
|
||||
result = multierror.Append(result, err)
|
||||
}
|
||||
}
|
||||
|
||||
return result.ErrorOrNil()
|
||||
}
|
||||
|
||||
func saveConfig(input *generate.Input) (err error) {
|
||||
newConfig := &config.Config{
|
||||
Context: input.ClusterName,
|
||||
Contexts: map[string]*config.Context{
|
||||
input.ClusterName: {
|
||||
Endpoints: []string{"127.0.0.1"},
|
||||
CA: base64.StdEncoding.EncodeToString(input.Certs.OS.Crt),
|
||||
Crt: base64.StdEncoding.EncodeToString(input.Certs.Admin.Crt),
|
||||
Key: base64.StdEncoding.EncodeToString(input.Certs.Admin.Key),
|
||||
},
|
||||
},
|
||||
}
|
||||
func saveConfig(cluster provision.Cluster) (err error) {
|
||||
newConfig := cluster.TalosConfig()
|
||||
|
||||
c, err := config.Open(talosconfig)
|
||||
if err != nil {
|
||||
@ -360,9 +201,9 @@ func saveConfig(input *generate.Input) (err error) {
|
||||
c.Contexts = map[string]*config.Context{}
|
||||
}
|
||||
|
||||
c.Contexts[input.ClusterName] = newConfig.Contexts[input.ClusterName]
|
||||
c.Contexts[cluster.Info().ClusterName] = newConfig.Contexts[cluster.Info().ClusterName]
|
||||
|
||||
c.Context = input.ClusterName
|
||||
c.Context = cluster.Info().ClusterName
|
||||
|
||||
return c.Save(talosconfig)
|
||||
}
|
||||
@ -383,12 +224,13 @@ func parseCPUShare() (int64, error) {
|
||||
|
||||
func init() {
|
||||
clusterUpCmd.Flags().StringVar(&nodeImage, "image", defaultImage(constants.DefaultTalosImageRepository), "the image to use")
|
||||
clusterUpCmd.Flags().StringVar(&networkMTU, "mtu", "1500", "MTU of the docker bridge network")
|
||||
clusterUpCmd.Flags().IntVar(&networkMTU, "mtu", 1500, "MTU of the docker bridge network")
|
||||
clusterUpCmd.Flags().StringVar(&networkCIDR, "cidr", "10.5.0.0/24", "CIDR of the docker bridge network")
|
||||
clusterUpCmd.Flags().IntVar(&workers, "workers", 1, "the number of workers to create")
|
||||
clusterUpCmd.Flags().IntVar(&masters, "masters", 1, "the number of masters to create")
|
||||
clusterUpCmd.Flags().StringVar(&clusterCpus, "cpus", "1.5", "the share of CPUs as fraction (each container)")
|
||||
clusterUpCmd.Flags().IntVar(&clusterMemory, "memory", 1024, "the limit on memory usage in MB (each container)")
|
||||
clusterUpCmd.Flags().BoolVar(&clusterWait, "wait", false, "wait for the cluster to be ready before returning")
|
||||
clusterUpCmd.Flags().StringVar(&kubernetesVersion, "kubernetes-version", constants.DefaultKubernetesVersion, "desired kubernetes version to run")
|
||||
clusterCmd.PersistentFlags().StringVar(&clusterName, "name", "talos-default", "the name of the cluster")
|
||||
clusterCmd.AddCommand(clusterUpCmd)
|
||||
|
@ -1,153 +0,0 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/go-connections/nat"
|
||||
|
||||
"github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
||||
// Request represents the set of options available for configuring a node.
|
||||
type Request struct {
|
||||
Type generate.Type
|
||||
Input generate.Input
|
||||
Image string
|
||||
Name string
|
||||
IP net.IP
|
||||
|
||||
// Share of CPUs, in 1e-9 fractions
|
||||
NanoCPUs int64
|
||||
// Memory limit in bytes
|
||||
Memory int64
|
||||
}
|
||||
|
||||
// NewNode creates a node as a container.
|
||||
func NewNode(clusterName string, req *Request) (err error) {
|
||||
data, err := generate.Config(req.Type, &req.Input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b64data := base64.StdEncoding.EncodeToString([]byte(data))
|
||||
|
||||
// Create the container config.
|
||||
|
||||
containerConfig := &container.Config{
|
||||
Hostname: req.Name,
|
||||
Image: req.Image,
|
||||
Env: []string{"PLATFORM=container", "USERDATA=" + b64data},
|
||||
Labels: map[string]string{
|
||||
"talos.owned": "true",
|
||||
"talos.cluster.name": clusterName,
|
||||
},
|
||||
Volumes: map[string]struct{}{
|
||||
"/var/lib/containerd": {},
|
||||
"/var/lib/kubelet": {},
|
||||
"/etc/cni": {},
|
||||
"/run": {},
|
||||
},
|
||||
}
|
||||
|
||||
// Create the host config.
|
||||
|
||||
hostConfig := &container.HostConfig{
|
||||
Privileged: true,
|
||||
SecurityOpt: []string{"seccomp:unconfined"},
|
||||
Resources: container.Resources{
|
||||
NanoCPUs: req.NanoCPUs,
|
||||
Memory: req.Memory,
|
||||
},
|
||||
}
|
||||
|
||||
// Ensure that the container is created in the talos network.
|
||||
|
||||
networkConfig := &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{
|
||||
clusterName: {
|
||||
NetworkID: clusterName,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Mutate the container configurations based on the node type.
|
||||
|
||||
switch req.Type {
|
||||
case generate.TypeInit:
|
||||
var apidPort nat.Port
|
||||
apidPort, err = nat.NewPort("tcp", "50000")
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var apiServerPort nat.Port
|
||||
apiServerPort, err = nat.NewPort("tcp", "6443")
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
containerConfig.ExposedPorts = nat.PortSet{
|
||||
apidPort: struct{}{},
|
||||
apiServerPort: struct{}{},
|
||||
}
|
||||
|
||||
hostConfig.PortBindings = nat.PortMap{
|
||||
apidPort: []nat.PortBinding{
|
||||
{
|
||||
HostIP: "0.0.0.0",
|
||||
HostPort: "50000",
|
||||
},
|
||||
},
|
||||
apiServerPort: []nat.PortBinding{
|
||||
{
|
||||
HostIP: "0.0.0.0",
|
||||
HostPort: "6443",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case generate.TypeControlPlane:
|
||||
containerConfig.Volumes[constants.EtcdDataPath] = struct{}{}
|
||||
|
||||
if req.IP == nil {
|
||||
return errors.New("an IP address must be provided when creating a master node")
|
||||
}
|
||||
}
|
||||
|
||||
if req.IP != nil {
|
||||
networkConfig.EndpointsConfig[clusterName].IPAMConfig = &network.EndpointIPAMConfig{IPv4Address: req.IP.String()}
|
||||
}
|
||||
|
||||
// Create the container.
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
cli, err := client.NewEnvClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := cli.ContainerCreate(ctx, containerConfig, hostConfig, networkConfig, req.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start the container.
|
||||
|
||||
return cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
|
||||
}
|
@ -49,14 +49,20 @@ type Client struct {
|
||||
NetworkClient networkapi.NetworkServiceClient
|
||||
}
|
||||
|
||||
// NewClientContextAndCredentialsFromConfig initializes ClientCredentials using default paths
|
||||
// to the required CA, certificate, and key.
|
||||
// NewClientContextAndCredentialsFromConfig initializes Credentials from config file.
|
||||
func NewClientContextAndCredentialsFromConfig(p string, ctx string) (context *config.Context, creds *Credentials, err error) {
|
||||
c, err := config.Open(p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
context, creds, err = NewClientContextAndCredentialsFromParsedConfig(c, ctx)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// NewClientContextAndCredentialsFromParsedConfig initializes Credentials from parsed configuration.
|
||||
func NewClientContextAndCredentialsFromParsedConfig(c *config.Config, ctx string) (context *config.Context, creds *Credentials, err error) {
|
||||
if ctx != "" {
|
||||
c.Context = ctx
|
||||
}
|
||||
|
@ -21,7 +21,8 @@ osctl cluster create [flags]
|
||||
--kubernetes-version string desired kubernetes version to run (default "1.17.0")
|
||||
--masters int the number of masters to create (default 1)
|
||||
--memory int the limit on memory usage in MB (each container) (default 1024)
|
||||
--mtu string MTU of the docker bridge network (default "1500")
|
||||
--mtu int MTU of the docker bridge network (default 1500)
|
||||
--wait wait for the cluster to be ready before returning
|
||||
--workers int the number of workers to create (default 1)
|
||||
```
|
||||
|
||||
|
go.sum (3 changes)
@ -141,6 +141,7 @@ github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQm
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
|
||||
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
@ -408,6 +409,7 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
|
||||
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.0-20170515075120-4cdb38c072b8/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
@ -705,6 +707,7 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20191030222137-2b95a09bc58d h1:1P0iBJsBzxRmR+dIFnM+Iu4aLxnoa7lBqozW/0uHbT8=
|
||||
|
@ -10,10 +10,10 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/events"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@ -14,7 +14,7 @@ import (
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
||||
|
@ -7,9 +7,9 @@ package system
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@ -8,7 +8,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
)
|
||||
|
||||
// StateEvent is a service event (e.g. 'up', 'down')
|
||||
|
@ -12,10 +12,10 @@ import (
|
||||
"time"
|
||||
|
||||
machineapi "github.com/talos-systems/talos/api/machine"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/events"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@ -12,8 +12,8 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/events"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/pkg/retry"
|
||||
)
|
||||
|
||||
|
@ -16,11 +16,11 @@ import (
|
||||
"github.com/containerd/containerd/oci"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/containerd"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/config/machine"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
|
@ -27,9 +27,9 @@ import (
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/internal/bootkube"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/goroutine"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/etcd"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
|
@ -13,11 +13,11 @@ import (
|
||||
"github.com/containerd/containerd/defaults"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/process"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
@ -23,11 +23,11 @@ import (
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/containerd"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/etcd"
|
||||
"github.com/talos-systems/talos/internal/pkg/metadata"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
|
@ -25,11 +25,11 @@ import (
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
internalcni "github.com/talos-systems/talos/internal/app/machined/internal/cni"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/containerd"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/argsbuilder"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
|
@ -8,9 +8,9 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/internal/api"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/goroutine"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@ -18,11 +18,11 @@ import (
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/containerd"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
@ -17,10 +17,10 @@ import (
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/containerd"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
@ -18,11 +18,11 @@ import (
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/containerd"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/config/machine"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
|
@ -11,11 +11,11 @@ import (
|
||||
"github.com/containerd/containerd"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/process"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
@ -14,11 +14,11 @@ import (
|
||||
"github.com/containerd/containerd/oci"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/health"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/containerd"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
@ -8,10 +8,10 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/process"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
"github.com/talos-systems/talos/pkg/cmd"
|
||||
)
|
||||
|
@ -9,10 +9,10 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/process"
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/runner/restart"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@ -13,7 +13,7 @@ import (
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
)
|
||||
|
||||
type AllSuite struct {
|
@ -14,7 +14,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/talos-systems/talos/internal/app/machined/pkg/system/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
)
|
||||
|
||||
type FilesSuite struct {
|
internal/pkg/conditions/poll.go (new file, 86 lines)
@ -0,0 +1,86 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package conditions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AssertionFunc is called every poll interval until it returns nil.
|
||||
type AssertionFunc func(ctx context.Context) error
|
||||
|
||||
type pollingCondition struct {
|
||||
lastErrMu sync.Mutex
|
||||
lastErr error
|
||||
lastErrSet bool
|
||||
|
||||
assertion AssertionFunc
|
||||
description string
|
||||
timeout, interval time.Duration
|
||||
}
|
||||
|
||||
func (p *pollingCondition) String() string {
|
||||
lastErr := "..."
|
||||
|
||||
p.lastErrMu.Lock()
|
||||
if p.lastErrSet {
|
||||
if p.lastErr != nil {
|
||||
lastErr = p.lastErr.Error()
|
||||
} else {
|
||||
lastErr = "OK"
|
||||
}
|
||||
}
|
||||
p.lastErrMu.Unlock()
|
||||
|
||||
return fmt.Sprintf("%s: %s", p.description, lastErr)
|
||||
}
|
||||
|
||||
func (p *pollingCondition) Wait(ctx context.Context) error {
|
||||
ticker := time.NewTicker(p.interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
timeoutCtx, timeoutCtxCancel := context.WithTimeout(ctx, p.timeout)
|
||||
defer timeoutCtxCancel()
|
||||
|
||||
for {
|
||||
err := func() error {
|
||||
runCtx, runCtxCancel := context.WithTimeout(ctx, p.interval)
|
||||
defer runCtxCancel()
|
||||
|
||||
err := p.assertion(runCtx)
|
||||
|
||||
p.lastErrMu.Lock()
|
||||
p.lastErr = err
|
||||
p.lastErrSet = true
|
||||
p.lastErrMu.Unlock()
|
||||
|
||||
return err
|
||||
}()
|
||||
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-timeoutCtx.Done():
|
||||
return timeoutCtx.Err()
|
||||
case <-ticker.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PollingCondition converts AssertionFunc into Condition by calling it every interval until timeout
|
||||
// is reached.
|
||||
func PollingCondition(description string, assertion AssertionFunc, timeout, interval time.Duration) Condition {
|
||||
return &pollingCondition{
|
||||
assertion: assertion,
|
||||
description: description,
|
||||
timeout: timeout,
|
||||
interval: interval,
|
||||
}
|
||||
}
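A hedged usage sketch of the promoted `conditions` package: only `PollingCondition` and `AssertionFunc` come from the code above, and the `Wait`/`String` behaviour is assumed from the package's existing `Condition` interface; the assertion body, timings and the "example resource" description are invented for illustration.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/talos-systems/talos/internal/pkg/conditions"
)

func main() {
	// Hypothetical assertion: pretend the resource becomes ready after ~2s.
	start := time.Now()
	ready := func(ctx context.Context) error {
		if time.Since(start) < 2*time.Second {
			return errors.New("not ready yet")
		}
		return nil
	}

	// Poll the assertion every 500ms, giving up after 10s.
	cond := conditions.PollingCondition("example resource to be ready", ready, 10*time.Second, 500*time.Millisecond)

	if err := cond.Wait(context.Background()); err != nil {
		fmt.Println("condition failed:", err)
		return
	}

	fmt.Println("condition met:", cond) // String() reports the last assertion result
}
```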
|
internal/pkg/provision/access/access.go (new file, 6 lines)
@ -0,0 +1,6 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package access provides methods to access a provisioned Talos cluster.
|
||||
package access
|
internal/pkg/provision/access/access_test.go (new file, 14 lines)
@ -0,0 +1,14 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package access_test
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
// added for accurate coverage estimation
|
||||
//
|
||||
// please remove it once any unit-test is added
|
||||
// for this package
|
||||
}
|
internal/pkg/provision/access/adapter.go (new file, 104 lines)
@ -0,0 +1,104 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package access
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
|
||||
"github.com/talos-systems/talos/cmd/osctl/pkg/client"
|
||||
"github.com/talos-systems/talos/internal/pkg/provision"
|
||||
"github.com/talos-systems/talos/pkg/constants"
|
||||
)
|
||||
|
||||
// NewAdapter returns a ClusterAccess object wrapping the provided Cluster.
|
||||
func NewAdapter(cluster provision.Cluster) provision.ClusterAccess {
|
||||
return &adapter{
|
||||
Cluster: cluster,
|
||||
clients: make(map[string]*client.Client),
|
||||
}
|
||||
}
|
||||
|
||||
type adapter struct {
|
||||
provision.Cluster
|
||||
|
||||
clients map[string]*client.Client
|
||||
clientset *kubernetes.Clientset
|
||||
}
|
||||
|
||||
func (a *adapter) Client(endpoints ...string) (*client.Client, error) {
|
||||
key := strings.Join(endpoints, ",")
|
||||
|
||||
if cli := a.clients[key]; cli != nil {
|
||||
return cli, nil
|
||||
}
|
||||
|
||||
configContext, creds, err := client.NewClientContextAndCredentialsFromParsedConfig(a.TalosConfig(), "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(endpoints) == 0 {
|
||||
endpoints = configContext.Endpoints
|
||||
}
|
||||
|
||||
client, err := client.NewClient(creds, endpoints, constants.ApidPort)
|
||||
if err == nil {
|
||||
a.clients[key] = client
|
||||
}
|
||||
|
||||
return client, err
|
||||
}
|
||||
|
||||
func (a *adapter) K8sClient(ctx context.Context) (*kubernetes.Clientset, error) {
|
||||
if a.clientset != nil {
|
||||
return a.clientset, nil
|
||||
}
|
||||
|
||||
client, err := a.Client()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kubeconfig, err := client.Kubeconfig(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config, err := clientcmd.BuildConfigFromKubeconfigGetter("", func() (*clientcmdapi.Config, error) {
|
||||
return clientcmd.Load(kubeconfig)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// patch timeout
|
||||
config.Timeout = time.Minute
|
||||
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
if err == nil {
|
||||
a.clientset = clientset
|
||||
}
|
||||
|
||||
return clientset, err
|
||||
}
|
||||
|
||||
func (a *adapter) Close() error {
|
||||
for _, cli := range a.clients {
|
||||
if err := cli.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
a.clients = nil
|
||||
a.clientset = nil
|
||||
|
||||
return nil
|
||||
}
|
internal/pkg/provision/adapters.go (new file, 27 lines)
@ -0,0 +1,27 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package provision
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
"github.com/talos-systems/talos/cmd/osctl/pkg/client"
|
||||
)
|
||||
|
||||
// ClusterAccess extends Cluster interface to provide clients to the cluster.
|
||||
type ClusterAccess interface {
|
||||
Cluster
|
||||
|
||||
// Client returns default Talos client.
|
||||
Client(endpoints ...string) (*client.Client, error)
|
||||
|
||||
// K8sClient returns Kubernetes client.
|
||||
K8sClient(context.Context) (*kubernetes.Clientset, error)
|
||||
|
||||
// Close shuts down all the clients.
|
||||
Close() error
|
||||
}
|
internal/pkg/provision/check/apid.go (new file, 33 lines)
@ -0,0 +1,33 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package check provides set of checks to verify cluster readiness.
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/talos-systems/talos/cmd/osctl/pkg/client"
|
||||
"github.com/talos-systems/talos/internal/pkg/provision"
|
||||
)
|
||||
|
||||
// ApidReadyAssertion checks whether apid is responsive on all the nodes.
|
||||
func ApidReadyAssertion(ctx context.Context, cluster provision.ClusterAccess) error {
|
||||
cli, err := cluster.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes := make([]string, 0, len(cluster.Info().Nodes))
|
||||
|
||||
for _, node := range cluster.Info().Nodes {
|
||||
nodes = append(nodes, node.PrivateIP.String())
|
||||
}
|
||||
|
||||
nodesCtx := client.WithNodes(ctx, nodes...)
|
||||
|
||||
_, err = cli.Version(nodesCtx)
|
||||
|
||||
return err
|
||||
}
|
internal/pkg/provision/check/check.go (new file, 76 lines)
@ -0,0 +1,76 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package check provides set of checks to verify cluster readiness.
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/provision"
|
||||
)
|
||||
|
||||
const updateInterval = 100 * time.Millisecond
|
||||
|
||||
// ClusterCheck is a function which returns a Condition based on ClusterAccess.
|
||||
type ClusterCheck func(provision.ClusterAccess) conditions.Condition
|
||||
|
||||
// Reporter presents wait progress.
|
||||
//
|
||||
// The reporter is expected to drop duplicate messages.
|
||||
type Reporter interface {
|
||||
Update(check conditions.Condition)
|
||||
}
|
||||
|
||||
// Wait runs the checks against the cluster and waits for the full set to succeed.
|
||||
//
|
||||
// Context ctx might have a timeout set to limit overall wait time.
|
||||
// Each check might define its own timeout.
|
||||
func Wait(ctx context.Context, cluster provision.ClusterAccess, checks []ClusterCheck, reporter Reporter) error {
|
||||
for _, check := range checks {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
condition := check(cluster)
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
|
||||
go func(condition conditions.Condition) {
|
||||
errCh <- condition.Wait(ctx)
|
||||
}(condition)
|
||||
|
||||
var err error
|
||||
|
||||
func() {
|
||||
ticker := time.NewTicker(updateInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
// report initial state
|
||||
reporter.Update(condition)
|
||||
|
||||
// report last state
|
||||
defer reporter.Update(condition)
|
||||
|
||||
for {
|
||||
select {
|
||||
case err = <-errCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
reporter.Update(condition)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
internal/pkg/provision/check/check_test.go (new file, 14 lines)
@ -0,0 +1,14 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package check_test
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
// added for accurate coverage estimation
|
||||
//
|
||||
// please remove it once any unit-test is added
|
||||
// for this package
|
||||
}
|
internal/pkg/provision/check/default.go (new file, 61 lines)
@ -0,0 +1,61 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
"github.com/talos-systems/talos/internal/pkg/provision"
|
||||
)
|
||||
|
||||
// DefaultClusterChecks returns a set of default Talos cluster readiness checks.
|
||||
func DefaultClusterChecks() []ClusterCheck {
|
||||
return []ClusterCheck{
|
||||
// wait for bootkube to finish on init node
|
||||
func(cluster provision.ClusterAccess) conditions.Condition {
|
||||
return conditions.PollingCondition("bootkube to finish", func(ctx context.Context) error {
|
||||
return ServiceStateAssertion(ctx, cluster, "bootkube", "Finished")
|
||||
}, 5*time.Minute, 5*time.Second)
|
||||
},
|
||||
// wait for apid to be ready on all the nodes
|
||||
func(cluster provision.ClusterAccess) conditions.Condition {
|
||||
return conditions.PollingCondition("apid to be ready", func(ctx context.Context) error {
|
||||
return ApidReadyAssertion(ctx, cluster)
|
||||
}, 2*time.Minute, 5*time.Second)
|
||||
},
|
||||
// wait for all the nodes to report in at k8s level
|
||||
func(cluster provision.ClusterAccess) conditions.Condition {
|
||||
return conditions.PollingCondition("all k8s nodes to report", func(ctx context.Context) error {
|
||||
return K8sAllNodesReportedAssertion(ctx, cluster)
|
||||
}, 5*time.Minute, 5*time.Second)
|
||||
},
|
||||
// wait for all the nodes to report ready at k8s level
|
||||
func(cluster provision.ClusterAccess) conditions.Condition {
|
||||
return conditions.PollingCondition("all k8s nodes to report ready", func(ctx context.Context) error {
|
||||
return K8sAllNodesReadyAssertion(ctx, cluster)
|
||||
}, 10*time.Minute, 5*time.Second)
|
||||
},
|
||||
// wait for HA k8s control plane
|
||||
func(cluster provision.ClusterAccess) conditions.Condition {
|
||||
return conditions.PollingCondition("all master nodes to be part of k8s control plane", func(ctx context.Context) error {
|
||||
return K8sFullControlPlaneAssertion(ctx, cluster)
|
||||
}, 2*time.Minute, 5*time.Second)
|
||||
},
|
||||
// wait for kube-proxy to report ready
|
||||
func(cluster provision.ClusterAccess) conditions.Condition {
|
||||
return conditions.PollingCondition("kube-proxy to report ready", func(ctx context.Context) error {
|
||||
return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-proxy")
|
||||
}, 3*time.Minute, 5*time.Second)
|
||||
},
|
||||
// wait for kube-dns to report ready
|
||||
func(cluster provision.ClusterAccess) conditions.Condition {
|
||||
return conditions.PollingCondition("kube-dns to report ready", func(ctx context.Context) error {
|
||||
return K8sPodReadyAssertion(ctx, cluster, "kube-system", "k8s-app=kube-dns")
|
||||
}, 3*time.Minute, 5*time.Second)
|
||||
},
|
||||
}
|
||||
}
|
internal/pkg/provision/check/kubernetes.go (new file, 176 lines)
@ -0,0 +1,176 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package check provides set of checks to verify cluster readiness.
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/talos-systems/talos/internal/pkg/provision"
|
||||
"github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
|
||||
)
|
||||
|
||||
// K8sAllNodesReportedAssertion checks whether all the nodes show up in node list.
|
||||
func K8sAllNodesReportedAssertion(ctx context.Context, cluster provision.ClusterAccess) error {
|
||||
clientset, err := cluster.K8sClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
expectedNodes := make([]string, 0, len(cluster.Info().Nodes))
|
||||
|
||||
for _, node := range cluster.Info().Nodes {
|
||||
expectedNodes = append(expectedNodes, node.PrivateIP.String())
|
||||
}
|
||||
|
||||
nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var actualNodes []string
|
||||
|
||||
for _, node := range nodes.Items {
|
||||
for _, nodeAddress := range node.Status.Addresses {
|
||||
if nodeAddress.Type == v1.NodeInternalIP {
|
||||
actualNodes = append(actualNodes, nodeAddress.Address)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(expectedNodes)
|
||||
sort.Strings(actualNodes)
|
||||
|
||||
if reflect.DeepEqual(expectedNodes, actualNodes) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("expected %v nodes, but got %v nodes", expectedNodes, actualNodes)
|
||||
}
|
||||
|
||||
// K8sFullControlPlaneAssertion checks whether all the master nodes are k8s master nodes.
|
||||
//
|
||||
//nolint: gocyclo
|
||||
func K8sFullControlPlaneAssertion(ctx context.Context, cluster provision.ClusterAccess) error {
|
||||
clientset, err := cluster.K8sClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var expectedNodes []string
|
||||
|
||||
for _, node := range cluster.Info().Nodes {
|
||||
if node.Type == generate.TypeInit || node.Type == generate.TypeControlPlane {
|
||||
expectedNodes = append(expectedNodes, node.PrivateIP.String())
|
||||
}
|
||||
}
|
||||
|
||||
nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var actualNodes []string
|
||||
|
||||
for _, node := range nodes.Items {
|
||||
for label := range node.Labels {
|
||||
if label == "node-role.kubernetes.io/master" {
|
||||
for _, nodeAddress := range node.Status.Addresses {
|
||||
if nodeAddress.Type == v1.NodeInternalIP {
|
||||
actualNodes = append(actualNodes, nodeAddress.Address)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(expectedNodes)
|
||||
sort.Strings(actualNodes)
|
||||
|
||||
if reflect.DeepEqual(expectedNodes, actualNodes) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("expected %v nodes, but got %v nodes", expectedNodes, actualNodes)
|
||||
}
|
||||
|
||||
// K8sAllNodesReadyAssertion checks whether all the nodes are Ready.
|
||||
func K8sAllNodesReadyAssertion(ctx context.Context, cluster provision.ClusterAccess) error {
|
||||
clientset, err := cluster.K8sClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var notReadyNodes []string
|
||||
|
||||
for _, node := range nodes.Items {
|
||||
for _, cond := range node.Status.Conditions {
|
||||
if cond.Type == v1.NodeReady {
|
||||
if cond.Status != v1.ConditionTrue {
|
||||
notReadyNodes = append(notReadyNodes, node.Name)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(notReadyNodes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("some nodes are not ready: %v", notReadyNodes)
|
||||
}
|
||||
|
||||
// K8sPodReadyAssertion checks whether all the pods matching the given namespace and label selector are Ready.
|
||||
func K8sPodReadyAssertion(ctx context.Context, cluster provision.ClusterAccess, namespace, labelSelector string) error {
|
||||
clientset, err := cluster.K8sClient(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pods, err := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{
|
||||
LabelSelector: labelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(pods.Items) == 0 {
|
||||
return fmt.Errorf("no pods found for namespace %q and label %q", namespace, labelSelector)
|
||||
}
|
||||
|
||||
var notReadyPods []string
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
for _, cond := range pod.Status.Conditions {
|
||||
if cond.Type == v1.PodReady {
|
||||
if cond.Status != v1.ConditionTrue {
|
||||
notReadyPods = append(notReadyPods, pod.Name)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(notReadyPods) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("some pods are not ready: %v", notReadyPods)
|
||||
}
|
internal/pkg/provision/check/reporter.go (new file, 34 lines)
@ -0,0 +1,34 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
package check
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/talos-systems/talos/internal/pkg/conditions"
|
||||
)
|
||||
|
||||
type writerReporter struct {
|
||||
w io.Writer
|
||||
lastLine string
|
||||
}
|
||||
|
||||
func (wr *writerReporter) Update(condition conditions.Condition) {
|
||||
line := fmt.Sprintf("waiting for %s", condition)
|
||||
|
||||
if line != wr.lastLine {
|
||||
fmt.Fprintln(wr.w, line)
|
||||
wr.lastLine = line
|
||||
}
|
||||
}
|
||||
|
||||
// StderrReporter returns console reporter with stderr output.
|
||||
func StderrReporter() Reporter {
|
||||
return &writerReporter{
|
||||
w: os.Stderr,
|
||||
}
|
||||
}
|
internal/pkg/provision/check/service.go (new file, 47 lines)
@ -0,0 +1,47 @@
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// Package check provides set of checks to verify cluster readiness.
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/talos-systems/talos/internal/pkg/provision"
|
||||
)
|
||||
|
||||
// ServiceStateAssertion checks whether service reached some specified state.
|
||||
func ServiceStateAssertion(ctx context.Context, cluster provision.ClusterAccess, service, state string) error {
|
||||
client, err := cluster.Client()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
servicesInfo, err := client.ServiceInfo(ctx, service)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serviceOk := false
|
||||
|
||||
for _, serviceInfo := range servicesInfo {
|
||||
if len(serviceInfo.Service.Events.Events) == 0 {
|
||||
return fmt.Errorf("no events recorded yet for service %q", service)
|
||||
}
|
||||
|
||||
lastEvent := serviceInfo.Service.Events.Events[len(serviceInfo.Service.Events.Events)-1]
|
||||
if lastEvent.State != state {
|
||||
return fmt.Errorf("service %q not in expected state %q: current state [%s] %s", service, state, lastEvent.State, lastEvent.Msg)
|
||||
}
|
||||
|
||||
serviceOk = true
|
||||
}
|
||||
|
||||
if !serviceOk {
|
||||
return fmt.Errorf("service %q not found", service)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
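As a rough illustration of how these assertions might be combined into a readiness wait: the commit wires them through the `conditions` package and a `Reporter`, which is not reproduced here, so the sketch below uses a hand-rolled retry loop instead. The helper names, the retry interval, and the "kubelet"/"Running" service/state values are assumptions made for the example, not code from this commit.

package check_test

import (
    "context"
    "time"

    "github.com/talos-systems/talos/internal/pkg/provision"
    "github.com/talos-systems/talos/internal/pkg/provision/check"
)

// waitFor is a hypothetical helper: it re-runs an assertion until it
// succeeds or the context is cancelled.
func waitFor(ctx context.Context, assertion func(context.Context) error) error {
    for {
        if err := assertion(ctx); err == nil {
            return nil
        }

        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(5 * time.Second): // retry interval chosen arbitrarily
        }
    }
}

// waitForCluster runs a couple of the checks defined above against a cluster.
func waitForCluster(ctx context.Context, cluster provision.ClusterAccess) error {
    if err := waitFor(ctx, func(ctx context.Context) error {
        return check.ServiceStateAssertion(ctx, cluster, "kubelet", "Running") // example service/state
    }); err != nil {
        return err
    }

    return waitFor(ctx, func(ctx context.Context) error {
        return check.K8sAllNodesReadyAssertion(ctx, cluster)
    })
}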
34 internal/pkg/provision/options.go Normal file
@ -0,0 +1,34 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package provision

import (
    "io"
    "os"
)

// Option controls Provisioner.
type Option func(o *Options) error

// WithLogWriter sets logging destination.
func WithLogWriter(w io.Writer) Option {
    return func(o *Options) error {
        o.LogWriter = w

        return nil
    }
}

// Options describes Provisioner parameters.
type Options struct {
    LogWriter io.Writer
}

// DefaultOptions returns default options.
func DefaultOptions() Options {
    return Options{
        LogWriter: os.Stderr,
    }
}
91 internal/pkg/provision/providers/docker/create.go Normal file
@ -0,0 +1,91 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker

import (
    "context"
    "encoding/base64"
    "fmt"

    "github.com/talos-systems/talos/cmd/osctl/pkg/client/config"
    "github.com/talos-systems/talos/internal/pkg/provision"
    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
)

// Create Talos cluster as a set of docker containers on docker network.
func (p *provisioner) Create(ctx context.Context, request provision.ClusterRequest, opts ...provision.Option) (provision.Cluster, error) {
    options := provision.DefaultOptions()

    for _, opt := range opts {
        if err := opt(&options); err != nil {
            return nil, err
        }
    }

    if err := p.ensureImageExists(ctx, request.Image, &options); err != nil {
        return nil, err
    }

    initNode, err := request.Nodes.FindInitNode()
    if err != nil {
        return nil, err
    }

    fmt.Fprintln(options.LogWriter, "generating PKI and tokens")

    input, err := generate.NewInput(request.Name, fmt.Sprintf("https://%s:6443", initNode.IP), request.KubernetesVersion)
    if err != nil {
        return nil, err
    }

    fmt.Fprintln(options.LogWriter, "creating network", request.Network.Name)

    if err = p.createNetwork(ctx, request.Network); err != nil {
        return nil, fmt.Errorf("a cluster might already exist, run \"osctl cluster destroy\" to permanently delete the existing cluster, and try again: %w", err)
    }

    var nodeInfo []provision.NodeInfo

    fmt.Fprintln(options.LogWriter, "creating master nodes")

    if nodeInfo, err = p.createNodes(ctx, request, input, request.Nodes.MasterNodes()); err != nil {
        return nil, err
    }

    fmt.Fprintln(options.LogWriter, "creating worker nodes")

    var workerNodeInfo []provision.NodeInfo

    if workerNodeInfo, err = p.createNodes(ctx, request, input, request.Nodes.WorkerNodes()); err != nil {
        return nil, err
    }

    nodeInfo = append(nodeInfo, workerNodeInfo...)

    res := &result{
        talosConfig: &config.Config{
            Context: request.Name,
            Contexts: map[string]*config.Context{
                request.Name: {
                    Endpoints: []string{"127.0.0.1"},
                    CA:        base64.StdEncoding.EncodeToString(input.Certs.OS.Crt),
                    Crt:       base64.StdEncoding.EncodeToString(input.Certs.Admin.Crt),
                    Key:       base64.StdEncoding.EncodeToString(input.Certs.Admin.Key),
                },
            },
        },

        clusterInfo: provision.ClusterInfo{
            ClusterName: request.Name,
            Network: provision.NetworkInfo{
                Name: request.Network.Name,
                CIDR: request.Network.CIDR,
            },
            Nodes: nodeInfo,
        },
    }

    return res, nil
}
33 internal/pkg/provision/providers/docker/destroy.go Normal file
@ -0,0 +1,33 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker

import (
    "context"
    "fmt"

    "github.com/talos-systems/talos/internal/pkg/provision"
)

// Destroy Talos cluster as set of Docker nodes.
//
// Only cluster.Info().ClusterName and cluster.Info().Network.Name is being used.
func (p *provisioner) Destroy(ctx context.Context, cluster provision.Cluster, opts ...provision.Option) error {
    options := provision.DefaultOptions()

    for _, opt := range opts {
        if err := opt(&options); err != nil {
            return err
        }
    }

    if err := p.destroyNodes(ctx, cluster.Info().ClusterName, &options); err != nil {
        return err
    }

    fmt.Println("destroying network", cluster.Info().Network.Name)

    return p.destroyNetwork(ctx, cluster.Info().Network.Name)
}
41 internal/pkg/provision/providers/docker/docker.go Normal file
@ -0,0 +1,41 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

// Package docker implements Provisioner via docker.
package docker

import (
    "context"

    "github.com/docker/docker/client"

    "github.com/talos-systems/talos/internal/pkg/provision"
)

type provisioner struct {
    client *client.Client
}

// NewProvisioner initializes docker provisioner.
func NewProvisioner(ctx context.Context) (provision.Provisioner, error) {
    p := &provisioner{}

    var err error

    p.client, err = client.NewEnvClient()
    if err != nil {
        return nil, err
    }

    return p, nil
}

// Close and release resources.
func (p *provisioner) Close() error {
    if p.client != nil {
        return p.client.Close()
    }

    return nil
}
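For orientation, this is roughly the shape of the caller's side that `osctl cluster up`/`down` now delegates to. The sketch below is illustrative only: the cluster name, image reference, CIDR, and node layout are placeholder values, and KubernetesVersion and resource limits are left at their zero values.

package main

import (
    "context"
    "net"
    "os"

    "github.com/talos-systems/talos/internal/pkg/provision"
    "github.com/talos-systems/talos/internal/pkg/provision/providers/docker"
    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
)

func main() {
    ctx := context.Background()

    p, err := docker.NewProvisioner(ctx)
    if err != nil {
        panic(err)
    }

    defer p.Close() //nolint: errcheck

    _, cidr, err := net.ParseCIDR("10.5.0.0/24") // placeholder network
    if err != nil {
        panic(err)
    }

    request := provision.ClusterRequest{
        Name:  "talos-example",                   // placeholder cluster name
        Image: "docker.io/autonomy/talos:latest", // placeholder image reference
        Network: provision.NetworkRequest{
            Name: "talos-example",
            CIDR: *cidr,
            MTU:  1500,
        },
        Nodes: provision.NodeRequests{
            {Type: generate.TypeInit, Name: "master-1", IP: net.ParseIP("10.5.0.2")},
            {Type: generate.TypeJoin, Name: "worker-1"},
        },
    }

    cluster, err := p.Create(ctx, request, provision.WithLogWriter(os.Stdout))
    if err != nil {
        panic(err)
    }

    // ... run readiness checks against the cluster, then tear it down:
    if err = p.Destroy(ctx, cluster); err != nil {
        panic(err)
    }
}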
14 internal/pkg/provision/providers/docker/docker_test.go Normal file
@ -0,0 +1,14 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker_test

import "testing"

func TestEmpty(t *testing.T) {
    // added for accurate coverage estimation
    //
    // please remove it once any unit-test is added
    // for this package
}
65 internal/pkg/provision/providers/docker/image.go Normal file
@ -0,0 +1,65 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker

import (
    "context"
    "fmt"
    "io"
    "io/ioutil"

    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"

    "github.com/talos-systems/talos/internal/pkg/provision"
)

func (p *provisioner) ensureImageExists(ctx context.Context, image string, options *provision.Options) error {
    // In order to pull an image, the reference must be in canonical
    // format (e.g. domain/repo/image:tag).
    ref, err := reference.ParseNormalizedNamed(image)
    if err != nil {
        return err
    }

    image = ref.String()

    // To filter the images, we need a familiar name and a tag
    // (e.g. domain/repo/image:tag => repo/image:tag).
    familiarName := reference.FamiliarName(ref)
    tag := ""

    if tagged, isTagged := ref.(reference.Tagged); isTagged {
        tag = tagged.Tag()
    }

    filters := filters.NewArgs()
    filters.Add("reference", familiarName+":"+tag)

    images, err := p.client.ImageList(ctx, types.ImageListOptions{Filters: filters})
    if err != nil {
        return err
    }

    if len(images) == 0 {
        fmt.Fprintln(options.LogWriter, "downloading", image)

        var reader io.ReadCloser

        if reader, err = p.client.ImagePull(ctx, image, types.ImagePullOptions{}); err != nil {
            return err
        }

        // nolint: errcheck
        defer reader.Close()

        if _, err = io.Copy(ioutil.Discard, reader); err != nil {
            return err
        }
    }

    return nil
}
69 internal/pkg/provision/providers/docker/network.go Normal file
@ -0,0 +1,69 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker

import (
    "context"
    "strconv"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/network"
    "github.com/hashicorp/go-multierror"

    "github.com/talos-systems/talos/internal/pkg/provision"
)

func (p *provisioner) createNetwork(ctx context.Context, req provision.NetworkRequest) error {
    options := types.NetworkCreate{
        Labels: map[string]string{
            "talos.owned":        "true",
            "talos.cluster.name": req.Name,
        },
        IPAM: &network.IPAM{
            Config: []network.IPAMConfig{
                {
                    Subnet: req.CIDR.String(),
                },
            },
        },
        Options: map[string]string{
            "com.docker.network.driver.mtu": strconv.Itoa(req.MTU),
        },
    }

    _, err := p.client.NetworkCreate(ctx, req.Name, options)

    return err
}

func (p *provisioner) listNetworks(ctx context.Context, name string) ([]types.NetworkResource, error) {
    filters := filters.NewArgs()
    filters.Add("label", "talos.owned=true")
    filters.Add("label", "talos.cluster.name="+name)

    options := types.NetworkListOptions{
        Filters: filters,
    }

    return p.client.NetworkList(ctx, options)
}

func (p *provisioner) destroyNetwork(ctx context.Context, name string) error {
    networks, err := p.listNetworks(ctx, name)
    if err != nil {
        return err
    }

    var result *multierror.Error

    for _, network := range networks {
        if err := p.client.NetworkRemove(ctx, network.ID); err != nil {
            result = multierror.Append(result, err)
        }
    }

    return result.ErrorOrNil()
}
220 internal/pkg/provision/providers/docker/node.go Normal file
@ -0,0 +1,220 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker

import (
    "context"
    "encoding/base64"
    "errors"
    "fmt"
    "net"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/api/types/network"
    "github.com/docker/go-connections/nat"
    "github.com/hashicorp/go-multierror"

    "github.com/talos-systems/talos/internal/pkg/provision"
    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
    "github.com/talos-systems/talos/pkg/constants"
)

func (p *provisioner) createNodes(ctx context.Context, clusterReq provision.ClusterRequest, input *generate.Input, nodeReqs []provision.NodeRequest) ([]provision.NodeInfo, error) {
    errCh := make(chan error)
    nodeCh := make(chan provision.NodeInfo, len(nodeReqs))

    for _, nodeReq := range nodeReqs {
        go func(nodeReq provision.NodeRequest) {
            nodeInfo, err := p.createNode(ctx, clusterReq, input, nodeReq)
            errCh <- err

            if err == nil {
                nodeCh <- nodeInfo
            }
        }(nodeReq)
    }

    var multiErr *multierror.Error

    for range nodeReqs {
        multiErr = multierror.Append(multiErr, <-errCh)
    }

    close(nodeCh)

    nodesInfo := make([]provision.NodeInfo, 0, len(nodeReqs))

    for nodeInfo := range nodeCh {
        nodesInfo = append(nodesInfo, nodeInfo)
    }

    return nodesInfo, multiErr.ErrorOrNil()
}

//nolint: gocyclo
func (p *provisioner) createNode(ctx context.Context, clusterReq provision.ClusterRequest, input *generate.Input, nodeReq provision.NodeRequest) (provision.NodeInfo, error) {
    inputCopy := *input // TODO: this looks like a bug in generate?

    data, err := generate.Config(nodeReq.Type, &inputCopy)
    if err != nil {
        return provision.NodeInfo{}, err
    }

    b64data := base64.StdEncoding.EncodeToString([]byte(data))

    // Create the container config.
    containerConfig := &container.Config{
        Hostname: nodeReq.Name,
        Image:    clusterReq.Image,
        Env:      []string{"PLATFORM=container", "USERDATA=" + b64data},
        Labels: map[string]string{
            "talos.owned":        "true",
            "talos.cluster.name": clusterReq.Name,
            "talos.type":         nodeReq.Type.String(),
        },
        Volumes: map[string]struct{}{
            "/var/lib/containerd": {},
            "/var/lib/kubelet":    {},
            "/etc/cni":            {},
            "/run":                {},
        },
    }

    // Create the host config.
    hostConfig := &container.HostConfig{
        Privileged:  true,
        SecurityOpt: []string{"seccomp:unconfined"},
        Resources: container.Resources{
            NanoCPUs: nodeReq.NanoCPUs,
            Memory:   nodeReq.Memory,
        },
    }

    // Ensure that the container is created in the talos network.
    networkConfig := &network.NetworkingConfig{
        EndpointsConfig: map[string]*network.EndpointSettings{
            clusterReq.Network.Name: {
                NetworkID: clusterReq.Network.Name,
            },
        },
    }

    // Mutate the container configurations based on the node type.
    switch nodeReq.Type {
    case generate.TypeInit:
        var apidPort nat.Port
        apidPort, err = nat.NewPort("tcp", "50000")

        if err != nil {
            return provision.NodeInfo{}, err
        }

        var apiServerPort nat.Port
        apiServerPort, err = nat.NewPort("tcp", "6443")

        if err != nil {
            return provision.NodeInfo{}, err
        }

        containerConfig.ExposedPorts = nat.PortSet{
            apidPort:      struct{}{},
            apiServerPort: struct{}{},
        }

        hostConfig.PortBindings = nat.PortMap{
            apidPort: []nat.PortBinding{
                {
                    HostIP:   "0.0.0.0",
                    HostPort: "50000",
                },
            },
            apiServerPort: []nat.PortBinding{
                {
                    HostIP:   "0.0.0.0",
                    HostPort: "6443",
                },
            },
        }

        fallthrough
    case generate.TypeControlPlane:
        containerConfig.Volumes[constants.EtcdDataPath] = struct{}{}

        if nodeReq.IP == nil {
            return provision.NodeInfo{}, errors.New("an IP address must be provided when creating a master node")
        }
    }

    if nodeReq.IP != nil {
        networkConfig.EndpointsConfig[clusterReq.Network.Name].IPAMConfig = &network.EndpointIPAMConfig{IPv4Address: nodeReq.IP.String()}
    }

    // Create the container.
    resp, err := p.client.ContainerCreate(ctx, containerConfig, hostConfig, networkConfig, nodeReq.Name)
    if err != nil {
        return provision.NodeInfo{}, err
    }

    // Start the container.
    err = p.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
    if err != nil {
        return provision.NodeInfo{}, err
    }

    // Inspect the container.
    info, err := p.client.ContainerInspect(ctx, resp.ID)
    if err != nil {
        return provision.NodeInfo{}, err
    }

    nodeInfo := provision.NodeInfo{
        ID:   info.ID,
        Name: info.Name,
        Type: nodeReq.Type,

        PrivateIP: net.ParseIP(info.NetworkSettings.Networks[clusterReq.Network.Name].IPAddress),
    }

    return nodeInfo, nil
}

func (p *provisioner) listNodes(ctx context.Context, clusterName string) ([]types.Container, error) {
    filters := filters.NewArgs()
    filters.Add("label", "talos.owned=true")
    filters.Add("label", "talos.cluster.name="+clusterName)

    return p.client.ContainerList(ctx, types.ContainerListOptions{All: true, Filters: filters})
}

func (p *provisioner) destroyNodes(ctx context.Context, clusterName string, options *provision.Options) error {
    containers, err := p.listNodes(ctx, clusterName)
    if err != nil {
        return err
    }

    errCh := make(chan error)

    for _, container := range containers {
        go func(container types.Container) {
            fmt.Fprintln(options.LogWriter, "destroying node", container.Names[0][1:])

            errCh <- p.client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
        }(container)
    }

    var multiErr *multierror.Error

    for range containers {
        multiErr = multierror.Append(multiErr, <-errCh)
    }

    return multiErr.ErrorOrNil()
}
67 internal/pkg/provision/providers/docker/reflect.go Normal file
@ -0,0 +1,67 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker

import (
    "context"
    "net"

    "github.com/talos-systems/talos/internal/pkg/provision"
    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
)

func (p *provisioner) Reflect(ctx context.Context, clusterName string) (provision.Cluster, error) {
    res := &result{
        talosConfig: nil, // TODO: recover talosConfig

        clusterInfo: provision.ClusterInfo{
            ClusterName: clusterName,
        },
    }

    // find network assuming network name == cluster name
    networks, err := p.listNetworks(ctx, clusterName)
    if err != nil {
        return nil, err
    }

    if len(networks) > 0 {
        network := networks[0]

        var cidr *net.IPNet
        _, cidr, err = net.ParseCIDR(network.IPAM.Config[0].Subnet)

        if err != nil {
            return nil, err
        }

        res.clusterInfo.Network.Name = network.Name
        res.clusterInfo.Network.CIDR = *cidr
    }

    // find nodes (containers)
    nodes, err := p.listNodes(ctx, clusterName)
    if err != nil {
        return nil, err
    }

    for _, node := range nodes {
        t, err := generate.ParseType(node.Labels["talos.type"])
        if err != nil {
            return nil, err
        }

        res.clusterInfo.Nodes = append(res.clusterInfo.Nodes,
            provision.NodeInfo{
                ID:   node.ID,
                Name: node.Names[0],
                Type: t,

                PrivateIP: net.ParseIP(node.NetworkSettings.Networks[res.clusterInfo.Network.Name].IPAddress),
            })
    }

    return res, nil
}
24 internal/pkg/provision/providers/docker/result.go Normal file
@ -0,0 +1,24 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package docker

import (
    "github.com/talos-systems/talos/cmd/osctl/pkg/client/config"
    "github.com/talos-systems/talos/internal/pkg/provision"
)

type result struct {
    talosConfig *config.Config

    clusterInfo provision.ClusterInfo
}

func (res *result) TalosConfig() *config.Config {
    return res.talosConfig
}

func (res *result) Info() provision.ClusterInfo {
    return res.clusterInfo
}
21 internal/pkg/provision/provision.go Normal file
@ -0,0 +1,21 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

// Package provision provides abstract definitions for Talos cluster provisioners.
package provision

import "context"

// Provisioner is an interface each provisioner should implement.
type Provisioner interface {
    Create(context.Context, ClusterRequest, ...Option) (Cluster, error)
    Destroy(context.Context, Cluster, ...Option) error

    Close() error
}

// ClusterNameReflector rebuilds Cluster information by cluster name.
type ClusterNameReflector interface {
    Reflect(ctx context.Context, clusterName string) (Cluster, error)
}
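The split between Provisioner and ClusterNameReflector mirrors the two CLI paths: `cluster up` holds the Cluster it just created, while `cluster destroy` only knows a name and must rebuild the Cluster first. A hedged sketch of the destroy-by-name path under that assumption follows; the package, function name, and type assertion are illustrative, not code from this commit.

package example

import (
    "context"
    "fmt"

    "github.com/talos-systems/talos/internal/pkg/provision"
)

// destroyByName rebuilds cluster state via Reflect and then destroys it;
// it assumes the provisioner also implements ClusterNameReflector, as the
// docker provider in this commit does.
func destroyByName(ctx context.Context, p provision.Provisioner, name string) error {
    reflector, ok := p.(provision.ClusterNameReflector)
    if !ok {
        return fmt.Errorf("provisioner cannot look up clusters by name")
    }

    cluster, err := reflector.Reflect(ctx, name)
    if err != nil {
        return err
    }

    return p.Destroy(ctx, cluster)
}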
14 internal/pkg/provision/provision_test.go Normal file
@ -0,0 +1,14 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package provision_test

import "testing"

func TestEmpty(t *testing.T) {
    // added for accurate coverage estimation
    //
    // please remove it once any unit-test is added
    // for this package
}
90 internal/pkg/provision/request.go Normal file
@ -0,0 +1,90 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package provision

import (
    "fmt"
    "net"

    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
)

// ClusterRequest is the root object describing cluster to be provisioned.
type ClusterRequest struct {
    Name string

    Network NetworkRequest
    Nodes   NodeRequests

    Image             string
    KubernetesVersion string
}

// NetworkRequest describes cluster network.
type NetworkRequest struct {
    Name string
    CIDR net.IPNet
    MTU  int
}

// NodeRequests is a list of NodeRequest.
type NodeRequests []NodeRequest

// FindInitNode looks up the init node; it returns an error if no init node is present or if it's duplicated.
func (reqs NodeRequests) FindInitNode() (req NodeRequest, err error) {
    found := false

    for i := range reqs {
        if reqs[i].Type == generate.TypeInit {
            if found {
                err = fmt.Errorf("duplicate init node in requests")
                return
            }

            req = reqs[i]
            found = true
        }
    }

    if !found {
        err = fmt.Errorf("no init node found in requests")
    }

    return
}

// MasterNodes returns subset of nodes which are Init/ControlPlane type.
func (reqs NodeRequests) MasterNodes() (nodes []NodeRequest) {
    for i := range reqs {
        if reqs[i].Type == generate.TypeInit || reqs[i].Type == generate.TypeControlPlane {
            nodes = append(nodes, reqs[i])
        }
    }

    return
}

// WorkerNodes returns subset of nodes which are Join (worker) type.
func (reqs NodeRequests) WorkerNodes() (nodes []NodeRequest) {
    for i := range reqs {
        if reqs[i].Type == generate.TypeJoin {
            nodes = append(nodes, reqs[i])
        }
    }

    return
}

// NodeRequest describes a request for a node.
type NodeRequest struct {
    Type generate.Type
    Name string
    IP   net.IP

    // Share of CPUs, in 1e-9 fractions
    NanoCPUs int64
    // Memory limit in bytes
    Memory int64
}
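A small sketch of how the NodeRequests helpers partition a request; the node names and IPs below are illustrative values only, not defaults from this commit.

package example

import (
    "fmt"
    "net"

    "github.com/talos-systems/talos/internal/pkg/provision"
    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
)

func splitNodes() error {
    nodes := provision.NodeRequests{
        {Type: generate.TypeInit, Name: "master-1", IP: net.ParseIP("10.5.0.2")},
        {Type: generate.TypeControlPlane, Name: "master-2", IP: net.ParseIP("10.5.0.3")},
        {Type: generate.TypeJoin, Name: "worker-1"},
    }

    initNode, err := nodes.FindInitNode() // errors if zero or more than one init node
    if err != nil {
        return err
    }

    fmt.Println(initNode.Name)            // master-1
    fmt.Println(len(nodes.MasterNodes())) // 2 (init + control plane)
    fmt.Println(len(nodes.WorkerNodes())) // 1 (join)

    return nil
}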
42 internal/pkg/provision/result.go Normal file
@ -0,0 +1,42 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package provision

import (
    "net"

    "github.com/talos-systems/talos/cmd/osctl/pkg/client/config"
    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
)

// Cluster describes the provisioned Cluster.
type Cluster interface {
    TalosConfig() *config.Config
    Info() ClusterInfo
}

// ClusterInfo describes the cluster.
type ClusterInfo struct {
    ClusterName string

    Network NetworkInfo
    Nodes   []NodeInfo
}

// NetworkInfo describes cluster network.
type NetworkInfo struct {
    Name string
    CIDR net.IPNet
}

// NodeInfo describes a node.
type NodeInfo struct {
    ID   string
    Name string
    Type generate.Type

    PublicIP  net.IP
    PrivateIP net.IP
}
@ -10,6 +10,7 @@ import (
    stdlibx509 "crypto/x509"
    "encoding/pem"
    "errors"
    "fmt"
    "net"
    "net/url"
    "time"
@ -47,11 +48,25 @@ const (
    TypeJoin
)

// Sring returns the string representation of Type.
// String returns the string representation of Type.
func (t Type) String() string {
    return [...]string{"Init", "ControlPlane", "Join"}[t]
}

// ParseType parses string constant as Type
func ParseType(t string) (Type, error) {
    switch t {
    case "Init":
        return TypeInit, nil
    case "ControlPlane":
        return TypeControlPlane, nil
    case "Join":
        return TypeJoin, nil
    default:
        return 0, fmt.Errorf("unknown type %q", t)
    }
}

// Config returns the talos config for a given node type.
// nolint: gocyclo
func Config(t Type, in *Input) (s string, err error) {
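ParseType is the inverse of String, which is what lets the docker provider store the node type in the "talos.type" container label and recover it later in Reflect. A quick round-trip, for illustration only (the package name and function are made up for the example):

package example

import (
    "fmt"

    "github.com/talos-systems/talos/pkg/config/types/v1alpha1/generate"
)

func roundTrip() {
    label := generate.TypeControlPlane.String() // "ControlPlane"

    t, err := generate.ParseType(label)
    if err != nil {
        panic(err)
    }

    fmt.Println(t == generate.TypeControlPlane) // true
}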