test: add support for IPv6 in talosctl cluster create

Modify the provision library to support multiple IPs, CIDRs, and gateways,
which can be IPv4 and/or IPv6. Based on the IP types, enable services in the
cluster to run DHCPv4/DHCPv6 in the test environment.

There's an outstanding bug with routes not being set up properly in the
cluster, so the IPs are not fully routable yet, but DHCPv6 works and IPs
are allocated (this validates the DHCPv6 client).

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
Andrey Smirnov authored 2021-02-08 16:41:32 +03:00, committed by talos-bot
parent 3aaa888f9a
commit 7f3dca8e4c
30 changed files with 453 additions and 154 deletions
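
The IPv6 network is not taken from a flag: it is a ULA /64 derived from the IPv4 --cidr (fd00::/8 plus "tal" in hex gives fd74:616c::/32, and the four IPv4 octets fill the next 32 bits). A minimal standalone sketch of that derivation, mirroring the new code in the create command below (the deriveULA helper name is illustrative; the commit inlines this logic in create()):

```go
package main

import (
	"fmt"
	"net"
)

// deriveULA builds the cluster's unique IPv6 ULA /64 from its IPv4 CIDR:
// fd00::/8 + "tal" in hex -> fd74:616c::/32, then the four IPv4 octets.
func deriveULA(ipv4CIDR string) (*net.IPNet, error) {
	_, cidr4, err := net.ParseCIDR(ipv4CIDR)
	if err != nil {
		return nil, fmt.Errorf("error validating cidr block: %w", err)
	}

	ip4 := cidr4.IP.To4()
	if ip4 == nil {
		return nil, fmt.Errorf("%q is not an IPv4 CIDR", ipv4CIDR)
	}

	_, cidr6, err := net.ParseCIDR(fmt.Sprintf("fd74:616c:%02x%02x:%02x%02x::/64", ip4[0], ip4[1], ip4[2], ip4[3]))

	return cidr6, err
}

func main() {
	cidr6, err := deriveULA("10.5.0.0/24") // the default --cidr value
	if err != nil {
		panic(err)
	}

	fmt.Println(cidr6) // fd74:616c:a05::/64
}
```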


@ -8,9 +8,9 @@ REGISTRY_AND_USERNAME := $(REGISTRY)/$(USERNAME)
DOCKER_LOGIN_ENABLED ?= true
ARTIFACTS := _out
TOOLS ?= ghcr.io/talos-systems/tools:v0.3.0-17-g24a6dac
PKGS ?= v0.3.0-66-g3c35918
EXTRAS ?= v0.1.0-6-gdc32cc8
TOOLS ?= ghcr.io/talos-systems/tools:v0.3.0-19-ge54841a
PKGS ?= v0.3.0-69-gf9d9690
EXTRAS ?= v0.1.0-8-g3cb9fc9
GO_VERSION ?= 1.15
GOFUMPT_VERSION ?= abc0db2c416aca0f60ea33c23c76665f6e7ba0b6
IMPORTVET ?= autonomy/importvet:f6b07d9


@ -58,6 +58,8 @@ var (
configDebug bool
networkCIDR string
networkMTU int
networkIPv4 bool
networkIPv6 bool
wireguardCIDR string
nameservers []string
dnsDomain string
@ -113,26 +115,56 @@ func create(ctx context.Context) (err error) {
// Validate CIDR range and allocate IPs
fmt.Println("validating CIDR and reserving IPs")
_, cidr, err := net.ParseCIDR(networkCIDR)
_, cidr4, err := net.ParseCIDR(networkCIDR)
if err != nil {
return fmt.Errorf("error validating cidr block: %w", err)
}
// Gateway addr at 1st IP in range, ex. 192.168.0.1
var gatewayIP net.IP
if cidr4.IP.To4() == nil {
return fmt.Errorf("--cidr is expected to be IPV4 CIDR")
}
gatewayIP, err = talosnet.NthIPInNetwork(cidr, 1)
// use ULA IPv6 network fd00::/8, add 'TAL' in hex to build /32 network, add IPv4 CIDR to build /64 unique network
_, cidr6, err := net.ParseCIDR(fmt.Sprintf("fd74:616c:%02x%02x:%02x%02x::/64", cidr4.IP[0], cidr4.IP[1], cidr4.IP[2], cidr4.IP[3]))
if err != nil {
return err
return fmt.Errorf("error validating cidr IPv6 block: %w", err)
}
var cidrs []net.IPNet
if networkIPv4 {
cidrs = append(cidrs, *cidr4)
}
if networkIPv6 {
cidrs = append(cidrs, *cidr6)
}
if len(cidrs) == 0 {
return fmt.Errorf("neither IPv4 nor IPv6 network was enabled")
}
// Gateway addr at 1st IP in range, ex. 192.168.0.1
gatewayIPs := make([]net.IP, len(cidrs))
for j := range gatewayIPs {
gatewayIPs[j], err = talosnet.NthIPInNetwork(&cidrs[j], 1)
if err != nil {
return err
}
}
// Set starting ip at 2nd ip in range, ex: 192.168.0.2
ips := make([]net.IP, masters+workers)
ips := make([][]net.IP, len(cidrs))
for i := range ips {
ips[i], err = talosnet.NthIPInNetwork(cidr, i+2)
if err != nil {
return err
for j := range cidrs {
ips[j] = make([]net.IP, masters+workers)
for i := range ips[j] {
ips[j][i], err = talosnet.NthIPInNetwork(&cidrs[j], i+2)
if err != nil {
return err
}
}
}
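
As the loops above show, addresses are allocated positionally in every CIDR: the gateway takes the first IP of each range and node IPs start at the second, for IPv4 and IPv6 alike. A small standalone sketch of the same allocation using the talos-systems/net helper referenced in the diff (the ULA prefix below is the one derived from the default 10.5.0.0/24 cidr):

```go
package main

import (
	"fmt"
	"net"

	talosnet "github.com/talos-systems/net"
)

func main() {
	for _, c := range []string{"10.5.0.0/24", "fd74:616c:0a05::/64"} {
		_, cidr, err := net.ParseCIDR(c)
		if err != nil {
			panic(err)
		}

		// Gateway addr at 1st IP in range, ex. 192.168.0.1
		gateway, err := talosnet.NthIPInNetwork(cidr, 1)
		if err != nil {
			panic(err)
		}

		// First node at 2nd IP in range, ex. 192.168.0.2
		firstNode, err := talosnet.NthIPInNetwork(cidr, 2)
		if err != nil {
			panic(err)
		}

		fmt.Printf("%s: gateway %s, first node %s\n", c, gateway, firstNode)
	}
}
```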
@ -158,11 +190,11 @@ func create(ctx context.Context) (err error) {
Name: clusterName,
Network: provision.NetworkRequest{
Name: clusterName,
CIDR: *cidr,
GatewayAddr: gatewayIP,
MTU: networkMTU,
Nameservers: nameserverIPs,
Name: clusterName,
CIDRs: cidrs,
GatewayAddrs: gatewayIPs,
MTU: networkMTU,
Nameservers: nameserverIPs,
CNI: provision.CNIConfig{
BinPath: cniBinPath,
ConfDir: cniConfDir,
@ -252,7 +284,7 @@ func create(ctx context.Context) (err error) {
if defaultInternalLB == "" {
// provisioner doesn't provide internal LB, so use first master node
defaultInternalLB = ips[0].String()
defaultInternalLB = ips[0][0].String()
}
var endpointList []string
@ -268,11 +300,11 @@ func create(ctx context.Context) (err error) {
endpointList = []string{forceEndpoint}
provisionOptions = append(provisionOptions, provision.WithEndpoint(forceEndpoint))
case forceInitNodeAsEndpoint:
endpointList = []string{ips[0].String()}
endpointList = []string{ips[0][0].String()}
default:
// use control plane nodes as endpoints, client-side load-balancing
for i := 0; i < masters; i++ {
endpointList = append(endpointList, ips[i].String())
endpointList = append(endpointList, ips[0][i].String())
}
}
@ -308,7 +340,7 @@ func create(ctx context.Context) (err error) {
// Wireguard configuration.
var wireguardConfigBundle *helpers.WireguardConfigBundle
if wireguardCIDR != "" {
wireguardConfigBundle, err = helpers.NewWireguardConfigBundle(ips, wireguardCIDR, 51111, masters)
wireguardConfigBundle, err = helpers.NewWireguardConfigBundle(ips[0], wireguardCIDR, 51111, masters)
if err != nil {
return err
}
@ -321,10 +353,15 @@ func create(ctx context.Context) (err error) {
for i := 0; i < masters; i++ {
var cfg config.Provider
nodeIPs := make([]net.IP, len(cidrs))
for j := range nodeIPs {
nodeIPs[j] = ips[j][i]
}
nodeReq := provision.NodeRequest{
Name: fmt.Sprintf("%s-master-%d", clusterName, i+1),
Type: machine.TypeControlPlane,
IP: ips[i],
IPs: nodeIPs,
Memory: memory,
NanoCPUs: nanoCPUs,
Disks: disks,
@ -343,7 +380,7 @@ func create(ctx context.Context) (err error) {
}
if wireguardConfigBundle != nil {
cfg, err = wireguardConfigBundle.PatchConfig(nodeReq.IP, cfg)
cfg, err = wireguardConfigBundle.PatchConfig(nodeIPs[0], cfg)
if err != nil {
return err
}
@ -358,10 +395,13 @@ func create(ctx context.Context) (err error) {
cfg := configBundle.Join()
ip := ips[masters+i-1]
nodeIPs := make([]net.IP, len(cidrs))
for j := range nodeIPs {
nodeIPs[j] = ips[j][masters+i-1]
}
if wireguardConfigBundle != nil {
cfg, err = wireguardConfigBundle.PatchConfig(ip, cfg)
cfg, err = wireguardConfigBundle.PatchConfig(nodeIPs[0], cfg)
if err != nil {
return err
}
@ -371,7 +411,7 @@ func create(ctx context.Context) (err error) {
provision.NodeRequest{
Name: name,
Type: machine.TypeJoin,
IP: ip,
IPs: nodeIPs,
Memory: memory,
NanoCPUs: nanoCPUs,
Disks: disks,
@ -601,9 +641,11 @@ func init() {
createCmd.Flags().StringSliceVar(&registryInsecure, "registry-insecure-skip-verify", []string{}, "list of registry hostnames to skip TLS verification for")
createCmd.Flags().BoolVar(&configDebug, "with-debug", false, "enable debug in Talos config to send service logs to the console")
createCmd.Flags().IntVar(&networkMTU, "mtu", 1500, "MTU of the cluster network")
createCmd.Flags().StringVar(&networkCIDR, "cidr", "10.5.0.0/24", "CIDR of the cluster network")
createCmd.Flags().StringVar(&networkCIDR, "cidr", "10.5.0.0/24", "CIDR of the cluster network (IPv4, ULA network for IPv6 is derived in automated way)")
createCmd.Flags().BoolVar(&networkIPv4, "ipv4", true, "enable IPv4 network in the cluster")
createCmd.Flags().BoolVar(&networkIPv6, "ipv6", false, "enable IPv6 network in the cluster (QEMU provisioner only)")
createCmd.Flags().StringVar(&wireguardCIDR, "wireguard-cidr", "", "CIDR of the wireguard network")
createCmd.Flags().StringSliceVar(&nameservers, "nameservers", []string{"8.8.8.8", "1.1.1.1"}, "list of nameservers to use")
createCmd.Flags().StringSliceVar(&nameservers, "nameservers", []string{"8.8.8.8", "1.1.1.1", "2001:4860:4860::8888", "2606:4700:4700::1111"}, "list of nameservers to use")
createCmd.Flags().IntVar(&workers, "workers", 1, "the number of workers to create")
createCmd.Flags().IntVar(&masters, "masters", 1, "the number of masters to create")
createCmd.Flags().StringVar(&clusterCpus, "cpus", "2.0", "the share of CPUs as fraction (each container/VM)")


@ -9,10 +9,12 @@ import (
"fmt"
"os"
"sort"
"strings"
"text/tabwriter"
"github.com/dustin/go-humanize"
"github.com/spf13/cobra"
"github.com/talos-systems/net"
"github.com/talos-systems/talos/pkg/cli"
"github.com/talos-systems/talos/pkg/provision"
@ -52,9 +54,19 @@ func showCluster(cluster provision.Cluster) error {
fmt.Fprintf(w, "NAME\t%s\n", cluster.Info().ClusterName)
fmt.Fprintf(w, "NETWORK NAME\t%s\n", cluster.Info().Network.Name)
ones, _ := cluster.Info().Network.CIDR.Mask.Size()
fmt.Fprintf(w, "NETWORK CIDR\t%s/%d\n", cluster.Info().Network.CIDR.IP, ones)
fmt.Fprintf(w, "NETWORK GATEWAY\t%s\n", cluster.Info().Network.GatewayAddr)
cidrs := make([]string, len(cluster.Info().Network.CIDRs))
for i := range cidrs {
cidrs[i] = net.FormatCIDR(cluster.Info().Network.CIDRs[i].IP, cluster.Info().Network.CIDRs[i])
}
fmt.Fprintf(w, "NETWORK CIDR\t%s\n", strings.Join(cidrs, ","))
gateways := make([]string, len(cluster.Info().Network.GatewayAddrs))
for i := range gateways {
gateways[i] = cluster.Info().Network.GatewayAddrs[i].String()
}
fmt.Fprintf(w, "NETWORK GATEWAY\t%s\n", strings.Join(gateways, ","))
fmt.Fprintf(w, "NETWORK MTU\t%d\n", cluster.Info().Network.MTU)
if err := w.Flush(); err != nil {
@ -86,10 +98,15 @@ func showCluster(cluster provision.Cluster) error {
disk = humanize.Bytes(node.DiskSize)
}
ips := make([]string, len(node.IPs))
for i := range ips {
ips[i] = node.IPs[i].String()
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n",
node.Name,
node.Type,
node.PrivateIP,
strings.Join(ips, ","),
cpus,
mem,
disk,


@ -6,6 +6,7 @@ package mgmt
import (
"net"
"strings"
"github.com/spf13/cobra"
@ -26,12 +27,18 @@ var dhcpdLaunchCmd = &cobra.Command{
Args: cobra.NoArgs,
Hidden: true,
RunE: func(cmd *cobra.Command, args []string) error {
return vm.DHCPd(dhcpdLaunchCmdFlags.ifName, net.ParseIP(dhcpdLaunchCmdFlags.addr), dhcpdLaunchCmdFlags.statePath)
ips := []net.IP{}
for _, ip := range strings.Split(dhcpdLaunchCmdFlags.addr, ",") {
ips = append(ips, net.ParseIP(ip))
}
return vm.DHCPd(dhcpdLaunchCmdFlags.ifName, ips, dhcpdLaunchCmdFlags.statePath)
},
}
func init() {
dhcpdLaunchCmd.Flags().StringVar(&dhcpdLaunchCmdFlags.addr, "addr", "localhost", "IP address to listen on")
dhcpdLaunchCmd.Flags().StringVar(&dhcpdLaunchCmdFlags.addr, "addr", "localhost", "IP addresses to listen on")
dhcpdLaunchCmd.Flags().StringVar(&dhcpdLaunchCmdFlags.ifName, "interface", "", "interface to listen on")
dhcpdLaunchCmd.Flags().StringVar(&dhcpdLaunchCmdFlags.statePath, "state-path", "", "path to state directory")
addCommand(dhcpdLaunchCmd)

go.mod

@ -22,8 +22,8 @@ require (
github.com/containerd/go-cni v1.0.1
github.com/containerd/ttrpc v1.0.2 // indirect
github.com/containerd/typeurl v1.0.1
github.com/containernetworking/cni v0.8.0
github.com/containernetworking/plugins v0.8.7
github.com/containernetworking/cni v0.8.1
github.com/containernetworking/plugins v0.9.0
github.com/coreos/go-iptables v0.4.5
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/docker/distribution v2.7.1+incompatible
@ -71,11 +71,10 @@ require (
github.com/talos-systems/go-retry v0.2.0
github.com/talos-systems/go-smbios v0.0.0-20200807005123-80196199691e
github.com/talos-systems/grpc-proxy v0.2.0
github.com/talos-systems/net v0.2.1-0.20210121122956-005a94f8b36b
github.com/talos-systems/net v0.2.1-0.20210204205549-52c750994376
github.com/talos-systems/os-runtime v0.0.0-20210126185717-734f1e1cee9e
github.com/talos-systems/talos/pkg/machinery v0.0.0-20200818212414-6a7cc0264819
github.com/u-root/u-root v7.0.0+incompatible
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect
github.com/vmware-tanzu/sonobuoy v0.19.0
github.com/vmware/vmw-guestinfo v0.0.0-20200218095840-687661b8bd8e
go.etcd.io/etcd v0.5.0-alpha.5.0.20201125193152-8a03d2e9614b

go.sum

@ -182,8 +182,12 @@ github.com/containerd/typeurl v1.0.1 h1:PvuK4E3D5S5q6IqsPDCy928FhP0LUIGcmZ/Yhgp5
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.7 h1:bU7QieuAp+sACI2vCzESJ3FoT860urYP+lThyZkb/2M=
github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0=
github.com/containernetworking/plugins v0.9.0 h1:c+1gegKhR7+d0Caum9pEHugZlyhXPOG6v3V6xJgIGCI=
github.com/containernetworking/plugins v0.9.0/go.mod h1:dbWv4dI0QrBGuVgj+TuVQ6wJRZVOhrCQj91YyC92sxg=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@ -685,6 +689,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@ -693,11 +699,15 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@ -872,8 +882,9 @@ github.com/talos-systems/go-smbios v0.0.0-20200807005123-80196199691e h1:uCp8BfH
github.com/talos-systems/go-smbios v0.0.0-20200807005123-80196199691e/go.mod h1:HxhrzAoTZ7ed5Z5VvtCvnCIrOxyXDS7V2B5hCetAMW8=
github.com/talos-systems/grpc-proxy v0.2.0 h1:DN75bLfaW4xfhq0r0mwFRnfGhSB+HPhK1LNzuMEs9Pw=
github.com/talos-systems/grpc-proxy v0.2.0/go.mod h1:sm97Vc/z2cok3pu6ruNeszQej4KDxFrDgfWs4C1mtC4=
github.com/talos-systems/net v0.2.1-0.20210121122956-005a94f8b36b h1:y3mBkTJdW7cUn+ff53TZN0yyWCpjS6XrVmlx+vx9pwA=
github.com/talos-systems/net v0.2.1-0.20210121122956-005a94f8b36b/go.mod h1:VreSAyRmxMtqussAHSKMKkJQa1YwBTSVfkmE4Jydam4=
github.com/talos-systems/net v0.2.1-0.20210204205549-52c750994376 h1:O0umyJKK3LJdnAwtg0xnsFJb2+1qYqExvbGymfNwCA8=
github.com/talos-systems/net v0.2.1-0.20210204205549-52c750994376/go.mod h1:VreSAyRmxMtqussAHSKMKkJQa1YwBTSVfkmE4Jydam4=
github.com/talos-systems/os-runtime v0.0.0-20210126185717-734f1e1cee9e h1:HrAdgwnXhVr9LlWjpc+kejkLVUpTRKbNTAJe7H+kRXM=
github.com/talos-systems/os-runtime v0.0.0-20210126185717-734f1e1cee9e/go.mod h1:+E9CUVoYpReh0nhOEvFpy7pwLiyq0700WF03I06giyk=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
@ -897,6 +908,8 @@ github.com/viniciuschiele/tarx v0.0.0-20151205142357-6e3da540444d/go.mod h1:8uo3
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
@ -1044,6 +1057,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
@ -1096,6 +1110,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1128,6 +1143,7 @@ golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201017003518-b09fb700fbb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201130171929-760e229fe7c5 h1:dMDtAap8F/+vsyXblqK90iTzYJjNix5MsXDicSYol6w=


@ -95,7 +95,7 @@ func (suite *ResetSuite) TestResetNodeByNode() {
for _, node := range suite.Cluster.Info().Nodes {
if node.Type == machine.TypeInit {
initNodeAddress = node.PrivateIP.String()
initNodeAddress = node.IPs[0].String()
break
}


@ -34,11 +34,11 @@ func (suite *CrashdumpSuite) TestRun() {
for _, node := range suite.Cluster.Info().Nodes {
switch node.Type {
case machine.TypeInit:
args = append(args, "--init-node", node.PrivateIP.String())
args = append(args, "--init-node", node.IPs[0].String())
case machine.TypeControlPlane:
args = append(args, "--control-plane-nodes", node.PrivateIP.String())
args = append(args, "--control-plane-nodes", node.IPs[0].String())
case machine.TypeJoin:
args = append(args, "--worker-nodes", node.PrivateIP.String())
args = append(args, "--worker-nodes", node.IPs[0].String())
case machine.TypeUnknown:
panic("unexpected")
}


@ -46,9 +46,9 @@ func (suite *HealthSuite) TestClientSide() {
for _, node := range suite.Cluster.Info().Nodes {
switch node.Type {
case machine.TypeControlPlane:
args = append(args, "--control-plane-nodes", node.PrivateIP.String())
args = append(args, "--control-plane-nodes", node.IPs[0].String())
case machine.TypeJoin:
args = append(args, "--worker-nodes", node.PrivateIP.String())
args = append(args, "--worker-nodes", node.IPs[0].String())
case machine.TypeInit, machine.TypeUnknown:
panic("unexpected")
}
@ -57,11 +57,11 @@ func (suite *HealthSuite) TestClientSide() {
for _, node := range suite.Cluster.Info().Nodes {
switch node.Type {
case machine.TypeInit:
args = append(args, "--init-node", node.PrivateIP.String())
args = append(args, "--init-node", node.IPs[0].String())
case machine.TypeControlPlane:
args = append(args, "--control-plane-nodes", node.PrivateIP.String())
args = append(args, "--control-plane-nodes", node.IPs[0].String())
case machine.TypeJoin:
args = append(args, "--worker-nodes", node.PrivateIP.String())
args = append(args, "--worker-nodes", node.IPs[0].String())
case machine.TypeUnknown:
panic("unexpected")
}


@ -276,11 +276,11 @@ func (suite *UpgradeSuite) setupCluster() {
Name: clusterName,
Network: provision.NetworkRequest{
Name: clusterName,
CIDR: *cidr,
GatewayAddr: gatewayIP,
MTU: DefaultSettings.MTU,
Nameservers: defaultNameservers,
Name: clusterName,
CIDRs: []net.IPNet{*cidr},
GatewayAddrs: []net.IP{gatewayIP},
MTU: DefaultSettings.MTU,
Nameservers: defaultNameservers,
CNI: provision.CNIConfig{
BinPath: defaultCNIBinPath,
ConfDir: defaultCNIConfDir,
@ -339,7 +339,7 @@ func (suite *UpgradeSuite) setupCluster() {
provision.NodeRequest{
Name: fmt.Sprintf("master-%d", i+1),
Type: machine.TypeControlPlane,
IP: ips[i],
IPs: []net.IP{ips[i]},
Memory: DefaultSettings.MemMB * 1024 * 1024,
NanoCPUs: DefaultSettings.CPUs * 1000 * 1000 * 1000,
Disks: []*provision.Disk{
@ -356,7 +356,7 @@ func (suite *UpgradeSuite) setupCluster() {
provision.NodeRequest{
Name: fmt.Sprintf("worker-%d", i),
Type: machine.TypeJoin,
IP: ips[suite.spec.MasterNodes+i-1],
IPs: []net.IP{ips[suite.spec.MasterNodes+i-1]},
Memory: DefaultSettings.MemMB * 1024 * 1024,
NanoCPUs: DefaultSettings.CPUs * 1000 * 1000 * 1000,
Disks: []*provision.Disk{
@ -428,7 +428,7 @@ func (suite *UpgradeSuite) assertSameVersionCluster(client *talosclient.Client,
nodes := make([]string, len(suite.Cluster.Info().Nodes))
for i, node := range suite.Cluster.Info().Nodes {
nodes[i] = node.PrivateIP.String()
nodes[i] = node.IPs[0].String()
}
ctx := talosclient.WithNodes(suite.ctx, nodes...)
@ -467,9 +467,9 @@ func (suite *UpgradeSuite) readVersion(nodeCtx context.Context, client *taloscli
}
func (suite *UpgradeSuite) upgradeNode(client *talosclient.Client, node provision.NodeInfo) {
suite.T().Logf("upgrading node %s", node.PrivateIP)
suite.T().Logf("upgrading node %s", node.IPs[0])
nodeCtx := talosclient.WithNodes(suite.ctx, node.PrivateIP.String())
nodeCtx := talosclient.WithNodes(suite.ctx, node.IPs[0].String())
resp, err := client.Upgrade(nodeCtx, suite.spec.TargetInstallerImage, suite.spec.UpgradePreserve, suite.spec.UpgradeStage, false)
@ -495,7 +495,7 @@ func (suite *UpgradeSuite) upgradeNode(client *talosclient.Client, node provisio
if version != suite.spec.TargetVersion {
// upgrade not finished yet
return retry.ExpectedError(fmt.Errorf("node %q version doesn't match expected: expected %q, got %q", node.PrivateIP.String(), suite.spec.TargetVersion, version))
return retry.ExpectedError(fmt.Errorf("node %q version doesn't match expected: expected %q, got %q", node.IPs[0].String(), suite.spec.TargetVersion, version))
}
return nil


@ -31,7 +31,7 @@ func (s *APIBootstrapper) ApplyConfig(ctx context.Context, nodes []provision.Nod
configureNode := func() error {
c, err := client.New(ctx, client.WithTLSConfig(&tls.Config{
InsecureSkipVerify: true,
}), client.WithEndpoints(n.IP.String()))
}), client.WithEndpoints(n.IPs[0].String()))
if err != nil {
return retry.UnexpectedError(err)
}


@ -28,7 +28,7 @@ func (wrapper *infoWrapper) Nodes() []string {
nodes := make([]string, len(wrapper.clusterInfo.Nodes))
for i := range nodes {
nodes[i] = wrapper.clusterInfo.Nodes[i].PrivateIP.String()
nodes[i] = wrapper.clusterInfo.Nodes[i].IPs[0].String()
}
return nodes
@ -39,7 +39,7 @@ func (wrapper *infoWrapper) NodesByType(t machine.Type) []string {
for _, node := range wrapper.clusterInfo.Nodes {
if node.Type == t {
nodes = append(nodes, node.PrivateIP.String())
nodes = append(nodes, node.IPs[0].String())
}
}


@ -57,10 +57,10 @@ func (p *provisioner) Create(ctx context.Context, request provision.ClusterReque
clusterInfo: provision.ClusterInfo{
ClusterName: request.Name,
Network: provision.NetworkInfo{
Name: request.Network.Name,
CIDR: request.Network.CIDR,
GatewayAddr: request.Network.GatewayAddr,
MTU: request.Network.MTU,
Name: request.Network.Name,
CIDRs: request.Network.CIDRs[:1],
GatewayAddrs: request.Network.GatewayAddrs[:1],
MTU: request.Network.MTU,
},
Nodes: nodeInfo,
},


@ -27,8 +27,8 @@ func (p *provisioner) createNetwork(ctx context.Context, req provision.NetworkRe
// If named net already exists, see if we can reuse it
if len(existingNet) > 0 {
if existingNet[0].IPAM.Config[0].Subnet != req.CIDR.String() {
return fmt.Errorf("existing network has differing cidr: %s vs %s", existingNet[0].IPAM.Config[0].Subnet, req.CIDR.String())
if existingNet[0].IPAM.Config[0].Subnet != req.CIDRs[0].String() {
return fmt.Errorf("existing network has differing cidr: %s vs %s", existingNet[0].IPAM.Config[0].Subnet, req.CIDRs[0].String())
}
// CIDRs match, we'll reuse
return nil
@ -43,7 +43,7 @@ func (p *provisioner) createNetwork(ctx context.Context, req provision.NetworkRe
IPAM: &network.IPAM{
Config: []network.IPAMConfig{
{
Subnet: req.CIDR.String(),
Subnet: req.CIDRs[0].String(),
},
},
},


@ -134,13 +134,13 @@ func (p *provisioner) createNode(ctx context.Context, clusterReq provision.Clust
containerConfig.Volumes[constants.EtcdDataPath] = struct{}{}
if nodeReq.IP == nil {
if nodeReq.IPs == nil {
return provision.NodeInfo{}, errors.New("an IP address must be provided when creating a master node")
}
}
if nodeReq.IP != nil {
networkConfig.EndpointsConfig[clusterReq.Network.Name].IPAMConfig = &network.EndpointIPAMConfig{IPv4Address: nodeReq.IP.String()}
if nodeReq.IPs != nil {
networkConfig.EndpointsConfig[clusterReq.Network.Name].IPAMConfig = &network.EndpointIPAMConfig{IPv4Address: nodeReq.IPs[0].String()}
}
// Create the container.
@ -169,7 +169,7 @@ func (p *provisioner) createNode(ctx context.Context, clusterReq provision.Clust
NanoCPUs: nodeReq.NanoCPUs,
Memory: nodeReq.Memory,
PrivateIP: net.ParseIP(info.NetworkSettings.Networks[clusterReq.Network.Name].IPAddress),
IPs: []net.IP{net.ParseIP(info.NetworkSettings.Networks[clusterReq.Network.Name].IPAddress)},
}
return nodeInfo, nil


@ -37,8 +37,8 @@ func (p *provisioner) Reflect(ctx context.Context, clusterName, stateDirectory s
}
res.clusterInfo.Network.Name = network.Name
res.clusterInfo.Network.CIDR = *cidr
res.clusterInfo.Network.GatewayAddr = net.ParseIP(network.IPAM.Config[0].Gateway)
res.clusterInfo.Network.CIDRs = []net.IPNet{*cidr}
res.clusterInfo.Network.GatewayAddrs = []net.IP{net.ParseIP(network.IPAM.Config[0].Gateway)}
mtuStr := network.Options["com.docker.network.driver.mtu"]
res.clusterInfo.Network.MTU, err = strconv.Atoi(mtuStr)
@ -66,7 +66,7 @@ func (p *provisioner) Reflect(ctx context.Context, clusterName, stateDirectory s
Name: node.Names[0],
Type: t,
PrivateIP: net.ParseIP(node.NetworkSettings.Networks[res.clusterInfo.Network.Name].IPAddress),
IPs: []net.IP{net.ParseIP(node.NetworkSettings.Networks[res.clusterInfo.Network.Name].IPAddress)},
})
}


@ -85,10 +85,10 @@ func (p *provisioner) Create(ctx context.Context, request provision.ClusterReque
state.ClusterInfo = provision.ClusterInfo{
ClusterName: request.Name,
Network: provision.NetworkInfo{
Name: request.Network.Name,
CIDR: request.Network.CIDR,
GatewayAddr: request.Network.GatewayAddr,
MTU: request.Network.MTU,
Name: request.Network.Name,
CIDRs: request.Network.CIDRs[:1],
GatewayAddrs: request.Network.GatewayAddrs[:1],
MTU: request.Network.MTU,
},
Nodes: nodeInfo,
}


@ -71,5 +71,5 @@ func (p *provisioner) GenOptions(networkReq provision.NetworkRequest) []generate
// GetLoadBalancers returns internal/external loadbalancer endpoints.
func (p *provisioner) GetLoadBalancers(networkReq provision.NetworkRequest) (internalEndpoint, externalEndpoint string) {
// firecracker runs loadbalancer on the bridge, which is good for both internal access, external access goes via round-robin
return networkReq.GatewayAddr.String(), ""
return networkReq.GatewayAddrs[0].String(), ""
}


@ -19,6 +19,7 @@ import (
models "github.com/firecracker-microvm/firecracker-go-sdk/client/models"
multierror "github.com/hashicorp/go-multierror"
"github.com/talos-systems/go-procfs/procfs"
talosnet "github.com/talos-systems/net"
"k8s.io/apimachinery/pkg/util/json"
"github.com/talos-systems/talos/pkg/machinery/constants"
@ -107,8 +108,6 @@ func (p *provisioner) createNode(state *vm.State, clusterReq provision.ClusterRe
}
}
ones, _ := clusterReq.Network.CIDR.Mask.Size()
drives := make([]models.Drive, len(diskPaths))
for i, disk := range diskPaths {
@ -139,8 +138,8 @@ func (p *provisioner) createNode(state *vm.State, clusterReq provision.ClusterRe
CacheDir: clusterReq.Network.CNI.CacheDir,
NetworkConfig: state.VMCNIConfig,
Args: [][2]string{
{"IP", fmt.Sprintf("%s/%d", nodeReq.IP, ones)},
{"GATEWAY", clusterReq.Network.GatewayAddr.String()},
{"IP", talosnet.FormatCIDR(nodeReq.IPs[0], clusterReq.Network.CIDRs[0])},
{"GATEWAY", clusterReq.Network.GatewayAddrs[0].String()},
},
IfName: "veth0",
VMIfName: "eth0",
@ -160,7 +159,7 @@ func (p *provisioner) createNode(state *vm.State, clusterReq provision.ClusterRe
launchConfig := LaunchConfig{
FirecrackerConfig: cfg,
Config: nodeConfig,
GatewayAddr: clusterReq.Network.GatewayAddr,
GatewayAddr: clusterReq.Network.GatewayAddrs[0],
BootloaderEmulation: opts.BootloaderEnabled,
}
@ -206,7 +205,7 @@ func (p *provisioner) createNode(state *vm.State, clusterReq provision.ClusterRe
Memory: nodeReq.Memory,
DiskSize: nodeReq.Disks[0].Size,
PrivateIP: nodeReq.IP,
IPs: nodeReq.IPs[:1],
}
return nodeInfo, nil


@ -97,10 +97,10 @@ func (p *provisioner) Create(ctx context.Context, request provision.ClusterReque
state.ClusterInfo = provision.ClusterInfo{
ClusterName: request.Name,
Network: provision.NetworkInfo{
Name: request.Network.Name,
CIDR: request.Network.CIDR,
GatewayAddr: request.Network.GatewayAddr,
MTU: request.Network.MTU,
Name: request.Network.Name,
CIDRs: request.Network.CIDRs,
GatewayAddrs: request.Network.GatewayAddrs,
MTU: request.Network.MTU,
},
Nodes: nodeInfo,
ExtraNodes: pxeNodeInfo,


@ -21,6 +21,7 @@ import (
"github.com/containernetworking/plugins/pkg/testutils"
"github.com/google/uuid"
"github.com/talos-systems/go-blockdevice/blockdevice/partition/gpt"
talosnet "github.com/talos-systems/net"
"github.com/talos-systems/talos/pkg/provision"
"github.com/talos-systems/talos/pkg/provision/internal/cniutils"
@ -51,12 +52,13 @@ type LaunchConfig struct {
Config string
// Network
BridgeName string
NetworkConfig *libcni.NetworkConfigList
CNI provision.CNIConfig
IP net.IP
CIDR net.IPNet
IPs []net.IP
CIDRs []net.IPNet
Hostname string
GatewayAddr net.IP
GatewayAddrs []net.IP
MTU int
Nameservers []net.IP
@ -82,6 +84,8 @@ type LaunchConfig struct {
// withCNI creates network namespace, launches CNI and passes control to the next function
// filling config with netNS and interface details.
//
//nolint: gocyclo
func withCNI(ctx context.Context, config *LaunchConfig, f func(config *LaunchConfig) error) error {
// random ID for the CNI, maps to single VM
containerID := uuid.New().String()
@ -99,14 +103,23 @@ func withCNI(ctx context.Context, config *LaunchConfig, f func(config *LaunchCon
testutils.UnmountNS(ns) //nolint: errcheck
}()
ones, _ := config.CIDR.Mask.Size()
ips := make([]string, len(config.IPs))
for j := range ips {
ips[j] = talosnet.FormatCIDR(config.IPs[j], config.CIDRs[j])
}
gatewayAddrs := make([]string, len(config.GatewayAddrs))
for j := range gatewayAddrs {
gatewayAddrs[j] = config.GatewayAddrs[j].String()
}
runtimeConf := libcni.RuntimeConf{
ContainerID: containerID,
NetNS: ns.Path(),
IfName: "veth0",
Args: [][2]string{
{"IP", fmt.Sprintf("%s/%d", config.IP, ones)},
{"GATEWAY", config.GatewayAddr.String()},
{"IP", strings.Join(ips, ",")},
{"GATEWAY", strings.Join(gatewayAddrs, ",")},
},
}
@ -144,19 +157,36 @@ func withCNI(ctx context.Context, config *LaunchConfig, f func(config *LaunchCon
config.vmMAC = vmIface.Mac
config.ns = ns
// dump node IP/mac/hostname for dhcp
if err = vm.DumpIPAMRecord(config.StatePath, vm.IPAMRecord{
IP: config.IP,
Netmask: config.CIDR.Mask,
MAC: vmIface.Mac,
Hostname: config.Hostname,
Gateway: config.GatewayAddr,
MTU: config.MTU,
Nameservers: config.Nameservers,
TFTPServer: config.TFTPServer,
IPXEBootFilename: config.IPXEBootFileName,
}); err != nil {
return err
for j := range config.CIDRs {
nameservers := make([]net.IP, 0, len(config.Nameservers))
// filter nameservers by IPv4/IPv6 matching IPs
for i := range config.Nameservers {
if config.IPs[j].To4() == nil {
if config.Nameservers[i].To4() == nil {
nameservers = append(nameservers, config.Nameservers[i])
}
} else {
if config.Nameservers[i].To4() != nil {
nameservers = append(nameservers, config.Nameservers[i])
}
}
}
// dump node IP/mac/hostname for dhcp
if err = vm.DumpIPAMRecord(config.StatePath, vm.IPAMRecord{
IP: config.IPs[j],
Netmask: config.CIDRs[j].Mask,
MAC: vmIface.Mac,
Hostname: config.Hostname,
Gateway: config.GatewayAddrs[j],
MTU: config.MTU,
Nameservers: nameservers,
TFTPServer: config.TFTPServer,
IPXEBootFilename: config.IPXEBootFileName,
}); err != nil {
return err
}
}
return f(config)
@ -333,7 +363,7 @@ func Launch() error {
config.c = vm.ConfigureSignals()
config.controller = NewController()
httpServer, err := vm.NewHTTPServer(config.GatewayAddr, config.APIPort, []byte(config.Config), config.controller)
httpServer, err := vm.NewHTTPServer(config.GatewayAddrs[0], config.APIPort, []byte(config.Config), config.controller)
if err != nil {
return err
}


@ -118,12 +118,13 @@ func (p *provisioner) createNode(state *vm.State, clusterReq provision.ClusterRe
BootloaderEnabled: opts.BootloaderEnabled,
NodeUUID: nodeUUID,
Config: nodeConfig,
BridgeName: state.BridgeName,
NetworkConfig: state.VMCNIConfig,
CNI: clusterReq.Network.CNI,
CIDR: clusterReq.Network.CIDR,
IP: nodeReq.IP,
CIDRs: clusterReq.Network.CIDRs,
IPs: nodeReq.IPs,
Hostname: nodeReq.Name,
GatewayAddr: clusterReq.Network.GatewayAddr,
GatewayAddrs: clusterReq.Network.GatewayAddrs,
MTU: clusterReq.Network.MTU,
Nameservers: clusterReq.Network.Nameservers,
TFTPServer: nodeReq.TFTPServer,
@ -185,7 +186,7 @@ func (p *provisioner) createNode(state *vm.State, clusterReq provision.ClusterRe
Memory: nodeReq.Memory,
DiskSize: nodeReq.Disks[0].Size,
PrivateIP: nodeReq.IP,
IPs: nodeReq.IPs,
APIPort: apiPort,
}
@ -226,7 +227,7 @@ func (p *provisioner) createNodes(state *vm.State, clusterReq provision.ClusterR
}
func (p *provisioner) findBridgeListenPort(clusterReq provision.ClusterRequest) (int, error) {
l, err := net.Listen("tcp", net.JoinHostPort(clusterReq.Network.GatewayAddr.String(), "0"))
l, err := net.Listen("tcp", net.JoinHostPort(clusterReq.Network.GatewayAddrs[0].String(), "0"))
if err != nil {
return 0, err
}


@ -7,6 +7,9 @@ package qemu
import (
"context"
"github.com/AlekSi/pointer"
"github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1"
"github.com/talos-systems/talos/pkg/machinery/config/types/v1alpha1/generate"
"github.com/talos-systems/talos/pkg/provision"
"github.com/talos-systems/talos/pkg/provision/providers/vm"
@ -34,9 +37,15 @@ func (p *provisioner) Close() error {
// GenOptions provides a list of additional config generate options.
func (p *provisioner) GenOptions(networkReq provision.NetworkRequest) []generate.GenOption {
nameservers := make([]string, len(networkReq.Nameservers))
for i := range nameservers {
nameservers[i] = networkReq.Nameservers[i].String()
hasIPv4 := false
hasIPv6 := false
for _, cidr := range networkReq.CIDRs {
if cidr.IP.To4() == nil {
hasIPv6 = true
} else {
hasIPv4 = true
}
}
return []generate.GenOption{
@ -50,11 +59,25 @@ func (p *provisioner) GenOptions(networkReq provision.NetworkRequest) []generate
// Talos-specific
"talos.platform=metal",
}),
generate.WithNetworkConfig(
&v1alpha1.NetworkConfig{
NetworkInterfaces: []*v1alpha1.Device{
{
DeviceInterface: "eth0",
DeviceDHCP: true,
DeviceDHCPOptions: &v1alpha1.DHCPOptions{
DHCPIPv4: pointer.ToBool(hasIPv4),
DHCPIPv6: pointer.ToBool(hasIPv6),
},
},
},
},
),
}
}
// GetLoadBalancers returns internal/external loadbalancer endpoints.
func (p *provisioner) GetLoadBalancers(networkReq provision.NetworkRequest) (internalEndpoint, externalEndpoint string) {
// qemu runs loadbalancer on the bridge, which is good for both internal access, external access goes via round-robin
return networkReq.GatewayAddr.String(), ""
return networkReq.GatewayAddrs[0].String(), ""
}


@ -12,18 +12,25 @@ import (
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"time"
"github.com/insomniacslk/dhcp/dhcpv4"
"github.com/insomniacslk/dhcp/dhcpv4/server4"
"github.com/insomniacslk/dhcp/dhcpv6"
"github.com/insomniacslk/dhcp/dhcpv6/server6"
"github.com/insomniacslk/dhcp/iana"
"golang.org/x/sync/errgroup"
"github.com/talos-systems/talos/pkg/provision"
)
//nolint: gocyclo
func handler(serverIP net.IP, statePath string) server4.Handler {
func handlerDHCP4(serverIP net.IP, statePath string) server4.Handler {
return func(conn net.PacketConn, peer net.Addr, m *dhcpv4.DHCPv4) {
log.Printf("DHCPv4: got %s", m.Summary())
if m.OpCode != dhcpv4.OpcodeBootRequest {
return
}
@ -39,13 +46,20 @@ func handler(serverIP net.IP, statePath string) server4.Handler {
return
}
match, ok := db[m.ClientHWAddr.String()]
row, ok := db[m.ClientHWAddr.String()]
if !ok {
log.Printf("no match for MAC: %s", m.ClientHWAddr.String())
return
}
match, ok := row[4]
if !ok {
log.Printf("no match for MAC on IPv4: %s", m.ClientHWAddr.String())
return
}
resp, err := dhcpv4.NewReplyFromRequest(m,
dhcpv4.WithNetmask(match.Netmask),
dhcpv4.WithYourIP(match.IP),
@ -93,14 +107,136 @@ func handler(serverIP net.IP, statePath string) server4.Handler {
}
}
//nolint: gocyclo
func handlerDHCP6(serverHwAddr net.HardwareAddr, statePath string) server6.Handler {
return func(conn net.PacketConn, peer net.Addr, m dhcpv6.DHCPv6) {
log.Printf("DHCPv6: got %s", m.Summary())
db, err := LoadIPAMRecords(statePath)
if err != nil {
log.Printf("failed loading the IPAM db: %s", err)
return
}
if db == nil {
return
}
msg, err := m.GetInnerMessage()
if err != nil {
log.Printf("failed loading inner message: %s", err)
return
}
hwaddr, err := dhcpv6.ExtractMAC(m)
if err != nil {
log.Printf("error extracting hwaddr: %s", err)
return
}
row, ok := db[hwaddr.String()]
if !ok {
log.Printf("no match for MAC: %s", hwaddr)
return
}
match, ok := row[6]
if !ok {
log.Printf("no match for MAC on IPv6: %s", hwaddr)
return
}
modifiers := []dhcpv6.Modifier{
dhcpv6.WithDNS(match.Nameservers...),
dhcpv6.WithFQDN(0, match.Hostname),
dhcpv6.WithIANA(dhcpv6.OptIAAddress{
IPv6Addr: match.IP,
PreferredLifetime: 5 * time.Minute,
ValidLifetime: 5 * time.Minute,
}),
dhcpv6.WithServerID(dhcpv6.Duid{
Type: dhcpv6.DUID_LLT,
HwType: iana.HWTypeEthernet,
Time: dhcpv6.GetTime(),
LinkLayerAddr: serverHwAddr,
}),
}
var resp *dhcpv6.Message
switch msg.MessageType { //nolint: exhaustive
case dhcpv6.MessageTypeSolicit:
resp, err = dhcpv6.NewAdvertiseFromSolicit(msg, modifiers...)
case dhcpv6.MessageTypeRequest:
resp, err = dhcpv6.NewReplyFromMessage(msg, modifiers...)
default:
log.Printf("unsupported message type %s", msg.Summary())
}
if err != nil {
log.Printf("failure building response: %s", err)
return
}
_, err = conn.WriteTo(resp.ToBytes(), peer)
if err != nil {
log.Printf("failure sending response: %s", err)
}
}
}
// DHCPd entrypoint.
func DHCPd(ifName string, ip net.IP, statePath string) error {
server, err := server4.NewServer(ifName, nil, handler(ip, statePath), server4.WithDebugLogger())
func DHCPd(ifName string, ips []net.IP, statePath string) error {
iface, err := net.InterfaceByName(ifName)
if err != nil {
return err
return fmt.Errorf("error looking up interface: %w", err)
}
return server.Serve()
var eg errgroup.Group
for _, ip := range ips {
ip := ip
eg.Go(func() error {
if ip.To4() == nil {
server, err := server6.NewServer(
ifName,
nil,
handlerDHCP6(iface.HardwareAddr, statePath),
server6.WithDebugLogger(),
)
if err != nil {
log.Printf("error on dhcp6 startup: %s", err)
return err
}
return server.Serve()
}
server, err := server4.NewServer(
ifName,
nil,
handlerDHCP4(ip, statePath),
server4.WithSummaryLogger(),
)
if err != nil {
log.Printf("error on dhcp4 startup: %s", err)
return err
}
return server.Serve()
})
}
return eg.Wait()
}
const (
@ -124,10 +260,15 @@ func (p *Provisioner) CreateDHCPd(state *State, clusterReq provision.ClusterRequ
return err
}
gatewayAddrs := make([]string, len(clusterReq.Network.GatewayAddrs))
for j := range gatewayAddrs {
gatewayAddrs[j] = clusterReq.Network.GatewayAddrs[j].String()
}
args := []string{
"dhcpd-launch",
"--state-path", statePath,
"--addr", clusterReq.Network.GatewayAddr.String(),
"--addr", strings.Join(gatewayAddrs, ","),
"--interface", state.BridgeName,
}


@ -27,8 +27,8 @@ type IPAMRecord struct {
IPXEBootFilename string
}
// IPAMDatabase is a mapping from MAC address to records.
type IPAMDatabase map[string]IPAMRecord
// IPAMDatabase is a mapping from MAC address to records with IPv4/IPv6 flag.
type IPAMDatabase map[string]map[int]IPAMRecord
const dbFile = "ipam.db"
@ -75,7 +75,16 @@ func LoadIPAMRecords(statePath string) (IPAMDatabase, error) {
return nil, err
}
result[record.MAC] = record
ipFormat := 4
if record.IP.To4() == nil {
ipFormat = 6
}
if result[record.MAC] == nil {
result[record.MAC] = make(map[int]IPAMRecord)
}
result[record.MAC][ipFormat] = record
}
return result, scanner.Err()
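
With this change the IPAM database is keyed by MAC address first and address family (4 or 6) second, so a single VM interface can carry both an IPv4 and an IPv6 lease; the DHCP handlers above look records up as row[4] / row[6]. A minimal sketch of reading the database back, assuming the vm provider package path shown earlier in the diff and a hypothetical state directory:

```go
package main

import (
	"fmt"
	"log"

	"github.com/talos-systems/talos/pkg/provision/providers/vm"
)

func main() {
	// LoadIPAMRecords returns the MAC -> address family -> record mapping
	// dumped by the launchers via DumpIPAMRecord.
	db, err := vm.LoadIPAMRecords("/tmp/talos-cluster-state") // hypothetical state path
	if err != nil {
		log.Fatal(err)
	}

	for mac, row := range db {
		if rec, ok := row[4]; ok {
			fmt.Printf("%s IPv4 %s\n", mac, rec.IP)
		}

		if rec, ok := row[6]; ok {
			fmt.Printf("%s IPv6 %s\n", mac, rec.IP)
		}
	}
}
```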


@ -36,12 +36,12 @@ func (p *Provisioner) CreateLoadBalancer(state *State, clusterReq provision.Clus
masterIPs := make([]string, len(masterNodes))
for i := range masterIPs {
masterIPs[i] = masterNodes[i].IP.String()
masterIPs[i] = masterNodes[i].IPs[0].String()
}
args := []string{
"loadbalancer-launch",
"--loadbalancer-addr", clusterReq.Network.GatewayAddr.String(),
"--loadbalancer-addr", clusterReq.Network.GatewayAddrs[0].String(),
"--loadbalancer-upstreams", strings.Join(masterIPs, ","),
}


@ -12,6 +12,7 @@ import (
"fmt"
"net"
"strconv"
"strings"
"text/template"
"github.com/containernetworking/cni/libcni"
@ -23,9 +24,11 @@ import (
"github.com/talos-systems/talos/pkg/provision"
)
// CreateNetwork build bridge interface name by taking part of checksum of the network name
// CreateNetwork builds bridge interface name by taking part of checksum of the network name
// so that interface name is defined by network name, and different networks have
// different bridge interfaces.
//
//nolint: gocyclo
func (p *Provisioner) CreateNetwork(ctx context.Context, state *State, network provision.NetworkRequest) error {
networkNameHash := sha256.Sum256([]byte(network.Name))
state.BridgeName = fmt.Sprintf("%s%s", "talos", hex.EncodeToString(networkNameHash[:])[:8])
@ -66,20 +69,31 @@ func (p *Provisioner) CreateNetwork(ctx context.Context, state *State, network p
}()
// pick a fake address to use for provisioning an interface
fakeIP, err := talosnet.NthIPInNetwork(&network.CIDR, 2)
if err != nil {
return err
fakeIPs := make([]string, len(network.CIDRs))
for j := range fakeIPs {
var fakeIP net.IP
fakeIP, err = talosnet.NthIPInNetwork(&network.CIDRs[j], 2)
if err != nil {
return err
}
fakeIPs[j] = talosnet.FormatCIDR(fakeIP, network.CIDRs[j])
}
gatewayAddrs := make([]string, len(network.GatewayAddrs))
for j := range gatewayAddrs {
gatewayAddrs[j] = network.GatewayAddrs[j].String()
}
ones, _ := network.CIDR.Mask.Size()
containerID := uuid.New().String()
runtimeConf := libcni.RuntimeConf{
ContainerID: containerID,
NetNS: ns.Path(),
IfName: "veth0",
Args: [][2]string{
{"IP", fmt.Sprintf("%s/%d", fakeIP, ones)},
{"GATEWAY", network.GatewayAddr.String()},
{"IP", strings.Join(fakeIPs, ",")},
{"GATEWAY", strings.Join(gatewayAddrs, ",")},
},
}


@ -44,11 +44,11 @@ type CNIConfig struct {
// NetworkRequest describes cluster network.
type NetworkRequest struct {
Name string
CIDR net.IPNet
GatewayAddr net.IP
MTU int
Nameservers []net.IP
Name string
CIDRs []net.IPNet
GatewayAddrs []net.IP
MTU int
Nameservers []net.IP
// CNI-specific parameters.
CNI CNIConfig
@ -129,7 +129,7 @@ type Disk struct {
// NodeRequest describes a request for a node.
type NodeRequest struct {
Name string
IP net.IP
IPs []net.IP
Config config.Provider
Type machine.Type


@ -35,10 +35,10 @@ type ClusterInfo struct {
// NetworkInfo describes cluster network.
type NetworkInfo struct {
Name string
CIDR net.IPNet
GatewayAddr net.IP
MTU int
Name string
CIDRs []net.IPNet
GatewayAddrs []net.IP
MTU int
}
// NodeInfo describes a node.
@ -55,8 +55,7 @@ type NodeInfo struct {
// Disk (volume) size in bytes, if applicable
DiskSize uint64
PublicIP net.IP
PrivateIP net.IP
IPs []net.IP
APIPort int
}


@ -76,7 +76,7 @@ talosctl cluster create [flags]
```
--arch string cluster architecture (default "amd64")
--cidr string CIDR of the cluster network (default "10.5.0.0/24")
--cidr string CIDR of the cluster network (IPv4, ULA network for IPv6 is derived in automated way) (default "10.5.0.0/24")
--cni-bin-path strings search path for CNI binaries (VM only) (default [/home/user/.talos/cni/bin])
--cni-bundle-url string URL to download CNI bundle from (VM only) (default "https://github.com/talos-systems/talos/releases/download/v0.9.0-alpha.0/talosctl-cni-bundle-${ARCH}.tar.gz")
--cni-cache-dir string CNI cache directory path (VM only) (default "/home/user/.talos/cni/cache")
@ -96,12 +96,14 @@ talosctl cluster create [flags]
--initrd-path string the uncompressed kernel image to use (default "_out/initramfs-${ARCH}.xz")
-i, --input-dir string location of pre-generated config files
--install-image string the installer image to use (default "ghcr.io/talos-systems/installer:latest")
--ipv4 enable IPv4 network in the cluster (default true)
--ipv6 enable IPv6 network in the cluster (QEMU provisioner only)
--iso-path string the ISO path to use for the initial boot (VM only)
--kubernetes-version string desired kubernetes version to run (default "1.20.2")
--masters int the number of masters to create (default 1)
--memory int the limit on memory usage in MB (each container/VM) (default 2048)
--mtu int MTU of the cluster network (default 1500)
--nameservers strings list of nameservers to use (default [8.8.8.8,1.1.1.1])
--nameservers strings list of nameservers to use (default [8.8.8.8,1.1.1.1,2001:4860:4860::8888,2606:4700:4700::1111])
--registry-insecure-skip-verify strings list of registry hostnames to skip TLS verification for
--registry-mirror strings list of registry mirrors to use in format: <registry host>=<mirror URL>
--skip-injecting-config skip injecting config from embedded metadata server, write config files to current directory