feat: use 'registry.k8s.io' for Kubernetes images

See https://kubernetes.io/blog/2022/11/28/registry-k8s-io-faster-cheaper-ga/

Signed-off-by: Andrey Smirnov <andrey.smirnov@talos-systems.com>
Andrey Smirnov
2022-11-28 13:39:00 +04:00
parent 1103c5ad24
commit fdbd380f60
16 changed files with 62 additions and 52 deletions


@@ -233,6 +233,16 @@ Talos keeps track of the owned node labels in the `talos.dev/owned-labels` annot
Talos no longer supports CRI config overrides placed in the `/var/cri/conf.d` directory.
The [new way](https://www.talos.dev/v1.3/talos-guides/configuration/containerd/) correctly handles merging of containerd/CRI plugin configuration.
"""
[notes.registry_k8s_io]
title = "registry.k8s.io"
description = """\
Talos now uses `registry.k8s.io` instead of `k8s.gcr.io` for Kubernetes container images.
See [Kubernetes documentation](https://kubernetes.io/blog/2022/11/28/registry-k8s-io-faster-cheaper-ga/) for additional details.
If you are using registry mirrors or running an air-gapped installation, you may need to update your configuration.
"""
[make_deps]
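
For clusters that rely on registry mirrors, the practical consequence of this note is that the mirror list needs a `registry.k8s.io` entry alongside (or instead of) the old `k8s.gcr.io` one. A minimal machine-config sketch (the endpoint `http://10.5.0.1:6000/` is an illustrative value reused from the examples further down):

```yaml
machine:
  registries:
    mirrors:
      # keep the old entry while workloads may still reference k8s.gcr.io images
      k8s.gcr.io:
        endpoints:
          - http://10.5.0.1:6000/
      # new default source of Kubernetes images starting with this release
      registry.k8s.io:
        endpoints:
          - http://10.5.0.1:6000/
```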


@@ -10,9 +10,9 @@ docker run -d -p 5000:5000 \
--name registry-docker.io registry:2
docker run -d -p 5001:5000 \
-e REGISTRY_PROXY_REMOTEURL=https://k8s.gcr.io \
-e REGISTRY_PROXY_REMOTEURL=https://registry.k8s.io \
--restart always \
--name registry-k8s.gcr.io registry:2
--name registry-registry.k8s.io registry:2
docker run -d -p 5002:5000 \
-e REGISTRY_PROXY_REMOTEURL=https://quay.io \


@@ -200,7 +200,7 @@ function build_registry_mirrors {
if [[ "${CI:-false}" == "true" ]]; then
REGISTRY_MIRROR_FLAGS=
for registry in docker.io k8s.gcr.io quay.io gcr.io ghcr.io registry.dev.talos-systems.io; do
for registry in docker.io registry.k8s.io quay.io gcr.io ghcr.io registry.dev.talos-systems.io; do
local service="registry-${registry//./-}.ci.svc"
local addr=`python3 -c "import socket; print(socket.gethostbyname('${service}'))"`


@@ -6,7 +6,7 @@ case "${CI:-false}" in
true)
mirror_flag=""
for registry in docker.io k8s.gcr.io quay.io gcr.io ghcr.io registry.dev.talos-systems.io; do
for registry in docker.io k8s.gcr.io registry.k8s.io quay.io gcr.io ghcr.io registry.dev.talos-systems.io; do
service="registry-${registry//./-}.ci.svc"
addr=`python3 -c "import socket; print(socket.gethostbyname('${service}'))"`


@@ -125,7 +125,7 @@ func (contract *VersionContract) KubeletDefaultRuntimeSeccompProfileEnabled() bo
// KubernetesAlternateImageRegistries returns true if alternate image registries should be enabled by default.
// https://github.com/kubernetes/kubernetes/pull/109938
func (contract *VersionContract) KubernetesAlternateImageRegistries() bool {
return contract.Greater(TalosVersion1_1)
return contract.Greater(TalosVersion1_1) && !contract.Greater(TalosVersion1_2)
}
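
The effect of the tightened gate: it now returns true only for Talos 1.2 contracts, since 1.1 and earlier never enabled the alternate registries and 1.3+ defaults to `registry.k8s.io` outright. A minimal sketch of the resulting behaviour, assuming the exported `TalosVersion1_2`/`TalosVersion1_3` contract values and the `pkg/machinery/config` import path (assumptions, not verified against this exact revision):

```go
package main

import (
	"fmt"

	"github.com/siderolabs/talos/pkg/machinery/config"
)

func main() {
	// 1.2 is greater than 1.1 but not greater than 1.2, so the gate stays on.
	fmt.Println(config.TalosVersion1_2.KubernetesAlternateImageRegistries()) // true
	// 1.3 is greater than 1.2, so the alternate registries are no longer enabled.
	fmt.Println(config.TalosVersion1_3.KubernetesAlternateImageRegistries()) // false
}
```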
// KubernetesAllowSchedulingOnControlPlanes returns true if scheduling on control planes should be enabled by default.


@@ -58,7 +58,7 @@ func TestContractCurrent(t *testing.T) {
assert.True(t, contract.PodSecurityAdmissionEnabled())
assert.True(t, contract.StableHostnameEnabled())
assert.True(t, contract.KubeletDefaultRuntimeSeccompProfileEnabled())
assert.True(t, contract.KubernetesAlternateImageRegistries())
assert.False(t, contract.KubernetesAlternateImageRegistries())
assert.True(t, contract.KubernetesAllowSchedulingOnControlPlanes())
assert.True(t, contract.KubernetesDiscoveryBackendDisabled())
assert.True(t, contract.ApidExtKeyUsageCheckEnabled())
@@ -81,7 +81,7 @@ func TestContract1_3(t *testing.T) {
assert.True(t, contract.PodSecurityAdmissionEnabled())
assert.True(t, contract.StableHostnameEnabled())
assert.True(t, contract.KubeletDefaultRuntimeSeccompProfileEnabled())
assert.True(t, contract.KubernetesAlternateImageRegistries())
assert.False(t, contract.KubernetesAlternateImageRegistries())
assert.True(t, contract.KubernetesAllowSchedulingOnControlPlanes())
assert.True(t, contract.KubernetesDiscoveryBackendDisabled())
assert.True(t, contract.ApidExtKeyUsageCheckEnabled())


@@ -303,19 +303,19 @@ const (
KubeletImage = "ghcr.io/siderolabs/kubelet"
// KubeProxyImage is the enforced kube-proxy image to use for the control plane.
KubeProxyImage = "k8s.gcr.io/kube-proxy"
KubeProxyImage = "registry.k8s.io/kube-proxy"
// KubernetesAPIServerImage is the enforced apiserver image to use for the control plane.
KubernetesAPIServerImage = "k8s.gcr.io/kube-apiserver"
KubernetesAPIServerImage = "registry.k8s.io/kube-apiserver"
// KubernetesControllerManagerImage is the enforced controllermanager image to use for the control plane.
KubernetesControllerManagerImage = "k8s.gcr.io/kube-controller-manager"
KubernetesControllerManagerImage = "registry.k8s.io/kube-controller-manager"
// KubernetesProxyImage is the enforced proxy image to use for the control plane.
KubernetesProxyImage = "k8s.gcr.io/kube-proxy"
KubernetesProxyImage = "registry.k8s.io/kube-proxy"
// KubernetesSchedulerImage is the enforced scheduler image to use for the control plane.
KubernetesSchedulerImage = "k8s.gcr.io/kube-scheduler"
KubernetesSchedulerImage = "registry.k8s.io/kube-scheduler"
// CoreDNSImage is the enforced CoreDNS image to use.
CoreDNSImage = "docker.io/coredns/coredns"


@@ -103,7 +103,7 @@ Identify all registry prefixes from `talosctl images`, for example:
- `docker.io`
- `gcr.io`
- `ghcr.io`
- `k8s.gcr.io`
- `registry.k8s.io`
- `quay.io`
The `talosctl cluster create` command provides conveniences for common configuration options.
@@ -116,7 +116,7 @@ $ sudo --preserve-env=HOME talosctl cluster create --provisioner=qemu --install-
--registry-mirror docker.io=http://10.5.0.1:6000 \
--registry-mirror gcr.io=http://10.5.0.1:6000 \
--registry-mirror ghcr.io=http://10.5.0.1:6000 \
--registry-mirror k8s.gcr.io=http://10.5.0.1:6000 \
--registry-mirror registry.k8s.io=http://10.5.0.1:6000 \
--registry-mirror quay.io=http://10.5.0.1:6000
validating CIDR and reserving IPs
generating PKI and tokens
@@ -154,7 +154,7 @@ machine:
ghcr.io:
endpoints:
- http://10.5.0.1:6000/
k8s.gcr.io:
registry.k8s.io:
endpoints:
- http://10.5.0.1:6000/
quay.io:


@@ -66,7 +66,7 @@ sudo --preserve-env=HOME _out/talosctl-linux-amd64 cluster create \
--provisioner=qemu \
--cidr=172.20.0.0/24 \
--registry-mirror docker.io=http://172.20.0.1:5000 \
--registry-mirror k8s.gcr.io=http://172.20.0.1:5001 \
--registry-mirror registry.k8s.io=http://172.20.0.1:5001 \
--registry-mirror quay.io=http://172.20.0.1:5002 \
--registry-mirror gcr.io=http://172.20.0.1:5003 \
--registry-mirror ghcr.io=http://172.20.0.1:5004 \


@@ -49,7 +49,7 @@ If the API server is not available, status of the static pod can also be inspect
```bash
$ talosctl containers --kubernetes
NODE NAMESPACE ID IMAGE PID STATUS
172.20.0.3 k8s.io default/nginx-talos-default-controlplane-2 k8s.gcr.io/pause:3.6 4886 SANDBOX_READY
172.20.0.3 k8s.io default/nginx-talos-default-controlplane-2 registry.k8s.io/pause:3.6 4886 SANDBOX_READY
172.20.0.3 k8s.io └─ default/nginx-talos-default-controlplane-2:nginx docker.io/library/nginx:latest
...
```


@@ -171,9 +171,9 @@ In any case, the status of the control plane components on each control plane no
```bash
$ talosctl -n <IP> containers --kubernetes
NODE NAMESPACE ID IMAGE PID STATUS
172.20.0.2 k8s.io kube-system/kube-apiserver-talos-default-controlplane-1 k8s.gcr.io/pause:3.2 2539 SANDBOX_READY
172.20.0.2 k8s.io └─ kube-system/kube-apiserver-talos-default-controlplane-1:kube-apiserver k8s.gcr.io/kube-apiserver:v{{< k8s_release >}} 2572 CONTAINER_RUNNING
NODE NAMESPACE ID IMAGE PID STATUS
172.20.0.2 k8s.io kube-system/kube-apiserver-talos-default-controlplane-1 registry.k8s.io/pause:3.2 2539 SANDBOX_READY
172.20.0.2 k8s.io └─ kube-system/kube-apiserver-talos-default-controlplane-1:kube-apiserver registry.k8s.io/kube-apiserver:v{{< k8s_release >}} 2572 CONTAINER_RUNNING
```
If `kube-apiserver` shows as `CONTAINER_EXITED`, it might have exited due to a configuration error.
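
To see why it exited, the container logs can be pulled over the Talos API; a sketch, assuming `talosctl logs --kubernetes` accepts the container ID shown by `talosctl containers --kubernetes` above:

```bash
talosctl -n <IP> logs --kubernetes kube-system/kube-apiserver-talos-default-controlplane-1:kube-apiserver
```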
@@ -273,10 +273,10 @@ If the control plane endpoint is not yet up, the container status of the control
$ talosctl -n <IP> c -k
NODE NAMESPACE ID IMAGE PID STATUS
...
172.20.0.2 k8s.io kube-system/kube-controller-manager-talos-default-controlplane-1 k8s.gcr.io/pause:3.2 2547 SANDBOX_READY
172.20.0.2 k8s.io └─ kube-system/kube-controller-manager-talos-default-controlplane-1:kube-controller-manager k8s.gcr.io/kube-controller-manager:v{{< k8s_release >}} 2580 CONTAINER_RUNNING
172.20.0.2 k8s.io kube-system/kube-scheduler-talos-default-controlplane-1 k8s.gcr.io/pause:3.2 2638 SANDBOX_READY
172.20.0.2 k8s.io └─ kube-system/kube-scheduler-talos-default-controlplane-1:kube-scheduler k8s.gcr.io/kube-scheduler:v{{< k8s_release >}} 2670 CONTAINER_RUNNING
172.20.0.2 k8s.io kube-system/kube-controller-manager-talos-default-controlplane-1 registry.k8s.io/pause:3.2 2547 SANDBOX_READY
172.20.0.2 k8s.io └─ kube-system/kube-controller-manager-talos-default-controlplane-1:kube-controller-manager registry.k8s.io/kube-controller-manager:v{{< k8s_release >}} 2580 CONTAINER_RUNNING
172.20.0.2 k8s.io kube-system/kube-scheduler-talos-default-controlplane-1 registry.k8s.io/pause:3.2 2638 SANDBOX_READY
172.20.0.2 k8s.io └─ kube-system/kube-scheduler-talos-default-controlplane-1:kube-scheduler registry.k8s.io/kube-scheduler:v{{< k8s_release >}} 2670 CONTAINER_RUNNING
...
```


@@ -120,7 +120,7 @@ talosctl --nodes <controlplane node> kubeconfig
Patch machine configuration using `talosctl patch` command:
```bash
$ talosctl -n <CONTROL_PLANE_IP_1> patch mc --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/apiServer/image", "value": "k8s.gcr.io/kube-apiserver:v{{< k8s_release >}}"}]'
$ talosctl -n <CONTROL_PLANE_IP_1> patch mc --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/apiServer/image", "value": "registry.k8s.io/kube-apiserver:v{{< k8s_release >}}"}]'
patched mc at the node 172.20.0.2
```
@@ -140,7 +140,7 @@ metadata:
version: 5
phase: running
spec:
image: k8s.gcr.io/kube-apiserver:v{{< k8s_release >}}
image: registry.k8s.io/kube-apiserver:v{{< k8s_release >}}
cloudProvider: ""
controlPlaneEndpoint: https://172.20.0.1:6443
etcdServers:
@@ -174,7 +174,7 @@ Repeat this process for every control plane node, verifying that state got propa
Patch machine configuration using `talosctl patch` command:
```bash
$ talosctl -n <CONTROL_PLANE_IP_1> patch mc --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/controllerManager/image", "value": "k8s.gcr.io/kube-controller-manager:v{{< k8s_release >}}"}]'
$ talosctl -n <CONTROL_PLANE_IP_1> patch mc --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/controllerManager/image", "value": "registry.k8s.io/kube-controller-manager:v{{< k8s_release >}}"}]'
patched mc at the node 172.20.0.2
```
@@ -192,7 +192,7 @@ metadata:
version: 3
phase: running
spec:
image: k8s.gcr.io/kube-controller-manager:v{{< k8s_release >}}
image: registry.k8s.io/kube-controller-manager:v{{< k8s_release >}}
cloudProvider: ""
podCIDR: 10.244.0.0/16
serviceCIDR: 10.96.0.0/12
@@ -223,7 +223,7 @@ Repeat this process for every control plane node, verifying that state propagate
Patch machine configuration using `talosctl patch` command:
```bash
$ talosctl -n <CONTROL_PLANE_IP_1> patch mc --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/scheduler/image", "value": "k8s.gcr.io/kube-scheduler:v{{< k8s_release >}}"}]'
$ talosctl -n <CONTROL_PLANE_IP_1> patch mc --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/scheduler/image", "value": "registry.k8s.io/kube-scheduler:v{{< k8s_release >}}"}]'
patched mc at the node 172.20.0.2
```
@@ -241,7 +241,7 @@ metadata:
version: 3
phase: running
spec:
image: k8s.gcr.io/kube-scheduler:v{{< k8s_release >}}
image: registry.k8s.io/kube-scheduler:v{{< k8s_release >}}
extraArgs: {}
extraVolumes: []
```
@@ -278,7 +278,7 @@ spec:
spec:
containers:
- name: kube-proxy
image: k8s.gcr.io/kube-proxy:v{{< k8s_release >}}
image: registry.k8s.io/kube-proxy:v{{< k8s_release >}}
tolerations:
- ...
```
@@ -295,7 +295,7 @@ spec:
spec:
containers:
- name: kube-proxy
image: k8s.gcr.io/kube-proxy:v{{< k8s_release >}}
image: registry.k8s.io/kube-proxy:v{{< k8s_release >}}
tolerations:
- ...
- key: node-role.kubernetes.io/control-plane


@@ -509,7 +509,7 @@ serviceAccount:
{{< /highlight >}}</details> | |
|`apiServer` |<a href="#apiserverconfig">APIServerConfig</a> |API server specific configuration options. <details><summary>Show example(s)</summary>{{< highlight yaml >}}
apiServer:
image: k8s.gcr.io/kube-apiserver:v1.26.0-rc.0 # The container image used in the API server manifest.
image: registry.k8s.io/kube-apiserver:v1.26.0-rc.0 # The container image used in the API server manifest.
# Extra arguments to supply to the API server.
extraArgs:
feature-gates: ServerSideApply=true
@@ -548,14 +548,14 @@ apiServer:
{{< /highlight >}}</details> | |
|`controllerManager` |<a href="#controllermanagerconfig">ControllerManagerConfig</a> |Controller manager server specific configuration options. <details><summary>Show example(s)</summary>{{< highlight yaml >}}
controllerManager:
image: k8s.gcr.io/kube-controller-manager:v1.26.0-rc.0 # The container image used in the controller manager manifest.
image: registry.k8s.io/kube-controller-manager:v1.26.0-rc.0 # The container image used in the controller manager manifest.
# Extra arguments to supply to the controller manager.
extraArgs:
feature-gates: ServerSideApply=true
{{< /highlight >}}</details> | |
|`proxy` |<a href="#proxyconfig">ProxyConfig</a> |Kube-proxy server-specific configuration options <details><summary>Show example(s)</summary>{{< highlight yaml >}}
proxy:
image: k8s.gcr.io/kube-proxy:v1.26.0-rc.0 # The container image used in the kube-proxy manifest.
image: registry.k8s.io/kube-proxy:v1.26.0-rc.0 # The container image used in the kube-proxy manifest.
mode: ipvs # proxy mode of kube-proxy.
# Extra arguments to supply to kube-proxy.
extraArgs:
@@ -566,7 +566,7 @@ proxy:
{{< /highlight >}}</details> | |
|`scheduler` |<a href="#schedulerconfig">SchedulerConfig</a> |Scheduler server specific configuration options. <details><summary>Show example(s)</summary>{{< highlight yaml >}}
scheduler:
image: k8s.gcr.io/kube-scheduler:v1.26.0-rc.0 # The container image used in the scheduler manifest.
image: registry.k8s.io/kube-scheduler:v1.26.0-rc.0 # The container image used in the scheduler manifest.
# Extra arguments to supply to the scheduler.
extraArgs:
feature-gates: AllBeta=true
@@ -1366,7 +1366,7 @@ Appears in:
{{< highlight yaml >}}
image: k8s.gcr.io/kube-apiserver:v1.26.0-rc.0 # The container image used in the API server manifest.
image: registry.k8s.io/kube-apiserver:v1.26.0-rc.0 # The container image used in the API server manifest.
# Extra arguments to supply to the API server.
extraArgs:
feature-gates: ServerSideApply=true
@@ -1408,7 +1408,7 @@ certSANs:
| Field | Type | Description | Value(s) |
|-------|------|-------------|----------|
|`image` |string |The container image used in the API server manifest. <details><summary>Show example(s)</summary>{{< highlight yaml >}}
image: k8s.gcr.io/kube-apiserver:v1.26.0-rc.0
image: registry.k8s.io/kube-apiserver:v1.26.0-rc.0
{{< /highlight >}}</details> | |
|`extraArgs` |map[string]string |Extra arguments to supply to the API server. | |
|`extraVolumes` |[]<a href="#volumemountconfig">VolumeMountConfig</a> |Extra volumes to mount to the API server static pod. | |
@@ -1494,7 +1494,7 @@ Appears in:
{{< highlight yaml >}}
image: k8s.gcr.io/kube-controller-manager:v1.26.0-rc.0 # The container image used in the controller manager manifest.
image: registry.k8s.io/kube-controller-manager:v1.26.0-rc.0 # The container image used in the controller manager manifest.
# Extra arguments to supply to the controller manager.
extraArgs:
feature-gates: ServerSideApply=true
@@ -1504,7 +1504,7 @@ extraArgs:
| Field | Type | Description | Value(s) |
|-------|------|-------------|----------|
|`image` |string |The container image used in the controller manager manifest. <details><summary>Show example(s)</summary>{{< highlight yaml >}}
image: k8s.gcr.io/kube-controller-manager:v1.26.0-rc.0
image: registry.k8s.io/kube-controller-manager:v1.26.0-rc.0
{{< /highlight >}}</details> | |
|`extraArgs` |map[string]string |Extra arguments to supply to the controller manager. | |
|`extraVolumes` |[]<a href="#volumemountconfig">VolumeMountConfig</a> |Extra volumes to mount to the controller manager static pod. | |
@@ -1523,7 +1523,7 @@ Appears in:
{{< highlight yaml >}}
image: k8s.gcr.io/kube-proxy:v1.26.0-rc.0 # The container image used in the kube-proxy manifest.
image: registry.k8s.io/kube-proxy:v1.26.0-rc.0 # The container image used in the kube-proxy manifest.
mode: ipvs # proxy mode of kube-proxy.
# Extra arguments to supply to kube-proxy.
extraArgs:
@@ -1540,7 +1540,7 @@ extraArgs:
disabled: false
{{< /highlight >}}</details> | |
|`image` |string |The container image used in the kube-proxy manifest. <details><summary>Show example(s)</summary>{{< highlight yaml >}}
image: k8s.gcr.io/kube-proxy:v1.26.0-rc.0
image: registry.k8s.io/kube-proxy:v1.26.0-rc.0
{{< /highlight >}}</details> | |
|`mode` |string |<details><summary>proxy mode of kube-proxy.</summary>The default is 'iptables'.</details> | |
|`extraArgs` |map[string]string |Extra arguments to supply to kube-proxy. | |
@@ -1558,7 +1558,7 @@ Appears in:
{{< highlight yaml >}}
image: k8s.gcr.io/kube-scheduler:v1.26.0-rc.0 # The container image used in the scheduler manifest.
image: registry.k8s.io/kube-scheduler:v1.26.0-rc.0 # The container image used in the scheduler manifest.
# Extra arguments to supply to the scheduler.
extraArgs:
feature-gates: AllBeta=true
@@ -1568,7 +1568,7 @@ extraArgs:
| Field | Type | Description | Value(s) |
|-------|------|-------------|----------|
|`image` |string |The container image used in the scheduler manifest. <details><summary>Show example(s)</summary>{{< highlight yaml >}}
image: k8s.gcr.io/kube-scheduler:v1.26.0-rc.0
image: registry.k8s.io/kube-scheduler:v1.26.0-rc.0
{{< /highlight >}}</details> | |
|`extraArgs` |map[string]string |Extra arguments to supply to the scheduler. | |
|`extraVolumes` |[]<a href="#volumemountconfig">VolumeMountConfig</a> |Extra volumes to mount to the scheduler static pod. | |


@ -61,5 +61,5 @@ NODE NAMESPACE ID IMAG
172.20.0.5 k8s.io └─ kube-system/kube-flannel-6hfck:install-config ghcr.io/siderolabs/flannel:v0.20.1 0 CONTAINER_EXITED
172.20.0.5 k8s.io └─ kube-system/kube-flannel-6hfck:kube-flannel ghcr.io/siderolabs/flannel:v0.20.1 2092 CONTAINER_RUNNING
172.20.0.5 k8s.io kube-system/kube-proxy-xp7jq registry.k8s.io/pause:3.8 1780 SANDBOX_READY
172.20.0.5 k8s.io └─ kube-system/kube-proxy-xp7jq:kube-proxy k8s.gcr.io/kube-proxy:v1.26.0-alpha.3 1843 CONTAINER_RUNNING
172.20.0.5 k8s.io └─ kube-system/kube-proxy-xp7jq:kube-proxy registry.k8s.io/kube-proxy:v1.26.0-alpha.3 1843 CONTAINER_RUNNING
```


@@ -124,7 +124,7 @@ patched mc at the node <IP>
Updating kube-apiserver version in immediate mode (without a reboot):
```bash
$ talosctl -n <IP> patch machineconfig --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/apiServer/image", "value": "k8s.gcr.io/kube-apiserver:v{{< k8s_release >}}"}]'
$ talosctl -n <IP> patch machineconfig --mode=no-reboot -p '[{"op": "replace", "path": "/cluster/apiServer/image", "value": "registry.k8s.io/kube-apiserver:v{{< k8s_release >}}"}]'
patched mc at the node <IP>
```


@@ -27,7 +27,7 @@ The following are requirements for creating the set of caching proxies:
## Launch the Caching Docker Registry Proxies
Talos pulls from `docker.io`, `k8s.gcr.io`, `quay.io`, `gcr.io`, and `ghcr.io` by default.
Talos pulls from `docker.io`, `registry.k8s.io`, `quay.io`, `gcr.io`, and `ghcr.io` by default.
If your configuration is different, you might need to modify the commands below:
```bash
@@ -37,9 +37,9 @@ docker run -d -p 5000:5000 \
--name registry-docker.io registry:2
docker run -d -p 5001:5000 \
-e REGISTRY_PROXY_REMOTEURL=https://k8s.gcr.io \
-e REGISTRY_PROXY_REMOTEURL=https://registry.k8s.io \
--restart always \
--name registry-k8s.gcr.io registry:2
--name registry-registry.k8s.io registry:2
docker run -d -p 5002:5000 \
-e REGISTRY_PROXY_REMOTEURL=https://quay.io \
@@ -71,7 +71,7 @@ As registry containers expose their ports on the host, we can use bridge IP to d
```bash
sudo talosctl cluster create --provisioner qemu \
--registry-mirror docker.io=http://10.5.0.1:5000 \
--registry-mirror k8s.gcr.io=http://10.5.0.1:5001 \
--registry-mirror registry.k8s.io=http://10.5.0.1:5001 \
--registry-mirror quay.io=http://10.5.0.1:5002 \
--registry-mirror gcr.io=http://10.5.0.1:5003 \
--registry-mirror ghcr.io=http://10.5.0.1:5004
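
Once nodes start pulling images, a quick sanity check from the machine running the registry containers is to query the proxy that fronts `registry.k8s.io` (published on port 5001 above); the registry API's `_catalog` endpoint lists only repositories that have already been pulled through, so it fills in as the cluster comes up (output below is illustrative):

```bash
curl -fsS http://localhost:5001/v2/_catalog
# e.g. {"repositories":["kube-apiserver","kube-controller-manager","kube-proxy","pause"]}
```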
@@ -91,7 +91,7 @@ On Linux, the docker bridge address can be inspected with `ip addr show docker0`
```bash
talosctl cluster create --provisioner docker \
--registry-mirror docker.io=http://172.17.0.1:5000 \
--registry-mirror k8s.gcr.io=http://172.17.0.1:5001 \
--registry-mirror registry.k8s.io=http://172.17.0.1:5001 \
--registry-mirror quay.io=http://172.17.0.1:5002 \
--registry-mirror gcr.io=http://172.17.0.1:5003 \
--registry-mirror ghcr.io=http://172.17.0.1:5004
@@ -103,7 +103,7 @@ To cleanup, run:
```bash
docker rm -f registry-docker.io
docker rm -f registry-k8s.gcr.io
docker rm -f registry-registry.k8s.io
docker rm -f registry-quay.io
docker rm -f registry-gcr.io
docker rm -f registry-ghcr.io