#!/bin/bash

# End-to-end test harness: boots a Talos cluster in Docker and verifies
# that a healthy HA Kubernetes control plane comes up.
#
# Required environment:
#   TAG - Talos image tag to test (e.g. a git SHA or release tag).

set -euo pipefail

export KUBERNETES_VERSION=v1.15.0
export TALOS_IMG="docker.io/autonomy/talos:${TAG}"
export TMP="/tmp/e2e"
export OSCTL="${PWD}/build/osctl-linux-amd64"
export TALOSCONFIG="${TMP}/talosconfig"
export KUBECONFIG="${TMP}/kubeconfig"
export TIMEOUT=300

## Create tmp dir
mkdir -p "${TMP}"
# run CMD
#
# Execute CMD under bash inside a hyperkube container attached to the
# "integration" docker network. osctl, the dev manifests, and the
# talos/kube configs are mounted in so kubectl/osctl can reach the
# cluster created below.
run() {
  docker run \
    --rm \
    --interactive \
    --net=integration \
    --entrypoint=bash \
    --mount "type=bind,source=${TMP},target=${TMP}" \
    --mount "type=bind,source=${PWD}/hack/dev/manifests,target=/manifests" \
    -v "${OSCTL}:/bin/osctl:ro" \
    -e "KUBECONFIG=${KUBECONFIG}" \
    -e "TALOSCONFIG=${TALOSCONFIG}" \
    "k8s.gcr.io/hyperkube:${KUBERNETES_VERSION}" -c "${1}"
}
## Boot the cluster and point osctl at the init node.
"${OSCTL}" cluster create --name integration --image "${TALOS_IMG}" --mtu 1440

"${OSCTL}" config target 10.5.0.2

## Fetch kubeconfig — retry until the API server answers, up to TIMEOUT seconds.
run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until osctl kubeconfig > ${KUBECONFIG}
do
  if [[ \$(date +%s) -gt \$timeout ]]
  then
    exit 1
  fi
  sleep 2
done"

## Wait for all 4 nodes to register with the API server.
run "timeout=\$((\$(date +%s) + ${TIMEOUT}))
until kubectl get nodes -o json | jq '.items | length' | grep 4 >/dev/null
do
  if [[ \$(date +%s) -gt \$timeout ]]
  then
    exit 1
  fi
  kubectl get nodes -o wide
  sleep 5
done"

## Deploy needed manifests
run "kubectl apply -f /manifests/psp.yaml -f /manifests/flannel.yaml -f /manifests/coredns.yaml"

## Wait for all nodes ready
run "kubectl wait --timeout=${TIMEOUT}s --for=condition=ready=true --all nodes"

## Verify that we have an HA controlplane (3 master nodes).
run "kubectl get nodes -l node-role.kubernetes.io/master='' -o json | jq '.items | length' | grep 3 >/dev/null"