#!/bin/bash
set -euo pipefail
## If we take longer than 5m in docker, we're probably boned anyways
TIMEOUT=300
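## Find the drone step container for this build so the run() helper below can share its volumes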
CONTAINER_ID=$(docker ps -f label=io.drone.build.number=${DRONE_BUILD_NUMBER} -f label=io.drone.repo.namespace=${DRONE_REPO_NAMESPACE} -f label=io.drone.repo.name=${DRONE_REPO_NAME} -f label=io.drone.step.name=basic-integration --format='{{ .ID }}')
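## Run a command inside a hyperkube container on the cluster network, with the step container's volumes and the kube/talos configs available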
run() {
  docker run \
    --rm \
    --interactive \
    --net="${DRONE_COMMIT_SHA:0:7}" \
    --entrypoint=bash \
    --volumes-from=${CONTAINER_ID} \
    -e KUBECONFIG=${KUBECONFIG} \
    -e TALOSCONFIG=${TALOSCONFIG} \
    k8s.gcr.io/hyperkube:${KUBERNETES_VERSION} -c "${1}"
}
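## Create the cluster (named after the short commit SHA) and target the node at 10.5.0.2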
${OSCTL} cluster create --name "${DRONE_COMMIT_SHA:0:7}"
${OSCTL} config target 10.5.0.2
## Fetch kubeconfig
run " timeout=\$((\$(date +%s) + ${ TIMEOUT } ))
2019-07-03 18:06:47 -07:00
until ${ OSCTL } kubeconfig > ${ KUBECONFIG }
2019-06-17 12:36:48 -04:00
do
2019-07-03 18:06:47 -07:00
if [ [ \$ ( date +%s) -gt \$ timeout ] ]
2019-06-17 12:36:48 -04:00
then
exit 1
fi
sleep 2
done "
## Wait for all nodes to report in
run " timeout=\$((\$(date +%s) + ${ TIMEOUT } ))
2019-06-17 12:36:48 -04:00
until kubectl get nodes -o json | jq '.items | length' | grep 4 >/dev/null
2019-07-03 18:06:47 -07:00
do
2019-06-17 12:36:48 -04:00
if [ [ \$ ( date +%s) -gt \$ timeout ] ]
then
exit 1
fi
kubectl get nodes -o wide
sleep 5
done "
## Deploy needed manifests
MANIFESTS="${PWD}/hack/dev/manifests"
run "kubectl apply -f ${MANIFESTS}/psp.yaml -f ${MANIFESTS}/flannel.yaml -f ${MANIFESTS}/coredns.yaml"
## Wait for all nodes ready
run " kubectl wait --timeout= ${ TIMEOUT } s --for=condition=ready=true --all nodes "
## Verify that we have an HA controlplane
run "kubectl get nodes -l node-role.kubernetes.io/master='' -o json | jq '.items | length' | grep 3 >/dev/null"