nixpkgs-suyu/nixos/tests/kubernetes/dns.nix
Christian Albrecht 62f03750e4
nixos/kubernetes: Stabilize services startup across machines
by adding targets and curl wait loops to services, to ensure that
services are not started before the services they depend on are reachable.

The extra targets cfssl-online.target and kube-apiserver-online.target
synchronize startup across machines, and node-online.target ensures that
docker is only restarted, and ready to have containers deployed on it,
after flannel has negotiated the network CIDR with the apiserver.
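
A rough sketch of how such an online target is wired up (the unit names
follow the dependency tree below; the exact options and health-check URL
used by the module may differ):

  systemd.targets.kube-apiserver-online = {
    wantedBy = [ "kubernetes.target" ];
    before = [ "kubernetes.target" ];
  };

  systemd.services.kube-apiserver-online = {
    description = "Wait for the apiserver to answer on /healthz";
    wantedBy = [ "kube-apiserver-online.target" ];
    before = [ "kube-apiserver-online.target" ];
    serviceConfig = { Type = "oneshot"; RemainAfterExit = true; };
    script = ''
      # Poll until the apiserver health endpoint responds, then let the
      # target (and everything ordered after it) be reached.
      until ${pkgs.curl}/bin/curl -k -sSf -o /dev/null https://localhost:6443/healthz; do
        sleep 2
      done
    '';
  };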

Since flannel needs to be started before the addon-manager in order to
configure the docker interface, it has to have its own RBAC bootstrap service.

The curl wait loops within the other services exist to ensure that each
service can do its work immediately when it starts, instead of cluttering
the log with messages about failing preconditions.
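
Inside the dependent units this is just a preStart loop of roughly this
shape (the endpoint is illustrative; each unit polls the address it
actually depends on):

  systemd.services.kube-proxy.preStart = ''
    # Block until the apiserver responds, so kube-proxy starts doing real
    # work immediately instead of logging connection errors.
    until ${pkgs.curl}/bin/curl -k -sSf -o /dev/null https://localhost:6443/healthz; do
      sleep 2
    done
  '';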

By ensuring that kubernetes.target is only reached after the cluster has
started, it can be used as a wait condition in the tests.

kube-certmgr-bootstrap needs an mkdir so that it does not fail to start.
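
Concretely, something along these lines; the path is illustrative, the
real one comes from the module's dataDir/secrets options:

  systemd.services.kube-certmgr-bootstrap.preStart = ''
    # The unit fails to start on a fresh machine if this directory is
    # missing, so create it up front.
    mkdir -p /var/lib/kubernetes/secrets
  '';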

The following is the relevant part of systemctl list-dependencies

default.target
● ├─certmgr.service
● ├─cfssl.service
● ├─docker.service
● ├─etcd.service
● ├─flannel.service
● ├─kubernetes.target
● │ ├─kube-addon-manager.service
● │ ├─kube-proxy.service
● │ ├─kube-apiserver-online.target
● │ │ ├─flannel-rbac-bootstrap.service
● │ │ ├─kube-apiserver-online.service
● │ │ ├─kube-apiserver.service
● │ │ ├─kube-controller-manager.service
● │ │ └─kube-scheduler.service
● │ └─node-online.target
● │   ├─node-online.service
● │   ├─flannel.target
● │   │ ├─flannel.service
● │   │ └─mk-docker-opts.service
● │   └─kubelet.target
● │     └─kubelet.service
● ├─network-online.target
● │ └─cfssl-online.target
● │   ├─certmgr.service
● │   ├─cfssl-online.service
● │   └─kube-certmgr-bootstrap.service
2019-03-03 19:39:02 +01:00

{ system ? builtins.currentSystem, pkgs ? import <nixpkgs> { inherit system; } }:
with import ./base.nix { inherit system; };

let
  domain = "my.zyx";

  certs = import ./certs.nix { externalDomain = domain; kubelets = [ "machine1" "machine2" ]; };
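
  # Redis pod, service and local docker image: the service is what the DNS
  # lookups for redis.default.svc.cluster.local below resolve to.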
  redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "redis";
    metadata.labels.name = "redis";
    spec.containers = [{
      name = "redis";
      image = "redis";
      args = ["--bind" "0.0.0.0"];
      imagePullPolicy = "Never";
      ports = [{
        name = "redis-server";
        containerPort = 6379;
      }];
    }];
  });

  redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
    kind = "Service";
    apiVersion = "v1";
    metadata.name = "redis";
    spec = {
      ports = [{port = 6379; targetPort = 6379;}];
      selector = {name = "redis";};
    };
  });

  redisImage = pkgs.dockerTools.buildImage {
    name = "redis";
    tag = "latest";
    contents = [ pkgs.redis pkgs.bind.host ];
    config.Entrypoint = "/bin/redis-server";
  };
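
  # Probe pod used to run `host` from inside the cluster; the image just
  # runs `tail -f` so the container stays up for kubectl exec.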
  probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
    kind = "Pod";
    apiVersion = "v1";
    metadata.name = "probe";
    metadata.labels.name = "probe";
    spec.containers = [{
      name = "probe";
      image = "probe";
      args = [ "-f" ];
      tty = true;
      imagePullPolicy = "Never";
    }];
  });

  probeImage = pkgs.dockerTools.buildImage {
    name = "probe";
    tag = "latest";
    contents = [ pkgs.bind.host pkgs.busybox ];
    config.Entrypoint = "/bin/tail";
  };
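
  # Forward cluster.local queries on the host to the cluster DNS service,
  # so that `host` also works outside the containers.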
  extraConfiguration = { config, pkgs, ... }: {
    environment.systemPackages = [ pkgs.bind.host ];
    # virtualisation.docker.extraOptions = "--dns=${config.services.kubernetes.addons.dns.clusterIp}";
    services.dnsmasq.enable = true;
    services.dnsmasq.servers = [
      "/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
    ];
  };

  base = {
    name = "dns";
    inherit domain extraConfiguration;
  };

  singleNodeTest = {
    test = ''
      # prepare machine1 for test
      $machine1->waitForUnit("kubernetes.target");
      $machine1->waitUntilSucceeds("kubectl get node machine1.${domain} | grep -w Ready");
      $machine1->waitUntilSucceeds("docker load < ${redisImage}");
      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
      $machine1->waitUntilSucceeds("docker load < ${probeImage}");
      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");

      # check if pods are running
      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");

      # check dns on host (dnsmasq)
      $machine1->succeed("host redis.default.svc.cluster.local");

      # check dns inside the container
      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
    '';
  };

  multiNodeTest = {
    test = ''
      # Node token exchange
      $machine1->waitUntilSucceeds("cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret");
      $machine2->waitUntilSucceeds("cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join");

      $machine1->waitForUnit("kubernetes.target");
      $machine2->waitForUnit("kubernetes.target");

      # prepare machines for test
      $machine1->waitUntilSucceeds("kubectl get node machine2.${domain} | grep -w Ready");
      $machine2->waitUntilSucceeds("docker load < ${redisImage}");
      $machine1->waitUntilSucceeds("kubectl create -f ${redisPod}");
      $machine1->waitUntilSucceeds("kubectl create -f ${redisService}");
      $machine2->waitUntilSucceeds("docker load < ${probeImage}");
      $machine1->waitUntilSucceeds("kubectl create -f ${probePod}");

      # check if pods are running
      $machine1->waitUntilSucceeds("kubectl get pod redis | grep Running");
      $machine1->waitUntilSucceeds("kubectl get pod probe | grep Running");
      $machine1->waitUntilSucceeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'");

      # check dns on hosts (dnsmasq)
      $machine1->succeed("host redis.default.svc.cluster.local");
      $machine2->succeed("host redis.default.svc.cluster.local");

      # check dns inside the container
      $machine1->succeed("kubectl exec -ti probe -- /bin/host redis.default.svc.cluster.local");
    '';
  };
in {
  singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
  multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
}