kubernetes: add tests

Authored by Matej Cotman on 2017-05-03 01:20:32 +02:00; committed by Robin Gloster
parent ed322f4235
commit 7f9d1a7aaf
9 changed files with 1150 additions and 0 deletions


@@ -0,0 +1,229 @@
{
pkgs ? import <nixpkgs> {},
servers ? {test = "1.2.3.4";},
internalDomain ? "cluster.local",
externalDomain ? "nixos.xyz"
}:
let
mkAltNames = ipFrom: dnsFrom:
pkgs.lib.concatImapStringsSep "\n" (i: v: "IP.${toString (i+ipFrom)} = ${v.ip}\nDNS.${toString (i+dnsFrom)} = ${v.name}.${externalDomain}") (pkgs.lib.mapAttrsToList (n: v: {name = n; ip = v;}) servers);
runWithOpenSSL = file: cmd: pkgs.runCommand file {
buildInputs = [ pkgs.openssl ];
passthru = { inherit file; };
} cmd;
ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
ca_pem = runWithOpenSSL "ca.pem" ''
openssl req \
-x509 -new -nodes -key ${ca_key} \
-days 10000 -out $out -subj "/CN=etcd-ca"
'';
etcd_cnf = pkgs.writeText "openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = etcd.kubernetes.${externalDomain}
IP.1 = 127.0.0.1
'';
etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
etcd_csr = runWithOpenSSL "etcd.csr" ''
openssl req \
-new -key ${etcd_key} \
-out $out -subj "/CN=etcd" \
-config ${etcd_cnf}
'';
etcd_cert = runWithOpenSSL "etcd.pem" ''
openssl x509 \
-req -in ${etcd_csr} \
-CA ${ca_pem} -CAkey ${ca_key} \
-CAcreateserial -out $out \
-days 3650 -extensions v3_req \
-extfile ${etcd_cnf}
'';
etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
"openssl genrsa -out $out 2048";
etcd_client_csr = runWithOpenSSL "etcd-client.csr" ''
openssl req \
-new -key ${etcd_client_key} \
-out $out -subj "/CN=etcd-client" \
-config ${client_openssl_cnf}
'';
etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
openssl x509 \
-req -in ${etcd_client_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 3650 -extensions v3_req \
-extfile ${client_openssl_cnf}
'';
admin_key = runWithOpenSSL "admin-key.pem"
"openssl genrsa -out $out 2048";
admin_csr = runWithOpenSSL "admin.csr" ''
openssl req \
-new -key ${admin_key} \
-out $out -subj "/CN=admin/O=system:masters" \
-config ${client_openssl_cnf}
'';
admin_cert = runWithOpenSSL "admin.crt" ''
openssl x509 \
-req -in ${admin_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 3650 -extensions v3_req \
-extfile ${client_openssl_cnf}
'';
apiserver_key = runWithOpenSSL "apiserver-key.pem" "openssl genrsa -out $out 2048";
apiserver_csr = runWithOpenSSL "apiserver.csr" ''
openssl req \
-new -key ${apiserver_key} \
-out $out -subj "/CN=kube-apiserver" \
-config ${apiserver_cnf}
'';
apiserver_cert = runWithOpenSSL "apiserver.pem" ''
openssl x509 \
-req -in ${apiserver_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 3650 -extensions v3_req \
-extfile ${apiserver_cnf}
'';
worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048";
worker_csr = runWithOpenSSL "worker.csr" ''
openssl req \
-new -key ${worker_key} \
-out $out -subj "/CN=kube-worker/O=system:authenticated" \
-config ${worker_cnf}
'';
worker_cert = runWithOpenSSL "worker.pem" ''
openssl x509 \
-req -in ${worker_csr} \
-CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
-out $out -days 3650 -extensions v3_req \
-extfile ${worker_cnf}
'';
openssl_cnf = pkgs.writeText "openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.cluster.${externalDomain}
IP.1 = 127.0.0.1
${mkAltNames 1 1}
'';
client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.${internalDomain}
DNS.5 = kubernetes.${externalDomain}
DNS.6 = *.cluster.${externalDomain}
IP.1 = 10.1.10.1
${mkAltNames 1 6}
'';
apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.${internalDomain}
DNS.5 = kubernetes.${externalDomain}
DNS.6 = *.cluster.${externalDomain}
IP.1 = 10.1.10.1
${mkAltNames 1 6}
'';
worker_cnf = pkgs.writeText "worker-openssl.cnf" ''
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.cluster.${externalDomain}
IP.1 = 10.1.10.1
${mkAltNames 1 1}
'';
ln = cert: target: ''
cp -v ${cert} ${target}/${cert.file}
'';
in
pkgs.stdenv.mkDerivation rec {
name = "kubernetes-certs";
unpackPhase = "true";
installPhase = ''
set -xe
mkdir -p $out
${ln ca_key "$out"}
${ln ca_pem "$out"}
${ln etcd_key "$out"}
${ln etcd_csr "$out"}
${ln etcd_cert "$out"}
${ln etcd_client_key "$out"}
${ln etcd_client_csr "$out"}
${ln etcd_client_cert "$out"}
${ln apiserver_key "$out"}
${ln apiserver_csr "$out"}
${ln apiserver_cert "$out"}
${ln worker_key "$out"}
${ln worker_csr "$out"}
${ln worker_cert "$out"}
${ln admin_key "$out"}
${ln admin_csr "$out"}
${ln admin_cert "$out"}
'';
}
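This derivation (imported by the test files below as ./certs.nix) generates a throwaway CA plus etcd, etcd-client, apiserver, worker, and admin key pairs at build time. A minimal sketch of building it standalone, assuming the file is saved under that ./certs.nix name and overriding the default servers argument:

$ nix-build certs.nix --arg servers '{ master = "192.168.1.1"; one = "192.168.1.10"; }'
$ ls result/   # ca.pem, etcd.pem, etcd-client.crt, apiserver.pem, worker.pem, admin.crt, plus keys and CSRs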


@@ -0,0 +1,7 @@
{ }:
{
kubernetes-singlenode = import ./singlenode.nix {};
kubernetes-multinode-kubectl = import ./multinode-kubectl.nix {};
kubernetes-rbac = import ./rbac.nix {};
kubernetes-dns = import ./dns.nix {};
}
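This attribute set wires the four scenarios together. A hedged usage sketch, assuming it is saved as the default.nix of the test directory:

$ nix-build -A kubernetes-dns   # builds and runs the DNS scenario in QEMU VMs via the NixOS test driver

The other attributes (kubernetes-singlenode, kubernetes-multinode-kubectl, kubernetes-rbac) work the same way.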


@@ -0,0 +1,103 @@
{ system ? builtins.currentSystem }:
with import ../../lib/testing.nix { inherit system; };
with import ../../lib/qemu-flags.nix;
with pkgs.lib;
let
servers.master = "192.168.1.1";
servers.one = "192.168.1.10";
certs = import ./certs.nix { inherit servers; };
redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});
redisImage = pkgs.dockerTools.buildImage {
name = "redis";
tag = "latest";
contents = [ pkgs.redis pkgs.bind.dnsutils pkgs.coreutils pkgs.inetutils pkgs.nmap ];
config.Entrypoint = "/bin/redis-server";
};
test = ''
$master->waitUntilSucceeds("kubectl get node master.nixos.xyz | grep Ready");
$master->waitUntilSucceeds("kubectl get node one.nixos.xyz | grep Ready");
$one->execute("docker load < ${redisImage}");
$master->waitUntilSucceeds("kubectl create -f ${redisPod} || kubectl apply -f ${redisPod}");
$master->waitUntilSucceeds("kubectl create -f ${redisService} || kubectl apply -f ${redisService}");
$master->waitUntilSucceeds("kubectl get pod redis | grep Running");
$master->succeed("dig \@192.168.1.1 redis.default.svc.cluster.local");
$one->succeed("dig \@192.168.1.10 redis.default.svc.cluster.local");
$master->succeed("kubectl exec -ti redis -- cat /etc/resolv.conf | grep 'nameserver 192.168.1.10'");
$master->succeed("kubectl exec -ti redis -- dig \@192.168.1.10 redis.default.svc.cluster.local");
'';
in makeTest {
name = "kubernetes-dns";
nodes = {
master =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
networking.interfaces.eth1.ip4 = mkForce [{address = servers.master; prefixLength = 24;}];
networking.primaryIPAddress = mkForce servers.master;
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
(import ./kubernetes-master.nix { inherit pkgs config certs; })
];
one =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
networking.interfaces.eth1.ip4 = mkForce [{address = servers.one; prefixLength = 24;}];
networking.primaryIPAddress = mkForce servers.one;
services.kubernetes.roles = ["node"];
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
];
};
testScript = ''
startAll;
${test}
'';
}


@@ -0,0 +1,131 @@
{ config, pkgs, certs, servers }:
let
etcd_key = "${certs}/etcd-key.pem";
etcd_cert = "${certs}/etcd.pem";
ca_pem = "${certs}/ca.pem";
etcd_client_cert = "${certs}/etcd-client.crt";
etcd_client_key = "${certs}/etcd-client-key.pem";
worker_key = "${certs}/worker-key.pem";
worker_cert = "${certs}/worker.pem";
mkDockerOpts = "${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh";
rootCaFile = pkgs.writeScript "rootCaFile.pem" ''
${pkgs.lib.readFile "${certs}/ca.pem"}
${pkgs.lib.readFile ("${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt")}
'';
mkHosts =
pkgs.lib.concatMapStringsSep "\n" (v: "${v.ip} ${v.name}.nixos.xyz") (pkgs.lib.mapAttrsToList (n: v: {name = n; ip = v;}) servers);
in
{
programs.bash.enableCompletion = true;
environment.systemPackages = with pkgs; [ netcat bind etcd.bin ];
networking = {
firewall = {
enable = true;
allowedTCPPorts = [
10250 80 443
];
allowedUDPPorts = [
8285 # flannel udp
8472 # flannel vxlan
];
};
extraHosts = ''
# register "external" domains
${servers.master} etcd.kubernetes.nixos.xyz
${servers.master} kubernetes.nixos.xyz
${mkHosts}
'';
};
virtualisation.docker.extraOptions = ''
--iptables=false $DOCKER_OPTS
'';
# create an environment file with flannel's network settings for docker startup
systemd.services."pre-docker" = {
description = "Pre-Docker Actions";
wantedBy = [ "flannel.service" ];
before = [ "docker.service" ];
after = [ "flannel.service" ];
path = [ pkgs.gawk pkgs.gnugrep ];
script = ''
mkdir -p /run/flannel
# bashInteractive needed for `compgen`
${pkgs.bashInteractive}/bin/bash ${mkDockerOpts} -d /run/flannel/docker
cat /run/flannel/docker # just for debugging
# allow container to host communication for DNS traffic
${pkgs.iptables}/bin/iptables -I nixos-fw -p tcp -m tcp -i docker0 --dport 53 -j nixos-fw-accept
${pkgs.iptables}/bin/iptables -I nixos-fw -p udp -m udp -i docker0 --dport 53 -j nixos-fw-accept
'';
serviceConfig.Type = "simple";
};
systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/docker";
services.flannel = {
enable = true;
network = "10.2.0.0/16";
iface = "eth1";
etcd = {
endpoints = ["https://etcd.kubernetes.nixos.xyz:2379"];
keyFile = etcd_client_key;
certFile = etcd_client_cert;
caFile = ca_pem;
};
};
environment.variables = {
ETCDCTL_CERT_FILE = "${etcd_client_cert}";
ETCDCTL_KEY_FILE = "${etcd_client_key}";
ETCDCTL_CA_FILE = "${rootCaFile}";
ETCDCTL_PEERS = "https://etcd.kubernetes.nixos.xyz:2379";
};
services.kubernetes = {
kubelet = {
networkPlugin = "cni";
cni.config = [{
name = "mynet";
type = "flannel";
delegate = {
isDefaultGateway = true;
bridge = "docker0";
};
}];
tlsKeyFile = worker_key;
tlsCertFile = worker_cert;
hostname = "${config.networking.hostName}.nixos.xyz";
extraOpts = "--node-ip ${config.networking.primaryIPAddress}";
clusterDns = config.networking.primaryIPAddress;
};
etcd = {
servers = ["https://etcd.kubernetes.nixos.xyz:2379"];
keyFile = etcd_client_key;
certFile = etcd_client_cert;
caFile = ca_pem;
};
kubeconfig = {
server = "https://kubernetes.nixos.xyz:4443";
caFile = rootCaFile;
certFile = worker_cert;
keyFile = worker_key;
};
# make sure clusterCidr covers both services.kubernetes.apiserver.portalNet and the flannel network
clusterCidr = "10.0.0.0/8";
dns.enable = true;
dns.port = 4453;
};
services.dnsmasq.enable = true;
services.dnsmasq.servers = ["/${config.services.kubernetes.dns.domain}/127.0.0.1#4453"];
virtualisation.docker.enable = true;
virtualisation.docker.storageDriver = "overlay";
}
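The ETCDCTL_* variables above preconfigure etcdctl for the TLS-protected endpoint, so debugging etcd from a node shell needs no extra flags. A hedged example, assuming the v2-era etcdctl shipped in the etcd.bin package installed above:

$ etcdctl cluster-health
$ etcdctl ls /coreos.com/network   # flannel's default etcd prefix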


@@ -0,0 +1,148 @@
{ config, pkgs, certs }:
let
etcd_key = "${certs}/etcd-key.pem";
etcd_cert = "${certs}/etcd.pem";
ca_pem = "${certs}/ca.pem";
etcd_client_cert = "${certs}/etcd-client.crt";
etcd_client_key = "${certs}/etcd-client-key.pem";
apiserver_key = "${certs}/apiserver-key.pem";
apiserver_cert = "${certs}/apiserver.pem";
worker_key = "${certs}/worker-key.pem";
worker_cert = "${certs}/worker.pem";
rootCaFile = pkgs.writeScript "rootCaFile.pem" ''
${pkgs.lib.readFile "${certs}/ca.pem"}
${pkgs.lib.readFile ("${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt")}
'';
in
{
networking = {
firewall = {
enable = true;
allowPing = true;
allowedTCPPorts = [
2379 2380 # etcd
4443 # kubernetes
];
};
};
services.etcd = {
enable = pkgs.lib.mkForce true;
keyFile = etcd_key;
certFile = etcd_cert;
trustedCaFile = rootCaFile;
peerClientCertAuth = true;
listenClientUrls = ["https://0.0.0.0:2379"];
listenPeerUrls = ["https://0.0.0.0:2380"];
advertiseClientUrls = ["https://etcd.kubernetes.nixos.xyz:2379"];
initialCluster = ["master=https://etcd.kubernetes.nixos.xyz:2380"];
initialAdvertisePeerUrls = ["https://etcd.kubernetes.nixos.xyz:2380"];
};
services.kubernetes = {
roles = ["master"];
scheduler.leaderElect = true;
controllerManager.leaderElect = true;
controllerManager.rootCaFile = rootCaFile;
controllerManager.serviceAccountKeyFile = apiserver_key;
apiserver = {
securePort = 4443;
publicAddress = "192.168.1.1";
advertiseAddress = "192.168.1.1";
tlsKeyFile = apiserver_key;
tlsCertFile = apiserver_cert;
clientCaFile = rootCaFile;
kubeletClientCaFile = rootCaFile;
kubeletClientKeyFile = worker_key;
kubeletClientCertFile = worker_cert;
portalNet = "10.1.10.0/24"; # --service-cluster-ip-range
runtimeConfig = "";
/*extraOpts = "--v=2";*/
authorizationMode = ["ABAC"];
authorizationPolicy = [
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kubecfg";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kubelet";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kube-worker";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kube_proxy";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "client";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
group = "system:serviceaccounts";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
group = "system:authenticated";
readonly = true;
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
];
};
};
}
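With ABAC enabled, kube-apiserver reads its policy from a file of one JSON object per line (--authorization-policy-file); the authorizationPolicy list above is presumably serialized into that form by the NixOS module. A sketch of how the first entry would look on disk:

{"apiVersion":"abac.authorization.kubernetes.io/v1beta1","kind":"Policy","spec":{"user":"kubecfg","namespace":"*","resource":"*","apiGroup":"*","nonResourcePath":"*"}}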


@@ -0,0 +1,147 @@
{ system ? builtins.currentSystem }:
with import ../../lib/testing.nix { inherit system; };
with import ../../lib/qemu-flags.nix;
with pkgs.lib;
let
servers.master = "192.168.1.1";
servers.one = "192.168.1.10";
servers.two = "192.168.1.20";
servers.three = "192.168.1.30";
certs = import ./certs.nix { inherit servers; };
kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl";
metadata.labels.name = "kubectl";
spec.containers = [{
name = "kubectl";
image = "kubectl:latest";
command = ["${pkgs.busybox}/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectlImage = pkgs.dockerTools.buildImage {
name = "kubectl";
tag = "latest";
contents = [ pkgs.kubernetes pkgs.busybox certs kubeconfig ];
config.Entrypoint = "${pkgs.busybox}/bin/sh";
};
kubeconfig = pkgs.writeTextDir "kubeconfig.json" (builtins.toJSON {
apiVersion = "v1";
kind = "Config";
clusters = [{
name = "local";
cluster.certificate-authority = "/ca.pem";
cluster.server = "https://${servers.master}:4443/";
}];
users = [{
name = "kubelet";
user = {
client-certificate = "/admin.crt";
client-key = "/admin-key.pem";
};
}];
contexts = [{
context = {
cluster = "local";
user = "kubelet";
};
current-context = "kubelet-context";
}];
});
test = ''
$master->waitUntilSucceeds("kubectl get node master.nixos.xyz | grep Ready");
$master->waitUntilSucceeds("kubectl get node one.nixos.xyz | grep Ready");
$master->waitUntilSucceeds("kubectl get node two.nixos.xyz | grep Ready");
$master->waitUntilSucceeds("kubectl get node three.nixos.xyz | grep Ready");
$one->execute("docker load < ${kubectlImage}");
$two->execute("docker load < ${kubectlImage}");
$three->execute("docker load < ${kubectlImage}");
$master->waitUntilSucceeds("kubectl create -f ${kubectlPod} || kubectl apply -f ${kubectlPod}");
$master->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
$master->succeed("kubectl exec -ti kubectl -- kubectl --kubeconfig=/kubeconfig.json version");
'';
in makeTest {
name = "kubernetes-multinode-kubectl";
nodes = {
master =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
# networking.hostName = mkForce "master";
networking.interfaces.eth1.ip4 = mkForce [{address = servers.master; prefixLength = 24;}];
# networking.nat.externalIP = "192.168.1.1";
networking.primaryIPAddress = mkForce servers.master;
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
(import ./kubernetes-master.nix { inherit pkgs config certs; })
];
one =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
# networking.hostName = mkForce "one";
networking.interfaces.eth1.ip4 = mkForce [{address = servers.one; prefixLength = 24;}];
# networking.nat.externalIP = "192.168.1.2";
networking.primaryIPAddress = mkForce servers.one;
services.kubernetes.roles = ["node"];
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
];
two =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
# networking.hostName = mkForce "two";
networking.interfaces.eth1.ip4 = mkForce [{address = servers.two; prefixLength = 24;}];
# networking.nat.externalIP = "192.168.1.3";
networking.primaryIPAddress = mkForce servers.two;
services.kubernetes.roles = ["node"];
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
];
three =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
# networking.hostName = mkForce "three";
networking.interfaces.eth1.ip4 = mkForce [{address = servers.three; prefixLength = 24;}];
# networking.nat.externalIP = "192.168.1.4";
networking.primaryIPAddress = mkForce servers.three;
services.kubernetes.roles = ["node"];
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
];
};
testScript = ''
startAll;
${test}
'';
}


@@ -0,0 +1,206 @@
{ system ? builtins.currentSystem }:
with import ../../lib/testing.nix { inherit system; };
with import ../../lib/qemu-flags.nix;
with pkgs.lib;
let
servers.master = "192.168.1.1";
servers.one = "192.168.1.10";
certs = import ./certs.nix { inherit servers; };
roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
kind = "ServiceAccount";
apiVersion = "v1";
metadata = {
name = "read-only";
namespace = "default";
};
});
roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
"apiVersion" = "rbac.authorization.k8s.io/v1beta1";
"kind" = "RoleBinding";
"metadata" = {
"name" = "read-pods";
"namespace" = "default";
};
"roleRef" = {
"apiGroup" = "rbac.authorization.k8s.io";
"kind" = "Role";
"name" = "pod-reader";
};
"subjects" = [{
"kind" = "ServiceAccount";
"name" = "read-only";
"namespace" = "default";
}];
});
roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
"apiVersion" = "rbac.authorization.k8s.io/v1beta1";
"kind" = "Role";
"metadata" = {
"name" = "pod-reader";
"namespace" = "default";
};
"rules" = [{
"apiGroups" = [""];
"resources" = ["pods"];
"verbs" = ["get" "list" "watch"];
}];
});
kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl";
metadata.namespace = "default";
metadata.labels.name = "kubectl";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl";
image = "kubectl:latest";
command = ["${pkgs.busybox}/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl-2";
metadata.namespace = "default";
metadata.labels.name = "kubectl-2";
spec.serviceAccountName = "read-only";
spec.containers = [{
name = "kubectl-2";
image = "kubectl:latest";
command = ["${pkgs.busybox}/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectlImage = pkgs.dockerTools.buildImage {
name = "kubectl";
tag = "latest";
contents = [ pkgs.kubernetes pkgs.busybox kubectlPod2 ]; # certs kubeconfig
config.Entrypoint = "${pkgs.busybox}/bin/sh";
};
test = ''
$master->waitUntilSucceeds("kubectl get node master.nixos.xyz | grep Ready");
$master->waitUntilSucceeds("kubectl get node one.nixos.xyz | grep Ready");
$one->execute("docker load < ${kubectlImage}");
$master->waitUntilSucceeds("kubectl apply -f ${roServiceAccount}");
$master->waitUntilSucceeds("kubectl apply -f ${roRole}");
$master->waitUntilSucceeds("kubectl apply -f ${roRoleBinding}");
$master->waitUntilSucceeds("kubectl create -f ${kubectlPod} || kubectl apply -f ${kubectlPod}");
$master->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
$master->succeed("kubectl exec -ti kubectl -- kubectl get pods");
$master->fail("kubectl exec -ti kubectl -- kubectl create -f /kubectl-pod-2.json");
$master->fail("kubectl exec -ti kubectl -- kubectl delete pods -l name=kubectl");
'';
in makeTest {
name = "kubernetes-multinode-rbac";
nodes = {
master =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
networking.interfaces.eth1.ip4 = mkForce [{address = servers.master; prefixLength = 24;}];
networking.primaryIPAddress = mkForce servers.master;
services.kubernetes.apiserver.authorizationMode = mkForce ["ABAC" "RBAC"];
services.kubernetes.apiserver.authorizationPolicy = mkForce [
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kubecfg";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kubelet";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kube-worker";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "kube_proxy";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
{
apiVersion = "abac.authorization.kubernetes.io/v1beta1";
kind = "Policy";
spec = {
user = "client";
namespace = "*";
resource = "*";
apiGroup = "*";
nonResourcePath = "*";
};
}
];
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
(import ./kubernetes-master.nix { inherit pkgs config certs; })
];
one =
{ config, pkgs, lib, nodes, ... }:
mkMerge [
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
networking.interfaces.eth1.ip4 = mkForce [{address = servers.one; prefixLength = 24;}];
networking.primaryIPAddress = mkForce servers.one;
services.kubernetes.roles = ["node"];
}
(import ./kubernetes-common.nix { inherit pkgs config certs servers; })
];
};
testScript = ''
startAll;
${test}
'';
}


@@ -0,0 +1,97 @@
{ system ? builtins.currentSystem }:
with import ../../lib/testing.nix { inherit system; };
with import ../../lib/qemu-flags.nix;
with pkgs.lib;
let
certs = import ./certs.nix { servers = {}; };
kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "kubectl";
metadata.labels.name = "kubectl";
spec.containers = [{
name = "kubectl";
image = "kubectl:latest";
command = ["${pkgs.busybox}/bin/tail" "-f"];
imagePullPolicy = "Never";
tty = true;
}];
});
kubectlImage = pkgs.dockerTools.buildImage {
name = "kubectl";
tag = "latest";
contents = [ pkgs.kubernetes pkgs.busybox certs kubeconfig ];
config.Entrypoint = "${pkgs.busybox}/bin/sh";
};
kubeconfig = pkgs.writeTextDir "kubeconfig.json" (builtins.toJSON {
apiVersion = "v1";
kind = "Config";
clusters = [{
name = "local";
cluster.certificate-authority = "/ca.pem";
cluster.server = "https://192.168.1.1:4443/";
}];
users = [{
name = "kubelet";
user = {
client-certificate = "/admin.crt";
client-key = "/admin-key.pem";
};
}];
contexts = [{
context = {
cluster = "local";
user = "kubelet";
};
current-context = "kubelet-context";
}];
});
test = ''
$kubernetes->execute("docker load < ${kubectlImage}");
$kubernetes->waitUntilSucceeds("kubectl create -f ${kubectlPod} || kubectl apply -f ${kubectlPod}");
$kubernetes->waitUntilSucceeds("kubectl get pod kubectl | grep Running");
# FIXME: this test fails; for some reason it cannot reach the host IP address
$kubernetes->succeed("kubectl exec -ti kubectl -- kubectl --kubeconfig=/kubeconfig.json version");
'';
in makeTest {
name = "kubernetes-singlenode-kubectl";
nodes = {
kubernetes =
{ config, pkgs, lib, nodes, ... }:
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 4096;
programs.bash.enableCompletion = true;
environment.systemPackages = with pkgs; [ netcat bind ];
services.kubernetes.roles = ["master" "node"];
services.kubernetes.apiserver.securePort = 4443;
services.kubernetes.dns.port = 4453;
services.kubernetes.clusterCidr = "10.0.0.0/8";
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0";
networking.interfaces.eth1.ip4 = mkForce [{address = "192.168.1.1"; prefixLength = 24;}];
networking.primaryIPAddress = mkForce "192.168.1.1";
networking.bridges.cbr0.interfaces = [];
networking.interfaces.cbr0 = {};
services.dnsmasq.enable = true;
services.dnsmasq.servers = ["/${config.services.kubernetes.dns.domain}/127.0.0.1#4453"];
};
};
testScript = ''
startAll;
${test}
'';
}


@@ -0,0 +1,82 @@
{ system ? builtins.currentSystem }:
with import ../../lib/testing.nix { inherit system; };
with import ../../lib/qemu-flags.nix;
with pkgs.lib;
let
redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON {
kind = "Pod";
apiVersion = "v1";
metadata.name = "redis";
metadata.labels.name = "redis";
spec.containers = [{
name = "redis";
image = "redis";
args = ["--bind" "0.0.0.0"];
imagePullPolicy = "Never";
ports = [{
name = "redis-server";
containerPort = 6379;
}];
}];
});
redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
kind = "Service";
apiVersion = "v1";
metadata.name = "redis";
spec = {
ports = [{port = 6379; targetPort = 6379;}];
selector = {name = "redis";};
};
});
redisImage = pkgs.dockerTools.buildImage {
name = "redis";
tag = "latest";
contents = pkgs.redis;
config.Entrypoint = "/bin/redis-server";
};
testSimplePod = ''
$kubernetes->execute("docker load < ${redisImage}");
$kubernetes->waitUntilSucceeds("kubectl create -f ${redisPod}");
$kubernetes->succeed("kubectl create -f ${redisService}");
$kubernetes->waitUntilSucceeds("kubectl get pod redis | grep Running");
$kubernetes->succeed("nc -z \$\(dig redis.default.svc.cluster.local +short\) 6379");
'';
in {
# This test runs kubernetes on a single node
singlenode = makeTest {
name = "kubernetes-singlenode";
nodes = {
kubernetes =
{ config, pkgs, lib, nodes, ... }:
{
virtualisation.memorySize = 768;
virtualisation.diskSize = 2048;
programs.bash.enableCompletion = true;
environment.systemPackages = with pkgs; [ netcat bind ];
services.kubernetes.roles = ["master" "node"];
services.kubernetes.dns.port = 4453;
virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0";
networking.bridges.cbr0.interfaces = [];
networking.interfaces.cbr0 = {};
services.dnsmasq.enable = true;
services.dnsmasq.servers = ["/${config.services.kubernetes.dns.domain}/127.0.0.1#4453"];
};
};
testScript = ''
startAll;
${testSimplePod}
'';
};
}