diff --git a/nixos/modules/services/cluster/kubernetes/default.nix b/nixos/modules/services/cluster/kubernetes/default.nix index 7160bcca1533..76b27ac0efba 100644 --- a/nixos/modules/services/cluster/kubernetes/default.nix +++ b/nixos/modules/services/cluster/kubernetes/default.nix @@ -75,13 +75,93 @@ let effect = mkOption { description = "Effect of taint."; example = "NoSchedule"; - type = types.str; + type = types.enum ["NoSchedule" "PreferNoSchedule" "NoExecute"]; }; }; }; taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.kubelet.taints); + defaultAuthorizationPolicy = (optionals (any (el: el == "ABAC") cfg.apiserver.authorizationMode) [ + { + apiVersion = "abac.authorization.kubernetes.io/v1beta1"; + kind = "Policy"; + spec = { + user = "kubecfg"; + namespace = "*"; + resource = "*"; + apiGroup = "*"; + nonResourcePath = "*"; + }; + } + { + apiVersion = "abac.authorization.kubernetes.io/v1beta1"; + kind = "Policy"; + spec = { + user = "kubelet"; + namespace = "*"; + resource = "*"; + apiGroup = "*"; + nonResourcePath = "*"; + }; + } + { + apiVersion = "abac.authorization.kubernetes.io/v1beta1"; + kind = "Policy"; + spec = { + user = "kube-worker"; + namespace = "*"; + resource = "*"; + apiGroup = "*"; + nonResourcePath = "*"; + }; + } + { + apiVersion = "abac.authorization.kubernetes.io/v1beta1"; + kind = "Policy"; + spec = { + user = "kube_proxy"; + namespace = "*"; + resource = "*"; + apiGroup = "*"; + nonResourcePath = "*"; + }; + } + { + apiVersion = "abac.authorization.kubernetes.io/v1beta1"; + kind = "Policy"; + spec = { + user = "client"; + namespace = "*"; + resource = "*"; + apiGroup = "*"; + nonResourcePath = "*"; + }; + } + ]) ++ (optionals (all (el: el != "RBAC") cfg.apiserver.authorizationMode) [ + { + apiVersion = "abac.authorization.kubernetes.io/v1beta1"; + kind = "Policy"; + spec = { + user = "admin"; + namespace = "*"; + resource = "*"; + apiGroup = "*"; + nonResourcePath = "*"; + }; + } + { + apiVersion = "abac.authorization.kubernetes.io/v1beta1"; + kind = "Policy"; + spec = { + group = "system:serviceaccounts"; + namespace = "*"; + resource = "*"; + apiGroup = "*"; + nonResourcePath = "*"; + }; + } + ]); in { ###### interface @@ -205,7 +285,7 @@ in { description = '' Kubernetes apiserver storage backend. ''; - default = "etcd2"; + default = "etcd3"; type = types.enum ["etcd2" "etcd3"]; }; @@ -264,7 +344,7 @@ in { Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/RBAC). See ''; - default = ["ABAC"]; + default = ["ABAC" "RBAC"]; type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "RBAC"]); }; @@ -273,89 +353,11 @@ in { Kubernetes apiserver authorization policy file. 
See ''; - default = [ - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "admin"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kubecfg"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kubelet"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kube-worker"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kube_proxy"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "client"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - group = "system:serviceaccounts"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - ]; + default = defaultAuthorizationPolicy; type = types.listOf types.attrs; }; - autorizationRBACSuperAdmin = mkOption { + authorizationRBACSuperAdmin = mkOption { description = "Role based authorization super admin."; default = "admin"; type = types.str; @@ -647,7 +649,7 @@ in { }; applyManifests = mkOption { - description = "Whether to apply manifests."; + description = "Whether to apply manifests (this is true for master node)."; default = false; type = types.bool; }; @@ -659,7 +661,7 @@ in { }; taints = mkOption { - description = "."; + description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/)."; default = {}; type = types.attrsOf (types.submodule [ taintOptions ]); }; @@ -878,7 +880,7 @@ in { }" } \ ${optionalString (elem "RBAC" cfg.apiserver.authorizationMode) - "--authorization-rbac-super-user=${cfg.apiserver.autorizationRBACSuperAdmin}"} \ + "--authorization-rbac-super-user=${cfg.apiserver.authorizationRBACSuperAdmin}"} \ --secure-port=${toString cfg.apiserver.securePort} \ --service-cluster-ip-range=${cfg.apiserver.portalNet} \ ${optionalString (cfg.apiserver.runtimeConfig != "") diff --git a/nixos/modules/services/cluster/kubernetes/dns.nix b/nixos/modules/services/cluster/kubernetes/dns.nix deleted file mode 100644 index ac59eaf87250..000000000000 --- a/nixos/modules/services/cluster/kubernetes/dns.nix +++ /dev/null @@ -1,240 +0,0 @@ -{ cfg }: { - "kubedns-cm" = { - "apiVersion" = "v1"; - "kind" = "ConfigMap"; - "metadata" = { - "labels" = { - "addonmanager.kubernetes.io/mode" = "EnsureExists"; - }; - "name" = "kube-dns"; - "namespace" = "kube-system"; - }; - }; - "kubedns-controller" = { - "apiVersion" = "extensions/v1beta1"; - "kind" = "Deployment"; - "metadata" = { - "labels" = { - "addonmanager.kubernetes.io/mode" = "Reconcile"; - "k8s-app" = "kube-dns"; - "kubernetes.io/cluster-service" = "true"; - }; - "name" = "kube-dns"; - "namespace" = "kube-system"; - }; - "spec" = { - "selector" = { - "matchLabels" = { - "k8s-app" = "kube-dns"; - }; - }; - "strategy" = { - "rollingUpdate" = { - "maxSurge" = "10%"; - 
"maxUnavailable" = 0; - }; - }; - "template" = { - "metadata" = { - "annotations" = { - "scheduler.alpha.kubernetes.io/critical-pod" = ""; - }; - "labels" = { - "k8s-app" = "kube-dns"; - }; - }; - "spec" = { - "containers" = [{ - "args" = ["--domain=${cfg.dns.domain}." - "--dns-port=10053" - "--config-dir=/kube-dns-config" - "--kube-master-url=${cfg.kubeconfig.server}" - "--v=2" - ]; - "env" = [{ - "name" = "PROMETHEUS_PORT"; - "value" = "10055"; - }]; - "image" = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1"; - "livenessProbe" = { - "failureThreshold" = 5; - "httpGet" = { - "path" = "/healthcheck/kubedns"; - "port" = 10054; - "scheme" = "HTTP"; - }; - "initialDelaySeconds" = 60; - "successThreshold" = 1; - "timeoutSeconds" = 5; - }; - "name" = "kubedns"; - "ports" = [{ - "containerPort" = 10053; - "name" = "dns-local"; - "protocol" = "UDP"; - } { - "containerPort" = 10053; - "name" = "dns-tcp-local"; - "protocol" = "TCP"; - } { - "containerPort" = 10055; - "name" = "metrics"; - "protocol" = "TCP"; - }]; - "readinessProbe" = { - "httpGet" = { - "path" = "/readiness"; - "port" = 8081; - "scheme" = "HTTP"; - }; - "initialDelaySeconds" = 3; - "timeoutSeconds" = 5; - }; - "resources" = { - "limits" = { - "memory" = "170Mi"; - }; - "requests" = { - "cpu" = "100m"; - "memory" = "70Mi"; - }; - }; - "volumeMounts" = [{ - "mountPath" = "/kube-dns-config"; - "name" = "kube-dns-config"; - }]; - } { - "args" = ["-v=2" - "-logtostderr" - "-configDir=/etc/k8s/dns/dnsmasq-nanny" - "-restartDnsmasq=true" - "--" - "-k" - "--cache-size=1000" - "--log-facility=-" - "--server=/${cfg.dns.domain}/127.0.0.1#10053" - "--server=/in-addr.arpa/127.0.0.1#10053" - "--server=/ip6.arpa/127.0.0.1#10053" - ]; - "image" = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1"; - "livenessProbe" = { - "failureThreshold" = 5; - "httpGet" = { - "path" = "/healthcheck/dnsmasq"; - "port" = 10054; - "scheme" = "HTTP"; - }; - "initialDelaySeconds" = 60; - "successThreshold" = 1; - "timeoutSeconds" = 5; - }; - "name" = "dnsmasq"; - "ports" = [{ - "containerPort" = 53; - "name" = "dns"; - "protocol" = "UDP"; - } { - "containerPort" = 53; - "name" = "dns-tcp"; - "protocol" = "TCP"; - }]; - "resources" = { - "requests" = { - "cpu" = "150m"; - "memory" = "20Mi"; - }; - }; - "volumeMounts" = [{ - "mountPath" = "/etc/k8s/dns/dnsmasq-nanny"; - "name" = "kube-dns-config"; - }]; - } { - "args" = ["--v=2" - "--logtostderr" - "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.${cfg.dns.domain},5,A" - "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.${cfg.dns.domain},5,A" - ]; - "image" = "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1"; - "livenessProbe" = { - "failureThreshold" = 5; - "httpGet" = { - "path" = "/metrics"; - "port" = 10054; - "scheme" = "HTTP"; - }; - "initialDelaySeconds" = 60; - "successThreshold" = 1; - "timeoutSeconds" = 5; - }; - "name" = "sidecar"; - "ports" = [{ - "containerPort" = 10054; - "name" = "metrics"; - "protocol" = "TCP"; - }]; - "resources" = { - "requests" = { - "cpu" = "10m"; - "memory" = "20Mi"; - }; - }; - }]; - "dnsPolicy" = "Default"; - "serviceAccountName" = "kube-dns"; - "tolerations" = [{ - "key" = "CriticalAddonsOnly"; - "operator" = "Exists"; - }]; - "volumes" = [{ - "configMap" = { - "name" = "kube-dns"; - "optional" = true; - }; - "name" = "kube-dns-config"; - }]; - }; - }; - }; - }; - "kubedns-sa" = { - "apiVersion" = "v1"; - "kind" = "ServiceAccount"; - "metadata" = { - "labels" = { - "addonmanager.kubernetes.io/mode" = "Reconcile"; - 
"kubernetes.io/cluster-service" = "true"; - }; - "name" = "kube-dns"; - "namespace" = "kube-system"; - }; - }; - "kubedns-svc" = { - "apiVersion" = "v1"; - "kind" = "Service"; - "metadata" = { - "labels" = { - "addonmanager.kubernetes.io/mode" = "Reconcile"; - "k8s-app" = "kube-dns"; - "kubernetes.io/cluster-service" = "true"; - "kubernetes.io/name" = "KubeDNS"; - }; - "name" = "kube-dns"; - "namespace" = "kube-system"; - }; - "spec" = { - "clusterIP" = "${cfg.dns.serverIp}"; - "ports" = [{ - "name" = "dns"; - "port" = 53; - "protocol" = "UDP"; - } { - "name" = "dns-tcp"; - "port" = 53; - "protocol" = "TCP"; - }]; - "selector" = { - "k8s-app" = "kube-dns"; - }; - }; - }; -} diff --git a/nixos/release.nix b/nixos/release.nix index a200535f3f4a..ac7755a160f4 100644 --- a/nixos/release.nix +++ b/nixos/release.nix @@ -271,7 +271,7 @@ in rec { tests.kernel-latest = callTest tests/kernel-latest.nix {}; tests.kernel-lts = callTest tests/kernel-lts.nix {}; tests.keystone = callTest tests/keystone.nix {}; - tests.kubernetes = hydraJob (import tests/kubernetes.nix { system = "x86_64-linux"; }); + tests.kubernetes = hydraJob (import tests/kubernetes/default.nix { system = "x86_64-linux"; }); tests.latestKernel.login = callTest tests/login.nix { latestKernel = true; }; tests.ldap = callTest tests/ldap.nix {}; #tests.lightdm = callTest tests/lightdm.nix {}; diff --git a/nixos/tests/kubernetes.nix b/nixos/tests/kubernetes.nix deleted file mode 100644 index dcd25e211971..000000000000 --- a/nixos/tests/kubernetes.nix +++ /dev/null @@ -1,409 +0,0 @@ -{ system ? builtins.currentSystem }: - -with import ../lib/testing.nix { inherit system; }; -with import ../lib/qemu-flags.nix; -with pkgs.lib; - -let - redisPod = pkgs.writeText "redis-master-pod.json" (builtins.toJSON { - kind = "Pod"; - apiVersion = "v1"; - metadata.name = "redis"; - metadata.labels.name = "redis"; - spec.containers = [{ - name = "redis"; - image = "redis"; - args = ["--bind" "0.0.0.0"]; - imagePullPolicy = "Never"; - ports = [{ - name = "redis-server"; - containerPort = 6379; - }]; - }]; - }); - - redisService = pkgs.writeText "redis-service.json" (builtins.toJSON { - kind = "Service"; - apiVersion = "v1"; - metadata.name = "redis"; - spec = { - ports = [{port = 6379; targetPort = 6379;}]; - selector = {name = "redis";}; - }; - }); - - redisImage = pkgs.dockerTools.buildImage { - name = "redis"; - tag = "latest"; - contents = pkgs.redis; - config.Entrypoint = "/bin/redis-server"; - }; - - testSimplePod = '' - $kubernetes->execute("docker load < ${redisImage}"); - $kubernetes->waitUntilSucceeds("kubectl create -f ${redisPod}"); - $kubernetes->succeed("kubectl create -f ${redisService}"); - $kubernetes->waitUntilSucceeds("kubectl get pod redis | grep Running"); - $kubernetes->succeed("nc -z \$\(dig \@10.10.0.1 redis.default.svc.cluster.local +short\) 6379"); - ''; -in { - # This test runs kubernetes on a single node - trivial = makeTest { - name = "kubernetes-trivial"; - - nodes = { - kubernetes = - { config, pkgs, lib, nodes, ... 
}: - { - virtualisation.memorySize = 768; - virtualisation.diskSize = 2048; - - programs.bash.enableCompletion = true; - environment.systemPackages = with pkgs; [ netcat bind ]; - - services.kubernetes.roles = ["master" "node"]; - virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0"; - - networking.bridges.cbr0.interfaces = []; - networking.interfaces.cbr0 = {}; - }; - }; - - testScript = '' - startAll; - - $kubernetes->waitUntilSucceeds("kubectl get nodes | grep kubernetes | grep Ready"); - - ${testSimplePod} - ''; - }; - - cluster = let - runWithOpenSSL = file: cmd: pkgs.runCommand file { - buildInputs = [ pkgs.openssl ]; - } cmd; - - ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048"; - ca_pem = runWithOpenSSL "ca.pem" '' - openssl req \ - -x509 -new -nodes -key ${ca_key} \ - -days 10000 -out $out -subj "/CN=etcd-ca" - ''; - etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048"; - etcd_csr = runWithOpenSSL "etcd.csr" '' - openssl req \ - -new -key ${etcd_key} \ - -out $out -subj "/CN=etcd" \ - -config ${openssl_cnf} - ''; - etcd_cert = runWithOpenSSL "etcd.pem" '' - openssl x509 \ - -req -in ${etcd_csr} \ - -CA ${ca_pem} -CAkey ${ca_key} \ - -CAcreateserial -out $out \ - -days 365 -extensions v3_req \ - -extfile ${openssl_cnf} - ''; - - etcd_client_key = runWithOpenSSL "etcd-client-key.pem" - "openssl genrsa -out $out 2048"; - - etcd_client_csr = runWithOpenSSL "etcd-client-key.pem" '' - openssl req \ - -new -key ${etcd_client_key} \ - -out $out -subj "/CN=etcd-client" \ - -config ${client_openssl_cnf} - ''; - - etcd_client_cert = runWithOpenSSL "etcd-client.crt" '' - openssl x509 \ - -req -in ${etcd_client_csr} \ - -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \ - -out $out -days 365 -extensions v3_req \ - -extfile ${client_openssl_cnf} - ''; - - apiserver_key = runWithOpenSSL "apiserver-key.pem" "openssl genrsa -out $out 2048"; - - apiserver_csr = runWithOpenSSL "apiserver.csr" '' - openssl req \ - -new -key ${apiserver_key} \ - -out $out -subj "/CN=kube-apiserver" \ - -config ${apiserver_cnf} - ''; - - apiserver_cert = runWithOpenSSL "apiserver.pem" '' - openssl x509 \ - -req -in ${apiserver_csr} \ - -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \ - -out $out -days 365 -extensions v3_req \ - -extfile ${apiserver_cnf} - ''; - - worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048"; - - worker_csr = runWithOpenSSL "worker.csr" '' - openssl req \ - -new -key ${worker_key} \ - -out $out -subj "/CN=kube-worker" \ - -config ${worker_cnf} - ''; - - worker_cert = runWithOpenSSL "worker.pem" '' - openssl x509 \ - -req -in ${worker_csr} \ - -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \ - -out $out -days 365 -extensions v3_req \ - -extfile ${worker_cnf} - ''; - - openssl_cnf = pkgs.writeText "openssl.cnf" '' - [req] - req_extensions = v3_req - distinguished_name = req_distinguished_name - [req_distinguished_name] - [ v3_req ] - basicConstraints = CA:FALSE - keyUsage = digitalSignature, keyEncipherment - extendedKeyUsage = serverAuth - subjectAltName = @alt_names - [alt_names] - DNS.1 = etcd1 - DNS.2 = etcd2 - DNS.3 = etcd3 - IP.1 = 127.0.0.1 - ''; - - client_openssl_cnf = pkgs.writeText "client-openssl.cnf" '' - [req] - req_extensions = v3_req - distinguished_name = req_distinguished_name - [req_distinguished_name] - [ v3_req ] - basicConstraints = CA:FALSE - keyUsage = digitalSignature, keyEncipherment - extendedKeyUsage = clientAuth - ''; - - apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" '' - 
[req] - req_extensions = v3_req - distinguished_name = req_distinguished_name - [req_distinguished_name] - [ v3_req ] - basicConstraints = CA:FALSE - keyUsage = nonRepudiation, digitalSignature, keyEncipherment - subjectAltName = @alt_names - [alt_names] - DNS.1 = kubernetes - DNS.2 = kubernetes.default - DNS.3 = kubernetes.default.svc - DNS.4 = kubernetes.default.svc.cluster.local - IP.1 = 10.10.10.1 - ''; - - worker_cnf = pkgs.writeText "worker-openssl.cnf" '' - [req] - req_extensions = v3_req - distinguished_name = req_distinguished_name - [req_distinguished_name] - [ v3_req ] - basicConstraints = CA:FALSE - keyUsage = nonRepudiation, digitalSignature, keyEncipherment - subjectAltName = @alt_names - [alt_names] - DNS.1 = kubeWorker1 - DNS.2 = kubeWorker2 - ''; - - etcdNodeConfig = { - virtualisation.memorySize = 128; - - services = { - etcd = { - enable = true; - keyFile = etcd_key; - certFile = etcd_cert; - trustedCaFile = ca_pem; - peerClientCertAuth = true; - listenClientUrls = ["https://0.0.0.0:2379"]; - listenPeerUrls = ["https://0.0.0.0:2380"]; - }; - }; - - environment.variables = { - ETCDCTL_CERT_FILE = "${etcd_client_cert}"; - ETCDCTL_KEY_FILE = "${etcd_client_key}"; - ETCDCTL_CA_FILE = "${ca_pem}"; - ETCDCTL_PEERS = "https://127.0.0.1:2379"; - }; - - networking.firewall.allowedTCPPorts = [ 2379 2380 ]; - }; - - kubeConfig = { - virtualisation.diskSize = 2048; - programs.bash.enableCompletion = true; - - services.flannel = { - enable = true; - network = "10.10.0.0/16"; - iface = "eth1"; - etcd = { - endpoints = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"]; - keyFile = etcd_client_key; - certFile = etcd_client_cert; - caFile = ca_pem; - }; - }; - - # vxlan - networking.firewall.allowedUDPPorts = [ 8472 ]; - - systemd.services.docker.after = ["flannel.service"]; - systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/subnet.env"; - virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --bip $FLANNEL_SUBNET"; - - services.kubernetes.verbose = true; - services.kubernetes.etcd = { - servers = ["https://etcd1:2379" "https://etcd2:2379" "https://etcd3:2379"]; - keyFile = etcd_client_key; - certFile = etcd_client_cert; - caFile = ca_pem; - }; - - environment.systemPackages = [ pkgs.bind pkgs.tcpdump pkgs.utillinux ]; - }; - - kubeMasterConfig = {pkgs, ...}: { - require = [kubeConfig]; - - # kube apiserver - networking.firewall.allowedTCPPorts = [ 443 ]; - - virtualisation.memorySize = 512; - - services.kubernetes = { - roles = ["master"]; - scheduler.leaderElect = true; - controllerManager.leaderElect = true; - - apiserver = { - publicAddress = "0.0.0.0"; - advertiseAddress = "192.168.1.8"; - tlsKeyFile = apiserver_key; - tlsCertFile = apiserver_cert; - clientCaFile = ca_pem; - kubeletClientCaFile = ca_pem; - kubeletClientKeyFile = worker_key; - kubeletClientCertFile = worker_cert; - }; - }; - }; - - kubeWorkerConfig = { pkgs, ... }: { - require = [kubeConfig]; - - virtualisation.memorySize = 512; - - # kubelet - networking.firewall.allowedTCPPorts = [ 10250 ]; - - services.kubernetes = { - roles = ["node"]; - kubeconfig = { - server = "https://kubernetes:443"; - caFile = ca_pem; - certFile = worker_cert; - keyFile = worker_key; - }; - kubelet = { - tlsKeyFile = worker_key; - tlsCertFile = worker_cert; - }; - }; - }; - in makeTest { - name = "kubernetes-cluster"; - - nodes = { - etcd1 = { config, pkgs, nodes, ... 
}: { - require = [etcdNodeConfig]; - services.etcd = { - advertiseClientUrls = ["https://etcd1:2379"]; - initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"]; - initialAdvertisePeerUrls = ["https://etcd1:2380"]; - }; - }; - - etcd2 = { config, pkgs, ... }: { - require = [etcdNodeConfig]; - services.etcd = { - advertiseClientUrls = ["https://etcd2:2379"]; - initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"]; - initialAdvertisePeerUrls = ["https://etcd2:2380"]; - }; - }; - - etcd3 = { config, pkgs, ... }: { - require = [etcdNodeConfig]; - services.etcd = { - advertiseClientUrls = ["https://etcd3:2379"]; - initialCluster = ["etcd1=https://etcd1:2380" "etcd2=https://etcd2:2380" "etcd3=https://etcd3:2380"]; - initialAdvertisePeerUrls = ["https://etcd3:2380"]; - }; - }; - - kubeMaster1 = { config, pkgs, lib, nodes, ... }: { - require = [kubeMasterConfig]; - }; - - kubeMaster2 = { config, pkgs, lib, nodes, ... }: { - require = [kubeMasterConfig]; - }; - - # Kubernetes TCP load balancer - kubernetes = { config, pkgs, ... }: { - # kubernetes - networking.firewall.allowedTCPPorts = [ 443 ]; - - services.haproxy.enable = true; - services.haproxy.config = '' - global - log 127.0.0.1 local0 notice - user haproxy - group haproxy - - defaults - log global - retries 2 - timeout connect 3000 - timeout server 5000 - timeout client 5000 - - listen kubernetes - bind 0.0.0.0:443 - mode tcp - option ssl-hello-chk - balance roundrobin - server kube-master-1 kubeMaster1:443 check - server kube-master-2 kubeMaster2:443 check - ''; - }; - - kubeWorker1 = { config, pkgs, lib, nodes, ... }: { - require = [kubeWorkerConfig]; - }; - - kubeWorker2 = { config, pkgs, lib, nodes, ... }: { - require = [kubeWorkerConfig]; - }; - }; - - testScript = '' - startAll; - - ${testSimplePod} - ''; - }; -} diff --git a/nixos/tests/kubernetes/default.nix b/nixos/tests/kubernetes/default.nix index 6ba4f1904ea7..2b61980349eb 100644 --- a/nixos/tests/kubernetes/default.nix +++ b/nixos/tests/kubernetes/default.nix @@ -1,7 +1,7 @@ -{ }: +{ system ? 
builtins.currentSystem }: { - kubernetes-singlenode = import ./singlenode.nix {}; - kubernetes-multinode-kubectl = import ./multinode-kubectl.nix {}; - kubernetes-rbac = import ./rbac.nix {}; - kubernetes-dns = import ./dns.nix {}; + kubernetes-singlenode = import ./singlenode.nix { inherit system; }; + kubernetes-multinode-kubectl = import ./multinode-kubectl.nix { inherit system; }; + kubernetes-rbac = import ./rbac.nix { inherit system; }; + kubernetes-dns = import ./dns.nix { inherit system; }; } diff --git a/nixos/tests/kubernetes/kubernetes-master.nix b/nixos/tests/kubernetes/kubernetes-master.nix index b9577fa0964b..15e7e52e4832 100644 --- a/nixos/tests/kubernetes/kubernetes-master.nix +++ b/nixos/tests/kubernetes/kubernetes-master.nix @@ -62,87 +62,6 @@ in portalNet = "10.1.10.0/24"; # --service-cluster-ip-range runtimeConfig = ""; /*extraOpts = "--v=2";*/ - authorizationMode = ["ABAC"]; - authorizationPolicy = [ - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kubecfg"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kubelet"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kube-worker"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kube_proxy"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "client"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - group = "system:serviceaccounts"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - group = "system:authenticated"; - readonly = true; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - ]; }; }; } diff --git a/nixos/tests/kubernetes/multinode-kubectl.nix b/nixos/tests/kubernetes/multinode-kubectl.nix index 97108163d2cc..4ea4c272b225 100644 --- a/nixos/tests/kubernetes/multinode-kubectl.nix +++ b/nixos/tests/kubernetes/multinode-kubectl.nix @@ -84,9 +84,7 @@ in makeTest { { virtualisation.memorySize = 768; virtualisation.diskSize = 4096; - # networking.hostName = mkForce "master"; networking.interfaces.eth1.ip4 = mkForce [{address = servers.master; prefixLength = 24;}]; - # networking.nat.externalIP = "192.168.1.1"; networking.primaryIPAddress = mkForce servers.master; } (import ./kubernetes-common.nix { inherit pkgs config certs servers; }) @@ -99,9 +97,7 @@ in makeTest { { virtualisation.memorySize = 768; virtualisation.diskSize = 4096; - # networking.hostName = mkForce "one"; networking.interfaces.eth1.ip4 = mkForce [{address = servers.one; prefixLength = 24;}]; - # networking.nat.externalIP = "192.168.1.2"; networking.primaryIPAddress = mkForce servers.one; services.kubernetes.roles = ["node"]; } @@ -114,9 +110,7 @@ in makeTest { { virtualisation.memorySize = 768; virtualisation.diskSize = 4096; - # 
networking.hostName = mkForce "two"; networking.interfaces.eth1.ip4 = mkForce [{address = servers.two; prefixLength = 24;}]; - # networking.nat.externalIP = "192.168.1.3"; networking.primaryIPAddress = mkForce servers.two; services.kubernetes.roles = ["node"]; } @@ -129,9 +123,7 @@ in makeTest { { virtualisation.memorySize = 768; virtualisation.diskSize = 4096; - # networking.hostName = mkForce "three"; networking.interfaces.eth1.ip4 = mkForce [{address = servers.three; prefixLength = 24;}]; - # networking.nat.externalIP = "192.168.1.4"; networking.primaryIPAddress = mkForce servers.three; services.kubernetes.roles = ["node"]; } diff --git a/nixos/tests/kubernetes/rbac.nix b/nixos/tests/kubernetes/rbac.nix index 6388fe7ceb95..dfb55e7e0580 100644 --- a/nixos/tests/kubernetes/rbac.nix +++ b/nixos/tests/kubernetes/rbac.nix @@ -39,16 +39,16 @@ let }); roRole = pkgs.writeText "ro-role.json" (builtins.toJSON { - "apiVersion" = "rbac.authorization.k8s.io/v1beta1"; - "kind" = "Role"; - "metadata" = { - "name" = "pod-reader"; - "namespace" = "default"; + apiVersion = "rbac.authorization.k8s.io/v1beta1"; + kind = "Role"; + metadata = { + name = "pod-reader"; + namespace = "default"; }; - "rules" = [{ - "apiGroups" = [""]; - "resources" = ["pods"]; - "verbs" = ["get" "list" "watch"]; + rules = [{ + apiGroups = [""]; + resources = ["pods"]; + verbs = ["get" "list" "watch"]; }]; }); @@ -110,7 +110,7 @@ let ''; in makeTest { - name = "kubernetes-multinode-rbac"; + name = "kubernetes-rbac"; nodes = { master = @@ -121,64 +121,6 @@ in makeTest { virtualisation.diskSize = 4096; networking.interfaces.eth1.ip4 = mkForce [{address = servers.master; prefixLength = 24;}]; networking.primaryIPAddress = mkForce servers.master; - services.kubernetes.apiserver.authorizationMode = mkForce ["ABAC" "RBAC"]; - services.kubernetes.apiserver.authorizationPolicy = mkForce [ - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kubecfg"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kubelet"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kube-worker"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "kube_proxy"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - { - apiVersion = "abac.authorization.kubernetes.io/v1beta1"; - kind = "Policy"; - spec = { - user = "client"; - namespace = "*"; - resource = "*"; - apiGroup = "*"; - nonResourcePath = "*"; - }; - } - ]; } (import ./kubernetes-common.nix { inherit pkgs config certs servers; }) (import ./kubernetes-master.nix { inherit pkgs config certs; }) diff --git a/nixos/tests/kubernetes/singlenode-kubectl.nix b/nixos/tests/kubernetes/singlenode-kubectl.nix deleted file mode 100644 index d3a78a06e430..000000000000 --- a/nixos/tests/kubernetes/singlenode-kubectl.nix +++ /dev/null @@ -1,97 +0,0 @@ -{ system ? 
builtins.currentSystem }: - -with import ../../lib/testing.nix { inherit system; }; -with import ../../lib/qemu-flags.nix; -with pkgs.lib; - -let - certs = import ./certs.nix { servers = {}; }; - - kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON { - kind = "Pod"; - apiVersion = "v1"; - metadata.name = "kubectl"; - metadata.labels.name = "kubectl"; - spec.containers = [{ - name = "kubectl"; - image = "kubectl:latest"; - command = ["${pkgs.busybox}/bin/tail" "-f"]; - imagePullPolicy = "Never"; - tty = true; - }]; - }); - - kubectlImage = pkgs.dockerTools.buildImage { - name = "kubectl"; - tag = "latest"; - contents = [ pkgs.kubernetes pkgs.busybox certs kubeconfig ]; - config.Entrypoint = "${pkgs.busybox}/bin/sh"; - }; - - kubeconfig = pkgs.writeTextDir "kubeconfig.json" (builtins.toJSON { - apiVersion = "v1"; - kind = "Config"; - clusters = [{ - name = "local"; - cluster.certificate-authority = "/ca.pem"; - cluster.server = "https://192.168.1.1:4443/"; - }]; - users = [{ - name = "kubelet"; - user = { - client-certificate = "/admin.crt"; - client-key = "/admin-key.pem"; - }; - }]; - contexts = [{ - context = { - cluster = "local"; - user = "kubelet"; - }; - current-context = "kubelet-context"; - }]; - }); - - test = '' - $kubernetes->execute("docker load < ${kubectlImage}"); - $kubernetes->waitUntilSucceeds("kubectl create -f ${kubectlPod} || kubectl apply -f ${kubectlPod}"); - $kubernetes->waitUntilSucceeds("kubectl get pod kubectl | grep Running"); - - # FIXME: this test fails, for some reason it can not reach host ip address - $kubernetes->succeed("kubectl exec -ti kubectl -- kubectl --kubeconfig=/kubeconfig.json version"); - ''; -in makeTest { - name = "kubernetes-singlenode-kubectl"; - - nodes = { - kubernetes = - { config, pkgs, lib, nodes, ... }: - { - virtualisation.memorySize = 768; - virtualisation.diskSize = 4096; - - programs.bash.enableCompletion = true; - environment.systemPackages = with pkgs; [ netcat bind ]; - - services.kubernetes.roles = ["master" "node"]; - services.kubernetes.apiserver.securePort = 4443; - services.kubernetes.dns.port = 4453; - services.kubernetes.clusterCidr = "10.0.0.0/8"; - virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false -b cbr0"; - - networking.interfaces.eth1.ip4 = mkForce [{address = "192.168.1.1"; prefixLength = 24;}]; - networking.primaryIPAddress = mkForce "192.168.1.1"; - networking.bridges.cbr0.interfaces = []; - networking.interfaces.cbr0 = {}; - - services.dnsmasq.enable = true; - services.dnsmasq.servers = ["/${config.services.kubernetes.dns.domain}/127.0.0.1#4453"]; - }; - }; - - testScript = '' - startAll; - - ${test} - ''; -}
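
For context, a minimal sketch of how a configuration might consume the options this patch touches. The option names (services.kubernetes.roles, services.kubernetes.apiserver.authorizationMode, services.kubernetes.apiserver.authorizationRBACSuperAdmin, services.kubernetes.kubelet.taints) are taken verbatim from the hunks above; the concrete values are illustrative only, and the taint submodule's key/value fields are assumed from the taints mapping in default.nix since their definitions fall outside the shown hunks:

  { config, pkgs, ... }:
  {
    services.kubernetes = {
      # roles as used by the tests above: a combined master/node machine.
      roles = [ "master" "node" ];

      apiserver = {
        # "ABAC" "RBAC" is the new module default; stated explicitly for illustration.
        authorizationMode = [ "ABAC" "RBAC" ];
        # Renamed in this patch from the misspelled autorizationRBACSuperAdmin.
        authorizationRBACSuperAdmin = "admin";
      };

      kubelet.taints = {
        # effect is now checked against the enum NoSchedule/PreferNoSchedule/NoExecute.
        example-taint = {
          key = "dedicated";        # assumed submodule field, referenced by the taints mapping
          value = "infra";          # assumed submodule field, referenced by the taints mapping
          effect = "NoSchedule";
        };
      };
    };
  }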