# nixos/modules/services/cluster/kubernetes/controller-manager.nix

{ config, lib, pkgs, ... }:
with lib;
let
  top = config.services.kubernetes;
  cfg = top.controllerManager;
in
{
  ###### interface
  options.services.kubernetes.controllerManager = with lib.types; {

    allocateNodeCIDRs = mkOption {
      description = "Whether to automatically allocate CIDR ranges for cluster nodes.";
      default = true;
      type = bool;
    };

    bindAddress = mkOption {
      description = "Kubernetes controller manager listening address.";
      default = "127.0.0.1";
      type = str;
    };

    clusterCidr = mkOption {
      description = "Kubernetes CIDR range for pods in the cluster.";
      default = top.clusterCidr;
      type = nullOr str;
    };

    enable = mkEnableOption "Kubernetes controller manager";

    extraOpts = mkOption {
      description = "Kubernetes controller manager extra command line options.";
      default = "";
      type = str;
    };

    featureGates = mkOption {
      description = "List of feature gates to enable.";
      default = top.featureGates;
      type = listOf str;
    };
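
    # Each listed gate is rendered as "<name>=true" in the --feature-gates
    # flag built below, so gates can only be switched on through this
    # option, not off.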

    insecurePort = mkOption {
      description = "Kubernetes controller manager insecure listening port.";
      default = 0;
      type = int;
    };
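
    # Upstream's default insecure port for kube-controller-manager is 10252;
    # a value of 0 disables the insecure HTTP endpoint entirely, which is
    # usually what you want.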

    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
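
    # mkKubeConfigOptions generates the usual per-component kubeconfig
    # options (server, caFile, certFile, keyFile); server is defaulted to
    # the apiserver address at the bottom of this module.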

    leaderElect = mkOption {
      description = "Whether to start leader election before executing the main loop.";
      type = bool;
      default = true;
    };

    rootCaFile = mkOption {
      description = ''
        Kubernetes controller manager certificate authority file included in
        service account's token secret.
      '';
      default = top.caFile;
      type = nullOr path;
    };

    securePort = mkOption {
      description = "Kubernetes controller manager secure listening port.";
      default = 10252;
      type = int;
    };

    serviceAccountKeyFile = mkOption {
      description = ''
        Kubernetes controller manager PEM-encoded private RSA key file used to
        sign service account tokens.
      '';
      default = null;
      type = nullOr path;
    };

    tlsCertFile = mkOption {
      description = "Kubernetes controller-manager certificate file.";
      default = null;
      type = nullOr path;
    };

    tlsKeyFile = mkOption {
      description = "Kubernetes controller-manager private key file.";
      default = null;
      type = nullOr path;
    };

    verbosity = mkOption {
      description = ''
        Optional glog verbosity level for logging statements. See
        <link xlink:href="https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md"/>
      '';
      default = null;
      type = nullOr int;
    };
  };
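
  # A minimal sketch of enabling this component directly (the key path is
  # illustrative; in most setups services.kubernetes.roles = [ "master" ]
  # wires this up for you):
  #
  #   services.kubernetes.controllerManager = {
  #     enable = true;
  #     serviceAccountKeyFile = "/var/lib/kubernetes/secrets/service-account.pem";
  #   };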

  ###### implementation
  config = mkIf cfg.enable {
    systemd.services.kube-controller-manager = {
      description = "Kubernetes Controller Manager Service";
      wantedBy = [ "kube-control-plane-online.target" ];
      after = [ "kube-apiserver.service" ];
      before = [ "kube-control-plane-online.target" ];
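
      # Block until the apiserver answers on /api, so the unit does not
      # start (and fail) before its dependency is actually reachable.
      # mkWaitCurl polls with a 1s sleep between attempts and presents the
      # unit's client certificate when its environment provides one.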
      preStart = ''
        ${top.lib.mkWaitCurl (with config.systemd.services.kube-controller-manager; {
          sleep = 1;
          path = "/api";
          cacert = top.caFile;
        } // optionalAttrs (environment ? cert) { inherit (environment) cert key; })}
      '';

      serviceConfig = {
        RestartSec = "30s";
        Restart = "on-failure";
        Slice = "kubernetes.slice";
        ExecStart = ''${top.package}/bin/kube-controller-manager \
          --allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
          --bind-address=${cfg.bindAddress} \
          ${optionalString (cfg.clusterCidr != null)
            "--cluster-cidr=${cfg.clusterCidr}"} \
          ${optionalString (cfg.featureGates != [])
            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
          --leader-elect=${boolToString cfg.leaderElect} \
          ${optionalString (cfg.rootCaFile != null)
            "--root-ca-file=${cfg.rootCaFile}"} \
          --port=${toString cfg.insecurePort} \
          --secure-port=${toString cfg.securePort} \
          ${optionalString (cfg.serviceAccountKeyFile != null)
            "--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
          ${optionalString (cfg.tlsCertFile != null)
            "--tls-cert-file=${cfg.tlsCertFile}"} \
          ${optionalString (cfg.tlsKeyFile != null)
            "--tls-key-file=${cfg.tlsKeyFile}"} \
          ${optionalString (elem "RBAC" top.apiserver.authorizationMode)
            "--use-service-account-credentials"} \
          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
          ${cfg.extraOpts}
        '';
        WorkingDirectory = top.dataDir;
        User = "kubernetes";
        Group = "kubernetes";
      };
      path = top.path;
    };
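
    # Two certificates are requested from the cluster PKI: a serving cert
    # for the secure port, and a client cert whose CN,
    # "system:kube-controller-manager", matches the user that the
    # apiserver's RBAC rules expect for this component. certmgr restarts
    # the service whenever either certificate is reissued.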
    services.kubernetes.pki.certs = with top.lib; {
      controllerManager = mkCert {
        name = "kube-controller-manager";
        CN = "kube-controller-manager";
        action = "systemctl restart kube-controller-manager.service";
      };
      controllerManagerClient = mkCert {
        name = "kube-controller-manager-client";
        CN = "system:kube-controller-manager";
        action = "systemctl restart kube-controller-manager.service";
      };
    };

    services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
  };
}