nixos/tests/k3s: nixpkgs-fmt
commit 83ed4b46fd (parent 8bf9500e65)
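The change itself is mechanical: the two test expressions below were run through the nixpkgs-fmt formatter and the result committed. A minimal sketch of reproducing that from a nixpkgs checkout follows; the glob is hypothetical, since the commit message only says the files live under nixos/tests/k3s:

  # Hypothetical paths; nixpkgs-fmt rewrites the files it is given in place.
  nix-shell -p nixpkgs-fmt --run 'nixpkgs-fmt nixos/tests/k3s*.nix'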
@@ -1,80 +1,80 @@
import ./make-test-python.nix ({ pkgs, ... }:

  let
    # A suitable k3s pause image, also used for the test pod
    pauseImage = pkgs.dockerTools.buildImage {
      name = "test.local/pause";
      tag = "local";
      contents = with pkgs; [ tini coreutils busybox ];
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
    # Don't use the default service account because there's a race where it may
    # not be created yet; make our own instead.
    testPodYaml = pkgs.writeText "test.yml" ''
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: test
      ---
      apiVersion: v1
      kind: Pod
      metadata:
        name: test
      spec:
        serviceAccountName: test
        containers:
        - name: test
          image: test.local/pause:local
          imagePullPolicy: Never
          command: ["sh", "-c", "sleep inf"]
    '';
  in
  {
    name = "k3s";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ euank ];
    };

    machine = { pkgs, ... }: {
      environment.systemPackages = with pkgs; [ k3s gzip ];

      # k3s uses enough resources the default vm fails.
      virtualisation.memorySize = pkgs.lib.mkDefault 1536;
      virtualisation.diskSize = pkgs.lib.mkDefault 4096;

      services.k3s = {
        enable = true;
        role = "server";
        docker = true;
        # Slightly reduce resource usage
        extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local";
      };

      users.users = {
        noprivs = {
          isNormalUser = true;
          description = "Can't access k3s by default";
          password = "*";
        };
      };
    };

    testScript = ''
      start_all()

      machine.wait_for_unit("k3s")
      machine.succeed("k3s kubectl cluster-info")
      machine.fail("sudo -u noprivs k3s kubectl cluster-info")
      # machine.succeed("k3s check-config") # fails with the current nixos kernel config, uncomment once this passes

      machine.succeed(
        "zcat ${pauseImage} | docker load"
      )

      machine.succeed("k3s kubectl apply -f ${testPodYaml}")
      machine.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
      machine.succeed("k3s kubectl delete -f ${testPodYaml}")

      machine.shutdown()
    '';
  })
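For orientation, a NixOS VM test like the one above is normally built and run through the nixpkgs test framework rather than evaluated by hand. A sketch, assuming the test is exposed as the nixosTests.k3s attribute (the attribute path is an assumption, not something this commit shows):

  # From the root of a nixpkgs checkout: builds the VM, runs testScript,
  # and fails the build if any succeed()/fail() assertion does not hold.
  nix-build -A nixosTests.k3s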
@@ -1,78 +1,78 @@
import ./make-test-python.nix ({ pkgs, ... }:

  let
    # A suitable k3s pause image, also used for the test pod
    pauseImage = pkgs.dockerTools.buildImage {
      name = "test.local/pause";
      tag = "local";
      contents = with pkgs; [ tini coreutils busybox ];
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
    # Don't use the default service account because there's a race where it may
    # not be created yet; make our own instead.
    testPodYaml = pkgs.writeText "test.yml" ''
      apiVersion: v1
      kind: ServiceAccount
      metadata:
        name: test
      ---
      apiVersion: v1
      kind: Pod
      metadata:
        name: test
      spec:
        serviceAccountName: test
        containers:
        - name: test
          image: test.local/pause:local
          imagePullPolicy: Never
          command: ["sh", "-c", "sleep inf"]
    '';
  in
  {
    name = "k3s";
    meta = with pkgs.lib.maintainers; {
      maintainers = [ euank ];
    };

    machine = { pkgs, ... }: {
      environment.systemPackages = with pkgs; [ k3s gzip ];

      # k3s uses enough resources the default vm fails.
      virtualisation.memorySize = pkgs.lib.mkDefault 1536;
      virtualisation.diskSize = pkgs.lib.mkDefault 4096;

      services.k3s.enable = true;
      services.k3s.role = "server";
      services.k3s.package = pkgs.k3s;
      # Slightly reduce resource usage
      services.k3s.extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local";

      users.users = {
        noprivs = {
          isNormalUser = true;
          description = "Can't access k3s by default";
          password = "*";
        };
      };
    };

    testScript = ''
      start_all()

      machine.wait_for_unit("k3s")
      machine.succeed("k3s kubectl cluster-info")
      machine.fail("sudo -u noprivs k3s kubectl cluster-info")
      # machine.succeed("k3s check-config") # fails with the current nixos kernel config, uncomment once this passes

      machine.succeed(
        "zcat ${pauseImage} | k3s ctr image import -"
      )

      machine.succeed("k3s kubectl apply -f ${testPodYaml}")
      machine.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
      machine.succeed("k3s kubectl delete -f ${testPodYaml}")

      machine.shutdown()
    '';
  })