1
0
Fork 1
mirror of https://github.com/NixOS/nixpkgs.git synced 2025-03-06 23:31:34 +00:00

nixos/k3s: refactor multi-node test

The refactoring parallelizes preliminary tasks like the node start and
import of the pause image to speed up execution of the test. It also
makes the usage of extraFlags uniform across all nodes.
This commit is contained in:
Robert Rose 2024-11-11 17:05:14 +01:00
parent 7efd5beaa1
commit cd96421ea9

View file

@@ -16,10 +16,10 @@ import ../make-test-python.nix (
socat
];
};
pauseImage = pkgs.dockerTools.streamLayeredImage {
pauseImage = pkgs.dockerTools.buildImage {
name = "test.local/pause";
tag = "local";
contents = imageEnv;
copyToRoot = imageEnv;
config.Entrypoint = [
"/bin/tini"
"--"
@@ -75,6 +75,7 @@ import ../make-test-python.nix (
enable = true;
role = "server";
package = k3s;
images = [ pauseImage ];
clusterInit = true;
extraFlags = [
"--disable coredns"
@@ -117,23 +118,17 @@ import ../make-test-python.nix (
inherit tokenFile;
enable = true;
package = k3s;
images = [ pauseImage ];
serverAddr = "https://192.168.1.1:6443";
clusterInit = false;
extraFlags = builtins.toString [
"--disable"
"coredns"
"--disable"
"local-storage"
"--disable"
"metrics-server"
"--disable"
"servicelb"
"--disable"
"traefik"
"--node-ip"
"192.168.1.3"
"--pause-image"
"test.local/pause:local"
extraFlags = [
"--disable coredns"
"--disable local-storage"
"--disable metrics-server"
"--disable servicelb"
"--disable traefik"
"--node-ip 192.168.1.3"
"--pause-image test.local/pause:local"
];
};
networking.firewall.allowedTCPPorts = [
@@ -163,12 +158,11 @@ import ../make-test-python.nix (
enable = true;
role = "agent";
package = k3s;
images = [ pauseImage ];
serverAddr = "https://192.168.1.3:6443";
extraFlags = lib.concatStringsSep " " [
"--pause-image"
"test.local/pause:local"
"--node-ip"
"192.168.1.2"
extraFlags = [
"--pause-image test.local/pause:local"
"--node-ip 192.168.1.2"
];
};
networking.firewall.allowedTCPPorts = [ 6443 ];
@@ -185,52 +179,42 @@ import ../make-test-python.nix (
};
};
testScript = ''
machines = [server, server2, agent]
for m in machines:
m.start()
m.wait_for_unit("k3s")
testScript = # python
''
start_all()
is_aarch64 = "${toString pkgs.stdenv.hostPlatform.isAarch64}" == "1"
machines = [server, server2, agent]
for m in machines:
m.wait_for_unit("k3s")
# wait for the agent to show up
server.wait_until_succeeds("k3s kubectl get node agent")
# wait for the agent to show up
server.wait_until_succeeds("k3s kubectl get node agent")
for m in machines:
m.succeed("k3s check-config")
m.succeed(
"${pauseImage} | k3s ctr image import -"
)
for m in machines:
m.succeed("k3s check-config")
server.succeed("k3s kubectl cluster-info")
# Also wait for our service account to show up; it takes a sec
server.wait_until_succeeds("k3s kubectl get serviceaccount default")
server.succeed("k3s kubectl cluster-info")
# Also wait for our service account to show up; it takes a sec
server.wait_until_succeeds("k3s kubectl get serviceaccount default")
# Now create a pod on each node via a daemonset and verify they can talk to each other.
server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')
# Now create a pod on each node via a daemonset and verify they can talk to each other.
server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')
# Get pod IPs
pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]
# Get pod IPs
pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]
# Verify each server can ping each pod ip
for pod_ip in pod_ips:
server.succeed(f"ping -c 1 {pod_ip}")
agent.succeed(f"ping -c 1 {pod_ip}")
# Verify the pods can talk to each other
resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
assert resp.strip() == "server"
resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
assert resp.strip() == "server"
# Cleanup
server.succeed("k3s kubectl delete -f ${networkTestDaemonset}")
for m in machines:
m.shutdown()
'';
# Verify each server can ping each pod ip
for pod_ip in pod_ips:
server.succeed(f"ping -c 1 {pod_ip}")
server2.succeed(f"ping -c 1 {pod_ip}")
agent.succeed(f"ping -c 1 {pod_ip}")
# Verify the pods can talk to each other
for pod in pods:
resp = server.succeed(f"k3s kubectl exec {pod} -- socat TCP:{pod_ip}:8000 -")
assert resp.strip() == "server"
'';
meta.maintainers = lib.teams.k3s.members;
}