
nixos/nebula: Add enable option defaulting to true to Nebula networks

Morgan Jones 2021-04-10 16:38:44 -06:00 committed by Tim
parent 002fe4f19d
commit 064e0af80b
2 changed files with 44 additions and 10 deletions
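
With this change, a Nebula network can stay defined in a NixOS configuration but be switched off through the new services.nebula.networks.<name>.enable option, which defaults to true. A minimal usage sketch, assuming a hypothetical network called "office" with illustrative certificate paths and addresses (only the enable option itself comes from this commit):

  { ... }:
  {
    # The network stays declared, but while enable = false the module generates
    # no nebula@office service and no firewall, user, or group entries for it.
    services.nebula.networks.office = {
      enable = false;  # new option added by this commit; defaults to true
      ca = "/etc/nebula/ca.crt";
      cert = "/etc/nebula/office.crt";
      key = "/etc/nebula/office.key";
      isLighthouse = false;
      lighthouses = [ "10.1.0.1" ];
      staticHostMap = { "10.1.0.1" = [ "203.0.113.1:4242" ]; };
    };
  }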

View file

@@ -5,6 +5,7 @@ with lib;
 let
   cfg = config.services.nebula;
+  enabledNetworks = filterAttrs (n: v: v.enable) cfg.networks;
 
   format = pkgs.formats.yaml {};
@@ -20,6 +21,12 @@ in
         default = {};
         type = types.attrsOf (types.submodule {
           options = {
+            enable = mkOption {
+              type = types.bool;
+              default = true;
+              description = "Enable or disable this network.";
+            };
+
             package = mkOption {
               type = types.package;
               default = pkgs.nebula;
@@ -137,11 +144,11 @@ in
   };
 
   # Implementation
-  config = mkIf (cfg.networks != {}) {
-    systemd.services = mkMerge (lib.mapAttrsToList (netName: netCfg:
+  config = mkIf (enabledNetworks != {}) {
+    systemd.services = mkMerge (mapAttrsToList (netName: netCfg:
       let
         networkId = nameToId netName;
-        settings = lib.recursiveUpdate {
+        settings = recursiveUpdate {
           pki = {
             ca = netCfg.ca;
             cert = netCfg.cert;
@@ -188,25 +195,25 @@ in
           })
         ];
       };
-    }) cfg.networks);
+    }) enabledNetworks);
 
     # Open the chosen ports for UDP.
     networking.firewall.allowedUDPPorts =
-      lib.unique (lib.mapAttrsToList (netName: netCfg: netCfg.listen.port) cfg.networks);
+      unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks);
 
     # Create the service users and groups.
-    users.users = mkMerge (lib.mapAttrsToList (netName: netCfg:
+    users.users = mkMerge (mapAttrsToList (netName: netCfg:
       mkIf netCfg.tun.disable {
         ${nameToId netName} = {
           group = nameToId netName;
           description = "Nebula service user for network ${netName}";
           isSystemUser = true;
         };
-      }) cfg.networks);
+      }) enabledNetworks);
 
-    users.groups = mkMerge (lib.mapAttrsToList (netName: netCfg:
+    users.groups = mkMerge (mapAttrsToList (netName: netCfg:
       mkIf netCfg.tun.disable {
         ${nameToId netName} = {};
-      }) cfg.networks);
+      }) enabledNetworks);
   };
 }

View file

@@ -88,6 +88,26 @@ in
         }];
 
         services.nebula.networks.smoke = {
+          enable = true;
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+    node5 = { ... } @ args:
+      makeNebulaNode args "node5" {
+        networking.interfaces.eth1.ipv4.addresses = [{
+          address = "192.168.1.5";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          enable = false;
           staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
           isLighthouse = false;
           lighthouses = [ "10.0.100.1" ];
@@ -170,9 +190,16 @@ in
     ${signKeysFor "node4" "10.0.100.4/24"}
     ${restartAndCheckNebula "node4" "10.0.100.4"}
 
-    # The lighthouse can ping node2 and node3
+    # Create keys for node5's nebula service and test that it does not come up.
+    ${setUpPrivateKey "node5"}
+    ${signKeysFor "node5" "10.0.100.5/24"}
+    node5.fail("systemctl status nebula@smoke.service")
+    node5.fail("ping -c5 10.0.100.5")
+
+    # The lighthouse can ping node2 and node3 but not node5
     lighthouse.succeed("ping -c3 10.0.100.2")
     lighthouse.succeed("ping -c3 10.0.100.3")
+    lighthouse.fail("ping -c3 10.0.100.5")
 
     # node2 can ping the lighthouse, but not node3 because of its inbound firewall
     node2.succeed("ping -c3 10.0.100.1")