
Merge pull request #111518 from Jaculabilis/nebula

nixos/nebula: add basic module
Oleksii Filonenko 2021-04-21 11:17:30 +03:00 committed by GitHub
commit c2900f685f
4 changed files with 444 additions and 0 deletions
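For orientation, a minimal sketch of a host configuration using the new module, assembled from the options this PR defines (the network name "mynet" and all paths and addresses here are illustrative, not taken from the PR):

services.nebula.networks.mynet = {
  ca = "/etc/nebula/ca.crt";
  cert = "/etc/nebula/host.crt";
  key = "/etc/nebula/host.key";
  staticHostMap = { "10.0.100.1" = [ "203.0.113.1:4242" ]; };
  lighthouses = [ "10.0.100.1" ];
  firewall = {
    outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
    inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
  };
};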

nixos/modules/module-list.nix

@@ -731,6 +731,7 @@
./services/networking/nar-serve.nix
./services/networking/nat.nix
./services/networking/ndppd.nix
./services/networking/nebula.nix
./services/networking/networkmanager.nix
./services/networking/nextdns.nix
./services/networking/nftables.nix

nixos/modules/services/networking/nebula.nix Normal file

@@ -0,0 +1,219 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.nebula;
enabledNetworks = filterAttrs (n: v: v.enable) cfg.networks;
format = pkgs.formats.yaml {};
nameToId = netName: "nebula-${netName}";
in
{
# Interface
options = {
services.nebula = {
networks = mkOption {
description = "Nebula network definitions.";
default = {};
type = types.attrsOf (types.submodule {
options = {
enable = mkOption {
type = types.bool;
default = true;
description = "Enable or disable this network.";
};
package = mkOption {
type = types.package;
default = pkgs.nebula;
defaultText = "pkgs.nebula";
description = "Nebula derivation to use.";
};
ca = mkOption {
type = types.path;
description = "Path to the certificate authority certificate.";
example = "/etc/nebula/ca.crt";
};
cert = mkOption {
type = types.path;
description = "Path to the host certificate.";
example = "/etc/nebula/host.crt";
};
key = mkOption {
type = types.path;
description = "Path to the host key.";
example = "/etc/nebula/host.key";
};
staticHostMap = mkOption {
type = types.attrsOf (types.listOf types.str);
default = {};
description = ''
The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
'';
example = literalExample ''
{ "192.168.100.1" = [ "100.64.22.11:4242" ]; }
'';
};
isLighthouse = mkOption {
type = types.bool;
default = false;
description = "Whether this node is a lighthouse.";
};
lighthouses = mkOption {
type = types.listOf types.str;
default = [];
description = ''
List of IPs of lighthouse hosts this node should report to and query from. This should be empty on lighthouse
nodes. The IPs should be the lighthouse's Nebula IPs, not their external IPs.
'';
example = ''[ "192.168.100.1" ]'';
};
listen.host = mkOption {
type = types.str;
default = "0.0.0.0";
description = "IP address to listen on.";
};
listen.port = mkOption {
type = types.port;
default = 4242;
description = "Port number to listen on.";
};
tun.disable = mkOption {
type = types.bool;
default = false;
description = ''
When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root).
'';
};
tun.device = mkOption {
type = types.nullOr types.str;
default = null;
description = "Name of the tun device. Defaults to nebula.\${networkName}.";
};
firewall.outbound = mkOption {
type = types.listOf types.attrs;
default = [];
description = "Firewall rules for outbound traffic.";
example = ''[ { port = "any"; proto = "any"; host = "any"; } ]'';
};
firewall.inbound = mkOption {
type = types.listOf types.attrs;
default = [];
description = "Firewall rules for inbound traffic.";
example = ''[ { port = "any"; proto = "any"; host = "any"; } ]'';
};
settings = mkOption {
type = format.type;
default = {};
description = ''
Nebula configuration. Refer to
<link xlink:href="https://github.com/slackhq/nebula/blob/master/examples/config.yml"/>
for details on supported values.
'';
example = literalExample ''
{
lighthouse.dns = {
host = "0.0.0.0";
port = 53;
};
}
'';
};
};
});
};
};
};
# Implementation
config = mkIf (enabledNetworks != {}) {
systemd.services = mkMerge (mapAttrsToList (netName: netCfg:
let
networkId = nameToId netName;
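# Build the YAML config from the typed options, then merge the free-form
# settings on top with recursiveUpdate, so user-specified leaves override
# the generated defaults below.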
settings = recursiveUpdate {
pki = {
ca = netCfg.ca;
cert = netCfg.cert;
key = netCfg.key;
};
static_host_map = netCfg.staticHostMap;
lighthouse = {
am_lighthouse = netCfg.isLighthouse;
hosts = netCfg.lighthouses;
};
listen = {
host = netCfg.listen.host;
port = netCfg.listen.port;
};
tun = {
disabled = netCfg.tun.disable;
dev = if (netCfg.tun.device != null) then netCfg.tun.device else "nebula.${netName}";
};
firewall = {
inbound = netCfg.firewall.inbound;
outbound = netCfg.firewall.outbound;
};
} netCfg.settings;
configFile = format.generate "nebula-config-${netName}.yml" settings;
in
{
# Create systemd service for Nebula.
"nebula@${netName}" = {
description = "Nebula VPN service for ${netName}";
wants = [ "basic.target" ];
after = [ "basic.target" "network.target" ];
before = [ "sshd.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = mkMerge [
{
Type = "simple";
Restart = "always";
ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
}
# Nebula needs root to create the tun device; only when tun is disabled
# can the service drop to an unprivileged user.
(mkIf netCfg.tun.disable {
User = networkId;
Group = networkId;
})
];
};
}) enabledNetworks);
# Open the chosen ports for UDP.
networking.firewall.allowedUDPPorts =
unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks);
# Create the service users and groups.
users.users = mkMerge (mapAttrsToList (netName: netCfg:
mkIf netCfg.tun.disable {
${nameToId netName} = {
group = nameToId netName;
description = "Nebula service user for network ${netName}";
isSystemUser = true;
};
}) enabledNetworks);
users.groups = mkMerge (mapAttrsToList (netName: netCfg:
mkIf netCfg.tun.disable {
${nameToId netName} = {};
}) enabledNetworks);
};
}
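A consequence of the recursiveUpdate merge worth noting: a network can extend or override individual leaves of the generated YAML through settings without restating the rest. A small sketch, reusing the settings option's own example ("mynet" is a hypothetical network name):

services.nebula.networks.mynet.settings = {
  # Merged leaf-wise on top of the generated defaults.
  lighthouse.dns = {
    host = "0.0.0.0";
    port = 53;
  };
};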

nixos/tests/all-tests.nix

@@ -262,6 +262,7 @@ in
nat.standalone = handleTest ./nat.nix { withFirewall = false; };
ncdns = handleTest ./ncdns.nix {};
ndppd = handleTest ./ndppd.nix {};
nebula = handleTest ./nebula.nix {};
neo4j = handleTest ./neo4j.nix {};
netdata = handleTest ./netdata.nix {};
networking.networkd = handleTest ./networking.nix { networkd = true; };

nixos/tests/nebula.nix Normal file

@@ -0,0 +1,223 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: let
# We'll need to be able to trade cert files between nodes via scp.
inherit (import ./ssh-keys.nix pkgs)
snakeOilPrivateKey snakeOilPublicKey;
makeNebulaNode = { config, ... }: name: extraConfig: lib.mkMerge [
{
# Expose nebula for doing cert signing.
environment.systemPackages = [ pkgs.nebula ];
users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
services.openssh.enable = true;
services.nebula.networks.smoke = {
# Note that these paths won't exist when the machine is first booted.
ca = "/etc/nebula/ca.crt";
cert = "/etc/nebula/${name}.crt";
key = "/etc/nebula/${name}.key";
listen = { host = "0.0.0.0"; port = 4242; };
};
}
extraConfig
];
in
{
name = "nebula";
nodes = {
lighthouse = { ... } @ args:
makeNebulaNode args "lighthouse" {
networking.interfaces.eth1.ipv4.addresses = [{
address = "192.168.1.1";
prefixLength = 24;
}];
services.nebula.networks.smoke = {
isLighthouse = true;
firewall = {
outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
};
};
};
node2 = { ... } @ args:
makeNebulaNode args "node2" {
networking.interfaces.eth1.ipv4.addresses = [{
address = "192.168.1.2";
prefixLength = 24;
}];
services.nebula.networks.smoke = {
staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
isLighthouse = false;
lighthouses = [ "10.0.100.1" ];
firewall = {
outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
};
};
};
node3 = { ... } @ args:
makeNebulaNode args "node3" {
networking.interfaces.eth1.ipv4.addresses = [{
address = "192.168.1.3";
prefixLength = 24;
}];
services.nebula.networks.smoke = {
staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
isLighthouse = false;
lighthouses = [ "10.0.100.1" ];
firewall = {
outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
inbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
};
};
};
node4 = { ... } @ args:
makeNebulaNode args "node4" {
networking.interfaces.eth1.ipv4.addresses = [{
address = "192.168.1.4";
prefixLength = 24;
}];
services.nebula.networks.smoke = {
enable = true;
staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
isLighthouse = false;
lighthouses = [ "10.0.100.1" ];
firewall = {
outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
};
};
};
node5 = { ... } @ args:
makeNebulaNode args "node5" {
networking.interfaces.eth1.ipv4.addresses = [{
address = "192.168.1.5";
prefixLength = 24;
}];
services.nebula.networks.smoke = {
enable = false;
staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
isLighthouse = false;
lighthouses = [ "10.0.100.1" ];
firewall = {
outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
};
};
};
};
testScript = let
setUpPrivateKey = name: ''
${name}.succeed(
"mkdir -p /root/.ssh",
"chown 700 /root/.ssh",
"cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
"chown 600 /root/.ssh/id_snakeoil",
)
'';
# From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines.
sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil";
restartAndCheckNebula = name: ip: ''
${name}.systemctl("restart nebula@smoke.service")
${name}.succeed("ping -c5 ${ip}")
'';
# Create a keypair on the client node, then use the public key to sign a cert on the lighthouse.
signKeysFor = name: ip: ''
lighthouse.wait_for_unit("sshd.service")
${name}.wait_for_unit("sshd.service")
${name}.succeed(
"mkdir -p /etc/nebula",
"nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub",
"scp ${sshOpts} /etc/nebula/${name}.pub 192.168.1.1:/tmp/${name}.pub",
)
lighthouse.succeed(
'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /tmp/${name}.pub -out-crt /tmp/${name}.crt',
)
${name}.succeed(
"scp ${sshOpts} 192.168.1.1:/tmp/${name}.crt /etc/nebula/${name}.crt",
"scp ${sshOpts} 192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
)
'';
in ''
start_all()
# Create the certificate authority and sign the lighthouse's keys.
${setUpPrivateKey "lighthouse"}
lighthouse.succeed(
"mkdir -p /etc/nebula",
'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key',
'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key',
)
# Reboot the lighthouse and verify that the nebula service comes up on boot.
# Since rebooting takes a while, we'll just restart the service on the other nodes.
lighthouse.shutdown()
lighthouse.start()
lighthouse.wait_for_unit("nebula@smoke.service")
lighthouse.succeed("ping -c5 10.0.100.1")
# Create keys for node2's nebula service and test that it comes up.
${setUpPrivateKey "node2"}
${signKeysFor "node2" "10.0.100.2/24"}
${restartAndCheckNebula "node2" "10.0.100.2"}
# Create keys for node3's nebula service and test that it comes up.
${setUpPrivateKey "node3"}
${signKeysFor "node3" "10.0.100.3/24"}
${restartAndCheckNebula "node3" "10.0.100.3"}
# Create keys for node4's nebula service and test that it comes up.
${setUpPrivateKey "node4"}
${signKeysFor "node4" "10.0.100.4/24"}
${restartAndCheckNebula "node4" "10.0.100.4"}
# Create keys for node5's nebula service and test that it does not come up.
${setUpPrivateKey "node5"}
${signKeysFor "node5" "10.0.100.5/24"}
node5.fail("systemctl status nebula@smoke.service")
node5.fail("ping -c5 10.0.100.5")
# The lighthouse can ping node2 and node3 but not node5
lighthouse.succeed("ping -c3 10.0.100.2")
lighthouse.succeed("ping -c3 10.0.100.3")
lighthouse.fail("ping -c3 10.0.100.5")
# node2 can ping the lighthouse, but not node3, because node3's inbound firewall only allows the lighthouse
node2.succeed("ping -c3 10.0.100.1")
node2.fail("ping -c3 10.0.100.3")
# node3 can ping the lighthouse and node2
node3.succeed("ping -c3 10.0.100.1")
node3.succeed("ping -c3 10.0.100.2")
# node4 can ping the lighthouse but not node2 or node3
node4.succeed("ping -c3 10.0.100.1")
node4.fail("ping -c3 10.0.100.2")
node4.fail("ping -c3 10.0.100.3")
# node2 can ping node3 now that node3 pinged it first
node2.succeed("ping -c3 10.0.100.3")
# node4 can ping node2 if node2 pings it first
node2.succeed("ping -c3 10.0.100.4")
node4.succeed("ping -c3 10.0.100.2")
'';
})
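With the nebula entry added to all-tests.nix above, this test should be buildable on its own in the usual way for handleTest-based tests, e.g. by running nix-build nixos/tests/nebula.nix from the root of a nixpkgs checkout.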