# Systemd services for lxd.
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.virtualisation.lxd;
in {
  imports = [
    (mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
  ];
  ###### interface

  options = {
    virtualisation.lxd = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          This option enables lxd, a daemon that manages
          containers. Users in the "lxd" group can interact with
          the daemon (e.g. to start or stop containers) using the
          <command>lxc</command> command line tool, among others.

          Most of the time, you'll also want to start lxcfs, so
          that containers can "see" the limits:
          <code>
          virtualisation.lxc.lxcfs.enable = true;
          </code>
        '';
      };
      package = mkOption {
        type = types.package;
        default = pkgs.lxd.override { nftablesSupport = config.networking.nftables.enable; };
        defaultText = "pkgs.lxd";
        description = ''
          The LXD package to use.
        '';
      };

      lxcPackage = mkOption {
        type = types.package;
        default = pkgs.lxc;
        defaultText = "pkgs.lxc";
        description = ''
          The LXC package to use with LXD (required for AppArmor profiles).
        '';
      };
      zfsSupport = mkOption {
        type = types.bool;
        default = config.boot.zfs.enabled;
        defaultText = "config.boot.zfs.enabled";
        description = ''
          Enables lxd to use ZFS as a storage backend for containers.

          This option is enabled by default if a ZFS pool is configured
          with NixOS.
        '';
      };
      recommendedSysctlSettings = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Enables various settings to avoid common pitfalls when
          running containers requiring many file operations.
          Fixes errors like "Too many open files" or
          "neighbour: ndisc_cache: neighbor table overflow!".
          See https://lxd.readthedocs.io/en/latest/production-setup/
          for details.
        '';
      };

      startTimeout = mkOption {
        type = types.int;
        default = 600;
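        # Converting the value to a string here lets it be interpolated
        # directly into ExecStartPost and TimeoutStartSec below.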
        apply = toString;
        description = ''
          Time to wait (in seconds) for LXD to become ready to process requests.
          If LXD does not reply within the configured time, lxd.service will be
          considered failed and systemd will attempt to restart it.
        '';
      };
    };
  };
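  # A minimal sketch of how a host configuration might use this module
  # (illustrative only; "alice" is a hypothetical user):
  #
  #   virtualisation.lxd.enable = true;
  #   virtualisation.lxd.recommendedSysctlSettings = true;
  #   virtualisation.lxc.lxcfs.enable = true;
  #   users.users.alice.extraGroups = [ "lxd" ];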
  ###### implementation

  config = mkIf cfg.enable {
    environment.systemPackages = [ cfg.package ];

    # Note: the following options are also declared in virtualisation.lxc, but
    # the latter can't simply be enabled to reuse them, because it does a
    # bunch of unrelated things.
    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
    security.apparmor = {
      packages = [ cfg.lxcPackage ];
      policies = {
        "bin.lxc-start".profile = ''
          include ${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start
        '';
        "lxc-containers".profile = ''
          include ${cfg.lxcPackage}/etc/apparmor.d/lxc-containers
        '';
      };
    };
    # TODO: remove once LXD gets proper support for cgroupsv2
    # (currently most of the e.g. CPU accounting stuff doesn't work)
    systemd.enableUnifiedCgroupHierarchy = false;
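    # Socket activation: systemd creates the UNIX socket itself, so it exists
    # (and queues connections) even before the daemon is ready. The 0660 mode
    # plus the "lxd" group is what lets non-root group members talk to LXD.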
    systemd.sockets.lxd = {
      description = "LXD UNIX socket";
      wantedBy = [ "sockets.target" ];

      socketConfig = {
        ListenStream = "/var/lib/lxd/unix.socket";
        SocketMode = "0660";
        SocketGroup = "lxd";
        Service = "lxd.service";
      };
    };
    systemd.services.lxd = {
      description = "LXD Container Management Daemon";

      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" "lxcfs.service" ];
      requires = [ "network-online.target" "lxd.socket" "lxcfs.service" ];
      documentation = [ "man:lxd(1)" ];

      path = optional cfg.zfsSupport config.boot.zfs.package;
      serviceConfig = {
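        # The "@" prefix is systemd syntax: the token after the executable
        # path ("lxd" here) is passed to the process as its argv[0].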
        ExecStart = "@${cfg.package}/bin/lxd lxd --group lxd";
        ExecStartPost = "${cfg.package}/bin/lxd waitready --timeout=${cfg.startTimeout}";
        ExecStop = "${cfg.package}/bin/lxd shutdown";

        KillMode = "process"; # when stopping, leave the containers alone
        LimitMEMLOCK = "infinity";
        LimitNOFILE = "1048576";
        LimitNPROC = "infinity";
        TasksMax = "infinity";
        Restart = "on-failure";
        TimeoutStartSec = "${cfg.startTimeout}s";
        TimeoutStopSec = "30s";

        # By default, `lxd` loads configuration files from the hard-coded
        # `/usr/share/lxc/config` - since this is a no-go for us, we have to
        # explicitly tell it where the actual configuration files are
        Environment = mkIf (config.virtualisation.lxc.lxcfs.enable)
          "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
      };
    };

    users.groups.lxd.gid = config.ids.gids.lxd;
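    # Give root a subordinate UID/GID range so LXD can run unprivileged
    # containers: container IDs 0-65535 are mapped onto host IDs
    # 1000000-1065535 instead of real root.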
    users.users.root = {
      subUidRanges = [ { startUid = 1000000; count = 65536; } ];
      subGidRanges = [ { startGid = 1000000; count = 65536; } ];
    };
    boot.kernel.sysctl = mkIf cfg.recommendedSysctlSettings {
      "fs.inotify.max_queued_events" = 1048576;
      "fs.inotify.max_user_instances" = 1048576;
      "fs.inotify.max_user_watches" = 1048576;
      "vm.max_map_count" = 262144;
      "kernel.dmesg_restrict" = 1;
      "net.ipv4.neigh.default.gc_thresh3" = 8192;
      "net.ipv6.neigh.default.gc_thresh3" = 8192;
      "kernel.keys.maxkeys" = 2000;
    };
    # Explicitly load the kernel modules needed to attach a container to the
    # default bridge network (lxdbr0); without them, creating the veth pair
    # and the iptables rules LXD sets up for the bridge fail (analogous to
    # #70447 and #76487).
    boot.kernelModules = [ "veth" "xt_comment" "xt_CHECKSUM" "xt_MASQUERADE" ]
      ++ optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
  };
}