uboot: (firmwareOdroidC2/C4) don't invoke patch tool, use patches = [] instead

The generic patch phase in stdenv can apply these nicely:
https://github.com/NixOS/nixpkgs/blob/master/pkgs/stdenv/generic/setup.sh#L948
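
A minimal sketch of the pattern (the patch file name here is illustrative):

  # before: applying the patch by hand
  postPatch = ''
    patch -p1 < ${./board-fixes.patch}
  '';

  # after: let stdenv's generic patchPhase apply it
  patches = [ ./board-fixes.patch ];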

Signed-off-by: Anton Arapov <anton@deadbeef.mx>
commit 56de2bcd43
Author: Anton Arapov <anton@deadbeef.mx>
Date: 2021-04-03 12:58:10 +02:00 (committed by Alan Daniels)
30691 files changed, 3076956 insertions(+), 0 deletions(-)


@@ -0,0 +1,119 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) concatMapStringsSep concatStringsSep isInt isList literalExpression;
inherit (lib) mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkOption optional types;
cfg = config.services.automysqlbackup;
pkg = pkgs.automysqlbackup;
user = "automysqlbackup";
group = "automysqlbackup";
toStr = val:
if isList val then "( ${concatMapStringsSep " " (val: "'${val}'") val} )"
else if isInt val then toString val
else if true == val then "'yes'"
else if false == val then "'no'"
else "'${toString val}'";
configFile = pkgs.writeText "automysqlbackup.conf" ''
#version=${pkg.version}
# DON'T REMOVE THE PREVIOUS VERSION LINE!
#
${concatStringsSep "\n" (mapAttrsToList (name: value: "CONFIG_${name}=${toStr value}") cfg.config)}
'';
in
{
# interface
options = {
services.automysqlbackup = {
enable = mkEnableOption "AutoMySQLBackup";
calendar = mkOption {
type = types.str;
default = "01:15:00";
description = ''
Configures when the backup service systemd unit runs (DayOfWeek Year-Month-Day Hour:Minute:Second).
'';
};
config = mkOption {
type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
default = {};
description = ''
automysqlbackup configuration. Refer to
<filename>''${pkgs.automysqlbackup}/etc/automysqlbackup.conf</filename>
for details on supported values.
'';
example = literalExpression ''
{
db_names = [ "nextcloud" "matomo" ];
table_exclude = [ "nextcloud.oc_users" "nextcloud.oc_whats_new" ];
mailcontent = "log";
mail_address = "admin@example.org";
}
'';
};
};
};
# implementation
config = mkIf cfg.enable {
assertions = [
{ assertion = !config.services.mysqlBackup.enable;
message = "Please choose one of services.mysqlBackup or services.automysqlbackup.";
}
];
services.automysqlbackup.config = mapAttrs (name: mkDefault) {
mysql_dump_username = user;
mysql_dump_host = "localhost";
mysql_dump_socket = "/run/mysqld/mysqld.sock";
backup_dir = "/var/backup/mysql";
db_exclude = [ "information_schema" "performance_schema" ];
mailcontent = "stdout";
mysql_dump_single_transaction = true;
};
systemd.timers.automysqlbackup = {
description = "automysqlbackup timer";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.calendar;
AccuracySec = "5m";
};
};
systemd.services.automysqlbackup = {
description = "automysqlbackup service";
serviceConfig = {
User = user;
Group = group;
ExecStart = "${pkg}/bin/automysqlbackup ${configFile}";
};
};
environment.systemPackages = [ pkg ];
users.users.${user} = {
group = group;
isSystemUser = true;
};
users.groups.${group} = { };
systemd.tmpfiles.rules = [
"d '${cfg.config.backup_dir}' 0750 ${user} ${group} - -"
];
services.mysql.ensureUsers = optional (config.services.mysql.enable && cfg.config.mysql_dump_host == "localhost") {
name = user;
ensurePermissions = { "*.*" = "SELECT, SHOW VIEW, TRIGGER, LOCK TABLES, EVENT"; };
};
};
}


@@ -0,0 +1,578 @@
{ config, lib, pkgs, ... }:
# TODO: test configuration when building nixexpr (use -t parameter)
# TODO: support sqlite3 (is it deprecated?) and mysql
with lib;
let
libDir = "/var/lib/bacula";
fd_cfg = config.services.bacula-fd;
fd_conf = pkgs.writeText "bacula-fd.conf"
''
Client {
Name = "${fd_cfg.name}";
FDPort = ${toString fd_cfg.port};
WorkingDirectory = "${libDir}";
Pid Directory = "/run";
${fd_cfg.extraClientConfig}
}
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
Director {
Name = "${name}";
Password = "${value.password}";
Monitor = "${value.monitor}";
}
'') fd_cfg.director)}
Messages {
Name = Standard;
syslog = all, !skipped, !restored
${fd_cfg.extraMessagesConfig}
}
'';
sd_cfg = config.services.bacula-sd;
sd_conf = pkgs.writeText "bacula-sd.conf"
''
Storage {
Name = "${sd_cfg.name}";
SDPort = ${toString sd_cfg.port};
WorkingDirectory = "${libDir}";
Pid Directory = "/run";
${sd_cfg.extraStorageConfig}
}
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
Autochanger {
Name = "${name}";
Device = ${concatStringsSep ", " (map (a: "\"${a}\"") value.devices)};
Changer Device = "${value.changerDevice}";
Changer Command = "${value.changerCommand}";
${value.extraAutochangerConfig}
}
'') sd_cfg.autochanger)}
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
Device {
Name = "${name}";
Archive Device = "${value.archiveDevice}";
Media Type = "${value.mediaType}";
${value.extraDeviceConfig}
}
'') sd_cfg.device)}
${concatStringsSep "\n" (mapAttrsToList (name: value: ''
Director {
Name = "${name}";
Password = "${value.password}";
Monitor = "${value.monitor}";
}
'') sd_cfg.director)}
Messages {
Name = Standard;
syslog = all, !skipped, !restored
${sd_cfg.extraMessagesConfig}
}
'';
dir_cfg = config.services.bacula-dir;
dir_conf = pkgs.writeText "bacula-dir.conf"
''
Director {
Name = "${dir_cfg.name}";
Password = "${dir_cfg.password}";
DirPort = ${toString dir_cfg.port};
Working Directory = "${libDir}";
Pid Directory = "/run/";
QueryFile = "${pkgs.bacula}/etc/query.sql";
${dir_cfg.extraDirectorConfig}
}
Catalog {
Name = "PostgreSQL";
dbname = "bacula";
user = "bacula";
}
Messages {
Name = Standard;
syslog = all, !skipped, !restored
${dir_cfg.extraMessagesConfig}
}
${dir_cfg.extraConfig}
'';
directorOptions = {...}:
{
options = {
password = mkOption {
type = types.str;
# TODO: required?
description = ''
Specifies the password that must be supplied for the default Bacula
Console to be authorized. The same password must appear in the
Director resource of the Console configuration file. For added
security, the password is never passed across the network but instead
a challenge response hash code created with the password. This
directive is required. If you have either /dev/random or bc on your
machine, Bacula will generate a random password during the
configuration process, otherwise it will be left blank and you must
manually supply it.
The password is plain text. It is not generated through any special
process but as noted above, it is better to use random text for
security reasons.
'';
};
monitor = mkOption {
type = types.enum [ "no" "yes" ];
default = "no";
example = "yes";
description = ''
If Monitor is set to <literal>no</literal>, this director will have
full access to this Storage daemon. If Monitor is set to
<literal>yes</literal>, this director will only be able to fetch the
current status of this Storage daemon.
Please note that if this director is being used by a Monitor, we
highly recommend setting this directive to yes to avoid serious
security problems.
'';
};
};
};
autochangerOptions = {...}:
{
options = {
changerDevice = mkOption {
type = types.str;
description = ''
The specified name-string must be the generic SCSI device name of the
autochanger that corresponds to the normal read/write Archive Device
specified in the Device resource. This generic SCSI device name
should be specified if you have an autochanger or if you have a
standard tape drive and want to use the Alert Command (see below).
For example, on Linux systems, for an Archive Device name of
<literal>/dev/nst0</literal>, you would specify
<literal>/dev/sg0</literal> for the Changer Device name. Depending
on your exact configuration, and the number of autochangers or the
type of autochanger, what you specify here can vary. This directive
is optional. See the Autochangers chapter of the Bacula manual for more
details on using this and the following autochanger directives.
'';
};
changerCommand = mkOption {
type = types.str;
description = ''
The name-string specifies an external program to be called that will
automatically change volumes as required by Bacula. Normally, this
directive will be specified only in the AutoChanger resource, which
is then used for all devices. However, you may also specify the
different Changer Command in each Device resource. Most frequently,
you will specify the Bacula supplied mtx-changer script as follows:
<literal>"/path/mtx-changer %c %o %S %a %d"</literal>
and you will install the mtx on your system (found in the depkgs
release). An example of this command is in the default bacula-sd.conf
file. For more details on the substitution characters that may be
specified to configure your autochanger, please see the
Autochangers chapter of the Bacula manual. For FreeBSD
users, you might want to see one of the several chio scripts in
examples/autochangers.
'';
default = "/etc/bacula/mtx-changer %c %o %S %a %d";
};
devices = mkOption {
description = "";
type = types.listOf types.str;
};
extraAutochangerConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Autochanger directive.
'';
example = ''
'';
};
};
};
deviceOptions = {...}:
{
options = {
archiveDevice = mkOption {
# TODO: required?
type = types.str;
description = ''
The specified name-string gives the system file name of the storage
device managed by this storage daemon. This will usually be the
device file name of a removable storage device (tape drive), for
example <literal>/dev/nst0</literal> or
<literal>/dev/rmt/0mbn</literal>. For a DVD-writer, it will be for
example <literal>/dev/hdc</literal>. It may also be a directory name
if you are archiving to disk storage. In this case, you must supply
the full absolute path to the directory. When specifying a tape
device, it is preferable that the "non-rewind" variant of the device
file name be given.
'';
};
mediaType = mkOption {
# TODO: required?
type = types.str;
description = ''
The specified name-string names the type of media supported by this
device, for example, <literal>DLT7000</literal>. Media type names are
arbitrary in that you set them to anything you want, but they must be
known to the volume database to keep track of which storage daemons
can read which volumes. In general, each different storage type
should have a unique Media Type associated with it. The same
name-string must appear in the appropriate Storage resource
definition in the Director's configuration file.
Even though the names you assign are arbitrary (i.e. you choose the
name you want), you should take care in specifying them because the
Media Type is used to determine which storage device Bacula will
select during restore. Thus you should probably use the same Media
Type specification for all drives where the Media can be freely
interchanged. This is not generally an issue if you have a single
Storage daemon, but it is with multiple Storage daemons, especially
if they have incompatible media.
For example, if you specify a Media Type of <literal>DDS-4</literal>
then during the restore, Bacula will be able to choose any Storage
Daemon that handles <literal>DDS-4</literal>. If you have an
autochanger, you might want to name the Media Type in a way that is
unique to the autochanger, unless you wish to possibly use the
Volumes in other drives. You should also ensure to have unique Media
Type names if the Media is not compatible between drives. This
specification is required for all devices.
In addition, if you are using disk storage, each Device resource will
generally have a different mount point or directory. In order for
Bacula to select the correct Device resource, each one must have a
unique Media Type.
'';
};
extraDeviceConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Device directive.
'';
example = ''
LabelMedia = yes
Random Access = no
AutomaticMount = no
RemovableMedia = no
MaximumOpenWait = 60
AlwaysOpen = no
'';
};
};
};
in {
options = {
services.bacula-fd = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable the Bacula File Daemon.
'';
};
name = mkOption {
default = "${config.networking.hostName}-fd";
defaultText = literalExpression ''"''${config.networking.hostName}-fd"'';
type = types.str;
description = ''
The client name that must be used by the Director when connecting.
Generally, it is a good idea to use a name related to the machine so
that error messages can be easily identified if you have multiple
Clients. This directive is required.
'';
};
port = mkOption {
default = 9102;
type = types.int;
description = ''
This specifies the port number on which the Client listens for
Director connections. It must agree with the FDPort specified in
the Client resource of the Director's configuration file.
'';
};
director = mkOption {
default = {};
description = ''
This option defines director resources in Bacula File Daemon.
'';
type = with types; attrsOf (submodule directorOptions);
};
extraClientConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Client directive.
'';
example = ''
Maximum Concurrent Jobs = 20;
Heartbeat Interval = 30;
'';
};
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Messages directive.
'';
example = ''
console = all
'';
};
};
services.bacula-sd = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable Bacula Storage Daemon.
'';
};
name = mkOption {
default = "${config.networking.hostName}-sd";
defaultText = literalExpression ''"''${config.networking.hostName}-sd"'';
type = types.str;
description = ''
Specifies the Name of the Storage daemon.
'';
};
port = mkOption {
default = 9103;
type = types.int;
description = ''
Specifies port number on which the Storage daemon listens for
Director connections.
'';
};
director = mkOption {
default = {};
description = ''
This option defines Director resources in Bacula Storage Daemon.
'';
type = with types; attrsOf (submodule directorOptions);
};
device = mkOption {
default = {};
description = ''
This option defines Device resources in Bacula Storage Daemon.
'';
type = with types; attrsOf (submodule deviceOptions);
};
autochanger = mkOption {
default = {};
description = ''
This option defines Autochanger resources in Bacula Storage Daemon.
'';
type = with types; attrsOf (submodule autochangerOptions);
};
extraStorageConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Storage directive.
'';
example = ''
Maximum Concurrent Jobs = 20;
Heartbeat Interval = 30;
'';
};
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Messages directive.
'';
example = ''
console = all
'';
};
};
services.bacula-dir = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to enable Bacula Director Daemon.
'';
};
name = mkOption {
default = "${config.networking.hostName}-dir";
defaultText = literalExpression ''"''${config.networking.hostName}-dir"'';
type = types.str;
description = ''
The director name used by the system administrator. This directive is
required.
'';
};
port = mkOption {
default = 9101;
type = types.int;
description = ''
Specify the port (a positive integer) on which the Director daemon
will listen for Bacula Console connections. This same port number
must be specified in the Director resource of the Console
configuration file. The default is 9101, so normally this directive
need not be specified. This directive should not be used if you
specify DirAddresses (N.B plural) directive.
'';
};
password = mkOption {
# TODO: required?
type = types.str;
description = ''
Specifies the password that must be supplied for a Director.
'';
};
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Messages directive.
'';
example = ''
console = all
'';
};
extraDirectorConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Director directive.
'';
example = ''
Maximum Concurrent Jobs = 20;
Heartbeat Interval = 30;
'';
};
extraConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration for Bacula Director Daemon.
'';
example = ''
TODO
'';
};
};
};
config = mkIf (fd_cfg.enable || sd_cfg.enable || dir_cfg.enable) {
systemd.services.bacula-fd = mkIf fd_cfg.enable {
after = [ "network.target" ];
description = "Bacula File Daemon";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.bacula ];
serviceConfig = {
ExecStart = "${pkgs.bacula}/sbin/bacula-fd -f -u root -g bacula -c ${fd_conf}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
LogsDirectory = "bacula";
StateDirectory = "bacula";
};
};
systemd.services.bacula-sd = mkIf sd_cfg.enable {
after = [ "network.target" ];
description = "Bacula Storage Daemon";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.bacula ];
serviceConfig = {
ExecStart = "${pkgs.bacula}/sbin/bacula-sd -f -u bacula -g bacula -c ${sd_conf}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
LogsDirectory = "bacula";
StateDirectory = "bacula";
};
};
services.postgresql.enable = dir_cfg.enable;
systemd.services.bacula-dir = mkIf dir_cfg.enable {
after = [ "network.target" "postgresql.service" ];
description = "Bacula Director Daemon";
wantedBy = [ "multi-user.target" ];
path = [ pkgs.bacula ];
serviceConfig = {
ExecStart = "${pkgs.bacula}/sbin/bacula-dir -f -u bacula -g bacula -c ${dir_conf}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
LogsDirectory = "bacula";
StateDirectory = "bacula";
};
preStart = ''
if ! test -e "${libDir}/db-created"; then
${pkgs.postgresql}/bin/createuser --no-superuser --no-createdb --no-createrole bacula
#${pkgs.postgresql}/bin/createdb --owner bacula bacula
# populate DB
${pkgs.bacula}/etc/create_bacula_database postgresql
${pkgs.bacula}/etc/make_bacula_tables postgresql
${pkgs.bacula}/etc/grant_bacula_privileges postgresql
touch "${libDir}/db-created"
else
${pkgs.bacula}/etc/update_bacula_tables postgresql || true
fi
'';
};
environment.systemPackages = [ pkgs.bacula ];
users.users.bacula = {
group = "bacula";
uid = config.ids.uids.bacula;
home = "${libDir}";
createHome = true;
description = "Bacula Daemons user";
shell = "${pkgs.bash}/bin/bash";
};
users.groups.bacula.gid = config.ids.gids.bacula;
};
}


@@ -0,0 +1,730 @@
{ config, lib, pkgs, ... }:
with lib;
let
isLocalPath = x:
builtins.substring 0 1 x == "/" # absolute path
|| builtins.substring 0 1 x == "." # relative path
|| builtins.match ".*:.*" x == null; # not machine:path
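# e.g. isLocalPath "/var/backup" => true (absolute), isLocalPath "./repo" => true
# (relative), isLocalPath "user@host:repo" => false (machine:path form).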
mkExcludeFile = cfg:
# Write each exclude pattern to a new line
pkgs.writeText "excludefile" (concatStringsSep "\n" cfg.exclude);
mkKeepArgs = cfg:
# If cfg.prune.keep e.g. has a yearly attribute,
# its content is passed on as --keep-yearly
concatStringsSep " "
(mapAttrsToList (x: y: "--keep-${x}=${toString y}") cfg.prune.keep);
mkBackupScript = cfg: ''
on_exit()
{
exitStatus=$?
# Reset the EXIT handler, or else we're called again on 'exit' below
trap - EXIT
${cfg.postHook}
exit $exitStatus
}
trap 'on_exit' INT TERM QUIT EXIT
archiveName="${if cfg.archiveBaseName == null then "" else cfg.archiveBaseName + "-"}$(date ${cfg.dateFormat})"
archiveSuffix="${optionalString cfg.appendFailedSuffix ".failed"}"
${cfg.preHook}
'' + optionalString cfg.doInit ''
# Run borg init if the repo doesn't exist yet
if ! borg list $extraArgs > /dev/null; then
borg init $extraArgs \
--encryption ${cfg.encryption.mode} \
$extraInitArgs
${cfg.postInit}
fi
'' + ''
(
set -o pipefail
${optionalString (cfg.dumpCommand != null) ''${escapeShellArg cfg.dumpCommand} | \''}
borg create $extraArgs \
--compression ${cfg.compression} \
--exclude-from ${mkExcludeFile cfg} \
$extraCreateArgs \
"::$archiveName$archiveSuffix" \
${if cfg.paths == null then "-" else escapeShellArgs cfg.paths}
)
'' + optionalString cfg.appendFailedSuffix ''
borg rename $extraArgs \
"::$archiveName$archiveSuffix" "$archiveName"
'' + ''
${cfg.postCreate}
'' + optionalString (cfg.prune.keep != { }) ''
borg prune $extraArgs \
${mkKeepArgs cfg} \
${optionalString (cfg.prune.prefix != null) "--prefix ${escapeShellArg cfg.prune.prefix} \\"}
$extraPruneArgs
${cfg.postPrune}
'';
mkPassEnv = cfg: with cfg.encryption;
if passCommand != null then
{ BORG_PASSCOMMAND = passCommand; }
else if passphrase != null then
{ BORG_PASSPHRASE = passphrase; }
else { };
mkBackupService = name: cfg:
let
userHome = config.users.users.${cfg.user}.home;
in nameValuePair "borgbackup-job-${name}" {
description = "BorgBackup job ${name}";
path = with pkgs; [
borgbackup openssh
];
script = mkBackupScript cfg;
serviceConfig = {
User = cfg.user;
Group = cfg.group;
# Only run when no other process is using CPU or disk
CPUSchedulingPolicy = "idle";
IOSchedulingClass = "idle";
ProtectSystem = "strict";
ReadWritePaths =
[ "${userHome}/.config/borg" "${userHome}/.cache/borg" ]
++ cfg.readWritePaths
# Borg needs write access to repo if it is not remote
++ optional (isLocalPath cfg.repo) cfg.repo;
PrivateTmp = cfg.privateTmp;
};
environment = {
BORG_REPO = cfg.repo;
inherit (cfg) extraArgs extraInitArgs extraCreateArgs extraPruneArgs;
} // (mkPassEnv cfg) // cfg.environment;
};
mkBackupTimers = name: cfg:
nameValuePair "borgbackup-job-${name}" {
description = "BorgBackup job ${name} timer";
wantedBy = [ "timers.target" ];
timerConfig = {
Persistent = cfg.persistentTimer;
OnCalendar = cfg.startAt;
};
# if remote-backup wait for network
after = optional (cfg.persistentTimer && !isLocalPath cfg.repo) "network-online.target";
};
# utility function around makeWrapper
mkWrapperDrv = {
original, name, set ? {}
}:
pkgs.runCommand "${name}-wrapper" {
buildInputs = [ pkgs.makeWrapper ];
} (with lib; ''
makeWrapper "${original}" "$out/bin/${name}" \
${concatStringsSep " \\\n " (mapAttrsToList (name: value: ''--set ${name} "${value}"'') set)}
'');
mkBorgWrapper = name: cfg: mkWrapperDrv {
original = "${pkgs.borgbackup}/bin/borg";
name = "borg-job-${name}";
set = { BORG_REPO = cfg.repo; } // (mkPassEnv cfg) // cfg.environment;
};
# Paths listed in ReadWritePaths must exist before service is started
mkActivationScript = name: cfg:
let
install = "install -o ${cfg.user} -g ${cfg.group}";
in
nameValuePair "borgbackup-job-${name}" (stringAfter [ "users" ] (''
# Ensure that the home directory already exists
# We can't assert createHome == true because that's not the case for root
cd "${config.users.users.${cfg.user}.home}"
${install} -d .config/borg
${install} -d .cache/borg
'' + optionalString (isLocalPath cfg.repo && !cfg.removableDevice) ''
${install} -d ${escapeShellArg cfg.repo}
''));
mkPassAssertion = name: cfg: {
assertion = with cfg.encryption;
mode != "none" -> passCommand != null || passphrase != null;
message =
"passCommand or passphrase has to be specified because"
+ '' borgbackup.jobs.${name}.encryption != "none"'';
};
mkRepoService = name: cfg:
nameValuePair "borgbackup-repo-${name}" {
description = "Create BorgBackup repository ${name} directory";
script = ''
mkdir -p ${escapeShellArg cfg.path}
chown ${cfg.user}:${cfg.group} ${escapeShellArg cfg.path}
'';
serviceConfig = {
# The service's only task is to ensure that the specified path exists
Type = "oneshot";
};
wantedBy = [ "multi-user.target" ];
};
mkAuthorizedKey = cfg: appendOnly: key:
let
# Because of the following line, clients do not need to specify an absolute repo path
cdCommand = "cd ${escapeShellArg cfg.path}";
restrictedArg = "--restrict-to-${if cfg.allowSubRepos then "path" else "repository"} .";
appendOnlyArg = optionalString appendOnly "--append-only";
quotaArg = optionalString (cfg.quota != null) "--storage-quota ${cfg.quota}";
serveCommand = "borg serve ${restrictedArg} ${appendOnlyArg} ${quotaArg}";
in
''command="${cdCommand} && ${serveCommand}",restrict ${key}'';
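# Rendered authorized_keys entry (illustrative, default path and options):
#   command="cd '/var/lib/borgbackup' && borg serve --restrict-to-repository . ",restrict ssh-ed25519 AAAA...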
mkUsersConfig = name: cfg: {
users.${cfg.user} = {
openssh.authorizedKeys.keys =
(map (mkAuthorizedKey cfg false) cfg.authorizedKeys
++ map (mkAuthorizedKey cfg true) cfg.authorizedKeysAppendOnly);
useDefaultShell = true;
group = cfg.group;
isSystemUser = true;
};
groups.${cfg.group} = { };
};
mkKeysAssertion = name: cfg: {
assertion = cfg.authorizedKeys != [ ] || cfg.authorizedKeysAppendOnly != [ ];
message =
"borgbackup.repos.${name} does not make sense"
+ " without at least one public key";
};
mkSourceAssertions = name: cfg: {
assertion = count isNull [ cfg.dumpCommand cfg.paths ] == 1;
message = ''
Exactly one of borgbackup.jobs.${name}.paths or borgbackup.jobs.${name}.dumpCommand
must be set.
'';
};
mkRemovableDeviceAssertions = name: cfg: {
assertion = !(isLocalPath cfg.repo) -> !cfg.removableDevice;
message = ''
borgbackup.repos.${name}: repo isn't a local path, thus it can't be a removable device!
'';
};
in {
meta.maintainers = with maintainers; [ dotlambda ];
meta.doc = ./borgbackup.xml;
###### interface
options.services.borgbackup.jobs = mkOption {
description = ''
Deduplicating backups using BorgBackup.
Adding a job will cause a borg-job-NAME wrapper to be added
to your system path, so that you can perform maintenance easily.
See also the chapter about BorgBackup in the NixOS manual.
'';
default = { };
example = literalExpression ''
{ # for a local backup
rootBackup = {
paths = "/";
exclude = [ "/nix" ];
repo = "/path/to/local/repo";
encryption = {
mode = "repokey";
passphrase = "secret";
};
compression = "auto,lzma";
startAt = "weekly";
};
}
{ # Root backed up each day to a remote backup server. We assume that you have
# * created a passwordless key: ssh-keygen -N "" -t ed25519 -f /path/to/ssh_key
#   (best practices: use -t ed25519 and keep the key under /run/keys)
# * the passphrase is in the file /run/keys/borgbackup_passphrase
# * you have initialized the repository manually
remoteBackup = {
paths = [ "/etc" "/home" ];
exclude = [ "/nix" "'**/.cache'" ];
doInit = false;
repo = "user3@arep.repo.borgbase.com:repo";
encryption = {
mode = "repokey-blake2";
passCommand = "cat /path/to/passphrase";
};
environment = { BORG_RSH = "ssh -i /path/to/ssh_key"; };
compression = "auto,lzma";
startAt = "daily";
};
}
'';
type = types.attrsOf (types.submodule (let globalConfig = config; in
{ name, config, ... }: {
options = {
paths = mkOption {
type = with types; nullOr (coercedTo str lib.singleton (listOf str));
default = null;
description = ''
Path(s) to back up.
Mutually exclusive with <option>dumpCommand</option>.
'';
example = "/home/user";
};
dumpCommand = mkOption {
type = with types; nullOr path;
default = null;
description = ''
Backup the stdout of this program instead of filesystem paths.
Mutually exclusive with <option>paths</option>.
'';
example = "/path/to/createZFSsend.sh";
};
repo = mkOption {
type = types.str;
description = "Remote or local repository to back up to.";
example = "user@machine:/path/to/repo";
};
removableDevice = mkOption {
type = types.bool;
default = false;
description = "Whether the repo (which must be local) is a removable device.";
};
archiveBaseName = mkOption {
type = types.nullOr (types.strMatching "[^/{}]+");
default = "${globalConfig.networking.hostName}-${name}";
defaultText = literalExpression ''"''${config.networking.hostName}-<name>"'';
description = ''
How to name the created archives. A timestamp, whose format is
determined by <option>dateFormat</option>, will be appended. The full
name can be modified at runtime (<literal>$archiveName</literal>).
Placeholders like <literal>{hostname}</literal> must not be used.
Use <literal>null</literal> for no base name.
'';
};
dateFormat = mkOption {
type = types.str;
description = ''
Arguments passed to <command>date</command>
to create a timestamp suffix for the archive name.
'';
default = "+%Y-%m-%dT%H:%M:%S";
example = "-u +%s";
};
startAt = mkOption {
type = with types; either str (listOf str);
default = "daily";
description = ''
When or how often the backup should run.
Must be in the format described in
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
If you do not want the backup to start
automatically, use <literal>[ ]</literal>.
It will generate a systemd service borgbackup-job-NAME.
You may trigger it manually via systemctl restart borgbackup-job-NAME.
'';
};
persistentTimer = mkOption {
default = false;
type = types.bool;
example = true;
description = ''
Set the <literal>persistentTimer</literal> option for the
<citerefentry><refentrytitle>systemd.timer</refentrytitle>
<manvolnum>5</manvolnum></citerefentry>
which triggers the backup immediately if the last trigger
was missed (e.g. if the system was powered down).
'';
};
user = mkOption {
type = types.str;
description = ''
The user <command>borg</command> is run as.
User or group need read permission
for the specified <option>paths</option>.
'';
default = "root";
};
group = mkOption {
type = types.str;
description = ''
The group borg is run as. User or group needs read permission
for the specified <option>paths</option>.
'';
default = "root";
};
encryption.mode = mkOption {
type = types.enum [
"repokey" "keyfile"
"repokey-blake2" "keyfile-blake2"
"authenticated" "authenticated-blake2"
"none"
];
description = ''
Encryption mode to use. Setting a mode
other than <literal>"none"</literal> requires
you to specify a <option>passCommand</option>
or a <option>passphrase</option>.
'';
example = "repokey-blake2";
};
encryption.passCommand = mkOption {
type = with types; nullOr str;
description = ''
A command which prints the passphrase to stdout.
Mutually exclusive with <option>passphrase</option>.
'';
default = null;
example = "cat /path/to/passphrase_file";
};
encryption.passphrase = mkOption {
type = with types; nullOr str;
description = ''
The passphrase the backups are encrypted with.
Mutually exclusive with <option>passCommand</option>.
If you do not want the passphrase to be stored in the
world-readable Nix store, use <option>passCommand</option>.
'';
default = null;
};
compression = mkOption {
# "auto" is optional,
# compression mode must be given,
# compression level is optional
type = types.strMatching "none|(auto,)?(lz4|zstd|zlib|lzma)(,[[:digit:]]{1,2})?";
description = ''
Compression method to use. Refer to
<command>borg help compression</command>
for all available options.
'';
default = "lz4";
example = "auto,lzma";
};
exclude = mkOption {
type = with types; listOf str;
description = ''
Exclude paths matching any of the given patterns. See
<command>borg help patterns</command> for pattern syntax.
'';
default = [ ];
example = [
"/home/*/.cache"
"/nix"
];
};
readWritePaths = mkOption {
type = with types; listOf path;
description = ''
By default, borg cannot write anywhere on the system but
<literal>$HOME/.config/borg</literal> and <literal>$HOME/.cache/borg</literal>.
If, for example, your preHook script needs to dump files
somewhere, put those directories here.
'';
default = [ ];
example = [
"/var/backup/mysqldump"
];
};
privateTmp = mkOption {
type = types.bool;
description = ''
Set the <literal>PrivateTmp</literal> option for
the systemd-service. Set to false if you need sockets
or other files from global /tmp.
'';
default = true;
};
doInit = mkOption {
type = types.bool;
description = ''
Run <command>borg init</command> if the
specified <option>repo</option> does not exist.
You should set this to <literal>false</literal>
if the repository is located on an external drive
that might not always be mounted.
'';
default = true;
};
appendFailedSuffix = mkOption {
type = types.bool;
description = ''
Append a <literal>.failed</literal> suffix
to the archive name, which is only removed if
<command>borg create</command> has a zero exit status.
'';
default = true;
};
prune.keep = mkOption {
# Specifying e.g. `prune.keep.yearly = -1`
# means there is no limit of yearly archives to keep
# The regex is for use with e.g. --keep-within 1y
type = with types; attrsOf (either int (strMatching "[[:digit:]]+[Hdwmy]"));
description = ''
Prune a repository by deleting all archives not matching any of the
specified retention options. See <command>borg help prune</command>
for the available options.
'';
default = { };
example = literalExpression ''
{
within = "1d"; # Keep all archives from the last day
daily = 7;
weekly = 4;
monthly = -1; # Keep at least one archive for each month
}
'';
};
prune.prefix = mkOption {
type = types.nullOr (types.str);
description = ''
Only consider archive names starting with this prefix for pruning.
By default, only archives created by this job are considered.
Use <literal>""</literal> or <literal>null</literal> to consider all archives.
'';
default = config.archiveBaseName;
defaultText = literalExpression "archiveBaseName";
};
environment = mkOption {
type = with types; attrsOf str;
description = ''
Environment variables passed to the backup script.
You can for example specify which SSH key to use.
'';
default = { };
example = { BORG_RSH = "ssh -i /path/to/key"; };
};
preHook = mkOption {
type = types.lines;
description = ''
Shell commands to run before the backup.
This can for example be used to mount file systems.
'';
default = "";
example = ''
# To add excluded paths at runtime
extraCreateArgs="$extraCreateArgs --exclude /some/path"
'';
};
postInit = mkOption {
type = types.lines;
description = ''
Shell commands to run after <command>borg init</command>.
'';
default = "";
};
postCreate = mkOption {
type = types.lines;
description = ''
Shell commands to run after <command>borg create</command>. The name
of the created archive is stored in <literal>$archiveName</literal>.
'';
default = "";
};
postPrune = mkOption {
type = types.lines;
description = ''
Shell commands to run after <command>borg prune</command>.
'';
default = "";
};
postHook = mkOption {
type = types.lines;
description = ''
Shell commands to run just before exit. They are executed
even if a previous command exits with a non-zero exit code.
The latter is available as <literal>$exitStatus</literal>.
'';
default = "";
};
extraArgs = mkOption {
type = types.str;
description = ''
Additional arguments for all <command>borg</command> calls the
service has. Handle with care.
'';
default = "";
example = "--remote-path=/path/to/borg";
};
extraInitArgs = mkOption {
type = types.str;
description = ''
Additional arguments for <command>borg init</command>.
Can also be set at runtime using <literal>$extraInitArgs</literal>.
'';
default = "";
example = "--append-only";
};
extraCreateArgs = mkOption {
type = types.str;
description = ''
Additional arguments for <command>borg create</command>.
Can also be set at runtime using <literal>$extraCreateArgs</literal>.
'';
default = "";
example = "--stats --checkpoint-interval 600";
};
extraPruneArgs = mkOption {
type = types.str;
description = ''
Additional arguments for <command>borg prune</command>.
Can also be set at runtime using <literal>$extraPruneArgs</literal>.
'';
default = "";
example = "--save-space";
};
};
}
));
};
options.services.borgbackup.repos = mkOption {
description = ''
Serve BorgBackup repositories to given public SSH keys,
restricting their access to the repository only.
See also the chapter about BorgBackup in the NixOS manual.
Also, clients do not need to specify the absolute path when accessing the repository,
i.e. <literal>user@machine:.</literal> is enough. (Note colon and dot.)
'';
default = { };
type = types.attrsOf (types.submodule (
{ ... }: {
options = {
path = mkOption {
type = types.path;
description = ''
Where to store the backups. Note that the directory
is created automatically, with correct permissions.
'';
default = "/var/lib/borgbackup";
};
user = mkOption {
type = types.str;
description = ''
The user <command>borg serve</command> is run as.
User or group needs write permission
for the specified <option>path</option>.
'';
default = "borg";
};
group = mkOption {
type = types.str;
description = ''
The group <command>borg serve</command> is run as.
User or group needs write permission
for the specified <option>path</option>.
'';
default = "borg";
};
authorizedKeys = mkOption {
type = with types; listOf str;
description = ''
Public SSH keys that are given full write access to this repository.
You should use a different SSH key for each repository you write to, because
the specified keys are restricted to running <command>borg serve</command>
and can only access this single repository.
'';
default = [ ];
};
authorizedKeysAppendOnly = mkOption {
type = with types; listOf str;
description = ''
Public SSH keys that can only be used to append new data (archives) to the repository.
Note that archives can still be marked as deleted and are subsequently removed from disk
upon accessing the repo with full write access, e.g. when pruning.
'';
default = [ ];
};
allowSubRepos = mkOption {
type = types.bool;
description = ''
Allow clients to create repositories in subdirectories of the
specified <option>path</option>. These can be accessed using
<literal>user@machine:path/to/subrepo</literal>. Note that a
<option>quota</option> applies to repositories independently.
Therefore, if this is enabled, clients can create multiple
repositories and upload an arbitrary amount of data.
'';
default = false;
};
quota = mkOption {
# See the definition of parse_file_size() in src/borg/helpers/parseformat.py
type = with types; nullOr (strMatching "[[:digit:].]+[KMGTP]?");
description = ''
Storage quota for the repository. This quota is ensured for all
sub-repositories if <option>allowSubRepos</option> is enabled
but not for the overall storage space used.
'';
default = null;
example = "100G";
};
};
}
));
};
###### implementation
config = mkIf (with config.services.borgbackup; jobs != { } || repos != { })
(with config.services.borgbackup; {
assertions =
mapAttrsToList mkPassAssertion jobs
++ mapAttrsToList mkKeysAssertion repos
++ mapAttrsToList mkSourceAssertions jobs
++ mapAttrsToList mkRemovableDeviceAssertions jobs;
system.activationScripts = mapAttrs' mkActivationScript jobs;
systemd.services =
# A job named "foo" is mapped to systemd.services.borgbackup-job-foo
mapAttrs' mkBackupService jobs
# A repo named "foo" is mapped to systemd.services.borgbackup-repo-foo
// mapAttrs' mkRepoService repos;
# A job named "foo" is mapped to systemd.timers.borgbackup-job-foo
# only generate the timer if interval (startAt) is set
systemd.timers = mapAttrs' mkBackupTimers (filterAttrs (_: cfg: cfg.startAt != []) jobs);
users = mkMerge (mapAttrsToList mkUsersConfig repos);
environment.systemPackages = with pkgs; [ borgbackup ] ++ (mapAttrsToList mkBorgWrapper jobs);
});
}


@@ -0,0 +1,209 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-borgbase">
<title>BorgBackup</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/backup/borgbackup.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://borgbackup.readthedocs.io/"/>
</para>
<para>
<link xlink:href="https://www.borgbackup.org/">BorgBackup</link> (short: Borg)
is a deduplicating backup program. Optionally, it supports compression and
authenticated encryption.
</para>
<para>
The main goal of Borg is to provide an efficient and secure way to back up
data. The data deduplication technique used makes Borg suitable for daily
backups since only changes are stored. The authenticated encryption technique
makes it suitable for backups to not fully trusted targets.
</para>
<section xml:id="module-services-backup-borgbackup-configuring">
<title>Configuring</title>
<para>
A complete list of options for the BorgBackup module may be found
<link linkend="opt-services.borgbackup.jobs">here</link>.
</para>
</section>
<section xml:id="opt-services-backup-borgbackup-local-directory">
<title>Basic usage for a local backup</title>
<para>
A very basic configuration for backing up to a locally accessible directory
is:
<programlisting>
{
services.borgbackup.jobs = {
rootBackup = {
paths = "/";
exclude = [ "/nix" "/path/to/local/repo" ];
repo = "/path/to/local/repo";
doInit = true;
encryption = {
mode = "repokey";
passphrase = "secret";
};
compression = "auto,lzma";
startAt = "weekly";
};
};
}</programlisting>
</para>
<warning>
<para>
If you do not want the passphrase to be stored in the world-readable
Nix store, use passCommand. You find an example below.
</para>
</warning>
</section>
<section xml:id="opt-services-backup-create-server">
<title>Create a borg backup server</title>
<para>You should use a different SSH key for each repository you write to,
because the specified keys are restricted to running <command>borg serve</command> and can only
access this single repository. You will need the contents of the generated public key file.
</para>
<para>
<screen>
<prompt># </prompt>sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_my_borg_repo
<prompt># </prompt>cat /run/keys/id_ed25519_my_borg_repo
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos</screen>
</para>
<para>
Add the following snippet to your NixOS configuration:
<programlisting>
{
services.borgbackup.repos = {
my_borg_repo = {
authorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos"
] ;
path = "/var/lib/my_borg_repo" ;
};
};
}</programlisting>
</para>
</section>
<section xml:id="opt-services-backup-borgbackup-remote-server">
<title>Backup to the borg repository server</title>
<para>The following NixOS snippet creates an hourly backup to the service
(on the host nixos) as created in the section above. We assume
that you have stored a secret passphrase in the file
<code>/run/keys/borgbackup_passphrase</code>, which should only be
accessible by root.
</para>
<para>
<programlisting>
{
services.borgbackup.jobs = {
backupToLocalServer = {
paths = [ "/etc/nixos" ];
doInit = true;
repo = "borg@nixos:." ;
encryption = {
mode = "repokey-blake2";
passCommand = "cat /run/keys/borgbackup_passphrase";
};
environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_my_borg_repo"; };
compression = "auto,lzma";
startAt = "hourly";
};
};
}</programlisting>
</para>
<para>The following few commands (run as root) let you test your backup.
<programlisting>
> nixos-rebuild switch
...restarting the following units: polkit.service
> systemctl restart borgbackup-job-backupToLocalServer
> sleep 10
> systemctl restart borgbackup-job-backupToLocalServer
> export BORG_PASSPHRASE=topSecret
> borg list --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' borg@nixos:.
nixos-backupToLocalServer-2020-03-30T21:46:17 Mon, 2020-03-30 21:46:19 [84feb97710954931ca384182f5f3cb90665f35cef214760abd7350fb064786ac]
nixos-backupToLocalServer-2020-03-30T21:46:30 Mon, 2020-03-30 21:46:32 [e77321694ecd160ca2228611747c6ad1be177d6e0d894538898de7a2621b6e68]</programlisting>
</para>
</section>
<section xml:id="opt-services-backup-borgbackup-borgbase">
<title>Backup to a hosting service</title>
<para>
Several companies offer <link
xlink:href="https://www.borgbackup.org/support/commercial.html">(paid)
hosting services</link> for Borg repositories.
</para>
<para>
To back up your home directory to BorgBase you have to:
</para>
<itemizedlist>
<listitem>
<para>
Generate an SSH key without a passphrase to access the remote server, e.g.
</para>
<para>
<programlisting>sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_borgbase</programlisting>
</para>
</listitem>
<listitem>
<para>
Create the repository on the server by following the instructions for your
hosting server.
</para>
</listitem>
<listitem>
<para>
Initialize the repository on the server, e.g.
<programlisting>
sudo borg init --encryption=repokey-blake2 \
-rsh "ssh -i /run/keys/id_ed25519_borgbase" \
zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo</programlisting>
</para>
</listitem>
<listitem>
<para>Add it to your NixOS configuration, e.g.
<programlisting>
{
services.borgbackup.jobs = {
my_Remote_Backup = {
paths = [ "/" ];
exclude = [ "/nix" "'**/.cache'" ];
repo = "zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo";
encryption = {
mode = "repokey-blake2";
passCommand = "cat /run/keys/borgbackup_passphrase";
};
environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_borgbase"; };
compression = "auto,lzma";
startAt = "daily";
};
};
}</programlisting>
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="opt-services-backup-borgbackup-vorta">
<title>Vorta backup client for the desktop</title>
<para>
Vorta is a backup client for macOS and Linux desktops. It integrates the
mighty BorgBackup with your desktop environment to protect your data from
disk failure, ransomware and theft.
</para>
<para>
It can be installed in NixOS e.g. by adding <package>pkgs.vorta</package>
to <xref linkend="opt-environment.systemPackages" />.
</para>
<para>
Details about using Vorta can be found under <link
xlink:href="https://vorta.borgbase.com/usage">https://vorta.borgbase.com/usage</link>.
</para>
</section>
</chapter>


@@ -0,0 +1,58 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.borgmatic;
settingsFormat = pkgs.formats.yaml { };
cfgfile = settingsFormat.generate "config.yaml" cfg.settings;
in {
options.services.borgmatic = {
enable = mkEnableOption "borgmatic";
settings = mkOption {
description = ''
See https://torsion.org/borgmatic/docs/reference/configuration/
'';
type = types.submodule {
freeformType = settingsFormat.type;
options.location = {
source_directories = mkOption {
type = types.listOf types.str;
description = ''
List of source directories to backup (required). Globs and
tildes are expanded.
'';
example = [ "/home" "/etc" "/var/log/syslog*" ];
};
repositories = mkOption {
type = types.listOf types.str;
description = ''
Paths to local or remote repositories (required). Tildes are
expanded. Multiple repositories are backed up to in
sequence. Borg placeholders can be used. See the output of
"borg help placeholders" for details. See ssh_command for
SSH options like identity file or port. If systemd service
is used, then add local repository paths in the systemd
service file to the ReadWritePaths list.
'';
example = [
"user@backupserver:sourcehostname.borg"
"user@backupserver:{fqdn}"
];
};
};
};
};
};
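# Illustrative usage of this module (values are placeholders):
#   services.borgmatic = {
#     enable = true;
#     settings.location = {
#       source_directories = [ "/home" "/etc" ];
#       repositories = [ "user@backupserver:{fqdn}" ];
#     };
#   };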
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.borgmatic ];
environment.etc."borgmatic/config.yaml".source = cfgfile;
systemd.packages = [ pkgs.borgmatic ];
};
}


@@ -0,0 +1,244 @@
{ config, pkgs, lib, ... }:
let
inherit (lib)
concatMapStringsSep
concatStringsSep
filterAttrs
flatten
isAttrs
isString
literalExpression
mapAttrs'
mapAttrsToList
mkIf
mkOption
optionalString
partition
typeOf
types
;
cfg = config.services.btrbk;
sshEnabled = cfg.sshAccess != [ ];
serviceEnabled = cfg.instances != { };
attr2Lines = attr:
let
pairs = mapAttrsToList (name: value: { inherit name value; }) attr;
isSubsection = value:
if isAttrs value then true
else if isString value then false
else throw "invalid type in btrbk config ${typeOf value}";
sortedPairs = partition (x: isSubsection x.value) pairs;
in
flatten (
# non subsections go first
(
map (pair: [ "${pair.name} ${pair.value}" ]) sortedPairs.wrong
)
++ # subsections go last
(
map
(
pair:
mapAttrsToList
(
childname: value:
[ "${pair.name} ${childname}" ] ++ (map (x: " " + x) (attr2Lines value))
)
pair.value
)
sortedPairs.right
)
)
;
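# Illustrative rendering:
#   attr2Lines { snapshot_preserve = "14d"; volume."/mnt/btr_pool".subvolume = "home"; }
# yields (plain options first, subsections indented):
#   snapshot_preserve 14d
#   volume /mnt/btr_pool
#     subvolume home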
addDefaults = settings: { backend = "btrfs-progs-sudo"; } // settings;
mkConfigFile = settings: concatStringsSep "\n" (attr2Lines (addDefaults settings));
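# Sanity-check the rendered config at build time: run `btrbk ls` against it in
# an empty directory and fail the build if btrbk's output mentions the file,
# which indicates a parse error.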
mkTestedConfigFile = name: settings:
let
configFile = pkgs.writeText "btrbk-${name}.conf" (mkConfigFile settings);
in
pkgs.runCommand "btrbk-${name}-tested.conf" { } ''
mkdir foo
cp ${configFile} $out
if (set +o pipefail; ${pkgs.btrbk}/bin/btrbk -c $out ls foo 2>&1 | grep $out);
then
echo btrbk configuration is invalid
cat $out
exit 1
fi;
'';
in
{
meta.maintainers = with lib.maintainers; [ oxalica ];
options = {
services.btrbk = {
extraPackages = mkOption {
description = "Extra packages for btrbk, like compression utilities for <literal>stream_compress</literal>";
type = types.listOf types.package;
default = [ ];
example = literalExpression "[ pkgs.xz ]";
};
niceness = mkOption {
description = "Niceness for local instances of btrbk. Also applies to remote ones connecting via ssh when positive.";
type = types.ints.between (-20) 19;
default = 10;
};
ioSchedulingClass = mkOption {
description = "IO scheduling class for btrbk (see ionice(1) for a quick description). Applies to local instances, and remote ones connecting by ssh if set to idle.";
type = types.enum [ "idle" "best-effort" "realtime" ];
default = "best-effort";
};
instances = mkOption {
description = "Set of btrbk instances. The instance named <literal>btrbk</literal> is the default one.";
type = with types;
attrsOf (
submodule {
options = {
onCalendar = mkOption {
type = types.nullOr types.str;
default = "daily";
description = ''
How often this btrbk instance is started. See systemd.time(7) for more information about the format.
Setting it to null disables the timer, thus this instance can only be started manually.
'';
};
settings = mkOption {
type = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
default = { };
example = {
snapshot_preserve_min = "2d";
snapshot_preserve = "14d";
volume = {
"/mnt/btr_pool" = {
target = "/mnt/btr_backup/mylaptop";
subvolume = {
"rootfs" = { };
"home" = { snapshot_create = "always"; };
};
};
};
};
description = "configuration options for btrbk. Nested attrsets translate to subsections.";
};
};
}
);
default = { };
};
sshAccess = mkOption {
description = "SSH keys that should be able to make or push snapshots on this system remotely with btrbk";
type = with types; listOf (
submodule {
options = {
key = mkOption {
type = str;
description = "SSH public key allowed to login as user <literal>btrbk</literal> to run remote backups.";
};
roles = mkOption {
type = listOf (enum [ "info" "source" "target" "delete" "snapshot" "send" "receive" ]);
example = [ "source" "info" "send" ];
description = "What actions can be performed with this SSH key. See ssh_filter_btrbk(1) for details";
};
};
}
);
default = [ ];
};
};
};
config = mkIf (sshEnabled || serviceEnabled) {
environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
security.sudo.extraRules = [
{
users = [ "btrbk" ];
commands = [
{ command = "${pkgs.btrfs-progs}/bin/btrfs"; options = [ "NOPASSWD" ]; }
{ command = "${pkgs.coreutils}/bin/mkdir"; options = [ "NOPASSWD" ]; }
{ command = "${pkgs.coreutils}/bin/readlink"; options = [ "NOPASSWD" ]; }
# for ssh: these are not the same as the store paths hard-coded in ${pkgs.btrbk}
{ command = "/run/current-system/bin/btrfs"; options = [ "NOPASSWD" ]; }
{ command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
{ command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
];
}
];
users.users.btrbk = {
isSystemUser = true;
# ssh needs a home directory
home = "/var/lib/btrbk";
createHome = true;
shell = "${pkgs.bash}/bin/bash";
group = "btrbk";
openssh.authorizedKeys.keys = map
(
v:
let
options = concatMapStringsSep " " (x: "--" + x) v.roles;
ioniceClass = {
"idle" = 3;
"best-effort" = 2;
"realtime" = 1;
}.${cfg.ioSchedulingClass};
in
''command="${pkgs.util-linux}/bin/ionice -t -c ${toString ioniceClass} ${optionalString (cfg.niceness >= 1) "${pkgs.coreutils}/bin/nice -n ${toString cfg.niceness}"} ${pkgs.btrbk}/share/btrbk/scripts/ssh_filter_btrbk.sh --sudo ${options}" ${v.key}''
)
cfg.sshAccess;
};
users.groups.btrbk = { };
systemd.tmpfiles.rules = [
"d /var/lib/btrbk 0750 btrbk btrbk"
"d /var/lib/btrbk/.ssh 0700 btrbk btrbk"
"f /var/lib/btrbk/.ssh/config 0700 btrbk btrbk - StrictHostKeyChecking=accept-new"
];
environment.etc = mapAttrs'
(
name: instance: {
name = "btrbk/${name}.conf";
value.source = mkTestedConfigFile name instance.settings;
}
)
cfg.instances;
systemd.services = mapAttrs'
(
name: _: {
name = "btrbk-${name}";
value = {
description = "Takes BTRFS snapshots and maintains retention policies.";
unitConfig.Documentation = "man:btrbk(1)";
path = [ "/run/wrappers" ] ++ cfg.extraPackages;
serviceConfig = {
User = "btrbk";
Group = "btrbk";
Type = "oneshot";
ExecStart = "${pkgs.btrbk}/bin/btrbk -c /etc/btrbk/${name}.conf run";
Nice = cfg.niceness;
IOSchedulingClass = cfg.ioSchedulingClass;
StateDirectory = "btrbk";
};
};
}
)
cfg.instances;
systemd.timers = mapAttrs'
(
name: instance: {
name = "btrbk-${name}";
value = {
description = "Timer to take BTRFS snapshots and maintain retention policies.";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = instance.onCalendar;
AccuracySec = "10min";
Persistent = true;
};
};
}
)
(filterAttrs (name: instance: instance.onCalendar != null)
cfg.instances);
};
}


@@ -0,0 +1,86 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.duplicati;
in
{
options = {
services.duplicati = {
enable = mkEnableOption "Duplicati";
port = mkOption {
default = 8200;
type = types.int;
description = ''
Port serving the web interface
'';
};
dataDir = mkOption {
type = types.str;
default = "/var/lib/duplicati";
description = ''
The directory where Duplicati stores its data files.
<note><para>
If left as the default value this directory will automatically be created
before the Duplicati server starts, otherwise you are responsible for ensuring
the directory exists with appropriate ownership and permissions.
</para></note>
'';
};
interface = mkOption {
default = "127.0.0.1";
type = types.str;
description = ''
Listening interface for the web UI.
Set it to "any" to listen on all available interfaces.
'';
};
user = mkOption {
default = "duplicati";
type = types.str;
description = ''
Duplicati runs as its own user. It will only be able to back up world-readable files.
Run as root with special care.
'';
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.duplicati ];
systemd.services.duplicati = {
description = "Duplicati backup";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = mkMerge [
{
User = cfg.user;
Group = "duplicati";
ExecStart = "${pkgs.duplicati}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=${cfg.dataDir}";
Restart = "on-failure";
}
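# Let systemd manage the state directory only when the default dataDir is
# kept; custom locations must be created by the administrator (see the
# dataDir option description above).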
(mkIf (cfg.dataDir == "/var/lib/duplicati") {
StateDirectory = "duplicati";
})
];
};
users.users = lib.optionalAttrs (cfg.user == "duplicati") {
duplicati = {
uid = config.ids.uids.duplicati;
home = cfg.dataDir;
group = "duplicati";
};
};
users.groups.duplicati.gid = config.ids.gids.duplicati;
};
}


@@ -0,0 +1,196 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.duplicity;
stateDirectory = "/var/lib/duplicity";
localTarget =
if hasPrefix "file://" cfg.targetUrl
then removePrefix "file://" cfg.targetUrl else null;
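# e.g. targetUrl = "file:///mnt/backup" => localTarget = "/mnt/backup";
# non-file targets (s3://, sftp://, ...) leave localTarget null.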
in
{
options.services.duplicity = {
enable = mkEnableOption "backups with duplicity";
root = mkOption {
type = types.path;
default = "/";
description = ''
Root directory to backup.
'';
};
include = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "/home" ];
description = ''
List of paths to include into the backups. See the FILE SELECTION
section in <citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> for details on the syntax.
'';
};
exclude = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
List of paths to exclude from backups. See the FILE SELECTION section in
<citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> for details on the syntax.
'';
};
targetUrl = mkOption {
type = types.str;
example = "s3://host:port/prefix";
description = ''
Target url to backup to. See the URL FORMAT section in
<citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry> for supported urls.
'';
};
secretFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Path of a file containing secrets (gpg passphrase, access key...) in
the format of EnvironmentFile as described by
<citerefentry><refentrytitle>systemd.exec</refentrytitle>
<manvolnum>5</manvolnum></citerefentry>. For example:
<programlisting>
PASSPHRASE=<replaceable>...</replaceable>
AWS_ACCESS_KEY_ID=<replaceable>...</replaceable>
AWS_SECRET_ACCESS_KEY=<replaceable>...</replaceable>
</programlisting>
'';
};
frequency = mkOption {
type = types.nullOr types.str;
default = "daily";
description = ''
Run duplicity with the given frequency (see
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry> for the format).
If null, do not run automatically.
'';
};
extraFlags = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "--backend-retry-delay" "100" ];
description = ''
Extra command-line flags passed to duplicity. See
<citerefentry><refentrytitle>duplicity</refentrytitle>
<manvolnum>1</manvolnum></citerefentry>.
'';
};
fullIfOlderThan = mkOption {
type = types.str;
default = "never";
example = "1M";
description = ''
If <literal>"never"</literal> (the default) always do incremental
backups (the first backup will be a full backup, of course). If
<literal>"always"</literal> always do full backups. Otherwise, this
must be a string representing a duration. Full backups will be made
when the latest full backup is older than this duration. If this is not
the case, an incremental backup is performed.
'';
};
cleanup = {
maxAge = mkOption {
type = types.nullOr types.str;
default = null;
example = "6M";
description = ''
          If non-null, delete all backup sets older than the given time. Old backup sets
          will not be deleted if backup sets newer than the given time depend on them.
'';
};
maxFull = mkOption {
type = types.nullOr types.int;
default = null;
example = 2;
description = ''
          If non-null, delete all backup sets that are older than the count-th last full
          backup (in other words, keep the last count full backups and
          associated incremental sets).
'';
};
maxIncr = mkOption {
type = types.nullOr types.int;
default = null;
example = 1;
description = ''
          If non-null, delete incremental sets of all backup sets that are
          older than the count-th last full backup (in other words, keep only
          old full backups and not their increments).
'';
};
};
};
config = mkIf cfg.enable {
systemd = {
services.duplicity = {
description = "backup files with duplicity";
environment.HOME = stateDirectory;
script =
let
target = escapeShellArg cfg.targetUrl;
extra = escapeShellArgs ([ "--archive-dir" stateDirectory ] ++ cfg.extraFlags);
dup = "${pkgs.duplicity}/bin/duplicity";
in
''
set -x
${dup} cleanup ${target} --force ${extra}
${lib.optionalString (cfg.cleanup.maxAge != null) "${dup} remove-older-than ${lib.escapeShellArg cfg.cleanup.maxAge} ${target} --force ${extra}"}
${lib.optionalString (cfg.cleanup.maxFull != null) "${dup} remove-all-but-n-full ${toString cfg.cleanup.maxFull} ${target} --force ${extra}"}
${lib.optionalString (cfg.cleanup.maxIncr != null) "${dup} remove-all-inc-of-but-n-full ${toString cfg.cleanup.maxIncr} ${target} --force ${extra}"}
exec ${dup} ${if cfg.fullIfOlderThan == "always" then "full" else "incr"} ${lib.escapeShellArgs (
[ cfg.root cfg.targetUrl ]
++ concatMap (p: [ "--include" p ]) cfg.include
++ concatMap (p: [ "--exclude" p ]) cfg.exclude
++ (lib.optionals (cfg.fullIfOlderThan != "never" && cfg.fullIfOlderThan != "always") [ "--full-if-older-than" cfg.fullIfOlderThan ])
)} ${extra}
'';
serviceConfig = {
PrivateTmp = true;
ProtectSystem = "strict";
ProtectHome = "read-only";
StateDirectory = baseNameOf stateDirectory;
} // optionalAttrs (localTarget != null) {
ReadWritePaths = localTarget;
} // optionalAttrs (cfg.secretFile != null) {
EnvironmentFile = cfg.secretFile;
};
} // optionalAttrs (cfg.frequency != null) {
startAt = cfg.frequency;
};
tmpfiles.rules = optional (localTarget != null) "d ${localTarget} 0700 root root -";
};
assertions = singleton {
# Duplicity will fail if the last file selection option is an include. It
# is not always possible to detect but this simple case can be caught.
assertion = cfg.include != [ ] -> cfg.exclude != [ ] || cfg.extraFlags != [ ];
message = ''
Duplicity will fail if you only specify included paths ("Because the
default is to include all files, the expression is redundant. Exiting
because this probably isn't what you meant.")
'';
};
};
}
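# Example usage (a minimal sketch; the target URL and secret file path are
# illustrative, not defaults):
#
#   services.duplicity = {
#     enable = true;
#     root = "/home";
#     targetUrl = "file:///mnt/backup/duplicity";
#     secretFile = "/etc/nixos/secrets/duplicity.env";
#     fullIfOlderThan = "1M";
#     cleanup.maxFull = 2;
#   };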


@ -0,0 +1,130 @@
{ config, lib, pkgs, ... }:
with lib;
let
inherit (pkgs) mariadb gzip;
cfg = config.services.mysqlBackup;
defaultUser = "mysqlbackup";
backupScript = ''
set -o pipefail
failed=""
${concatMapStringsSep "\n" backupDatabaseScript cfg.databases}
if [ -n "$failed" ]; then
echo "Backup of database(s) failed:$failed"
exit 1
fi
'';
backupDatabaseScript = db: ''
dest="${cfg.location}/${db}.gz"
    if ${mariadb}/bin/mysqldump ${if cfg.singleTransaction then "--single-transaction" else ""} ${db} | ${gzip}/bin/gzip -c > "$dest.tmp"; then
      mv "$dest.tmp" "$dest"
      echo "Backed up to $dest"
    else
      echo "Failed to back up to $dest"
      rm -f "$dest.tmp"
failed="$failed ${db}"
fi
'';
in
{
options = {
services.mysqlBackup = {
enable = mkEnableOption "MySQL backups";
calendar = mkOption {
type = types.str;
default = "01:15:00";
description = ''
          Configures when to run the backup service systemd unit (DayOfWeek Year-Month-Day Hour:Minute:Second).
'';
};
user = mkOption {
type = types.str;
default = defaultUser;
description = ''
User to be used to perform backup.
'';
};
databases = mkOption {
default = [];
type = types.listOf types.str;
description = ''
List of database names to dump.
'';
};
location = mkOption {
type = types.path;
default = "/var/backup/mysql";
description = ''
Location to put the gzipped MySQL database dumps.
'';
};
singleTransaction = mkOption {
default = false;
type = types.bool;
description = ''
          Whether to create the database dump in a single transaction.
'';
};
};
};
config = mkIf cfg.enable {
users.users = optionalAttrs (cfg.user == defaultUser) {
${defaultUser} = {
isSystemUser = true;
createHome = false;
home = cfg.location;
group = "nogroup";
};
};
services.mysql.ensureUsers = [{
name = cfg.user;
ensurePermissions = with lib;
let
privs = "SELECT, SHOW VIEW, TRIGGER, LOCK TABLES";
grant = db: nameValuePair "${db}.*" privs;
in
listToAttrs (map grant cfg.databases);
}];
systemd = {
timers.mysql-backup = {
description = "Mysql backup timer";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = cfg.calendar;
AccuracySec = "5m";
Unit = "mysql-backup.service";
};
};
services.mysql-backup = {
description = "MySQL backup service";
enable = true;
serviceConfig = {
Type = "oneshot";
User = cfg.user;
};
script = backupScript;
};
tmpfiles.rules = [
"d ${cfg.location} 0700 ${cfg.user} - - -"
];
};
};
}
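# Example usage (a minimal sketch; the database names are illustrative):
#
#   services.mysqlBackup = {
#     enable = true;
#     databases = [ "nextcloud" "matomo" ];
#     singleTransaction = true;
#   };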


@ -0,0 +1,164 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.postgresqlBackup;
postgresqlBackupService = db: dumpCmd:
let
compressSuffixes = {
"none" = "";
"gzip" = ".gz";
"zstd" = ".zstd";
};
compressSuffix = getAttr cfg.compression compressSuffixes;
compressCmd = getAttr cfg.compression {
"none" = "cat";
"gzip" = "${pkgs.gzip}/bin/gzip -c";
"zstd" = "${pkgs.zstd}/bin/zstd -c";
};
mkSqlPath = prefix: suffix: "${cfg.location}/${db}${prefix}.sql${suffix}";
curFile = mkSqlPath "" compressSuffix;
prevFile = mkSqlPath ".prev" compressSuffix;
prevFiles = map (mkSqlPath ".prev") (attrValues compressSuffixes);
inProgressFile = mkSqlPath ".in-progress" compressSuffix;
in {
enable = true;
description = "Backup of ${db} database(s)";
requires = [ "postgresql.service" ];
path = [ pkgs.coreutils config.services.postgresql.package ];
script = ''
set -e -o pipefail
umask 0077 # ensure backup is only readable by postgres user
if [ -e ${curFile} ]; then
rm -f ${toString prevFiles}
mv ${curFile} ${prevFile}
fi
${dumpCmd} \
| ${compressCmd} \
> ${inProgressFile}
mv ${inProgressFile} ${curFile}
'';
serviceConfig = {
Type = "oneshot";
User = "postgres";
};
startAt = cfg.startAt;
};
in {
imports = [
(mkRemovedOptionModule [ "services" "postgresqlBackup" "period" ] ''
A systemd timer is now used instead of cron.
The starting time can be configured via <literal>services.postgresqlBackup.startAt</literal>.
'')
];
options = {
services.postgresqlBackup = {
enable = mkEnableOption "PostgreSQL dumps";
startAt = mkOption {
default = "*-*-* 01:15:00";
type = with types; either (listOf str) str;
description = ''
          Defines when the databases should be dumped (see
          <literal>systemd.time</literal> for the format).
          The default is to dump at 01:15 every night.
'';
};
backupAll = mkOption {
default = cfg.databases == [];
defaultText = literalExpression "services.postgresqlBackup.databases == []";
type = lib.types.bool;
description = ''
Backup all databases using pg_dumpall.
          This option is mutually exclusive with
<literal>services.postgresqlBackup.databases</literal>.
The resulting backup dump will have the name all.sql.gz.
This option is the default if no databases are specified.
'';
};
databases = mkOption {
default = [];
type = types.listOf types.str;
description = ''
List of database names to dump.
'';
};
location = mkOption {
default = "/var/backup/postgresql";
type = types.path;
description = ''
Path of directory where the PostgreSQL database dumps will be placed.
'';
};
pgdumpOptions = mkOption {
type = types.separatedString " ";
default = "-C";
description = ''
          Command line options for pg_dump. These options are not used
          if <literal>config.services.postgresqlBackup.backupAll</literal> is enabled.
          Note that config.services.postgresqlBackup.backupAll is also active
          when no databases were specified.
'';
};
compression = mkOption {
type = types.enum ["none" "gzip" "zstd"];
default = "gzip";
description = ''
The type of compression to use on the generated database dump.
'';
};
};
};
config = mkMerge [
{
assertions = [{
assertion = cfg.backupAll -> cfg.databases == [];
message = "config.services.postgresqlBackup.backupAll cannot be used together with config.services.postgresqlBackup.databases";
}];
}
(mkIf cfg.enable {
systemd.tmpfiles.rules = [
"d '${cfg.location}' 0700 postgres - - -"
];
})
(mkIf (cfg.enable && cfg.backupAll) {
systemd.services.postgresqlBackup =
postgresqlBackupService "all" "pg_dumpall";
})
(mkIf (cfg.enable && !cfg.backupAll) {
systemd.services = listToAttrs (map (db:
let
cmd = "pg_dump ${cfg.pgdumpOptions} ${db}";
in {
name = "postgresqlBackup-${db}";
value = postgresqlBackupService db cmd;
}) cfg.databases);
})
];
}
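# Example usage (a minimal sketch; the database name and schedule are
# illustrative):
#
#   services.postgresqlBackup = {
#     enable = true;
#     databases = [ "nextcloud" ];
#     compression = "zstd";
#     startAt = "*-*-* 02:15:00";
#   };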


@ -0,0 +1,204 @@
{ config, lib, pkgs, ... }:
with lib;
let
receiverSubmodule = {
options = {
postgresqlPackage = mkOption {
type = types.package;
example = literalExpression "pkgs.postgresql_11";
description = ''
PostgreSQL package to use.
'';
};
directory = mkOption {
type = types.path;
example = literalExpression "/mnt/pg_wal/main/";
description = ''
Directory to write the output to.
'';
};
statusInterval = mkOption {
type = types.int;
default = 10;
description = ''
          Specifies the number of seconds between status packets sent back to the server.
          This allows for easier monitoring of the progress from the server.
          A value of zero disables the periodic status updates completely,
          although an update will still be sent when requested by the server, to avoid a timeout disconnect.
'';
};
slot = mkOption {
type = types.str;
default = "";
example = "some_slot_name";
description = ''
Require <command>pg_receivewal</command> to use an existing replication slot (see
<link xlink:href="https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION-SLOTS">Section 26.2.6 of the PostgreSQL manual</link>).
When this option is used, <command>pg_receivewal</command> will report a flush position to the server,
indicating when each segment has been synchronized to disk so that the server can remove that segment if it is not otherwise needed.
When the replication client of <command>pg_receivewal</command> is configured on the server as a synchronous standby,
then using a replication slot will report the flush position to the server, but only when a WAL file is closed.
Therefore, that configuration will cause transactions on the primary to wait for a long time and effectively not work satisfactorily.
The option <option>synchronous</option> must be specified in addition to make this work correctly.
'';
};
synchronous = mkOption {
type = types.bool;
default = false;
description = ''
Flush the WAL data to disk immediately after it has been received.
Also send a status packet back to the server immediately after flushing, regardless of <option>statusInterval</option>.
This option should be specified if the replication client of <command>pg_receivewal</command> is configured on the server as a synchronous standby,
to ensure that timely feedback is sent to the server.
'';
};
compress = mkOption {
type = types.ints.between 0 9;
default = 0;
description = ''
Enables gzip compression of write-ahead logs, and specifies the compression level
(<literal>0</literal> through <literal>9</literal>, <literal>0</literal> being no compression and <literal>9</literal> being best compression).
The suffix <literal>.gz</literal> will automatically be added to all filenames.
This option requires PostgreSQL >= 10.
'';
};
connection = mkOption {
type = types.str;
example = "postgresql://user@somehost";
description = ''
Specifies parameters used to connect to the server, as a connection string.
See <link xlink:href="https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING">Section 34.1.1 of the PostgreSQL manual</link> for more information.
          Because <command>pg_receivewal</command> doesn't connect to any particular database in the cluster,
          the database name in the connection string will be ignored.
'';
};
extraArgs = mkOption {
type = with types; listOf str;
default = [ ];
example = literalExpression ''
[
"--no-sync"
]
'';
description = ''
A list of extra arguments to pass to the <command>pg_receivewal</command> command.
'';
};
environment = mkOption {
type = with types; attrsOf str;
default = { };
example = literalExpression ''
{
PGPASSFILE = "/private/passfile";
PGSSLMODE = "require";
}
'';
description = ''
Environment variables passed to the service.
Usable parameters are listed in <link xlink:href="https://www.postgresql.org/docs/current/libpq-envars.html">Section 34.14 of the PostgreSQL manual</link>.
'';
};
};
};
in {
options = {
services.postgresqlWalReceiver = {
receivers = mkOption {
type = with types; attrsOf (submodule receiverSubmodule);
default = { };
example = literalExpression ''
{
main = {
postgresqlPackage = pkgs.postgresql_11;
directory = /mnt/pg_wal/main/;
slot = "main_wal_receiver";
connection = "postgresql://user@somehost";
};
}
'';
description = ''
PostgreSQL WAL receivers.
Stream write-ahead logs from a PostgreSQL server using <command>pg_receivewal</command> (formerly <command>pg_receivexlog</command>).
See <link xlink:href="https://www.postgresql.org/docs/current/app-pgreceivewal.html">the man page</link> for more information.
'';
};
};
};
config = let
receivers = config.services.postgresqlWalReceiver.receivers;
in mkIf (receivers != { }) {
users = {
users.postgres = {
uid = config.ids.uids.postgres;
group = "postgres";
description = "PostgreSQL server user";
};
groups.postgres = {
gid = config.ids.gids.postgres;
};
};
assertions = concatLists (attrsets.mapAttrsToList (name: config: [
{
assertion = config.compress > 0 -> versionAtLeast config.postgresqlPackage.version "10";
message = "Invalid configuration for WAL receiver \"${name}\": compress requires PostgreSQL version >= 10.";
}
]) receivers);
systemd.tmpfiles.rules = mapAttrsToList (name: config: ''
d ${escapeShellArg config.directory} 0750 postgres postgres - -
'') receivers;
systemd.services = with attrsets; mapAttrs' (name: config: nameValuePair "postgresql-wal-receiver-${name}" {
description = "PostgreSQL WAL receiver (${name})";
wantedBy = [ "multi-user.target" ];
startLimitIntervalSec = 0; # retry forever, useful in case of network disruption
serviceConfig = {
User = "postgres";
Group = "postgres";
KillSignal = "SIGINT";
Restart = "always";
RestartSec = 60;
};
inherit (config) environment;
script = let
receiverCommand = postgresqlPackage:
if (versionAtLeast postgresqlPackage.version "10")
then "${postgresqlPackage}/bin/pg_receivewal"
else "${postgresqlPackage}/bin/pg_receivexlog";
in ''
${receiverCommand config.postgresqlPackage} \
--no-password \
--directory=${escapeShellArg config.directory} \
--status-interval=${toString config.statusInterval} \
--dbname=${escapeShellArg config.connection} \
${optionalString (config.compress > 0) "--compress=${toString config.compress}"} \
${optionalString (config.slot != "") "--slot=${escapeShellArg config.slot}"} \
${optionalString config.synchronous "--synchronous"} \
${concatStringsSep " " config.extraArgs}
'';
}) receivers;
};
meta.maintainers = with maintainers; [ pacien ];
}


@ -0,0 +1,111 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.restic.server;
in
{
meta.maintainers = [ maintainers.bachp ];
options.services.restic.server = {
enable = mkEnableOption "Restic REST Server";
listenAddress = mkOption {
default = ":8000";
example = "127.0.0.1:8080";
type = types.str;
description = "Listen on a specific IP address and port.";
};
dataDir = mkOption {
default = "/var/lib/restic";
type = types.path;
description = "The directory for storing the restic repository.";
};
appendOnly = mkOption {
default = false;
type = types.bool;
description = ''
Enable append only mode.
This mode allows creation of new backups but prevents deletion and modification of existing backups.
        This can be useful when backing up systems that are at risk of being compromised.
'';
};
privateRepos = mkOption {
default = false;
type = types.bool;
description = ''
Enable private repos.
Grants access only when a subdirectory with the same name as the user is specified in the repository URL.
'';
};
prometheus = mkOption {
default = false;
type = types.bool;
description = "Enable Prometheus metrics at /metrics.";
};
extraFlags = mkOption {
type = types.listOf types.str;
default = [];
description = ''
        Extra command-line options to pass to the Restic REST server.
'';
};
package = mkOption {
default = pkgs.restic-rest-server;
defaultText = literalExpression "pkgs.restic-rest-server";
type = types.package;
description = "Restic REST server package to use.";
};
};
config = mkIf cfg.enable {
systemd.services.restic-rest-server = {
description = "Restic REST Server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = ''
${cfg.package}/bin/rest-server \
--listen ${cfg.listenAddress} \
--path ${cfg.dataDir} \
${optionalString cfg.appendOnly "--append-only"} \
${optionalString cfg.privateRepos "--private-repos"} \
${optionalString cfg.prometheus "--prometheus"} \
          ${escapeShellArgs cfg.extraFlags}
'';
Type = "simple";
User = "restic";
Group = "restic";
# Security hardening
ReadWritePaths = [ cfg.dataDir ];
PrivateTmp = true;
ProtectSystem = "strict";
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
PrivateDevices = true;
};
};
systemd.tmpfiles.rules = mkIf cfg.privateRepos [
"f ${cfg.dataDir}/.htpasswd 0700 restic restic -"
];
users.users.restic = {
group = "restic";
home = cfg.dataDir;
createHome = true;
uid = config.ids.uids.restic;
};
    users.groups.restic.gid = config.ids.gids.restic;
};
}
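# Example usage (a minimal sketch; the listen address is illustrative):
#
#   services.restic.server = {
#     enable = true;
#     listenAddress = "127.0.0.1:8000";
#     appendOnly = true;
#     prometheus = true;
#   };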


@ -0,0 +1,334 @@
{ config, lib, pkgs, utils, ... }:
with lib;
let
# Type for a valid systemd unit option. Needed for correctly passing "timerConfig" to "systemd.timers"
inherit (utils.systemdUtils.unitOptions) unitOption;
in
{
options.services.restic.backups = mkOption {
description = ''
Periodic backups to create with Restic.
'';
type = types.attrsOf (types.submodule ({ config, name, ... }: {
options = {
passwordFile = mkOption {
type = types.str;
description = ''
Read the repository password from a file.
'';
example = "/etc/nixos/restic-password";
};
environmentFile = mkOption {
type = with types; nullOr str;
# added on 2021-08-28, s3CredentialsFile should
# be removed in the future (+ remember the warning)
default = config.s3CredentialsFile;
description = ''
            File containing the credentials to access the repository, in the
            format of an EnvironmentFile as described by systemd.exec(5).
'';
};
s3CredentialsFile = mkOption {
type = with types; nullOr str;
default = null;
description = ''
            File containing the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
            for an S3-hosted repository, in the format of an EnvironmentFile
            as described by systemd.exec(5).
'';
};
rcloneOptions = mkOption {
type = with types; nullOr (attrsOf (oneOf [ str bool ]));
default = null;
description = ''
Options to pass to rclone to control its behavior.
See <link xlink:href="https://rclone.org/docs/#options"/> for
available options. When specifying option names, strip the
leading <literal>--</literal>. To set a flag such as
<literal>--drive-use-trash</literal>, which does not take a value,
set the value to the Boolean <literal>true</literal>.
'';
example = {
bwlimit = "10M";
drive-use-trash = "true";
};
};
rcloneConfig = mkOption {
type = with types; nullOr (attrsOf (oneOf [ str bool ]));
default = null;
description = ''
Configuration for the rclone remote being used for backup.
See the remote's specific options under rclone's docs at
<link xlink:href="https://rclone.org/docs/"/>. When specifying
option names, use the "config" name specified in the docs.
For example, to set <literal>--b2-hard-delete</literal> for a B2
remote, use <literal>hard_delete = true</literal> in the
attribute set.
Warning: Secrets set in here will be world-readable in the Nix
store! Consider using the <literal>rcloneConfigFile</literal>
option instead to specify secret values separately. Note that
options set here will override those set in the config file.
'';
example = {
type = "b2";
account = "xxx";
key = "xxx";
hard_delete = true;
};
};
rcloneConfigFile = mkOption {
type = with types; nullOr path;
default = null;
description = ''
Path to the file containing rclone configuration. This file
must contain configuration for the remote specified in this backup
set and also must be readable by root. Options set in
<literal>rcloneConfig</literal> will override those set in this
file.
'';
};
repository = mkOption {
type = with types; nullOr str;
default = null;
description = ''
            Repository to back up to.
'';
example = "sftp:backup@192.168.1.100:/backups/${name}";
};
repositoryFile = mkOption {
type = with types; nullOr path;
default = null;
description = ''
Path to the file containing the repository location to backup to.
'';
};
paths = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
description = ''
            Which paths to back up. If null or an empty list, no
backup command will be run. This can be used to create a
prune-only job.
'';
example = [
"/var/lib/postgresql"
"/home/user/backup"
];
};
timerConfig = mkOption {
type = types.attrsOf unitOption;
default = {
OnCalendar = "daily";
};
description = ''
When to run the backup. See man systemd.timer for details.
'';
example = {
OnCalendar = "00:05";
RandomizedDelaySec = "5h";
};
};
user = mkOption {
type = types.str;
default = "root";
description = ''
            The user as which the backup should run.
'';
example = "postgresql";
};
extraBackupArgs = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Extra arguments passed to restic backup.
'';
example = [
"--exclude-file=/etc/nixos/restic-ignore"
];
};
extraOptions = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
Extra extended options to be passed to the restic --option flag.
'';
example = [
"sftp.command='ssh backup@192.168.1.100 -i /home/user/.ssh/id_rsa -s sftp'"
];
};
initialize = mkOption {
type = types.bool;
default = false;
description = ''
Create the repository if it doesn't exist.
'';
};
pruneOpts = mkOption {
type = types.listOf types.str;
default = [ ];
description = ''
A list of options (--keep-* et al.) for 'restic forget
--prune', to automatically prune old snapshots. The
'forget' command is run *after* the 'backup' command, so
keep that in mind when constructing the --keep-* options.
'';
example = [
"--keep-daily 7"
"--keep-weekly 5"
"--keep-monthly 12"
"--keep-yearly 75"
];
};
dynamicFilesFrom = mkOption {
type = with types; nullOr str;
default = null;
description = ''
A script that produces a list of files to back up. The
results of this command are given to the '--files-from'
option.
'';
example = "find /home/matt/git -type d -name .git";
};
backupPrepareCommand = mkOption {
type = with types; nullOr str;
default = null;
description = ''
A script that must run before starting the backup process.
'';
};
backupCleanupCommand = mkOption {
type = with types; nullOr str;
default = null;
description = ''
A script that must run after finishing the backup process.
'';
};
};
}));
default = { };
example = {
localbackup = {
paths = [ "/home" ];
repository = "/mnt/backup-hdd";
passwordFile = "/etc/nixos/secrets/restic-password";
initialize = true;
};
remotebackup = {
paths = [ "/home" ];
repository = "sftp:backup@host:/backups/home";
passwordFile = "/etc/nixos/secrets/restic-password";
extraOptions = [
"sftp.command='ssh backup@host -i /etc/nixos/secrets/backup-private-key -s sftp'"
];
timerConfig = {
OnCalendar = "00:05";
RandomizedDelaySec = "5h";
};
};
};
};
config = {
warnings = mapAttrsToList (n: v: "services.restic.backups.${n}.s3CredentialsFile is deprecated, please use services.restic.backups.${n}.environmentFile instead.") (filterAttrs (n: v: v.s3CredentialsFile != null) config.services.restic.backups);
systemd.services =
mapAttrs'
(name: backup:
let
extraOptions = concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
resticCmd = "${pkgs.restic}/bin/restic${extraOptions}";
filesFromTmpFile = "/run/restic-backups-${name}/includes";
backupPaths =
if (backup.dynamicFilesFrom == null)
then if (backup.paths != null) then concatStringsSep " " backup.paths else ""
else "--files-from ${filesFromTmpFile}";
pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
(resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts))
(resticCmd + " check")
];
# Helper functions for rclone remotes
rcloneRemoteName = builtins.elemAt (splitString ":" backup.repository) 1;
rcloneAttrToOpt = v: "RCLONE_" + toUpper (builtins.replaceStrings [ "-" ] [ "_" ] v);
rcloneAttrToConf = v: "RCLONE_CONFIG_" + toUpper (rcloneRemoteName + "_" + v);
toRcloneVal = v: if lib.isBool v then lib.boolToString v else v;
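          # Worked example of the mapping above (hypothetical values): with
          #   repository = "rclone:b2remote:bucket";
          #   rcloneOptions = { drive-use-trash = true; };
          #   rcloneConfig = { type = "b2"; hard_delete = true; };
          # rcloneRemoteName evaluates to "b2remote" and the service
          # environment gains
          #   RCLONE_DRIVE_USE_TRASH=true
          #   RCLONE_CONFIG_B2REMOTE_TYPE=b2
          #   RCLONE_CONFIG_B2REMOTE_HARD_DELETE=true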
in
nameValuePair "restic-backups-${name}" ({
environment = {
RESTIC_PASSWORD_FILE = backup.passwordFile;
RESTIC_REPOSITORY = backup.repository;
RESTIC_REPOSITORY_FILE = backup.repositoryFile;
} // optionalAttrs (backup.rcloneOptions != null) (mapAttrs'
(name: value:
nameValuePair (rcloneAttrToOpt name) (toRcloneVal value)
)
backup.rcloneOptions) // optionalAttrs (backup.rcloneConfigFile != null) {
RCLONE_CONFIG = backup.rcloneConfigFile;
} // optionalAttrs (backup.rcloneConfig != null) (mapAttrs'
(name: value:
nameValuePair (rcloneAttrToConf name) (toRcloneVal value)
)
backup.rcloneConfig);
path = [ pkgs.openssh ];
restartIfChanged = false;
serviceConfig = {
Type = "oneshot";
ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup --cache-dir=%C/restic-backups-${name} ${concatStringsSep " " backup.extraBackupArgs} ${backupPaths}" ])
++ pruneCmd;
User = backup.user;
RuntimeDirectory = "restic-backups-${name}";
CacheDirectory = "restic-backups-${name}";
CacheDirectoryMode = "0700";
} // optionalAttrs (backup.environmentFile != null) {
EnvironmentFile = backup.environmentFile;
};
} // optionalAttrs (backup.initialize || backup.dynamicFilesFrom != null || backup.backupPrepareCommand != null) {
preStart = ''
${optionalString (backup.backupPrepareCommand != null) ''
${pkgs.writeScript "backupPrepareCommand" backup.backupPrepareCommand}
''}
${optionalString (backup.initialize) ''
${resticCmd} snapshots || ${resticCmd} init
''}
${optionalString (backup.dynamicFilesFrom != null) ''
${pkgs.writeScript "dynamicFilesFromScript" backup.dynamicFilesFrom} > ${filesFromTmpFile}
''}
'';
} // optionalAttrs (backup.dynamicFilesFrom != null || backup.backupCleanupCommand != null) {
postStart = ''
${optionalString (backup.backupCleanupCommand != null) ''
${pkgs.writeScript "backupCleanupCommand" backup.backupCleanupCommand}
''}
${optionalString (backup.dynamicFilesFrom != null) ''
rm ${filesFromTmpFile}
''}
'';
})
)
config.services.restic.backups;
systemd.timers =
mapAttrs'
(name: backup: nameValuePair "restic-backups-${name}" {
wantedBy = [ "timers.target" ];
timerConfig = backup.timerConfig;
})
config.services.restic.backups;
};
}


@ -0,0 +1,75 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.rsnapshot;
cfgfile = pkgs.writeText "rsnapshot.conf" ''
config_version 1.2
cmd_cp ${pkgs.coreutils}/bin/cp
cmd_rm ${pkgs.coreutils}/bin/rm
cmd_rsync ${pkgs.rsync}/bin/rsync
cmd_ssh ${pkgs.openssh}/bin/ssh
cmd_logger ${pkgs.inetutils}/bin/logger
cmd_du ${pkgs.coreutils}/bin/du
cmd_rsnapshot_diff ${pkgs.rsnapshot}/bin/rsnapshot-diff
lockfile /run/rsnapshot.pid
link_dest 1
${cfg.extraConfig}
'';
in
{
options = {
services.rsnapshot = {
enable = mkEnableOption "rsnapshot backups";
enableManualRsnapshot = mkOption {
description = "Whether to enable manual usage of the rsnapshot command with this module.";
default = true;
type = types.bool;
};
extraConfig = mkOption {
default = "";
example = ''
        retain hourly 24
retain daily 365
backup /home/ localhost/
'';
type = types.lines;
description = ''
rsnapshot configuration option in addition to the defaults from
rsnapshot and this module.
Note that tabs are required to separate option arguments, and
directory names require trailing slashes.
The "extra" in the option name might be a little misleading right
now, as it is required to get a functional configuration.
'';
};
cronIntervals = mkOption {
default = {};
example = { hourly = "0 * * * *"; daily = "50 21 * * *"; };
type = types.attrsOf types.str;
description = ''
Periodicity at which intervals should be run by cron.
Note that the intervals also have to exist in configuration
as retain options.
'';
};
};
};
config = mkIf cfg.enable (mkMerge [
{
services.cron.systemCronJobs =
mapAttrsToList (interval: time: "${time} root ${pkgs.rsnapshot}/bin/rsnapshot -c ${cfgfile} ${interval}") cfg.cronIntervals;
}
(mkIf cfg.enableManualRsnapshot {
environment.systemPackages = [ pkgs.rsnapshot ];
environment.etc."rsnapshot.conf".source = cfgfile;
})
]);
}
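# Example usage (a minimal sketch; paths and times are illustrative, and the
# fields in extraConfig must be separated by literal tab characters, written
# here as <TAB>):
#
#   services.rsnapshot = {
#     enable = true;
#     extraConfig = ''
#       retain<TAB>daily<TAB>7
#       backup<TAB>/home/<TAB>localhost/
#     '';
#     cronIntervals.daily = "50 21 * * *";
#   };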


@ -0,0 +1,204 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.sanoid;
datasetSettingsType = with types;
(attrsOf (nullOr (oneOf [ str int bool (listOf str) ]))) // {
description = "dataset/template options";
};
commonOptions = {
hourly = mkOption {
description = "Number of hourly snapshots.";
type = with types; nullOr ints.unsigned;
default = null;
};
daily = mkOption {
description = "Number of daily snapshots.";
type = with types; nullOr ints.unsigned;
default = null;
};
monthly = mkOption {
description = "Number of monthly snapshots.";
type = with types; nullOr ints.unsigned;
default = null;
};
yearly = mkOption {
description = "Number of yearly snapshots.";
type = with types; nullOr ints.unsigned;
default = null;
};
autoprune = mkOption {
description = "Whether to automatically prune old snapshots.";
type = with types; nullOr bool;
default = null;
};
autosnap = mkOption {
description = "Whether to automatically take snapshots.";
type = with types; nullOr bool;
default = null;
};
};
datasetOptions = rec {
use_template = mkOption {
description = "Names of the templates to use for this dataset.";
type = types.listOf (types.str // {
check = (types.enum (attrNames cfg.templates)).check;
description = "configured template name";
});
default = [ ];
};
useTemplate = use_template;
recursive = mkOption {
description = ''
Whether to recursively snapshot dataset children.
You can also set this to <literal>"zfs"</literal> to handle datasets
recursively in an atomic way without the possibility to
override settings for child datasets.
'';
type = with types; oneOf [ bool (enum [ "zfs" ]) ];
default = false;
};
process_children_only = mkOption {
description = "Whether to only snapshot child datasets if recursing.";
type = types.bool;
default = false;
};
processChildrenOnly = process_children_only;
};
# Extract unique dataset names
datasets = unique (attrNames cfg.datasets);
# Function to build "zfs allow" and "zfs unallow" commands for the
# filesystems we've delegated permissions to.
buildAllowCommand = zfsAction: permissions: dataset: lib.escapeShellArgs [
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
"-+/run/booted-system/sw/bin/zfs"
zfsAction
"sanoid"
(concatStringsSep "," permissions)
dataset
];
configFile =
let
mkValueString = v:
if builtins.isList v then concatStringsSep "," v
else generators.mkValueStringDefault { } v;
mkKeyValue = k: v:
if v == null then ""
else if k == "processChildrenOnly" then ""
else if k == "useTemplate" then ""
else generators.mkKeyValueDefault { inherit mkValueString; } "=" k v;
in
generators.toINI { inherit mkKeyValue; } cfg.settings;
in
{
# Interface
options.services.sanoid = {
enable = mkEnableOption "Sanoid ZFS snapshotting service";
interval = mkOption {
type = types.str;
default = "hourly";
example = "daily";
description = ''
Run sanoid at this interval. The default is to run hourly.
The format is described in
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
'';
};
datasets = mkOption {
type = types.attrsOf (types.submodule ({ config, options, ... }: {
freeformType = datasetSettingsType;
options = commonOptions // datasetOptions;
config.use_template = mkAliasDefinitions (mkDefault options.useTemplate or { });
config.process_children_only = mkAliasDefinitions (mkDefault options.processChildrenOnly or { });
}));
default = { };
description = "Datasets to snapshot.";
};
templates = mkOption {
type = types.attrsOf (types.submodule {
freeformType = datasetSettingsType;
options = commonOptions;
});
default = { };
description = "Templates for datasets.";
};
settings = mkOption {
type = types.attrsOf datasetSettingsType;
description = ''
Free-form settings written directly to the config file. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/blob/master/sanoid.defaults.conf"/>
for allowed values.
'';
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "--verbose" "--readonly" "--debug" ];
description = ''
Extra arguments to pass to sanoid. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/#sanoid-command-line-options"/>
for allowed options.
'';
};
};
# Implementation
config = mkIf cfg.enable {
services.sanoid.settings = mkMerge [
(mapAttrs' (d: v: nameValuePair ("template_" + d) v) cfg.templates)
(mapAttrs (d: v: v) cfg.datasets)
];
systemd.services.sanoid = {
description = "Sanoid snapshot service";
serviceConfig = {
ExecStartPre = (map (buildAllowCommand "allow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStopPost = (map (buildAllowCommand "unallow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStart = lib.escapeShellArgs ([
"${pkgs.sanoid}/bin/sanoid"
"--cron"
"--configdir"
(pkgs.writeTextDir "sanoid.conf" configFile)
] ++ cfg.extraArgs);
User = "sanoid";
Group = "sanoid";
DynamicUser = true;
RuntimeDirectory = "sanoid";
CacheDirectory = "sanoid";
};
# Prevents missing snapshots during DST changes
environment.TZ = "UTC";
after = [ "zfs.target" ];
startAt = cfg.interval;
};
};
meta.maintainers = with maintainers; [ lopsided98 ];
}
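# Example usage (a minimal sketch; pool/dataset names and snapshot counts are
# illustrative):
#
#   services.sanoid = {
#     enable = true;
#     templates.backup = {
#       hourly = 36;
#       daily = 30;
#       monthly = 3;
#       autoprune = true;
#       autosnap = true;
#     };
#     datasets."tank/home" = {
#       useTemplate = [ "backup" ];
#       recursive = true;
#     };
#   };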


@ -0,0 +1,421 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.syncoid;
  # Extract local dataset names (so no datasets containing "@")
localDatasetName = d: optionals (d != null) (
let m = builtins.match "([^/@]+[^@]*)" d; in
optionals (m != null) m
);
# Escape as required by: https://www.freedesktop.org/software/systemd/man/systemd.unit.html
escapeUnitName = name:
lib.concatMapStrings (s: if lib.isList s then "-" else s)
(builtins.split "[^a-zA-Z0-9_.\\-]+" name);
# Function to build "zfs allow" commands for the filesystems we've
# delegated permissions to. It also checks if the target dataset
# exists before delegating permissions, if it doesn't exist we
# delegate it to the parent dataset. This should solve the case of
# provisoning new datasets.
buildAllowCommand = permissions: dataset: (
"-+${pkgs.writeShellScript "zfs-allow-${dataset}" ''
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
# Run a ZFS list on the dataset to check if it exists
if ${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"list"
dataset
]} 2> /dev/null; then
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"allow"
cfg.user
(concatStringsSep "," permissions)
dataset
]}
else
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"allow"
cfg.user
(concatStringsSep "," permissions)
# Remove the last part of the path
(builtins.dirOf dataset)
]}
fi
''}"
);
# Function to build "zfs unallow" commands for the filesystems we've
# delegated permissions to. Here we unallow both the target but also
# on the parent dataset because at this stage we have no way of
# knowing if the allow command did execute on the parent dataset or
# not in the pre-hook. We can't run the same if in the post hook
# since the dataset should have been created at this point.
buildUnallowCommand = permissions: dataset: (
"-+${pkgs.writeShellScript "zfs-unallow-${dataset}" ''
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"unallow"
cfg.user
(concatStringsSep "," permissions)
dataset
]}
${lib.escapeShellArgs [
"/run/booted-system/sw/bin/zfs"
"unallow"
cfg.user
(concatStringsSep "," permissions)
# Remove the last part of the path
(builtins.dirOf dataset)
]}
''}"
);
in
{
# Interface
options.services.syncoid = {
enable = mkEnableOption "Syncoid ZFS synchronization service";
interval = mkOption {
type = types.str;
default = "hourly";
example = "*-*-* *:15:00";
description = ''
Run syncoid at this interval. The default is to run hourly.
The format is described in
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
'';
};
user = mkOption {
type = types.str;
default = "syncoid";
example = "backup";
description = ''
The user for the service. ZFS privilege delegation will be
automatically configured for any local pools used by syncoid if this
option is set to a user other than root. The user will be given the
"hold" and "send" privileges on any pool that has datasets being sent
and the "create", "mount", "receive", and "rollback" privileges on
any pool that has datasets being received.
'';
};
group = mkOption {
type = types.str;
default = "syncoid";
example = "backup";
description = "The group for the service.";
};
sshKey = mkOption {
type = types.nullOr types.path;
# Prevent key from being copied to store
apply = mapNullable toString;
default = null;
description = ''
SSH private key file to use to login to the remote system. Can be
overridden in individual commands.
'';
};
localSourceAllow = mkOption {
type = types.listOf types.str;
# Permissions snapshot and destroy are in case --no-sync-snap is not used
default = [ "bookmark" "hold" "send" "snapshot" "destroy" ];
description = ''
Permissions granted for the <option>services.syncoid.user</option> user
for local source datasets. See
<link xlink:href="https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html"/>
for available permissions.
'';
};
localTargetAllow = mkOption {
type = types.listOf types.str;
default = [ "change-key" "compression" "create" "mount" "mountpoint" "receive" "rollback" ];
example = [ "create" "mount" "receive" "rollback" ];
description = ''
Permissions granted for the <option>services.syncoid.user</option> user
for local target datasets. See
<link xlink:href="https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html"/>
for available permissions.
Make sure to include the <literal>change-key</literal> permission if you send raw encrypted datasets,
the <literal>compression</literal> permission if you send raw compressed datasets, and so on.
For remote target datasets you'll have to set your remote user permissions by yourself.
'';
};
commonArgs = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "--no-sync-snap" ];
description = ''
Arguments to add to every syncoid command, unless disabled for that
command. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/#syncoid-command-line-options"/>
for available options.
'';
};
service = mkOption {
type = types.attrs;
default = { };
description = ''
Systemd configuration common to all syncoid services.
'';
};
commands = mkOption {
type = types.attrsOf (types.submodule ({ name, ... }: {
options = {
source = mkOption {
type = types.str;
example = "pool/dataset";
description = ''
Source ZFS dataset. Can be either local or remote. Defaults to
the attribute name.
'';
};
target = mkOption {
type = types.str;
example = "user@server:pool/dataset";
description = ''
Target ZFS dataset. Can be either local
(<replaceable>pool/dataset</replaceable>) or remote
(<replaceable>user@server:pool/dataset</replaceable>).
'';
};
recursive = mkEnableOption ''the transfer of child datasets'';
sshKey = mkOption {
type = types.nullOr types.path;
# Prevent key from being copied to store
apply = mapNullable toString;
description = ''
SSH private key file to use to login to the remote system.
              Defaults to the <option>services.syncoid.sshKey</option> option.
'';
};
localSourceAllow = mkOption {
type = types.listOf types.str;
description = ''
Permissions granted for the <option>services.syncoid.user</option> user
for local source datasets. See
<link xlink:href="https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html"/>
for available permissions.
              Defaults to the <option>services.syncoid.localSourceAllow</option> option.
'';
};
localTargetAllow = mkOption {
type = types.listOf types.str;
description = ''
Permissions granted for the <option>services.syncoid.user</option> user
for local target datasets. See
<link xlink:href="https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html"/>
for available permissions.
Make sure to include the <literal>change-key</literal> permission if you send raw encrypted datasets,
the <literal>compression</literal> permission if you send raw compressed datasets, and so on.
For remote target datasets you'll have to set your remote user permissions by yourself.
'';
};
sendOptions = mkOption {
type = types.separatedString " ";
default = "";
example = "Lc e";
description = ''
Advanced options to pass to zfs send. Options are specified
without their leading dashes and separated by spaces.
'';
};
recvOptions = mkOption {
type = types.separatedString " ";
default = "";
example = "ux recordsize o compression=lz4";
description = ''
Advanced options to pass to zfs recv. Options are specified
without their leading dashes and separated by spaces.
'';
};
useCommonArgs = mkOption {
type = types.bool;
default = true;
description = ''
Whether to add the configured common arguments to this command.
'';
};
service = mkOption {
type = types.attrs;
default = { };
description = ''
Systemd configuration specific to this syncoid service.
'';
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "--sshport 2222" ];
description = "Extra syncoid arguments for this command.";
};
};
config = {
source = mkDefault name;
sshKey = mkDefault cfg.sshKey;
localSourceAllow = mkDefault cfg.localSourceAllow;
localTargetAllow = mkDefault cfg.localTargetAllow;
};
}));
default = { };
example = literalExpression ''
{
"pool/test".target = "root@target:pool/test";
}
'';
description = "Syncoid commands to run.";
};
};
# Implementation
config = mkIf cfg.enable {
users = {
users = mkIf (cfg.user == "syncoid") {
syncoid = {
group = cfg.group;
isSystemUser = true;
# For syncoid to be able to create /var/lib/syncoid/.ssh/
# and to use custom ssh_config or known_hosts.
home = "/var/lib/syncoid";
createHome = false;
};
};
groups = mkIf (cfg.group == "syncoid") {
syncoid = { };
};
};
systemd.services = mapAttrs'
(name: c:
nameValuePair "syncoid-${escapeUnitName name}" (mkMerge [
{
description = "Syncoid ZFS synchronization from ${c.source} to ${c.target}";
after = [ "zfs.target" ];
startAt = cfg.interval;
# syncoid may need zpool to get feature@extensible_dataset
path = [ "/run/booted-system/sw/bin/" ];
serviceConfig = {
ExecStartPre =
(map (buildAllowCommand c.localSourceAllow) (localDatasetName c.source)) ++
(map (buildAllowCommand c.localTargetAllow) (localDatasetName c.target));
ExecStopPost =
(map (buildUnallowCommand c.localSourceAllow) (localDatasetName c.source)) ++
(map (buildUnallowCommand c.localTargetAllow) (localDatasetName c.target));
ExecStart = lib.escapeShellArgs ([ "${pkgs.sanoid}/bin/syncoid" ]
++ optionals c.useCommonArgs cfg.commonArgs
++ optional c.recursive "-r"
++ optionals (c.sshKey != null) [ "--sshkey" c.sshKey ]
++ c.extraArgs
++ [
"--sendoptions"
c.sendOptions
"--recvoptions"
c.recvOptions
"--no-privilege-elevation"
c.source
c.target
]);
User = cfg.user;
Group = cfg.group;
StateDirectory = [ "syncoid" ];
StateDirectoryMode = "700";
# Prevent SSH control sockets of different syncoid services from interfering
PrivateTmp = true;
# Permissive access to /proc because syncoid
# calls ps(1) to detect ongoing `zfs receive`.
ProcSubset = "all";
ProtectProc = "default";
# The following options are only for optimizing:
# systemd-analyze security | grep syncoid-'*'
AmbientCapabilities = "";
CapabilityBoundingSet = "";
DeviceAllow = [ "/dev/zfs" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateNetwork = mkDefault false;
PrivateUsers = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RootDirectory = "/run/syncoid/${escapeUnitName name}";
RootDirectoryStartOnly = true;
BindPaths = [ "/dev/zfs" ];
BindReadOnlyPaths = [ builtins.storeDir "/etc" "/run" "/bin/sh" ];
# Avoid useless mounting of RootDirectory= in the own RootDirectory= of ExecStart='s mount namespace.
InaccessiblePaths = [ "-+/run/syncoid/${escapeUnitName name}" ];
MountAPIVFS = true;
# Create RootDirectory= in the host's mount namespace.
RuntimeDirectory = [ "syncoid/${escapeUnitName name}" ];
RuntimeDirectoryMode = "700";
SystemCallFilter = [
"@system-service"
# Groups in @system-service which do not contain a syscall listed by:
# perf stat -x, 2>perf.log -e 'syscalls:sys_enter_*' syncoid …
# awk >perf.syscalls -F "," '$1 > 0 {sub("syscalls:sys_enter_","",$3); print $3}' perf.log
# systemd-analyze syscall-filter | grep -v -e '#' | sed -e ':loop; /^[^ ]/N; s/\n //; t loop' | grep $(printf ' -e \\<%s\\>' $(cat perf.syscalls)) | cut -f 1 -d ' '
"~@aio"
"~@chown"
"~@keyring"
"~@memlock"
"~@privileged"
"~@resources"
"~@setuid"
"~@timer"
];
SystemCallArchitectures = "native";
# This is for BindPaths= and BindReadOnlyPaths=
# to allow traversal of directories they create in RootDirectory=.
UMask = "0066";
};
}
cfg.service
c.service
]))
cfg.commands;
};
meta.maintainers = with maintainers; [ julm lopsided98 ];
}
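# Example usage (a minimal sketch; hosts, key paths and dataset names are
# illustrative):
#
#   services.syncoid = {
#     enable = true;
#     sshKey = "/var/lib/syncoid/.ssh/id_ed25519";
#     commonArgs = [ "--no-sync-snap" ];
#     commands."tank/home" = {
#       target = "backup@backupserver:backuppool/home";
#       recursive = true;
#     };
#   };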


@ -0,0 +1,408 @@
{ config, lib, options, pkgs, utils, ... }:
with lib;
let
gcfg = config.services.tarsnap;
opt = options.services.tarsnap;
configFile = name: cfg: ''
keyfile ${cfg.keyfile}
${optionalString (cfg.cachedir != null) "cachedir ${cfg.cachedir}"}
${optionalString cfg.nodump "nodump"}
${optionalString cfg.printStats "print-stats"}
${optionalString cfg.printStats "humanize-numbers"}
${optionalString (cfg.checkpointBytes != null) ("checkpoint-bytes "+cfg.checkpointBytes)}
${optionalString cfg.aggressiveNetworking "aggressive-networking"}
${concatStringsSep "\n" (map (v: "exclude ${v}") cfg.excludes)}
${concatStringsSep "\n" (map (v: "include ${v}") cfg.includes)}
${optionalString cfg.lowmem "lowmem"}
${optionalString cfg.verylowmem "verylowmem"}
${optionalString (cfg.maxbw != null) "maxbw ${toString cfg.maxbw}"}
${optionalString (cfg.maxbwRateUp != null) "maxbw-rate-up ${toString cfg.maxbwRateUp}"}
${optionalString (cfg.maxbwRateDown != null) "maxbw-rate-down ${toString cfg.maxbwRateDown}"}
'';
in
{
imports = [
(mkRemovedOptionModule [ "services" "tarsnap" "cachedir" ] "Use services.tarsnap.archives.<name>.cachedir")
];
options = {
services.tarsnap = {
enable = mkEnableOption "periodic tarsnap backups";
keyfile = mkOption {
type = types.str;
default = "/root/tarsnap.key";
description = ''
The keyfile which associates this machine with your tarsnap
account.
Create the keyfile with <command>tarsnap-keygen</command>.
Note that each individual archive (specified below) may also have its
own individual keyfile specified. Tarsnap does not allow multiple
concurrent backups with the same cache directory and key (starting a
new backup will cause another one to fail). If you have multiple
archives specified, you should either spread out your backups to be
far apart, or specify a separate key for each archive. By default
every archive defaults to using
<literal>"/root/tarsnap.key"</literal>.
It's recommended for backups that you generate a key for every archive
using <literal>tarsnap-keygen(1)</literal>, and then generate a
write-only tarsnap key using <literal>tarsnap-keymgmt(1)</literal>,
and keep your master key(s) for a particular machine off-site.
The keyfile name should be given as a string and not a path, to
avoid the key being copied into the Nix store.
'';
};
archives = mkOption {
type = types.attrsOf (types.submodule ({ config, options, ... }:
{
options = {
keyfile = mkOption {
type = types.str;
default = gcfg.keyfile;
defaultText = literalExpression "config.${opt.keyfile}";
description = ''
Set a specific keyfile for this archive. This defaults to
<literal>"/root/tarsnap.key"</literal> if left unspecified.
Use this option if you want to run multiple backups
concurrently - each archive must have a unique key. You can
generate a write-only key derived from your master key (which
is recommended) using <literal>tarsnap-keymgmt(1)</literal>.
Note: every archive must have an individual master key. You
must generate multiple keys with
<literal>tarsnap-keygen(1)</literal>, and then generate write
only keys from those.
The keyfile name should be given as a string and not a path, to
avoid the key being copied into the Nix store.
'';
};
cachedir = mkOption {
type = types.nullOr types.path;
default = "/var/cache/tarsnap/${utils.escapeSystemdPath config.keyfile}";
defaultText = literalExpression ''
"/var/cache/tarsnap/''${utils.escapeSystemdPath config.${options.keyfile}}"
'';
description = ''
The cache allows tarsnap to identify previously stored data
blocks, reducing archival time and bandwidth usage.
Should the cache become desynchronized or corrupted, tarsnap
will refuse to run until you manually rebuild the cache with
<command>tarsnap --fsck</command>.
Set to <literal>null</literal> to disable caching.
'';
};
nodump = mkOption {
type = types.bool;
default = true;
description = ''
Exclude files with the <literal>nodump</literal> flag.
'';
};
printStats = mkOption {
type = types.bool;
default = true;
description = ''
Print global archive statistics upon completion.
The output is available via
<command>systemctl status tarsnap-archive-name</command>.
'';
};
checkpointBytes = mkOption {
type = types.nullOr types.str;
default = "1GB";
description = ''
Create a checkpoint every <literal>checkpointBytes</literal>
of uploaded data (optionally specified using an SI prefix).
1GB is the minimum value. A higher value is recommended,
as checkpointing is expensive.
Set to <literal>null</literal> to disable checkpointing.
'';
};
period = mkOption {
type = types.str;
default = "01:15";
example = "hourly";
description = ''
Create archive at this interval.
The format is described in
<citerefentry><refentrytitle>systemd.time</refentrytitle>
<manvolnum>7</manvolnum></citerefentry>.
'';
};
aggressiveNetworking = mkOption {
type = types.bool;
default = false;
description = ''
Upload data over multiple TCP connections, potentially
increasing tarsnap's bandwidth utilisation at the cost
of slowing down all other network traffic. Not
recommended unless TCP congestion is the dominant
limiting factor.
'';
};
directories = mkOption {
type = types.listOf types.path;
default = [];
description = "List of filesystem paths to archive.";
};
excludes = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Exclude files and directories matching these patterns.
'';
};
includes = mkOption {
type = types.listOf types.str;
default = [];
description = ''
Include only files and directories matching these
patterns (the empty list includes everything).
Exclusions have precedence over inclusions.
'';
};
lowmem = mkOption {
type = types.bool;
default = false;
description = ''
Reduce memory consumption by not caching small files.
Possibly beneficial if the average file size is smaller
than 1 MB and the number of files is lower than the
total amount of RAM in KB.
'';
};
verylowmem = mkOption {
type = types.bool;
default = false;
description = ''
Reduce memory consumption by a factor of 2 beyond what
<literal>lowmem</literal> does, at the cost of significantly
slowing down the archiving process.
'';
};
maxbw = mkOption {
type = types.nullOr types.int;
default = null;
description = ''
Abort archival if upstream bandwidth usage in bytes
exceeds this threshold.
'';
};
maxbwRateUp = mkOption {
type = types.nullOr types.int;
default = null;
example = literalExpression "25 * 1000";
description = ''
                Upload bandwidth rate limit in bytes per second.
'';
};
maxbwRateDown = mkOption {
type = types.nullOr types.int;
default = null;
example = literalExpression "50 * 1000";
description = ''
                Download bandwidth rate limit in bytes per second.
'';
};
verbose = mkOption {
type = types.bool;
default = false;
description = ''
Whether to produce verbose logging output.
'';
};
explicitSymlinks = mkOption {
type = types.bool;
default = false;
description = ''
Whether to follow symlinks specified as archives.
'';
};
followSymlinks = mkOption {
type = types.bool;
default = false;
description = ''
Whether to follow all symlinks in archive trees.
'';
};
};
}
));
default = {};
example = literalExpression ''
{
nixos =
{ directories = [ "/home" "/root/ssl" ];
};
gamedata =
{ directories = [ "/var/lib/minecraft" ];
period = "*:30";
};
}
'';
description = ''
Tarsnap archive configurations. Each attribute names an archive
to be created at a given time interval, according to the options
associated with it. When uploading to the tarsnap server,
archive names are suffixed by a 1 second resolution timestamp,
with the format <literal>%Y%m%d%H%M%S</literal>.
          For each member of the set, a timer is created that triggers the
          corresponding <literal>tarsnap-archive-name</literal> service unit. You may use
<command>systemctl start tarsnap-archive-name</command> to
manually trigger creation of <literal>archive-name</literal> at
any time.
'';
};
};
};
config = mkIf gcfg.enable {
assertions =
(mapAttrsToList (name: cfg:
{ assertion = cfg.directories != [];
message = "Must specify paths for tarsnap to back up";
}) gcfg.archives) ++
(mapAttrsToList (name: cfg:
{ assertion = !(cfg.lowmem && cfg.verylowmem);
message = "You cannot set both lowmem and verylowmem";
}) gcfg.archives);
systemd.services =
(mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}" {
description = "Tarsnap archive '${name}'";
requires = [ "network-online.target" ];
after = [ "network-online.target" ];
path = with pkgs; [ iputils tarsnap util-linux ];
# In order for the persistent tarsnap timer to work reliably, we have to
# make sure that the tarsnap server is reachable after systemd starts up
# the service - therefore we sleep in a loop until we can ping the
# endpoint.
preStart = ''
while ! ping -4 -q -c 1 v1-0-0-server.tarsnap.com &> /dev/null; do sleep 3; done
'';
script = let
tarsnap = ''tarsnap --configfile "/etc/tarsnap/${name}.conf"'';
run = ''${tarsnap} -c -f "${name}-$(date +"%Y%m%d%H%M%S")" \
${optionalString cfg.verbose "-v"} \
${optionalString cfg.explicitSymlinks "-H"} \
${optionalString cfg.followSymlinks "-L"} \
${concatStringsSep " " cfg.directories}'';
cachedir = escapeShellArg cfg.cachedir;
in if (cfg.cachedir != null) then ''
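        # Initialize the cache exactly once: fd 9 (lockf) guards the firstrun
        # existence check, fd 10 (firstrun) serializes the initial --fsck
        # (fd 9 is released meanwhile so other jobs are not blocked), and the
        # archive run itself is serialized via flock on firstrun, since
        # tarsnap forbids concurrent use of a single cache directory.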
mkdir -p ${cachedir}
chmod 0700 ${cachedir}
( flock 9
if [ ! -e ${cachedir}/firstrun ]; then
( flock 10
flock -u 9
${tarsnap} --fsck
flock 9
) 10>${cachedir}/firstrun
fi
) 9>${cachedir}/lockf
exec flock ${cachedir}/firstrun ${run}
'' else "exec ${run}";
serviceConfig = {
Type = "oneshot";
IOSchedulingClass = "idle";
NoNewPrivileges = "true";
CapabilityBoundingSet = [ "CAP_DAC_READ_SEARCH" ];
PermissionsStartOnly = "true";
};
}) gcfg.archives) //
(mapAttrs' (name: cfg: nameValuePair "tarsnap-restore-${name}"{
description = "Tarsnap restore '${name}'";
requires = [ "network-online.target" ];
path = with pkgs; [ iputils tarsnap util-linux ];
script = let
tarsnap = ''tarsnap --configfile "/etc/tarsnap/${name}.conf"'';
lastArchive = "$(${tarsnap} --list-archives | sort | tail -1)";
run = ''${tarsnap} -x -f "${lastArchive}" ${optionalString cfg.verbose "-v"}'';
cachedir = escapeShellArg cfg.cachedir;
in if (cfg.cachedir != null) then ''
mkdir -p ${cachedir}
chmod 0700 ${cachedir}
( flock 9
if [ ! -e ${cachedir}/firstrun ]; then
( flock 10
flock -u 9
${tarsnap} --fsck
flock 9
) 10>${cachedir}/firstrun
fi
) 9>${cachedir}/lockf
exec flock ${cachedir}/firstrun ${run}
'' else "exec ${run}";
serviceConfig = {
Type = "oneshot";
IOSchedulingClass = "idle";
NoNewPrivileges = "true";
CapabilityBoundingSet = [ "CAP_DAC_READ_SEARCH" ];
PermissionsStartOnly = "true";
};
}) gcfg.archives);
# Note: the timer must be Persistent=true, so that systemd will start it even
# if e.g. your laptop was asleep while the latest interval occurred.
systemd.timers = mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}"
{ timerConfig.OnCalendar = cfg.period;
timerConfig.Persistent = "true";
wantedBy = [ "timers.target" ];
}) gcfg.archives;
environment.etc =
mapAttrs' (name: cfg: nameValuePair "tarsnap/${name}.conf"
{ text = configFile name cfg;
}) gcfg.archives;
environment.systemPackages = [ pkgs.tarsnap ];
};
}

View file

@ -0,0 +1,125 @@
{ config, lib, ... }:
let
inherit (lib.attrsets) hasAttr;
inherit (lib.modules) mkDefault mkIf;
inherit (lib.options) mkEnableOption mkOption;
inherit (lib.types) nonEmptyStr nullOr;
options.services.tsmBackup = {
enable = mkEnableOption ''
automatic backups with the
IBM Spectrum Protect (Tivoli Storage Manager, TSM) client.
This also enables
      <option>programs.tsmClient.enable</option>.
'';
command = mkOption {
type = nonEmptyStr;
default = "backup";
example = "incr";
description = ''
The actual command passed to the
<literal>dsmc</literal> executable to start the backup.
'';
};
servername = mkOption {
type = nonEmptyStr;
example = "mainTsmServer";
description = ''
Create a systemd system service
<literal>tsm-backup.service</literal> that starts
a backup based on the given servername's stanza.
Note that this server's
<option>passwdDir</option> will default to
<filename>/var/lib/tsm-backup/password</filename>
(but may be overridden);
also, the service will use
<filename>/var/lib/tsm-backup</filename> as
<literal>HOME</literal> when calling
<literal>dsmc</literal>.
'';
};
autoTime = mkOption {
type = nullOr nonEmptyStr;
default = null;
example = "12:00";
description = ''
The backup service will be invoked
automatically at the given date/time,
which must be in the format described in
<citerefentry><refentrytitle>systemd.time</refentrytitle><manvolnum>5</manvolnum></citerefentry>.
The default <literal>null</literal>
disables automatic backups.
'';
};
};
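  # A minimal enablement sketch (hypothetical server name; the named stanza
  # must exist in `programs.tsmClient.servers` with `genPasswd = true`, see
  # the assertions below):
  #
  #   services.tsmBackup = {
  #     enable = true;
  #     servername = "mainTsmServer";
  #     autoTime = "12:00";
  #   };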
cfg = config.services.tsmBackup;
cfgPrg = config.programs.tsmClient;
assertions = [
{
assertion = hasAttr cfg.servername cfgPrg.servers;
message = "TSM service servername not found in list of servers";
}
{
assertion = cfgPrg.servers.${cfg.servername}.genPasswd;
message = "TSM service requires automatic password generation";
}
];
in
{
inherit options;
config = mkIf cfg.enable {
inherit assertions;
programs.tsmClient.enable = true;
programs.tsmClient.servers.${cfg.servername}.passwdDir =
mkDefault "/var/lib/tsm-backup/password";
systemd.services.tsm-backup = {
description = "IBM Spectrum Protect (Tivoli Storage Manager) Backup";
# DSM_LOG needs a trailing slash to have it treated as a directory.
# `/var/log` would be littered with TSM log files otherwise.
environment.DSM_LOG = "/var/log/tsm-backup/";
# TSM needs a HOME dir to store certificates.
environment.HOME = "/var/lib/tsm-backup";
serviceConfig = {
# for exit status description see
# https://www.ibm.com/docs/en/spectrum-protect/8.1.13?topic=clients-client-return-codes
SuccessExitStatus = "4 8";
# The `-se` option must come after the command.
# The `-optfile` option suppresses a `dsm.opt`-not-found warning.
ExecStart =
"${cfgPrg.wrappedPackage}/bin/dsmc ${cfg.command} -se='${cfg.servername}' -optfile=/dev/null";
LogsDirectory = "tsm-backup";
StateDirectory = "tsm-backup";
StateDirectoryMode = "0750";
# systemd sandboxing
LockPersonality = true;
NoNewPrivileges = true;
PrivateDevices = true;
#PrivateTmp = true; # would break backup of {/var,}/tmp
#PrivateUsers = true; # would block backup of /home/*
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = "read-only";
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "noaccess";
ProtectSystem = "strict";
RestrictNamespaces = true;
RestrictSUIDSGID = true;
};
      startAt = mkIf (cfg.autoTime != null) cfg.autoTime;
};
};
meta.maintainers = [ lib.maintainers.yarny ];
}

View file

@ -0,0 +1,90 @@
{ lib, pkgs, config, ... }:
with lib;
let
cfg = config.services.zfs.autoReplication;
recursive = optionalString cfg.recursive " --recursive";
followDelete = optionalString cfg.followDelete " --follow-delete";
in {
options = {
services.zfs.autoReplication = {
enable = mkEnableOption "ZFS snapshot replication.";
followDelete = mkOption {
description = "Remove remote snapshots that don't have a local correspondant.";
default = true;
type = types.bool;
};
host = mkOption {
description = "Remote host where snapshots should be sent. <literal>lz4</literal> is expected to be installed on this host.";
example = "example.com";
type = types.str;
};
identityFilePath = mkOption {
description = "Path to SSH key used to login to host.";
example = "/home/username/.ssh/id_rsa";
type = types.path;
};
localFilesystem = mkOption {
description = "Local ZFS fileystem from which snapshots should be sent. Defaults to the attribute name.";
example = "pool/file/path";
type = types.str;
};
remoteFilesystem = mkOption {
description = "Remote ZFS filesystem where snapshots should be sent.";
example = "pool/file/path";
type = types.str;
};
recursive = mkOption {
description = "Recursively discover snapshots to send.";
default = true;
type = types.bool;
};
username = mkOption {
description = "Username used by SSH to login to remote host.";
example = "username";
type = types.str;
};
};
};
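  # Example configuration (hypothetical host, user, key path and datasets):
  #
  #   services.zfs.autoReplication = {
  #     enable = true;
  #     host = "backup.example.com";
  #     username = "backup";
  #     identityFilePath = "/var/lib/zfs-replication/id_ed25519";
  #     localFilesystem = "tank/data";
  #     remoteFilesystem = "backup/tank/data";
  #   };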
config = lib.mkIf cfg.enable {
environment.systemPackages = [
pkgs.lz4
];
systemd.services.zfs-replication = {
after = [
"zfs-snapshot-daily.service"
"zfs-snapshot-frequent.service"
"zfs-snapshot-hourly.service"
"zfs-snapshot-monthly.service"
"zfs-snapshot-weekly.service"
];
description = "ZFS Snapshot Replication";
documentation = [
"https://github.com/alunduil/zfs-replicate"
];
restartIfChanged = false;
serviceConfig.ExecStart = "${pkgs.zfs-replicate}/bin/zfs-replicate${recursive} -l ${escapeShellArg cfg.username} -i ${escapeShellArg cfg.identityFilePath}${followDelete} ${escapeShellArg cfg.host} ${escapeShellArg cfg.remoteFilesystem} ${escapeShellArg cfg.localFilesystem}";
wantedBy = [
"zfs-snapshot-daily.service"
"zfs-snapshot-frequent.service"
"zfs-snapshot-hourly.service"
"zfs-snapshot-monthly.service"
"zfs-snapshot-weekly.service"
];
};
};
meta = {
maintainers = with lib.maintainers; [ alunduil ];
};
}

View file

@ -0,0 +1,469 @@
{ config, lib, pkgs, ... }:
with lib;
with types;
let
planDescription = ''
The znapzend backup plan to use for the source.
The plan specifies how often to backup and for how long to keep the
    backups. It consists of a series of retention-period-to-interval
    associations:
<literal>
retA=>intA,retB=>intB,...
</literal>
Both intervals and retention periods are expressed in standard units
    of time or multiples of them. You can use either the full name or a
shortcut according to the following listing:
<literal>
second|sec|s, minute|min, hour|h, day|d, week|w, month|mon|m, year|y
</literal>
See <citerefentry><refentrytitle>znapzendzetup</refentrytitle><manvolnum>1</manvolnum></citerefentry> for more info.
'';
planExample = "1h=>10min,1d=>1h,1w=>1d,1m=>1w,1y=>1m";
# A type for a string of the form number{b|k|M|G}
mbufferSizeType = str // {
check = x: str.check x && builtins.isList (builtins.match "^[0-9]+[bkMG]$" x);
description = "string of the form number{b|k|M|G}";
};
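  # e.g. "128M" and "1G" pass this check; "128MB" and "1.5G" do not.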
enabledFeatures = concatLists (mapAttrsToList (name: enabled: optional enabled name) cfg.features);
# Type for a string that must contain certain other strings (the list parameter).
# Note that these would need regex escaping.
stringContainingStrings = list: let
matching = s: map (str: builtins.match ".*${str}.*" s) list;
in str // {
check = x: str.check x && all isList (matching x);
description = "string containing all of the characters ${concatStringsSep ", " list}";
};
timestampType = stringContainingStrings [ "%Y" "%m" "%d" "%H" "%M" "%S" ];
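  # e.g. the default tsformat "%Y-%m-%d-%H%M%S" contains all six placeholders.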
destType = srcConfig: submodule ({ name, ... }: {
options = {
label = mkOption {
type = str;
description = "Label for this destination. Defaults to the attribute name.";
};
plan = mkOption {
type = str;
description = planDescription;
example = planExample;
};
dataset = mkOption {
type = str;
description = "Dataset name to send snapshots to.";
example = "tank/main";
};
host = mkOption {
type = nullOr str;
description = ''
Host to use for the destination dataset. Can be prefixed with
<literal>user@</literal> to specify the ssh user.
'';
default = null;
example = "john@example.com";
};
presend = mkOption {
type = nullOr str;
description = ''
Command to run before sending the snapshot to the destination.
Intended to run a remote script via <command>ssh</command> on the
destination, e.g. to bring up a backup disk or server or to put a
zpool online/offline. See also <option>postsend</option>.
'';
default = null;
example = "ssh root@bserv zpool import -Nf tank";
};
postsend = mkOption {
type = nullOr str;
description = ''
Command to run after sending the snapshot to the destination.
Intended to run a remote script via <command>ssh</command> on the
destination, e.g. to bring up a backup disk or server or to put a
zpool online/offline. See also <option>presend</option>.
'';
default = null;
example = "ssh root@bserv zpool export tank";
};
};
config = {
label = mkDefault name;
plan = mkDefault srcConfig.plan;
};
});
srcType = submodule ({ name, config, ... }: {
options = {
enable = mkOption {
type = bool;
description = "Whether to enable this source.";
default = true;
};
recursive = mkOption {
type = bool;
description = "Whether to do recursive snapshots.";
default = false;
};
mbuffer = {
enable = mkOption {
type = bool;
description = "Whether to use <command>mbuffer</command>.";
default = false;
};
port = mkOption {
type = nullOr ints.u16;
description = ''
Port to use for <command>mbuffer</command>.
If this is null, it will run <command>mbuffer</command> through
ssh.
If this is not null, it will run <command>mbuffer</command>
directly through TCP, which is not encrypted but faster. In that
case the given port needs to be open on the destination host.
'';
default = null;
};
size = mkOption {
type = mbufferSizeType;
description = ''
The size for <command>mbuffer</command>.
Supports the units b, k, M, G.
'';
default = "1G";
example = "128M";
};
};
presnap = mkOption {
type = nullOr str;
description = ''
Command to run before snapshots are taken on the source dataset,
e.g. for database locking/flushing. See also
<option>postsnap</option>.
'';
default = null;
example = literalExpression ''
'''''${pkgs.mariadb}/bin/mysql -e "set autocommit=0;flush tables with read lock;\\! ''${pkgs.coreutils}/bin/sleep 600" & ''${pkgs.coreutils}/bin/echo $! > /tmp/mariadblock.pid ; sleep 10'''
'';
};
postsnap = mkOption {
type = nullOr str;
description = ''
Command to run after snapshots are taken on the source dataset,
e.g. for database unlocking. See also <option>presnap</option>.
'';
default = null;
example = literalExpression ''
"''${pkgs.coreutils}/bin/kill `''${pkgs.coreutils}/bin/cat /tmp/mariadblock.pid`;''${pkgs.coreutils}/bin/rm /tmp/mariadblock.pid"
'';
};
timestampFormat = mkOption {
type = timestampType;
description = ''
The timestamp format to use for constructing snapshot names.
            The syntax is <literal>strftime</literal>-like. The string must
            include each of the mandatory <literal>%Y %m %d %H %M %S</literal> placeholders.
Optionally <literal>- _ . :</literal> characters as well as any
alphanumeric character are allowed. If suffixed by a
<literal>Z</literal>, times will be in UTC.
'';
default = "%Y-%m-%d-%H%M%S";
example = "znapzend-%m.%d.%Y-%H%M%SZ";
};
sendDelay = mkOption {
type = int;
description = ''
          Specify a delay (in seconds) before sending snapshots to the destination.
May be useful if you want to control sending time.
'';
default = 0;
example = 60;
};
plan = mkOption {
type = str;
description = planDescription;
example = planExample;
};
dataset = mkOption {
type = str;
description = "The dataset to use for this source.";
example = "tank/home";
};
destinations = mkOption {
type = attrsOf (destType config);
description = "Additional destinations.";
default = {};
example = literalExpression ''
{
local = {
dataset = "btank/backup";
presend = "zpool import -N btank";
postsend = "zpool export btank";
};
remote = {
host = "john@example.com";
dataset = "tank/john";
};
};
'';
};
};
config = {
dataset = mkDefault name;
};
});
### Generating the configuration from here
cfg = config.services.znapzend;
onOff = b: if b then "on" else "off";
nullOff = b: if b == null then "off" else toString b;
stripSlashes = replaceStrings [ "/" ] [ "." ];
attrsToFile = config: concatStringsSep "\n" (builtins.attrValues (
mapAttrs (n: v: "${n}=${v}") config));
mkDestAttrs = dst: with dst;
mapAttrs' (n: v: nameValuePair "dst_${label}${n}" v) ({
"" = optionalString (host != null) "${host}:" + dataset;
_plan = plan;
} // optionalAttrs (presend != null) {
_precmd = presend;
} // optionalAttrs (postsend != null) {
_pstcmd = postsend;
});
mkSrcAttrs = srcCfg: with srcCfg; {
enabled = onOff enable;
    # mbuffer is not referenced by its full path to accommodate non-NixOS systems or differing mbuffer versions between source and target
mbuffer = with mbuffer; if enable then "mbuffer"
+ optionalString (port != null) ":${toString port}" else "off";
mbuffer_size = mbuffer.size;
post_znap_cmd = nullOff postsnap;
pre_znap_cmd = nullOff presnap;
recursive = onOff recursive;
src = dataset;
src_plan = plan;
tsformat = timestampFormat;
zend_delay = toString sendDelay;
} // foldr (a: b: a // b) {} (
map mkDestAttrs (builtins.attrValues destinations)
);
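  # For illustration, a source "tank/home" with plan "1d=>1h" and a single
  # destination labelled "remote" yields roughly these key=value lines:
  #
  #   enabled=on
  #   src=tank/home
  #   src_plan=1d=>1h
  #   dst_remote=john@example.com:rtank/john
  #   dst_remote_plan=1d=>1h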
files = mapAttrs' (n: srcCfg: let
fileText = attrsToFile (mkSrcAttrs srcCfg);
in {
name = srcCfg.dataset;
value = pkgs.writeText (stripSlashes srcCfg.dataset) fileText;
}) cfg.zetup;
in
{
options = {
services.znapzend = {
enable = mkEnableOption "ZnapZend ZFS backup daemon";
logLevel = mkOption {
default = "debug";
example = "warning";
type = enum ["debug" "info" "warning" "err" "alert"];
description = ''
The log level when logging to file. Any of debug, info, warning, err,
alert. Default in daemonized form is debug.
'';
};
logTo = mkOption {
type = str;
default = "syslog::daemon";
example = "/var/log/znapzend.log";
description = ''
Where to log to (syslog::&lt;facility&gt; or &lt;filepath&gt;).
'';
};
noDestroy = mkOption {
type = bool;
default = false;
description = "Does all changes to the filesystem except destroy.";
};
autoCreation = mkOption {
type = bool;
default = false;
description = "Automatically create the destination dataset if it does not exist.";
};
zetup = mkOption {
type = attrsOf srcType;
description = "Znapzend configuration.";
default = {};
example = literalExpression ''
{
"tank/home" = {
# Make snapshots of tank/home every hour, keep those for 1 day,
# keep every days snapshot for 1 month, etc.
plan = "1d=>1h,1m=>1d,1y=>1m";
recursive = true;
# Send all those snapshots to john@example.com:rtank/john as well
destinations.remote = {
host = "john@example.com";
dataset = "rtank/john";
};
};
};
'';
};
pure = mkOption {
type = bool;
description = ''
Do not persist any stateful znapzend setups. If this option is
enabled, your previously set znapzend setups will be cleared and only
the ones defined with this module will be applied.
'';
default = false;
};
features.oracleMode = mkEnableOption ''
Destroy snapshots one by one instead of using one long argument list.
If source and destination are out of sync for a long time, you may have
      so many snapshots to destroy that the argument list gets too long and
      the command fails.
'';
features.recvu = mkEnableOption ''
recvu feature which uses <literal>-u</literal> on the receiving end to keep the destination
filesystem unmounted.
'';
features.compressed = mkEnableOption ''
compressed feature which adds the options <literal>-Lce</literal> to
the <command>zfs send</command> command. When this is enabled, make
sure that both the sending and receiving pool have the same relevant
      features enabled. Using <literal>-c</literal> will skip unnecessary
      decompress-compress stages, <literal>-L</literal> is for large block
      support and <literal>-e</literal> is for embedded data support. See
<citerefentry><refentrytitle>znapzend</refentrytitle><manvolnum>1</manvolnum></citerefentry>
and <citerefentry><refentrytitle>zfs</refentrytitle><manvolnum>8</manvolnum></citerefentry>
for more info.
'';
features.sendRaw = mkEnableOption ''
      sendRaw feature which adds the option <literal>-w</literal> to the
<command>zfs send</command> command. For encrypted source datasets this
instructs zfs not to decrypt before sending which results in a remote
backup that can't be read without the encryption key/passphrase, useful
when the remote isn't fully trusted or not physically secure. This
option must be used consistently, raw incrementals cannot be based on
non-raw snapshots and vice versa.
'';
features.skipIntermediates = mkEnableOption ''
Enable the skipIntermediates feature to send a single increment
      between the latest common snapshot and the newly made one. It may skip
several source snaps if the destination was offline for some time, and
it should skip snapshots not managed by znapzend. Normally for online
destinations, the new snapshot is sent as soon as it is created on the
source, so there are no automatic increments to skip.
'';
features.lowmemRecurse = mkEnableOption ''
use lowmemRecurse on systems where you have too many datasets, so a
recursive listing of attributes to find backup plans exhausts the
memory available to <command>znapzend</command>: instead, go the slower
way to first list all impacted dataset names, and then query their
configs one by one.
'';
features.zfsGetType = mkEnableOption ''
use zfsGetType if your <command>zfs get</command> supports a
<literal>-t</literal> argument for filtering by dataset type at all AND
lists properties for snapshots by default when recursing, so that there
is too much data to process while searching for backup plans.
If these two conditions apply to your system, the time needed for a
<literal>--recursive</literal> search for backup plans can literally
      differ by hundreds of times (depending on the number of snapshots in
that dataset tree... and a decent backup plan will ensure you have a lot
of those), so you would benefit from requesting this feature.
'';
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.znapzend ];
systemd.services = {
znapzend = {
description = "ZnapZend - ZFS Backup System";
wantedBy = [ "zfs.target" ];
after = [ "zfs.target" ];
path = with pkgs; [ zfs mbuffer openssh ];
preStart = optionalString cfg.pure ''
echo Resetting znapzend zetups
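        # `znapzendzetup list` prints headers of the form
        # "*** backup plan: <dataset> ***"; extract each dataset name
        # and delete its stored setup so only Nix-defined ones remain.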
${pkgs.znapzend}/bin/znapzendzetup list \
| grep -oP '(?<=\*\*\* backup plan: ).*(?= \*\*\*)' \
| xargs -I{} ${pkgs.znapzend}/bin/znapzendzetup delete "{}"
'' + concatStringsSep "\n" (mapAttrsToList (dataset: config: ''
echo Importing znapzend zetup ${config} for dataset ${dataset}
${pkgs.znapzend}/bin/znapzendzetup import --write ${dataset} ${config} &
'') files) + ''
wait
'';
serviceConfig = {
# znapzendzetup --import apparently tries to connect to the backup
# host 3 times with a timeout of 30 seconds, leading to a startup
# delay of >90s when the host is down, which is just above the default
# service timeout of 90 seconds. Increase the timeout so it doesn't
# make the service fail in that case.
TimeoutStartSec = 180;
# Needs to have write access to ZFS
User = "root";
ExecStart = let
args = concatStringsSep " " [
"--logto=${cfg.logTo}"
"--loglevel=${cfg.logLevel}"
(optionalString cfg.noDestroy "--nodestroy")
(optionalString cfg.autoCreation "--autoCreation")
(optionalString (enabledFeatures != [])
"--features=${concatStringsSep "," enabledFeatures}")
]; in "${pkgs.znapzend}/bin/znapzend ${args}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
Restart = "on-failure";
};
};
};
};
meta.maintainers = with maintainers; [ infinisil SlothOfAnarchy ];
}

View file

@ -0,0 +1,57 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.zrepl;
format = pkgs.formats.yaml { };
configFile = format.generate "zrepl.yml" cfg.settings;
in
{
meta.maintainers = with maintainers; [ cole-h ];
options = {
services.zrepl = {
enable = mkEnableOption "zrepl";
settings = mkOption {
default = { };
description = ''
Configuration for zrepl. See <link
xlink:href="https://zrepl.github.io/configuration.html"/>
for more information.
'';
type = types.submodule {
freeformType = format.type;
};
};
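        # A minimal sketch of a snapshot-only job (hypothetical name and
        # dataset), following the upstream configuration format:
        #
        #   services.zrepl.settings = {
        #     jobs = [{
        #       name = "snapjob";
        #       type = "snap";
        #       filesystems."tank/data<" = true;
        #       snapshotting = { type = "periodic"; interval = "10m"; prefix = "zrepl_"; };
        #       pruning.keep = [{ type = "last_n"; count = 24; }];
        #     }];
        #   };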
};
};
### Implementation ###
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.zrepl ];
# zrepl looks for its config in this location by default. This
# allows the use of e.g. `zrepl signal wakeup <job>` without having
    # to specify the store path of the config.
environment.etc."zrepl/zrepl.yml".source = configFile;
systemd.packages = [ pkgs.zrepl ];
# Note that pkgs.zrepl copies and adapts the upstream systemd unit, and
# the fields defined here only override certain fields from that unit.
systemd.services.zrepl = {
requires = [ "local-fs.target" ];
wantedBy = [ "zfs.target" ];
after = [ "zfs.target" ];
path = [ config.boot.zfs.package ];
restartTriggers = [ configFile ];
serviceConfig = {
Restart = "on-failure";
};
};
};
}