uboot: (firmwareOdroidC2/C4) don't invoke patch tool, use patches = [] instead
https://github.com/NixOS/nixpkgs/blob/master/pkgs/stdenv/generic/setup.sh#L948 this can do it nicely. Signed-off-by: Anton Arapov <anton@deadbeef.mx>
This commit is contained in:
commit
56de2bcd43
30691 changed files with 3076956 additions and 0 deletions
111
nixos/modules/services/monitoring/alerta.nix
Normal file
111
nixos/modules/services/monitoring/alerta.nix
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
{ config, lib, pkgs, ... }:

# NixOS module for the Alerta monitoring server (alertad).

with lib;

let
  cfg = config.services.alerta;

  # Render alertad.conf from the module options; extraConfig is appended
  # verbatim so anything not covered by an option can still be set.
  alertaConf = pkgs.writeTextFile {
    name = "alertad.conf";
    text = ''
      DATABASE_URL = '${cfg.databaseUrl}'
      DATABASE_NAME = '${cfg.databaseName}'
      LOG_FILE = '${cfg.logDir}/alertad.log'
      LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
      CORS_ORIGINS = [ ${concatMapStringsSep ", " (s: "\"" + s + "\"") cfg.corsOrigins} ];
      AUTH_REQUIRED = ${if cfg.authenticationRequired then "True" else "False"}
      SIGNUP_ENABLED = ${if cfg.signupEnabled then "True" else "False"}
      ${cfg.extraConfig}
    '';
  };
in
{
  options.services.alerta = {
    enable = mkEnableOption "alerta";

    port = mkOption {
      type = types.int;
      default = 5000;
      description = "Port of Alerta";
    };

    bind = mkOption {
      type = types.str;
      default = "0.0.0.0";
      description = "Address to bind to. The default is to bind to all addresses";
    };

    logDir = mkOption {
      type = types.path;
      description = "Location where the logfiles are stored";
      default = "/var/log/alerta";
    };

    databaseUrl = mkOption {
      type = types.str;
      description = "URL of the MongoDB or PostgreSQL database to connect to";
      default = "mongodb://localhost";
    };

    databaseName = mkOption {
      type = types.str;
      description = "Name of the database instance to connect to";
      default = "monitoring";
    };

    corsOrigins = mkOption {
      type = types.listOf types.str;
      description = "List of URLs that can access the API for Cross-Origin Resource Sharing (CORS)";
      default = [ "http://localhost" "http://localhost:5000" ];
    };

    authenticationRequired = mkOption {
      type = types.bool;
      description = "Whether users must authenticate when using the web UI or command-line tool";
      default = false;
    };

    signupEnabled = mkOption {
      type = types.bool;
      # Fixed: the previous text ("Whether to prevent sign-up …") described
      # the inverse of what SIGNUP_ENABLED actually does.
      description = "Whether to allow sign-up of new users via the web UI";
      default = true;
    };

    extraConfig = mkOption {
      description = "These lines go into alertad.conf verbatim.";
      default = "";
      type = types.lines;
    };
  };

  config = mkIf cfg.enable {
    # Create the log directory, owned by the service user.
    systemd.tmpfiles.rules = [
      "d '${cfg.logDir}' - alerta alerta - -"
    ];

    systemd.services.alerta = {
      description = "Alerta Monitoring System";
      wantedBy = [ "multi-user.target" ];
      # Fixed: the systemd unit is "network.target"; "networking.target"
      # does not exist, so the ordering dependency was silently a no-op.
      after = [ "network.target" ];
      environment = {
        ALERTA_SVR_CONF_FILE = alertaConf;
      };
      serviceConfig = {
        ExecStart = "${pkgs.alerta-server}/bin/alertad run --port ${toString cfg.port} --host ${cfg.bind}";
        User = "alerta";
        Group = "alerta";
      };
    };

    environment.systemPackages = [ pkgs.alerta ];

    users.users.alerta = {
      uid = config.ids.uids.alerta;
      description = "Alerta user";
    };

    users.groups.alerta = {
      gid = config.ids.gids.alerta;
    };
  };
}
|
||||
191
nixos/modules/services/monitoring/apcupsd.nix
Normal file
191
nixos/modules/services/monitoring/apcupsd.nix
Normal file
|
|
@ -0,0 +1,191 @@
|
|||
{ config, lib, pkgs, ... }:

# NixOS module for apcupsd, the APC UPS monitoring daemon.

with lib;

let
  cfg = config.services.apcupsd;

  # Runtime configuration file. apcupsd insists on the version header being
  # the very first line, so it is prepended before the user-supplied text.
  configFile = pkgs.writeText "apcupsd.conf" ''
    ## apcupsd.conf v1.1 ##
    # apcupsd complains if the first line is not like above.
    ${cfg.configText}
    SCRIPTDIR ${toString scriptDir}
  '';

  # List of events from "man apccontrol"
  eventList = [
    "annoyme"
    "battattach"
    "battdetach"
    "changeme"
    "commfailure"
    "commok"
    "doreboot"
    "doshutdown"
    "emergency"
    "failing"
    "killpower"
    "loadlimit"
    "mainsback"
    "onbattery"
    "offbattery"
    "powerout"
    "remotedown"
    "runlimit"
    "timeout"
    "startselftest"
    "endselftest"
  ];

  # Shell snippet that installs one executable hook script named after the
  # event, containing the user's commands.
  shellCmdsForEventScript = eventname: commands: ''
    echo "#!${pkgs.runtimeShell}" > "$out/${eventname}"
    echo '${commands}' >> "$out/${eventname}"
    chmod a+x "$out/${eventname}"
  '';

  # Only generate a script for events the user actually hooked.
  eventToShellCmds = event:
    optionalString (builtins.hasAttr event cfg.hooks)
      (shellCmdsForEventScript event cfg.hooks.${event});

  scriptDir = pkgs.runCommand "apcupsd-scriptdir" { preferLocalBuild = true; } (''
    mkdir "$out"
    # Copy SCRIPTDIR from apcupsd package
    cp -r ${pkgs.apcupsd}/etc/apcupsd/* "$out"/
    # Make the files writeable (nix will unset the write bits afterwards)
    chmod u+w "$out"/*
    # Remove the sample event notification scripts, because they don't work
    # anyways (they try to send mail to "root" with the "mail" command)
    (cd "$out" && rm changeme commok commfailure onbattery offbattery)
    # Remove the sample apcupsd.conf file (we're generating our own)
    rm "$out/apcupsd.conf"
    # Set the SCRIPTDIR= line in apccontrol to the dir we're creating now
    sed -i -e "s|^SCRIPTDIR=.*|SCRIPTDIR=$out|" "$out/apccontrol"
  '' + concatStringsSep "\n" (map eventToShellCmds eventList));

in
{

  ###### interface

  options = {

    services.apcupsd = {

      enable = mkOption {
        default = false;
        type = types.bool;
        description = ''
          Whether to enable the APC UPS daemon. apcupsd monitors your UPS and
          permits orderly shutdown of your computer in the event of a power
          failure. User manual: http://www.apcupsd.com/manual/manual.html.
          Note that apcupsd runs as root (to allow shutdown of computer).
          You can check the status of your UPS with the "apcaccess" command.
        '';
      };

      configText = mkOption {
        default = ''
          UPSTYPE usb
          NISIP 127.0.0.1
          BATTERYLEVEL 50
          MINUTES 5
        '';
        type = types.lines;
        description = ''
          Contents of the runtime configuration file, apcupsd.conf. The default
          settings makes apcupsd autodetect USB UPSes, limit network access to
          localhost and shutdown the system when the battery level is below 50
          percent, or when the UPS has calculated that it has 5 minutes or less
          of remaining power-on time. See man apcupsd.conf for details.
        '';
      };

      hooks = mkOption {
        default = {};
        example = {
          doshutdown = "# shell commands to notify that the computer is shutting down";
        };
        type = types.attrsOf types.lines;
        description = ''
          Each attribute in this option names an apcupsd event and the string
          value it contains will be executed in a shell, in response to that
          event (prior to the default action). See "man apccontrol" for the
          list of events and what they represent.

          A hook script can stop apccontrol from doing its default action by
          exiting with value 99. Do not do this unless you know what you're
          doing.
        '';
      };

    };

  };


  ###### implementation

  config = mkIf cfg.enable {

    # Reject hook names that do not correspond to a real apccontrol event.
    assertions = [ {
      assertion = let hooknames = builtins.attrNames cfg.hooks; in all (x: elem x eventList) hooknames;
      message = ''
        One (or more) attribute names in services.apcupsd.hooks are invalid.
        Current attribute names: ${toString (builtins.attrNames cfg.hooks)}
        Valid attribute names : ${toString eventList}
      '';
    } ];

    # Give users access to the "apcaccess" tool
    environment.systemPackages = [ pkgs.apcupsd ];

    # NOTE 1: apcupsd runs as root because it needs permission to run
    # "shutdown"
    #
    # NOTE 2: When apcupsd calls "wall", it prints an error because stdout is
    # not connected to a tty (it is connected to the journal):
    #   wall: cannot get tty name: Inappropriate ioctl for device
    # The message still gets through.
    systemd.services.apcupsd = {
      description = "APC UPS Daemon";
      wantedBy = [ "multi-user.target" ];
      preStart = "mkdir -p /run/apcupsd/";
      serviceConfig = {
        ExecStart = "${pkgs.apcupsd}/bin/apcupsd -b -f ${configFile} -d1";
        # TODO: When apcupsd has initiated a shutdown, systemd always ends up
        # waiting for it to stop ("A stop job is running for UPS daemon"). This
        # is weird, because in the journal one can clearly see that apcupsd has
        # received the SIGTERM signal and has already quit (or so it seems).
        # This reduces the wait time from 90 seconds (default) to just 5. Then
        # systemd kills it with SIGKILL.
        TimeoutStopSec = 5;
      };
      unitConfig.Documentation = "man:apcupsd(8)";
    };

    # A special service to tell the UPS to power down/hibernate just before the
    # computer shuts down. (The UPS has a built in delay before it actually
    # shuts off power.) Copied from here:
    # http://forums.opensuse.org/english/get-technical-help-here/applications/479499-apcupsd-systemd-killpower-issues.html
    systemd.services.apcupsd-killpower = {
      description = "APC UPS Kill Power";
      after = [ "shutdown.target" ]; # append umount.target?
      before = [ "final.target" ];
      wantedBy = [ "shutdown.target" ];
      unitConfig = {
        ConditionPathExists = "/run/apcupsd/powerfail";
        DefaultDependencies = "no";
      };
      serviceConfig = {
        Type = "oneshot";
        ExecStart = "${pkgs.apcupsd}/bin/apcupsd --killpower -f ${configFile}";
        TimeoutSec = "infinity";
        StandardOutput = "tty";
        RemainAfterExit = "yes";
      };
    };

  };

}
|
||||
62
nixos/modules/services/monitoring/arbtt.nix
Normal file
62
nixos/modules/services/monitoring/arbtt.nix
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
{ config, lib, pkgs, ... }:

# NixOS module for the arbtt automatic time tracker (per-user capture daemon).

with lib;

let
  cfg = config.services.arbtt;
in {
  options = {
    services.arbtt = {
      enable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Enable the arbtt statistics capture service.
        '';
      };

      package = mkOption {
        type = types.package;
        default = pkgs.haskellPackages.arbtt;
        defaultText = literalExpression "pkgs.haskellPackages.arbtt";
        description = ''
          The package to use for the arbtt binaries.
        '';
      };

      logFile = mkOption {
        type = types.str;
        default = "%h/.arbtt/capture.log";
        example = "/home/username/.arbtt-capture.log";
        description = ''
          The log file for captured samples.
        '';
      };

      sampleRate = mkOption {
        type = types.int;
        default = 60;
        example = 120;
        description = ''
          The sampling interval in seconds.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    # Runs as a per-user unit tied to the graphical session, since
    # arbtt-capture needs access to the user's display.
    systemd.user.services.arbtt = {
      description = "arbtt statistics capture service";
      wantedBy = [ "graphical-session.target" ];
      partOf = [ "graphical-session.target" ];

      serviceConfig = {
        Type = "simple";
        ExecStart = "${cfg.package}/bin/arbtt-capture --logfile=${cfg.logFile} --sample-rate=${toString cfg.sampleRate}";
        Restart = "always";
      };
    };
  };

  meta.maintainers = [ maintainers.michaelpj ];
}
|
||||
165
nixos/modules/services/monitoring/bosun.nix
Normal file
165
nixos/modules/services/monitoring/bosun.nix
Normal file
|
|
@ -0,0 +1,165 @@
|
|||
{ config, lib, pkgs, ... }:

# NixOS module for Bosun, the monitoring and alerting system.

with lib;

let
  cfg = config.services.bosun;

  # Top-level bosun.conf; null-valued hosts are simply omitted.
  configFile = pkgs.writeText "bosun.conf" ''
    ${optionalString (cfg.opentsdbHost !=null) "tsdbHost = ${cfg.opentsdbHost}"}
    ${optionalString (cfg.influxHost !=null) "influxHost = ${cfg.influxHost}"}
    httpListen = ${cfg.listenAddress}
    stateFile = ${cfg.stateFile}
    ledisDir = ${cfg.ledisDir}
    checkFrequency = ${cfg.checkFrequency}

    ${cfg.extraConfig}
  '';

in {

  options = {

    services.bosun = {

      enable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          Whether to run bosun.
        '';
      };

      package = mkOption {
        type = types.package;
        default = pkgs.bosun;
        defaultText = literalExpression "pkgs.bosun";
        description = ''
          bosun binary to use.
        '';
      };

      user = mkOption {
        type = types.str;
        default = "bosun";
        description = ''
          User account under which bosun runs.
        '';
      };

      group = mkOption {
        type = types.str;
        default = "bosun";
        description = ''
          Group account under which bosun runs.
        '';
      };

      opentsdbHost = mkOption {
        type = types.nullOr types.str;
        default = "localhost:4242";
        description = ''
          Host and port of the OpenTSDB database that stores bosun data.
          To disable opentsdb you can pass null as parameter.
        '';
      };

      influxHost = mkOption {
        type = types.nullOr types.str;
        default = null;
        example = "localhost:8086";
        description = ''
          Host and port of the influxdb database.
        '';
      };

      listenAddress = mkOption {
        type = types.str;
        default = ":8070";
        description = ''
          The host address and port that bosun's web interface will listen on.
        '';
      };

      stateFile = mkOption {
        type = types.path;
        default = "/var/lib/bosun/bosun.state";
        description = ''
          Path to bosun's state file.
        '';
      };

      ledisDir = mkOption {
        type = types.path;
        default = "/var/lib/bosun/ledis_data";
        description = ''
          Path to bosun's ledis data dir
        '';
      };

      checkFrequency = mkOption {
        type = types.str;
        default = "5m";
        description = ''
          Bosun's check frequency
        '';
      };

      extraConfig = mkOption {
        type = types.lines;
        default = "";
        description = ''
          Extra configuration options for Bosun. You should describe your
          desired templates, alerts, macros, etc through this configuration
          option.

          A detailed description of the supported syntax can be found at
          http://bosun.org/configuration.html
        '';
      };

    };

  };

  config = mkIf cfg.enable {

    systemd.services.bosun = {
      description = "bosun metrics collector (part of Bosun)";
      wantedBy = [ "multi-user.target" ];

      # Create state file and ledis dir before dropping privileges; chown
      # only when actually started as root.
      preStart = ''
        mkdir -p "$(dirname "${cfg.stateFile}")";
        touch "${cfg.stateFile}"
        touch "${cfg.stateFile}.tmp"

        mkdir -p "${cfg.ledisDir}";

        if [ "$(id -u)" = 0 ]; then
          chown ${cfg.user}:${cfg.group} "${cfg.stateFile}"
          chown ${cfg.user}:${cfg.group} "${cfg.stateFile}.tmp"
          chown ${cfg.user}:${cfg.group} "${cfg.ledisDir}"
        fi
      '';

      serviceConfig = {
        PermissionsStartOnly = true;
        User = cfg.user;
        Group = cfg.group;
        ExecStart = ''
          ${cfg.package}/bin/bosun -c ${configFile}
        '';
      };
    };

    users.users.bosun = {
      description = "bosun user";
      group = "bosun";
      uid = config.ids.uids.bosun;
    };

    users.groups.bosun.gid = config.ids.gids.bosun;

  };

}
|
||||
142
nixos/modules/services/monitoring/cadvisor.nix
Normal file
142
nixos/modules/services/monitoring/cadvisor.nix
Normal file
|
|
@ -0,0 +1,142 @@
|
|||
{ config, pkgs, lib, ... }:

# NixOS module for cAdvisor, the container resource usage exporter.

with lib;

let
  cfg = config.services.cadvisor;

in {
  options = {
    services.cadvisor = {
      enable = mkOption {
        default = false;
        type = types.bool;
        description = "Whether to enable cadvisor service.";
      };

      listenAddress = mkOption {
        default = "127.0.0.1";
        type = types.str;
        description = "Cadvisor listening host";
      };

      port = mkOption {
        default = 8080;
        type = types.int;
        description = "Cadvisor listening port";
      };

      storageDriver = mkOption {
        default = null;
        type = types.nullOr types.str;
        example = "influxdb";
        description = "Cadvisor storage driver.";
      };

      storageDriverHost = mkOption {
        default = "localhost:8086";
        type = types.str;
        description = "Cadvisor storage driver host.";
      };

      storageDriverDb = mkOption {
        default = "root";
        type = types.str;
        description = "Cadvisord storage driver database name.";
      };

      storageDriverUser = mkOption {
        default = "root";
        type = types.str;
        description = "Cadvisor storage driver username.";
      };

      storageDriverPassword = mkOption {
        default = "root";
        type = types.str;
        description = ''
          Cadvisor storage driver password.

          Warning: this password is stored in the world-readable Nix store. It's
          recommended to use the <option>storageDriverPasswordFile</option> option
          since that gives you control over the security of the password.
          <option>storageDriverPasswordFile</option> also takes precedence over <option>storageDriverPassword</option>.
        '';
      };

      storageDriverPasswordFile = mkOption {
        type = types.str;
        description = ''
          File that contains the cadvisor storage driver password.

          <option>storageDriverPasswordFile</option> takes precedence over <option>storageDriverPassword</option>

          Warning: when <option>storageDriverPassword</option> is non-empty this defaults to a file in the
          world-readable Nix store that contains the value of <option>storageDriverPassword</option>.

          It's recommended to override this with a path not in the Nix store.
          Tip: use <link xlink:href='https://nixos.org/nixops/manual/#idm140737318306400'>nixops key management</link>
        '';
      };

      storageDriverSecure = mkOption {
        default = false;
        type = types.bool;
        description = "Cadvisor storage driver, enable secure communication.";
      };

      extraOptions = mkOption {
        type = types.listOf types.str;
        default = [];
        description = ''
          Additional cadvisor options.

          See <link xlink:href='https://github.com/google/cadvisor/blob/master/docs/runtime_options.md'/> for available options.
        '';
      };
    };
  };

  config = mkMerge [
    # Materialize storageDriverPassword into a (world-readable) store file
    # unless the user supplies their own storageDriverPasswordFile.
    { services.cadvisor.storageDriverPasswordFile = mkIf (cfg.storageDriverPassword != "") (
        mkDefault (toString (pkgs.writeTextFile {
          name = "cadvisor-storage-driver-password";
          text = cfg.storageDriverPassword;
        }))
      );
    }

    (mkIf cfg.enable {
      systemd.services.cadvisor = {
        wantedBy = [ "multi-user.target" ];
        after = [ "network.target" "docker.service" "influxdb.service" ];

        path = optionals config.boot.zfs.enabled [ pkgs.zfs ];

        # Wait until the HTTP endpoint actually answers before reporting
        # the unit as started.
        postStart = mkBefore ''
          until ${pkgs.curl.bin}/bin/curl -s -o /dev/null 'http://${cfg.listenAddress}:${toString cfg.port}/containers/'; do
            sleep 1;
          done
        '';

        script = ''
          exec ${pkgs.cadvisor}/bin/cadvisor \
            -logtostderr=true \
            -listen_ip="${cfg.listenAddress}" \
            -port="${toString cfg.port}" \
            ${escapeShellArgs cfg.extraOptions} \
            ${optionalString (cfg.storageDriver != null) ''
              -storage_driver "${cfg.storageDriver}" \
              -storage_driver_host "${cfg.storageDriverHost}" \
              -storage_driver_db "${cfg.storageDriverDb}" \
              -storage_driver_user "${cfg.storageDriverUser}" \
              -storage_driver_password "$(cat "${cfg.storageDriverPasswordFile}")" \
              ${optionalString cfg.storageDriverSecure "-storage_driver_secure"}
            ''}
        '';

        serviceConfig.TimeoutStartSec=300;
      };
    })
  ];
}
|
||||
166
nixos/modules/services/monitoring/collectd.nix
Normal file
166
nixos/modules/services/monitoring/collectd.nix
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
{ config, pkgs, lib, ... }:

# NixOS module for the collectd monitoring agent.

with lib;

let
  cfg = config.services.collectd;

  baseDirLine = ''BaseDir "${cfg.dataDir}"'';
  rawConf = pkgs.writeText "collectd-unvalidated.conf" cfg.extraConfig;

  # When validateConfig is on, run "collectd -t" against the generated
  # config at build time; BaseDir is pointed at "." because collectd -t
  # fails if the directory does not exist in the sandbox.
  conf = if cfg.validateConfig then
    pkgs.runCommand "collectd.conf" {} ''
      echo testing ${rawConf}
      cp ${rawConf} collectd.conf
      # collectd -t fails if BaseDir does not exist.
      substituteInPlace collectd.conf --replace ${lib.escapeShellArgs [ baseDirLine ]} 'BaseDir "."'
      ${package}/bin/collectd -t -C collectd.conf
      cp ${rawConf} $out
    '' else rawConf;

  # Either the user's package, or a minimal build with only the plugins
  # referenced in services.collectd.plugins (plus syslog).
  package =
    if cfg.buildMinimalPackage
    then minimalPackage
    else cfg.package;

  minimalPackage = cfg.package.override {
    enabledPlugins = [ "syslog" ] ++ builtins.attrNames cfg.plugins;
  };

in {
  options.services.collectd = with types; {
    enable = mkEnableOption "collectd agent";

    validateConfig = mkOption {
      default = true;
      description = ''
        Validate the syntax of collectd configuration file at build time.
        Disable this if you use the Include directive on files unavailable in
        the build sandbox, or when cross-compiling.
      '';
      type = types.bool;
    };

    package = mkOption {
      default = pkgs.collectd;
      defaultText = literalExpression "pkgs.collectd";
      description = ''
        Which collectd package to use.
      '';
      type = types.package;
    };

    buildMinimalPackage = mkOption {
      default = false;
      description = ''
        Build a minimal collectd package with only the configured `services.collectd.plugins`
      '';
      type = bool;
    };

    user = mkOption {
      default = "collectd";
      description = ''
        User under which to run collectd.
      '';
      type = nullOr str;
    };

    dataDir = mkOption {
      default = "/var/lib/collectd";
      description = ''
        Data directory for collectd agent.
      '';
      type = path;
    };

    autoLoadPlugin = mkOption {
      default = false;
      description = ''
        Enable plugin autoloading.
      '';
      type = bool;
    };

    include = mkOption {
      default = [];
      description = ''
        Additional paths to load config from.
      '';
      type = listOf str;
    };

    plugins = mkOption {
      default = {};
      example = { cpu = ""; memory = ""; network = "Server 192.168.1.1 25826"; };
      description = ''
        Attribute set of plugin names to plugin config segments
      '';
      type = attrsOf lines;
    };

    extraConfig = mkOption {
      default = "";
      description = ''
        Extra configuration for collectd. Use mkBefore to add lines before the
        default config, and mkAfter to add them below.
      '';
      type = lines;
    };

  };

  config = mkIf cfg.enable {
    # 1200 is after the default (1000) but before mkAfter (1500).
    services.collectd.extraConfig = lib.mkOrder 1200 ''
      ${baseDirLine}
      AutoLoadPlugin ${boolToString cfg.autoLoadPlugin}
      Hostname "${config.networking.hostName}"

      LoadPlugin syslog
      <Plugin "syslog">
        LogLevel "info"
        NotifyLevel "OKAY"
      </Plugin>

      ${concatStrings (mapAttrsToList (plugin: pluginConfig: ''
        LoadPlugin ${plugin}
        <Plugin "${plugin}">
        ${pluginConfig}
        </Plugin>
      '') cfg.plugins)}

      ${concatMapStrings (f: ''
        Include "${f}"
      '') cfg.include}
    '';

    systemd.tmpfiles.rules = [
      "d '${cfg.dataDir}' - ${cfg.user} - - -"
    ];

    systemd.services.collectd = {
      description = "Collectd Monitoring Agent";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        ExecStart = "${package}/sbin/collectd -C ${conf} -f";
        User = cfg.user;
        Restart = "on-failure";
        RestartSec = 3;
      };
    };

    # Only create the dedicated user/group when the default name is kept.
    users.users = optionalAttrs (cfg.user == "collectd") {
      collectd = {
        isSystemUser = true;
        group = "collectd";
      };
    };

    users.groups = optionalAttrs (cfg.user == "collectd") {
      collectd = {};
    };
  };
}
|
||||
34
nixos/modules/services/monitoring/das_watchdog.nix
Normal file
34
nixos/modules/services/monitoring/das_watchdog.nix
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
# A general watchdog for the linux operating system that should run in the
# background at all times to ensure a realtime process won't hang the machine
{ config, lib, pkgs, ... }:

with lib;

let

  inherit (pkgs) das_watchdog;

in {
  ###### interface

  options.services.das_watchdog.enable = mkEnableOption "realtime watchdog";

  ###### implementation

  config = mkIf config.services.das_watchdog.enable {
    environment.systemPackages = [ das_watchdog ];
    # One-shot style daemon: starts once after the session targets and stays
    # "active" after the process exits.
    systemd.services.das_watchdog = {
      description = "Watchdog to ensure a realtime process won't hang the machine";
      after = [ "multi-user.target" "sound.target" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        User = "root";
        Type = "simple";
        ExecStart = "${das_watchdog}/bin/das_watchdog";
        RemainAfterExit = true;
      };
    };
  };
}
|
||||
296
nixos/modules/services/monitoring/datadog-agent.nix
Normal file
296
nixos/modules/services/monitoring/datadog-agent.nix
Normal file
|
|
@ -0,0 +1,296 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.datadog-agent;
|
||||
|
||||
ddConf = {
|
||||
skip_ssl_validation = false;
|
||||
confd_path = "/etc/datadog-agent/conf.d";
|
||||
additional_checksd = "/etc/datadog-agent/checks.d";
|
||||
use_dogstatsd = true;
|
||||
}
|
||||
// optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
|
||||
// optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
|
||||
// optionalAttrs (cfg.ddUrl != null) { dd_url = cfg.ddUrl; }
|
||||
// optionalAttrs (cfg.site != null) { site = cfg.site; }
|
||||
// optionalAttrs (cfg.tags != null ) { tags = concatStringsSep ", " cfg.tags; }
|
||||
// optionalAttrs (cfg.enableLiveProcessCollection) { process_config = { enabled = "true"; }; }
|
||||
// optionalAttrs (cfg.enableTraceAgent) { apm_config = { enabled = true; }; }
|
||||
// cfg.extraConfig;
|
||||
|
||||
# Generate Datadog configuration files for each configured checks.
|
||||
# This works because check configurations have predictable paths,
|
||||
# and because JSON is a valid subset of YAML.
|
||||
makeCheckConfigs = entries: mapAttrs' (name: conf: {
|
||||
name = "datadog-agent/conf.d/${name}.d/conf.yaml";
|
||||
value.source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf);
|
||||
}) entries;
|
||||
|
||||
defaultChecks = {
|
||||
disk = cfg.diskCheck;
|
||||
network = cfg.networkCheck;
|
||||
};
|
||||
|
||||
# Assemble all check configurations and the top-level agent
|
||||
# configuration.
|
||||
etcfiles = with pkgs; with builtins;
|
||||
{ "datadog-agent/datadog.yaml" = {
|
||||
source = writeText "datadog.yaml" (toJSON ddConf);
|
||||
};
|
||||
} // makeCheckConfigs (cfg.checks // defaultChecks);
|
||||
|
||||
# Apply the configured extraIntegrations to the provided agent
|
||||
# package. See the documentation of `dd-agent/integrations-core.nix`
|
||||
# for detailed information on this.
|
||||
datadogPkg = cfg.package.override {
|
||||
pythonPackages = pkgs.datadog-integrations-core cfg.extraIntegrations;
|
||||
};
|
||||
in {
|
||||
options.services.datadog-agent = {
|
||||
enable = mkOption {
|
||||
description = ''
|
||||
Whether to enable the datadog-agent v7 monitoring service
|
||||
'';
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
default = pkgs.datadog-agent;
|
||||
defaultText = literalExpression "pkgs.datadog-agent";
|
||||
description = ''
|
||||
Which DataDog v7 agent package to use. Note that the provided
|
||||
package is expected to have an overridable `pythonPackages`-attribute
|
||||
which configures the Python environment with the Datadog
|
||||
checks.
|
||||
'';
|
||||
type = types.package;
|
||||
};
|
||||
|
||||
apiKeyFile = mkOption {
|
||||
description = ''
|
||||
Path to a file containing the Datadog API key to associate the
|
||||
agent with your account.
|
||||
'';
|
||||
example = "/run/keys/datadog_api_key";
|
||||
type = types.path;
|
||||
};
|
||||
|
||||
ddUrl = mkOption {
|
||||
description = ''
|
||||
Custom dd_url to configure the agent with. Useful if traffic to datadog
|
||||
needs to go through a proxy.
|
||||
Don't use this to point to another datadog site (EU) - use site instead.
|
||||
'';
|
||||
default = null;
|
||||
example = "http://haproxy.example.com:3834";
|
||||
type = types.nullOr types.str;
|
||||
};
|
||||
|
||||
site = mkOption {
|
||||
description = ''
|
||||
The datadog site to point the agent towards.
|
||||
Set to datadoghq.eu to point it to their EU site.
|
||||
'';
|
||||
default = null;
|
||||
example = "datadoghq.eu";
|
||||
type = types.nullOr types.str;
|
||||
};
|
||||
|
||||
tags = mkOption {
|
||||
description = "The tags to mark this Datadog agent";
|
||||
example = [ "test" "service" ];
|
||||
default = null;
|
||||
type = types.nullOr (types.listOf types.str);
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
description = "The hostname to show in the Datadog dashboard (optional)";
|
||||
default = null;
|
||||
example = "mymachine.mydomain";
|
||||
type = types.nullOr types.str;
|
||||
};
|
||||
|
||||
logLevel = mkOption {
|
||||
description = "Logging verbosity.";
|
||||
default = null;
|
||||
type = types.nullOr (types.enum ["DEBUG" "INFO" "WARN" "ERROR"]);
|
||||
};
|
||||
|
||||
extraIntegrations = mkOption {
|
||||
default = {};
|
||||
type = types.attrs;
|
||||
|
||||
description = ''
|
||||
Extra integrations from the Datadog core-integrations
|
||||
repository that should be built and included.
|
||||
|
||||
By default the included integrations are disk, mongo, network,
|
||||
nginx and postgres.
|
||||
|
||||
To include additional integrations the name of the derivation
|
||||
and a function to filter its dependencies from the Python
|
||||
package set must be provided.
|
||||
'';
|
||||
|
||||
example = literalExpression ''
|
||||
{
|
||||
ntp = pythonPackages: [ pythonPackages.ntplib ];
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
default = {};
|
||||
type = types.attrs;
|
||||
description = ''
|
||||
Extra configuration options that will be merged into the
|
||||
main config file <filename>datadog.yaml</filename>.
|
||||
'';
|
||||
};
|
||||
|
||||
enableLiveProcessCollection = mkOption {
|
||||
description = ''
|
||||
Whether to enable the live process collection agent.
|
||||
'';
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
enableTraceAgent = mkOption {
|
||||
description = ''
|
||||
Whether to enable the trace agent.
|
||||
'';
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
checks = mkOption {
|
||||
description = ''
|
||||
Configuration for all Datadog checks. Keys of this attribute
|
||||
set will be used as the name of the check to create the
|
||||
appropriate configuration in `conf.d/$check.d/conf.yaml`.
|
||||
|
||||
The configuration is converted into JSON from the plain Nix
|
||||
language configuration, meaning that you should write
|
||||
configuration adhering to Datadog's documentation - but in Nix
|
||||
language.
|
||||
|
||||
Refer to the implementation of this module (specifically the
|
||||
definition of `defaultChecks`) for an example.
|
||||
|
||||
Note: The 'disk' and 'network' check are configured in
|
||||
separate options because they exist by default. Attempting to
|
||||
override their configuration here will have no effect.
|
||||
'';
|
||||
|
||||
example = {
|
||||
http_check = {
|
||||
init_config = null; # sic!
|
||||
instances = [
|
||||
{
|
||||
name = "some-service";
|
||||
url = "http://localhost:1337/healthz";
|
||||
tags = [ "some-service" ];
|
||||
}
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
default = {};
|
||||
|
||||
# sic! The structure of the values is up to the check, so we can
|
||||
# not usefully constrain the type further.
|
||||
type = with types; attrsOf attrs;
|
||||
};
|
||||
|
||||
diskCheck = mkOption {
|
||||
description = "Disk check config";
|
||||
type = types.attrs;
|
||||
default = {
|
||||
init_config = {};
|
||||
instances = [ { use_mount = "false"; } ];
|
||||
};
|
||||
};
|
||||
|
||||
networkCheck = mkOption {
|
||||
description = "Network check config";
|
||||
type = types.attrs;
|
||||
default = {
|
||||
init_config = {};
|
||||
# Network check only supports one configured instance
|
||||
instances = [ { collect_connection_state = false;
|
||||
excluded_interfaces = [ "lo" "lo0" ]; } ];
|
||||
};
|
||||
};
|
||||
};
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute2 ];
|
||||
|
||||
users.users.datadog = {
|
||||
description = "Datadog Agent User";
|
||||
uid = config.ids.uids.datadog;
|
||||
group = "datadog";
|
||||
home = "/var/log/datadog/";
|
||||
createHome = true;
|
||||
};
|
||||
|
||||
users.groups.datadog.gid = config.ids.gids.datadog;
|
||||
|
||||
systemd.services = let
|
||||
makeService = attrs: recursiveUpdate {
|
||||
path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.iproute2 ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
User = "datadog";
|
||||
Group = "datadog";
|
||||
Restart = "always";
|
||||
RestartSec = 2;
|
||||
};
|
||||
restartTriggers = [ datadogPkg ] ++ map (x: x.source) (attrValues etcfiles);
|
||||
} attrs;
|
||||
in {
|
||||
datadog-agent = makeService {
|
||||
description = "Datadog agent monitor";
|
||||
preStart = ''
|
||||
chown -R datadog: /etc/datadog-agent
|
||||
rm -f /etc/datadog-agent/auth_token
|
||||
'';
|
||||
script = ''
|
||||
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
|
||||
exec ${datadogPkg}/bin/agent run -c /etc/datadog-agent/datadog.yaml
|
||||
'';
|
||||
serviceConfig.PermissionsStartOnly = true;
|
||||
};
|
||||
|
||||
dd-jmxfetch = lib.mkIf (lib.hasAttr "jmx" cfg.checks) (makeService {
|
||||
description = "Datadog JMX Fetcher";
|
||||
path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
|
||||
serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch";
|
||||
});
|
||||
|
||||
datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService {
|
||||
description = "Datadog Live Process Agent";
|
||||
path = [ ];
|
||||
script = ''
|
||||
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
|
||||
${pkgs.datadog-process-agent}/bin/process-agent --config /etc/datadog-agent/datadog.yaml
|
||||
'';
|
||||
});
|
||||
|
||||
datadog-trace-agent = lib.mkIf cfg.enableTraceAgent (makeService {
|
||||
description = "Datadog Trace Agent";
|
||||
path = [ ];
|
||||
script = ''
|
||||
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
|
||||
${datadogPkg}/bin/trace-agent -config /etc/datadog-agent/datadog.yaml
|
||||
'';
|
||||
});
|
||||
|
||||
};
|
||||
|
||||
environment.etc = etcfiles;
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
# Generated using update-dd-agent-default, please re-run after updating dd-agent. DO NOT EDIT MANUALLY.
|
||||
[
|
||||
"auto_conf"
|
||||
"agent_metrics.yaml.default"
|
||||
"disk.yaml.default"
|
||||
"network.yaml.default"
|
||||
"ntp.yaml.default"
|
||||
]
|
||||
236
nixos/modules/services/monitoring/dd-agent/dd-agent.nix
Normal file
236
nixos/modules/services/monitoring/dd-agent/dd-agent.nix
Normal file
|
|
@ -0,0 +1,236 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.dd-agent;
|
||||
|
||||
ddConf = pkgs.writeText "datadog.conf" ''
|
||||
[Main]
|
||||
dd_url: https://app.datadoghq.com
|
||||
skip_ssl_validation: no
|
||||
api_key: ${cfg.api_key}
|
||||
${optionalString (cfg.hostname != null) "hostname: ${cfg.hostname}"}
|
||||
|
||||
collector_log_file: /var/log/datadog/collector.log
|
||||
forwarder_log_file: /var/log/datadog/forwarder.log
|
||||
dogstatsd_log_file: /var/log/datadog/dogstatsd.log
|
||||
pup_log_file: /var/log/datadog/pup.log
|
||||
|
||||
# proxy_host: my-proxy.com
|
||||
# proxy_port: 3128
|
||||
# proxy_user: user
|
||||
# proxy_password: password
|
||||
|
||||
# tags: mytag0, mytag1
|
||||
${optionalString (cfg.tags != null ) "tags: ${concatStringsSep ", " cfg.tags }"}
|
||||
|
||||
# collect_ec2_tags: no
|
||||
# recent_point_threshold: 30
|
||||
# use_mount: no
|
||||
# listen_port: 17123
|
||||
# graphite_listen_port: 17124
|
||||
# non_local_traffic: no
|
||||
# use_curl_http_client: False
|
||||
# bind_host: localhost
|
||||
|
||||
# use_pup: no
|
||||
# pup_port: 17125
|
||||
# pup_interface: localhost
|
||||
# pup_url: http://localhost:17125
|
||||
|
||||
# dogstatsd_port : 8125
|
||||
# dogstatsd_interval : 10
|
||||
# dogstatsd_normalize : yes
|
||||
# statsd_forward_host: address_of_own_statsd_server
|
||||
# statsd_forward_port: 8125
|
||||
|
||||
# device_blacklist_re: .*\/dev\/mapper\/lxc-box.*
|
||||
|
||||
# ganglia_host: localhost
|
||||
# ganglia_port: 8651
|
||||
'';
|
||||
|
||||
diskConfig = pkgs.writeText "disk.yaml" ''
|
||||
init_config:
|
||||
|
||||
instances:
|
||||
- use_mount: no
|
||||
'';
|
||||
|
||||
networkConfig = pkgs.writeText "network.yaml" ''
|
||||
init_config:
|
||||
|
||||
instances:
|
||||
# Network check only supports one configured instance
|
||||
- collect_connection_state: false
|
||||
excluded_interfaces:
|
||||
- lo
|
||||
- lo0
|
||||
'';
|
||||
|
||||
postgresqlConfig = pkgs.writeText "postgres.yaml" cfg.postgresqlConfig;
|
||||
nginxConfig = pkgs.writeText "nginx.yaml" cfg.nginxConfig;
|
||||
mongoConfig = pkgs.writeText "mongo.yaml" cfg.mongoConfig;
|
||||
jmxConfig = pkgs.writeText "jmx.yaml" cfg.jmxConfig;
|
||||
processConfig = pkgs.writeText "process.yaml" cfg.processConfig;
|
||||
|
||||
etcfiles =
|
||||
let
|
||||
defaultConfd = import ./dd-agent-defaults.nix;
|
||||
in
|
||||
listToAttrs (map (f: {
|
||||
name = "dd-agent/conf.d/${f}";
|
||||
value.source = "${pkgs.dd-agent}/agent/conf.d-system/${f}";
|
||||
}) defaultConfd) //
|
||||
{
|
||||
"dd-agent/datadog.conf".source = ddConf;
|
||||
"dd-agent/conf.d/disk.yaml".source = diskConfig;
|
||||
"dd-agent/conf.d/network.yaml".source = networkConfig;
|
||||
} //
|
||||
(optionalAttrs (cfg.postgresqlConfig != null)
|
||||
{
|
||||
"dd-agent/conf.d/postgres.yaml".source = postgresqlConfig;
|
||||
}) //
|
||||
(optionalAttrs (cfg.nginxConfig != null)
|
||||
{
|
||||
"dd-agent/conf.d/nginx.yaml".source = nginxConfig;
|
||||
}) //
|
||||
(optionalAttrs (cfg.mongoConfig != null)
|
||||
{
|
||||
"dd-agent/conf.d/mongo.yaml".source = mongoConfig;
|
||||
}) //
|
||||
(optionalAttrs (cfg.processConfig != null)
|
||||
{
|
||||
"dd-agent/conf.d/process.yaml".source = processConfig;
|
||||
}) //
|
||||
(optionalAttrs (cfg.jmxConfig != null)
|
||||
{
|
||||
"dd-agent/conf.d/jmx.yaml".source = jmxConfig;
|
||||
});
|
||||
|
||||
in {
|
||||
options.services.dd-agent = {
|
||||
enable = mkOption {
|
||||
description = ''
|
||||
Whether to enable the dd-agent v5 monitoring service.
|
||||
For datadog-agent v6, see <option>services.datadog-agent.enable</option>.
|
||||
'';
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
api_key = mkOption {
|
||||
description = ''
|
||||
The Datadog API key to associate the agent with your account.
|
||||
|
||||
Warning: this key is stored in cleartext within the world-readable
|
||||
Nix store! Consider using the new v6
|
||||
<option>services.datadog-agent</option> module instead.
|
||||
'';
|
||||
example = "ae0aa6a8f08efa988ba0a17578f009ab";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
tags = mkOption {
|
||||
description = "The tags to mark this Datadog agent";
|
||||
example = [ "test" "service" ];
|
||||
default = null;
|
||||
type = types.nullOr (types.listOf types.str);
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
description = "The hostname to show in the Datadog dashboard (optional)";
|
||||
default = null;
|
||||
example = "mymachine.mydomain";
|
||||
type = types.nullOr types.str;
|
||||
};
|
||||
|
||||
postgresqlConfig = mkOption {
|
||||
description = "Datadog PostgreSQL integration configuration";
|
||||
default = null;
|
||||
type = types.nullOr types.lines;
|
||||
};
|
||||
|
||||
nginxConfig = mkOption {
|
||||
description = "Datadog nginx integration configuration";
|
||||
default = null;
|
||||
type = types.nullOr types.lines;
|
||||
};
|
||||
|
||||
mongoConfig = mkOption {
|
||||
description = "MongoDB integration configuration";
|
||||
default = null;
|
||||
type = types.nullOr types.lines;
|
||||
};
|
||||
|
||||
jmxConfig = mkOption {
|
||||
description = "JMX integration configuration";
|
||||
default = null;
|
||||
type = types.nullOr types.lines;
|
||||
};
|
||||
|
||||
processConfig = mkOption {
|
||||
description = ''
|
||||
Process integration configuration
|
||||
See <link xlink:href="https://docs.datadoghq.com/integrations/process/"/>
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.lines;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.dd-agent pkgs.sysstat pkgs.procps ];
|
||||
|
||||
users.users.datadog = {
|
||||
description = "Datadog Agent User";
|
||||
uid = config.ids.uids.datadog;
|
||||
group = "datadog";
|
||||
home = "/var/log/datadog/";
|
||||
createHome = true;
|
||||
};
|
||||
|
||||
users.groups.datadog.gid = config.ids.gids.datadog;
|
||||
|
||||
systemd.services = let
|
||||
makeService = attrs: recursiveUpdate {
|
||||
path = [ pkgs.dd-agent pkgs.python pkgs.sysstat pkgs.procps pkgs.gohai ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
User = "datadog";
|
||||
Group = "datadog";
|
||||
Restart = "always";
|
||||
RestartSec = 2;
|
||||
PrivateTmp = true;
|
||||
};
|
||||
restartTriggers = [ pkgs.dd-agent ddConf diskConfig networkConfig postgresqlConfig nginxConfig mongoConfig jmxConfig processConfig ];
|
||||
} attrs;
|
||||
in {
|
||||
dd-agent = makeService {
|
||||
description = "Datadog agent monitor";
|
||||
serviceConfig.ExecStart = "${pkgs.dd-agent}/bin/dd-agent foreground";
|
||||
};
|
||||
|
||||
dogstatsd = makeService {
|
||||
description = "Datadog statsd";
|
||||
environment.TMPDIR = "/run/dogstatsd";
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.dd-agent}/bin/dogstatsd start";
|
||||
Type = "forking";
|
||||
PIDFile = "/run/dogstatsd/dogstatsd.pid";
|
||||
RuntimeDirectory = "dogstatsd";
|
||||
};
|
||||
};
|
||||
|
||||
dd-jmxfetch = lib.mkIf (cfg.jmxConfig != null) {
|
||||
description = "Datadog JMX Fetcher";
|
||||
path = [ pkgs.dd-agent pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
|
||||
serviceConfig.ExecStart = "${pkgs.dd-agent}/bin/dd-jmxfetch";
|
||||
};
|
||||
};
|
||||
|
||||
environment.etc = etcfiles;
|
||||
};
|
||||
}
|
||||
9
nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults
Executable file
9
nixos/modules/services/monitoring/dd-agent/update-dd-agent-defaults
Executable file
|
|
@ -0,0 +1,9 @@
|
|||
#!/usr/bin/env bash
|
||||
dd=$(nix-build --no-out-link -A dd-agent ../../../..)
|
||||
echo '# Generated using update-dd-agent-default, please re-run after updating dd-agent. DO NOT EDIT MANUALLY.' > dd-agent-defaults.nix
|
||||
echo '[' >> dd-agent-defaults.nix
|
||||
echo ' "auto_conf"' >> dd-agent-defaults.nix
|
||||
for f in $(find $dd/agent/conf.d-system -maxdepth 1 -type f | grep -v '\.example' | sort); do
|
||||
echo " \"$(basename $f)\"" >> dd-agent-defaults.nix
|
||||
done
|
||||
echo ']' >> dd-agent-defaults.nix
|
||||
25
nixos/modules/services/monitoring/do-agent.nix
Normal file
25
nixos/modules/services/monitoring/do-agent.nix
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.do-agent;
|
||||
|
||||
in
|
||||
{
|
||||
options.services.do-agent = {
|
||||
enable = mkEnableOption "do-agent, the DigitalOcean droplet metrics agent";
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.packages = [ pkgs.do-agent ];
|
||||
|
||||
systemd.services.do-agent = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = [ "" "${pkgs.do-agent}/bin/do-agent --syslog" ];
|
||||
DynamicUser = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
63
nixos/modules/services/monitoring/fusion-inventory.nix
Normal file
63
nixos/modules/services/monitoring/fusion-inventory.nix
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
# Fusion Inventory daemon.
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.fusionInventory;
|
||||
|
||||
configFile = pkgs.writeText "fusion_inventory.conf" ''
|
||||
server = ${concatStringsSep ", " cfg.servers}
|
||||
|
||||
logger = stderr
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.fusionInventory = {
|
||||
|
||||
enable = mkEnableOption "Fusion Inventory Agent";
|
||||
|
||||
servers = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = ''
|
||||
The urls of the OCS/GLPI servers to connect to.
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
default = "";
|
||||
type = types.lines;
|
||||
description = ''
|
||||
Configuration that is injected verbatim into the configuration file.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
users.users.fusion-inventory = {
|
||||
description = "FusionInventory user";
|
||||
isSystemUser = true;
|
||||
};
|
||||
|
||||
systemd.services.fusion-inventory = {
|
||||
description = "Fusion Inventory Agent";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
150
nixos/modules/services/monitoring/grafana-image-renderer.nix
Normal file
150
nixos/modules/services/monitoring/grafana-image-renderer.nix
Normal file
|
|
@ -0,0 +1,150 @@
|
|||
{ lib, pkgs, config, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.grafana-image-renderer;
|
||||
|
||||
format = pkgs.formats.json { };
|
||||
|
||||
configFile = format.generate "grafana-image-renderer-config.json" cfg.settings;
|
||||
in {
|
||||
options.services.grafana-image-renderer = {
|
||||
enable = mkEnableOption "grafana-image-renderer";
|
||||
|
||||
chromium = mkOption {
|
||||
type = types.package;
|
||||
description = ''
|
||||
The chromium to use for image rendering.
|
||||
'';
|
||||
};
|
||||
|
||||
verbose = mkEnableOption "verbosity for the service";
|
||||
|
||||
provisionGrafana = mkEnableOption "Grafana configuration for grafana-image-renderer";
|
||||
|
||||
settings = mkOption {
|
||||
type = types.submodule {
|
||||
freeformType = format.type;
|
||||
|
||||
options = {
|
||||
service = {
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 8081;
|
||||
description = ''
|
||||
The TCP port to use for the rendering server.
|
||||
'';
|
||||
};
|
||||
logging.level = mkOption {
|
||||
type = types.enum [ "error" "warning" "info" "debug" ];
|
||||
default = "info";
|
||||
description = ''
|
||||
The log-level of the <filename>grafana-image-renderer.service</filename>-unit.
|
||||
'';
|
||||
};
|
||||
};
|
||||
rendering = {
|
||||
width = mkOption {
|
||||
default = 1000;
|
||||
type = types.ints.positive;
|
||||
description = ''
|
||||
Width of the PNG used to display the alerting graph.
|
||||
'';
|
||||
};
|
||||
height = mkOption {
|
||||
default = 500;
|
||||
type = types.ints.positive;
|
||||
description = ''
|
||||
Height of the PNG used to display the alerting graph.
|
||||
'';
|
||||
};
|
||||
mode = mkOption {
|
||||
default = "default";
|
||||
type = types.enum [ "default" "reusable" "clustered" ];
|
||||
description = ''
|
||||
Rendering mode of <package>grafana-image-renderer</package>:
|
||||
<itemizedlist>
|
||||
<listitem><para><literal>default:</literal> Creates on browser-instance
|
||||
per rendering request.</para></listitem>
|
||||
<listitem><para><literal>reusable:</literal> One browser instance
|
||||
will be started and reused for each rendering request.</para></listitem>
|
||||
<listitem><para><literal>clustered:</literal> allows to precisely
|
||||
configure how many browser-instances are supposed to be used. The values
|
||||
for that mode can be declared in <literal>rendering.clustering</literal>.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
'';
|
||||
};
|
||||
args = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "--no-sandbox" ];
|
||||
description = ''
|
||||
List of CLI flags passed to <package>chromium</package>.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
default = {};
|
||||
|
||||
description = ''
|
||||
Configuration attributes for <package>grafana-image-renderer</package>.
|
||||
|
||||
See <link xlink:href="https://github.com/grafana/grafana-image-renderer/blob/ce1f81438e5f69c7fd7c73ce08bab624c4c92e25/default.json" />
|
||||
for supported values.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [
|
||||
{ assertion = cfg.provisionGrafana -> config.services.grafana.enable;
|
||||
message = ''
|
||||
To provision a Grafana instance to use grafana-image-renderer,
|
||||
`services.grafana.enable` must be set to `true`!
|
||||
'';
|
||||
}
|
||||
];
|
||||
|
||||
services.grafana.extraOptions = mkIf cfg.provisionGrafana {
|
||||
RENDERING_SERVER_URL = "http://localhost:${toString cfg.settings.service.port}/render";
|
||||
RENDERING_CALLBACK_URL = "http://localhost:${toString config.services.grafana.port}";
|
||||
};
|
||||
|
||||
services.grafana-image-renderer.chromium = mkDefault pkgs.chromium;
|
||||
|
||||
services.grafana-image-renderer.settings = {
|
||||
rendering = mapAttrs (const mkDefault) {
|
||||
chromeBin = "${cfg.chromium}/bin/chromium";
|
||||
verboseLogging = cfg.verbose;
|
||||
timezone = config.time.timeZone;
|
||||
};
|
||||
|
||||
service = {
|
||||
logging.level = mkIf cfg.verbose (mkDefault "debug");
|
||||
metrics.enabled = mkDefault false;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.grafana-image-renderer = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
description = " A Grafana backend plugin that handles rendering of panels & dashboards to PNGs using headless browser (Chromium/Chrome)";
|
||||
|
||||
environment = {
|
||||
PUPPETEER_SKIP_CHROMIUM_DOWNLOAD = "true";
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
DynamicUser = true;
|
||||
PrivateTmp = true;
|
||||
ExecStart = "${pkgs.grafana-image-renderer}/bin/grafana-image-renderer server --config=${configFile}";
|
||||
Restart = "always";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
meta.maintainers = with maintainers; [ ma27 ];
|
||||
}
|
||||
67
nixos/modules/services/monitoring/grafana-reporter.nix
Normal file
67
nixos/modules/services/monitoring/grafana-reporter.nix
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.grafana_reporter;
|
||||
|
||||
in {
|
||||
options.services.grafana_reporter = {
|
||||
enable = mkEnableOption "grafana_reporter";
|
||||
|
||||
grafana = {
|
||||
protocol = mkOption {
|
||||
description = "Grafana protocol.";
|
||||
default = "http";
|
||||
type = types.enum ["http" "https"];
|
||||
};
|
||||
addr = mkOption {
|
||||
description = "Grafana address.";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
port = mkOption {
|
||||
description = "Grafana port.";
|
||||
default = 3000;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
};
|
||||
addr = mkOption {
|
||||
description = "Listening address.";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Listening port.";
|
||||
default = 8686;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
templateDir = mkOption {
|
||||
description = "Optional template directory to use custom tex templates";
|
||||
default = pkgs.grafana_reporter;
|
||||
defaultText = literalExpression "pkgs.grafana_reporter";
|
||||
type = types.either types.str types.path;
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.grafana_reporter = {
|
||||
description = "Grafana Reporter Service Daemon";
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["network.target"];
|
||||
serviceConfig = let
|
||||
args = lib.concatStringsSep " " [
|
||||
"-proto ${cfg.grafana.protocol}://"
|
||||
"-ip ${cfg.grafana.addr}:${toString cfg.grafana.port}"
|
||||
"-port :${toString cfg.port}"
|
||||
"-templates ${cfg.templateDir}"
|
||||
];
|
||||
in {
|
||||
ExecStart = "${pkgs.grafana_reporter}/bin/grafana-reporter ${args}";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
808
nixos/modules/services/monitoring/grafana.nix
Normal file
808
nixos/modules/services/monitoring/grafana.nix
Normal file
|
|
@ -0,0 +1,808 @@
|
|||
{ options, config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.grafana;
|
||||
opt = options.services.grafana;
|
||||
declarativePlugins = pkgs.linkFarm "grafana-plugins" (builtins.map (pkg: { name = pkg.pname; path = pkg; }) cfg.declarativePlugins);
|
||||
useMysql = cfg.database.type == "mysql";
|
||||
usePostgresql = cfg.database.type == "postgres";
|
||||
|
||||
envOptions = {
|
||||
PATHS_DATA = cfg.dataDir;
|
||||
PATHS_PLUGINS = if builtins.isNull cfg.declarativePlugins then "${cfg.dataDir}/plugins" else declarativePlugins;
|
||||
PATHS_LOGS = "${cfg.dataDir}/log";
|
||||
|
||||
SERVER_SERVE_FROM_SUBPATH = boolToString cfg.server.serveFromSubPath;
|
||||
SERVER_PROTOCOL = cfg.protocol;
|
||||
SERVER_HTTP_ADDR = cfg.addr;
|
||||
SERVER_HTTP_PORT = cfg.port;
|
||||
SERVER_SOCKET = cfg.socket;
|
||||
SERVER_DOMAIN = cfg.domain;
|
||||
SERVER_ROOT_URL = cfg.rootUrl;
|
||||
SERVER_STATIC_ROOT_PATH = cfg.staticRootPath;
|
||||
SERVER_CERT_FILE = cfg.certFile;
|
||||
SERVER_CERT_KEY = cfg.certKey;
|
||||
|
||||
DATABASE_TYPE = cfg.database.type;
|
||||
DATABASE_HOST = cfg.database.host;
|
||||
DATABASE_NAME = cfg.database.name;
|
||||
DATABASE_USER = cfg.database.user;
|
||||
DATABASE_PASSWORD = cfg.database.password;
|
||||
DATABASE_PATH = cfg.database.path;
|
||||
DATABASE_CONN_MAX_LIFETIME = cfg.database.connMaxLifetime;
|
||||
|
||||
SECURITY_ADMIN_USER = cfg.security.adminUser;
|
||||
SECURITY_ADMIN_PASSWORD = cfg.security.adminPassword;
|
||||
SECURITY_SECRET_KEY = cfg.security.secretKey;
|
||||
|
||||
USERS_ALLOW_SIGN_UP = boolToString cfg.users.allowSignUp;
|
||||
USERS_ALLOW_ORG_CREATE = boolToString cfg.users.allowOrgCreate;
|
||||
USERS_AUTO_ASSIGN_ORG = boolToString cfg.users.autoAssignOrg;
|
||||
USERS_AUTO_ASSIGN_ORG_ROLE = cfg.users.autoAssignOrgRole;
|
||||
|
||||
AUTH_DISABLE_LOGIN_FORM = boolToString cfg.auth.disableLoginForm;
|
||||
|
||||
AUTH_ANONYMOUS_ENABLED = boolToString cfg.auth.anonymous.enable;
|
||||
AUTH_ANONYMOUS_ORG_NAME = cfg.auth.anonymous.org_name;
|
||||
AUTH_ANONYMOUS_ORG_ROLE = cfg.auth.anonymous.org_role;
|
||||
|
||||
AUTH_AZUREAD_NAME = "Azure AD";
|
||||
AUTH_AZUREAD_ENABLED = boolToString cfg.auth.azuread.enable;
|
||||
AUTH_AZUREAD_ALLOW_SIGN_UP = boolToString cfg.auth.azuread.allowSignUp;
|
||||
AUTH_AZUREAD_CLIENT_ID = cfg.auth.azuread.clientId;
|
||||
AUTH_AZUREAD_SCOPES = "openid email profile";
|
||||
AUTH_AZUREAD_AUTH_URL = "https://login.microsoftonline.com/${cfg.auth.azuread.tenantId}/oauth2/v2.0/authorize";
|
||||
AUTH_AZUREAD_TOKEN_URL = "https://login.microsoftonline.com/${cfg.auth.azuread.tenantId}/oauth2/v2.0/token";
|
||||
AUTH_AZUREAD_ALLOWED_DOMAINS = cfg.auth.azuread.allowedDomains;
|
||||
AUTH_AZUREAD_ALLOWED_GROUPS = cfg.auth.azuread.allowedGroups;
|
||||
AUTH_AZUREAD_ROLE_ATTRIBUTE_STRICT = false;
|
||||
|
||||
AUTH_GOOGLE_ENABLED = boolToString cfg.auth.google.enable;
|
||||
AUTH_GOOGLE_ALLOW_SIGN_UP = boolToString cfg.auth.google.allowSignUp;
|
||||
AUTH_GOOGLE_CLIENT_ID = cfg.auth.google.clientId;
|
||||
|
||||
ANALYTICS_REPORTING_ENABLED = boolToString cfg.analytics.reporting.enable;
|
||||
|
||||
SMTP_ENABLED = boolToString cfg.smtp.enable;
|
||||
SMTP_HOST = cfg.smtp.host;
|
||||
SMTP_USER = cfg.smtp.user;
|
||||
SMTP_PASSWORD = cfg.smtp.password;
|
||||
SMTP_FROM_ADDRESS = cfg.smtp.fromAddress;
|
||||
} // cfg.extraOptions;
|
||||
|
||||
datasourceConfiguration = {
|
||||
apiVersion = 1;
|
||||
datasources = cfg.provision.datasources;
|
||||
};
|
||||
|
||||
datasourceFile = pkgs.writeText "datasource.yaml" (builtins.toJSON datasourceConfiguration);
|
||||
|
||||
dashboardConfiguration = {
|
||||
apiVersion = 1;
|
||||
providers = cfg.provision.dashboards;
|
||||
};
|
||||
|
||||
dashboardFile = pkgs.writeText "dashboard.yaml" (builtins.toJSON dashboardConfiguration);
|
||||
|
||||
notifierConfiguration = {
|
||||
apiVersion = 1;
|
||||
notifiers = cfg.provision.notifiers;
|
||||
};
|
||||
|
||||
notifierFile = pkgs.writeText "notifier.yaml" (builtins.toJSON notifierConfiguration);
|
||||
|
||||
provisionConfDir = pkgs.runCommand "grafana-provisioning" { } ''
|
||||
mkdir -p $out/{datasources,dashboards,notifiers}
|
||||
ln -sf ${datasourceFile} $out/datasources/datasource.yaml
|
||||
ln -sf ${dashboardFile} $out/dashboards/dashboard.yaml
|
||||
ln -sf ${notifierFile} $out/notifiers/notifier.yaml
|
||||
'';
|
||||
|
||||
# Get a submodule without any embedded metadata:
|
||||
_filter = x: filterAttrs (k: v: k != "_module") x;
|
||||
|
||||
# http://docs.grafana.org/administration/provisioning/#datasources
|
||||
grafanaTypes.datasourceConfig = types.submodule {
|
||||
options = {
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
description = "Name of the datasource. Required.";
|
||||
};
|
||||
type = mkOption {
|
||||
type = types.str;
|
||||
description = "Datasource type. Required.";
|
||||
};
|
||||
access = mkOption {
|
||||
type = types.enum ["proxy" "direct"];
|
||||
default = "proxy";
|
||||
description = "Access mode. proxy or direct (Server or Browser in the UI). Required.";
|
||||
};
|
||||
orgId = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
description = "Org id. will default to orgId 1 if not specified.";
|
||||
};
|
||||
url = mkOption {
|
||||
type = types.str;
|
||||
description = "Url of the datasource.";
|
||||
};
|
||||
password = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Database password, if used.";
|
||||
};
|
||||
user = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Database user, if used.";
|
||||
};
|
||||
database = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Database name, if used.";
|
||||
};
|
||||
basicAuth = mkOption {
|
||||
type = types.nullOr types.bool;
|
||||
default = null;
|
||||
description = "Enable/disable basic auth.";
|
||||
};
|
||||
basicAuthUser = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Basic auth username.";
|
||||
};
|
||||
basicAuthPassword = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = "Basic auth password.";
|
||||
};
|
||||
withCredentials = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Enable/disable with credentials headers.";
|
||||
};
|
||||
isDefault = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Mark as default datasource. Max one per org.";
|
||||
};
|
||||
jsonData = mkOption {
|
||||
type = types.nullOr types.attrs;
|
||||
default = null;
|
||||
description = "Datasource specific configuration.";
|
||||
};
|
||||
secureJsonData = mkOption {
|
||||
type = types.nullOr types.attrs;
|
||||
default = null;
|
||||
description = "Datasource specific secure configuration.";
|
||||
};
|
||||
version = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
description = "Version.";
|
||||
};
|
||||
editable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Allow users to edit datasources from the UI.";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# http://docs.grafana.org/administration/provisioning/#dashboards
|
||||
grafanaTypes.dashboardConfig = types.submodule {
|
||||
options = {
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
default = "default";
|
||||
description = "Provider name.";
|
||||
};
|
||||
orgId = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
description = "Organization ID.";
|
||||
};
|
||||
folder = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = "Add dashboards to the specified folder.";
|
||||
};
|
||||
type = mkOption {
|
||||
type = types.str;
|
||||
default = "file";
|
||||
description = "Dashboard provider type.";
|
||||
};
|
||||
disableDeletion = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Disable deletion when JSON file is removed.";
|
||||
};
|
||||
updateIntervalSeconds = mkOption {
|
||||
type = types.int;
|
||||
default = 10;
|
||||
description = "How often Grafana will scan for changed dashboards.";
|
||||
};
|
||||
options = {
|
||||
path = mkOption {
|
||||
type = types.path;
|
||||
description = "Path grafana will watch for dashboards.";
|
||||
};
|
||||
foldersFromFilesStructure = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Use folder names from filesystem to create folders in Grafana.";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
grafanaTypes.notifierConfig = types.submodule {
|
||||
options = {
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
default = "default";
|
||||
description = "Notifier name.";
|
||||
};
|
||||
type = mkOption {
|
||||
type = types.enum ["dingding" "discord" "email" "googlechat" "hipchat" "kafka" "line" "teams" "opsgenie" "pagerduty" "prometheus-alertmanager" "pushover" "sensu" "sensugo" "slack" "telegram" "threema" "victorops" "webhook"];
|
||||
description = "Notifier type.";
|
||||
};
|
||||
uid = mkOption {
|
||||
type = types.str;
|
||||
description = "Unique notifier identifier.";
|
||||
};
|
||||
org_id = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
description = "Organization ID.";
|
||||
};
|
||||
org_name = mkOption {
|
||||
type = types.str;
|
||||
default = "Main Org.";
|
||||
description = "Organization name.";
|
||||
};
|
||||
is_default = mkOption {
|
||||
type = types.bool;
|
||||
description = "Is the default notifier.";
|
||||
default = false;
|
||||
};
|
||||
send_reminder = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Should the notifier be sent reminder notifications while alerts continue to fire.";
|
||||
};
|
||||
frequency = mkOption {
|
||||
type = types.str;
|
||||
default = "5m";
|
||||
description = "How frequently should the notifier be sent reminders.";
|
||||
};
|
||||
disable_resolve_message = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Turn off the message that sends when an alert returns to OK.";
|
||||
};
|
||||
settings = mkOption {
|
||||
type = types.nullOr types.attrs;
|
||||
default = null;
|
||||
description = "Settings for the notifier type.";
|
||||
};
|
||||
secure_settings = mkOption {
|
||||
type = types.nullOr types.attrs;
|
||||
default = null;
|
||||
description = "Secure settings for the notifier type.";
|
||||
};
|
||||
};
|
||||
};
|
||||
in {
|
||||
options.services.grafana = {
|
||||
enable = mkEnableOption "grafana";
|
||||
|
||||
protocol = mkOption {
|
||||
description = "Which protocol to listen.";
|
||||
default = "http";
|
||||
type = types.enum ["http" "https" "socket"];
|
||||
};
|
||||
|
||||
addr = mkOption {
|
||||
description = "Listening address.";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Listening port.";
|
||||
default = 3000;
|
||||
type = types.port;
|
||||
};
|
||||
|
||||
socket = mkOption {
|
||||
description = "Listening socket.";
|
||||
default = "/run/grafana/grafana.sock";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
domain = mkOption {
|
||||
description = "The public facing domain name used to access grafana from a browser.";
|
||||
default = "localhost";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
rootUrl = mkOption {
|
||||
description = "Full public facing url.";
|
||||
default = "%(protocol)s://%(domain)s:%(http_port)s/";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
certFile = mkOption {
|
||||
description = "Cert file for ssl.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
certKey = mkOption {
|
||||
description = "Cert key for ssl.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
staticRootPath = mkOption {
|
||||
description = "Root path for static assets.";
|
||||
default = "${cfg.package}/share/grafana/public";
|
||||
defaultText = literalExpression ''"''${package}/share/grafana/public"'';
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
description = "Package to use.";
|
||||
default = pkgs.grafana;
|
||||
defaultText = literalExpression "pkgs.grafana";
|
||||
type = types.package;
|
||||
};
|
||||
|
||||
declarativePlugins = mkOption {
|
||||
type = with types; nullOr (listOf path);
|
||||
default = null;
|
||||
description = "If non-null, then a list of packages containing Grafana plugins to install. If set, plugins cannot be manually installed.";
|
||||
example = literalExpression "with pkgs.grafanaPlugins; [ grafana-piechart-panel ]";
|
||||
# Make sure each plugin is added only once; otherwise building
|
||||
# the link farm fails, since the same path is added multiple
|
||||
# times.
|
||||
apply = x: if isList x then lib.unique x else x;
|
||||
};
|
||||
|
||||
dataDir = mkOption {
|
||||
description = "Data directory.";
|
||||
default = "/var/lib/grafana";
|
||||
type = types.path;
|
||||
};
|
||||
|
||||
database = {
|
||||
type = mkOption {
|
||||
description = "Database type.";
|
||||
default = "sqlite3";
|
||||
type = types.enum ["mysql" "sqlite3" "postgres"];
|
||||
};
|
||||
|
||||
host = mkOption {
|
||||
description = "Database host.";
|
||||
default = "127.0.0.1:3306";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
name = mkOption {
|
||||
description = "Database name.";
|
||||
default = "grafana";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
description = "Database user.";
|
||||
default = "root";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
description = ''
|
||||
Database password.
|
||||
This option is mutual exclusive with the passwordFile option.
|
||||
'';
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
passwordFile = mkOption {
|
||||
description = ''
|
||||
File that containts the database password.
|
||||
This option is mutual exclusive with the password option.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
path = mkOption {
|
||||
description = "Database path.";
|
||||
default = "${cfg.dataDir}/data/grafana.db";
|
||||
defaultText = literalExpression ''"''${config.${opt.dataDir}}/data/grafana.db"'';
|
||||
type = types.path;
|
||||
};
|
||||
|
||||
connMaxLifetime = mkOption {
|
||||
description = ''
|
||||
Sets the maximum amount of time (in seconds) a connection may be reused.
|
||||
For MySQL this setting should be shorter than the `wait_timeout' variable.
|
||||
'';
|
||||
default = "unlimited";
|
||||
example = 14400;
|
||||
type = types.either types.int (types.enum [ "unlimited" ]);
|
||||
};
|
||||
};
|
||||
|
||||
provision = {
|
||||
enable = mkEnableOption "provision";
|
||||
datasources = mkOption {
|
||||
description = "Grafana datasources configuration.";
|
||||
default = [];
|
||||
type = types.listOf grafanaTypes.datasourceConfig;
|
||||
apply = x: map _filter x;
|
||||
};
|
||||
dashboards = mkOption {
|
||||
description = "Grafana dashboard configuration.";
|
||||
default = [];
|
||||
type = types.listOf grafanaTypes.dashboardConfig;
|
||||
apply = x: map _filter x;
|
||||
};
|
||||
notifiers = mkOption {
|
||||
description = "Grafana notifier configuration.";
|
||||
default = [];
|
||||
type = types.listOf grafanaTypes.notifierConfig;
|
||||
apply = x: map _filter x;
|
||||
};
|
||||
};
|
||||
|
||||
security = {
|
||||
adminUser = mkOption {
|
||||
description = "Default admin username.";
|
||||
default = "admin";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
adminPassword = mkOption {
|
||||
description = ''
|
||||
Default admin password.
|
||||
This option is mutual exclusive with the adminPasswordFile option.
|
||||
'';
|
||||
default = "admin";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
adminPasswordFile = mkOption {
|
||||
description = ''
|
||||
Default admin password.
|
||||
This option is mutual exclusive with the <literal>adminPassword</literal> option.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
|
||||
secretKey = mkOption {
|
||||
description = "Secret key used for signing.";
|
||||
default = "SW2YcwTIb9zpOOhoPsMm";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
secretKeyFile = mkOption {
|
||||
description = "Secret key used for signing.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
};
|
||||
|
||||
server = {
|
||||
serveFromSubPath = mkOption {
|
||||
description = "Serve Grafana from subpath specified in rootUrl setting";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
};
|
||||
|
||||
smtp = {
|
||||
enable = mkEnableOption "smtp";
|
||||
host = mkOption {
|
||||
description = "Host to connect to.";
|
||||
default = "localhost:25";
|
||||
type = types.str;
|
||||
};
|
||||
user = mkOption {
|
||||
description = "User used for authentication.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
password = mkOption {
|
||||
description = ''
|
||||
Password used for authentication.
|
||||
This option is mutual exclusive with the passwordFile option.
|
||||
'';
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
passwordFile = mkOption {
|
||||
description = ''
|
||||
Password used for authentication.
|
||||
This option is mutual exclusive with the password option.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
fromAddress = mkOption {
|
||||
description = "Email address used for sending.";
|
||||
default = "admin@grafana.localhost";
|
||||
type = types.str;
|
||||
};
|
||||
};
|
||||
|
||||
users = {
|
||||
allowSignUp = mkOption {
|
||||
description = "Disable user signup / registration.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
allowOrgCreate = mkOption {
|
||||
description = "Whether user is allowed to create organizations.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
autoAssignOrg = mkOption {
|
||||
description = "Whether to automatically assign new users to default org.";
|
||||
default = true;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
autoAssignOrgRole = mkOption {
|
||||
description = "Default role new users will be auto assigned.";
|
||||
default = "Viewer";
|
||||
type = types.enum ["Viewer" "Editor"];
|
||||
};
|
||||
};
|
||||
|
||||
auth = {
|
||||
disableLoginForm = mkOption {
|
||||
description = "Set to true to disable (hide) the login form, useful if you use OAuth";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
anonymous = {
|
||||
enable = mkOption {
|
||||
description = "Whether to allow anonymous access.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
org_name = mkOption {
|
||||
description = "Which organization to allow anonymous access to.";
|
||||
default = "Main Org.";
|
||||
type = types.str;
|
||||
};
|
||||
org_role = mkOption {
|
||||
description = "Which role anonymous users have in the organization.";
|
||||
default = "Viewer";
|
||||
type = types.str;
|
||||
};
|
||||
};
|
||||
azuread = {
|
||||
enable = mkOption {
|
||||
description = "Whether to allow Azure AD OAuth.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
allowSignUp = mkOption {
|
||||
description = "Whether to allow sign up with Azure AD OAuth.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
clientId = mkOption {
|
||||
description = "Azure AD OAuth client ID.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
clientSecretFile = mkOption {
|
||||
description = "Azure AD OAuth client secret.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
tenantId = mkOption {
|
||||
description = ''
|
||||
Tenant id used to create auth and token url. Default to "common"
|
||||
, let user sign in with any tenant.
|
||||
'';
|
||||
default = "common";
|
||||
type = types.str;
|
||||
};
|
||||
allowedDomains = mkOption {
|
||||
description = ''
|
||||
To limit access to authenticated users who are members of one or more groups,
|
||||
set allowedGroups to a comma- or space-separated list of group object IDs.
|
||||
You can find object IDs for a specific group on the Azure portal.
|
||||
'';
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
allowedGroups = mkOption {
|
||||
description = ''
|
||||
Limits access to users who belong to specific domains.
|
||||
Separate domains with space or comma.
|
||||
'';
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
};
|
||||
google = {
|
||||
enable = mkOption {
|
||||
description = "Whether to allow Google OAuth2.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
allowSignUp = mkOption {
|
||||
description = "Whether to allow sign up with Google OAuth2.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
clientId = mkOption {
|
||||
description = "Google OAuth2 client ID.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
clientSecretFile = mkOption {
|
||||
description = "Google OAuth2 client secret.";
|
||||
default = null;
|
||||
type = types.nullOr types.path;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
analytics.reporting = {
|
||||
enable = mkOption {
|
||||
description = "Whether to allow anonymous usage reporting to stats.grafana.net.";
|
||||
default = true;
|
||||
type = types.bool;
|
||||
};
|
||||
};
|
||||
|
||||
extraOptions = mkOption {
|
||||
description = ''
|
||||
Extra configuration options passed as env variables as specified in
|
||||
<link xlink:href="http://docs.grafana.org/installation/configuration/">documentation</link>,
|
||||
but without GF_ prefix
|
||||
'';
|
||||
default = {};
|
||||
type = with types; attrsOf (either str path);
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
warnings = flatten [
|
||||
(optional (
|
||||
cfg.database.password != opt.database.password.default ||
|
||||
cfg.security.adminPassword != opt.security.adminPassword.default
|
||||
) "Grafana passwords will be stored as plaintext in the Nix store!")
|
||||
(optional (
|
||||
any (x: x.password != null || x.basicAuthPassword != null || x.secureJsonData != null) cfg.provision.datasources
|
||||
) "Datasource passwords will be stored as plaintext in the Nix store!")
|
||||
(optional (
|
||||
any (x: x.secure_settings != null) cfg.provision.notifiers
|
||||
) "Notifier secure settings will be stored as plaintext in the Nix store!")
|
||||
];
|
||||
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
assertions = [
|
||||
{
|
||||
assertion = cfg.database.password != opt.database.password.default -> cfg.database.passwordFile == null;
|
||||
message = "Cannot set both password and passwordFile";
|
||||
}
|
||||
{
|
||||
assertion = cfg.security.adminPassword != opt.security.adminPassword.default -> cfg.security.adminPasswordFile == null;
|
||||
message = "Cannot set both adminPassword and adminPasswordFile";
|
||||
}
|
||||
{
|
||||
assertion = cfg.security.secretKey != opt.security.secretKey.default -> cfg.security.secretKeyFile == null;
|
||||
message = "Cannot set both secretKey and secretKeyFile";
|
||||
}
|
||||
{
|
||||
assertion = cfg.smtp.password != opt.smtp.password.default -> cfg.smtp.passwordFile == null;
|
||||
message = "Cannot set both password and passwordFile";
|
||||
}
|
||||
];
|
||||
|
||||
systemd.services.grafana = {
|
||||
description = "Grafana Service Daemon";
|
||||
wantedBy = ["multi-user.target"];
|
||||
after = ["networking.target"] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
|
||||
environment = {
|
||||
QT_QPA_PLATFORM = "offscreen";
|
||||
} // mapAttrs' (n: v: nameValuePair "GF_${n}" (toString v)) envOptions;
|
||||
script = ''
|
||||
set -o errexit -o pipefail -o nounset -o errtrace
|
||||
shopt -s inherit_errexit
|
||||
|
||||
${optionalString (cfg.auth.azuread.clientSecretFile != null) ''
|
||||
GF_AUTH_AZUREAD_CLIENT_SECRET="$(<${escapeShellArg cfg.auth.azuread.clientSecretFile})"
|
||||
export GF_AUTH_AZUREAD_CLIENT_SECRET
|
||||
''}
|
||||
${optionalString (cfg.auth.google.clientSecretFile != null) ''
|
||||
GF_AUTH_GOOGLE_CLIENT_SECRET="$(<${escapeShellArg cfg.auth.google.clientSecretFile})"
|
||||
export GF_AUTH_GOOGLE_CLIENT_SECRET
|
||||
''}
|
||||
${optionalString (cfg.database.passwordFile != null) ''
|
||||
GF_DATABASE_PASSWORD="$(<${escapeShellArg cfg.database.passwordFile})"
|
||||
export GF_DATABASE_PASSWORD
|
||||
''}
|
||||
${optionalString (cfg.security.adminPasswordFile != null) ''
|
||||
GF_SECURITY_ADMIN_PASSWORD="$(<${escapeShellArg cfg.security.adminPasswordFile})"
|
||||
export GF_SECURITY_ADMIN_PASSWORD
|
||||
''}
|
||||
${optionalString (cfg.security.secretKeyFile != null) ''
|
||||
GF_SECURITY_SECRET_KEY="$(<${escapeShellArg cfg.security.secretKeyFile})"
|
||||
export GF_SECURITY_SECRET_KEY
|
||||
''}
|
||||
${optionalString (cfg.smtp.passwordFile != null) ''
|
||||
GF_SMTP_PASSWORD="$(<${escapeShellArg cfg.smtp.passwordFile})"
|
||||
export GF_SMTP_PASSWORD
|
||||
''}
|
||||
${optionalString cfg.provision.enable ''
|
||||
export GF_PATHS_PROVISIONING=${provisionConfDir};
|
||||
''}
|
||||
exec ${cfg.package}/bin/grafana-server -homepath ${cfg.dataDir}
|
||||
'';
|
||||
serviceConfig = {
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
User = "grafana";
|
||||
RuntimeDirectory = "grafana";
|
||||
RuntimeDirectoryMode = "0755";
|
||||
# Hardening
|
||||
AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
|
||||
CapabilityBoundingSet = if (cfg.port < 1024) then [ "CAP_NET_BIND_SERVICE" ] else [ "" ];
|
||||
DeviceAllow = [ "" ];
|
||||
LockPersonality = true;
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = true;
|
||||
PrivateTmp = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectProc = "invisible";
|
||||
ProtectSystem = "full";
|
||||
RemoveIPC = true;
|
||||
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
SystemCallArchitectures = "native";
|
||||
# Upstream grafana is not setting SystemCallFilter for compatibility
|
||||
# reasons, see https://github.com/grafana/grafana/pull/40176
|
||||
SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
|
||||
UMask = "0027";
|
||||
};
|
||||
preStart = ''
|
||||
ln -fs ${cfg.package}/share/grafana/conf ${cfg.dataDir}
|
||||
ln -fs ${cfg.package}/share/grafana/tools ${cfg.dataDir}
|
||||
'';
|
||||
};
|
||||
|
||||
users.users.grafana = {
|
||||
uid = config.ids.uids.grafana;
|
||||
description = "Grafana user";
|
||||
home = cfg.dataDir;
|
||||
createHome = true;
|
||||
group = "grafana";
|
||||
};
|
||||
users.groups.grafana = {};
|
||||
};
|
||||
}
|
||||
582
nixos/modules/services/monitoring/graphite.nix
Normal file
582
nixos/modules/services/monitoring/graphite.nix
Normal file
|
|
@ -0,0 +1,582 @@
|
|||
{ config, lib, options, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.graphite;
|
||||
opt = options.services.graphite;
|
||||
writeTextOrNull = f: t: mapNullable (pkgs.writeTextDir f) t;
|
||||
|
||||
dataDir = cfg.dataDir;
|
||||
staticDir = cfg.dataDir + "/static";
|
||||
|
||||
graphiteLocalSettingsDir = pkgs.runCommand "graphite_local_settings" {
|
||||
inherit graphiteLocalSettings;
|
||||
preferLocalBuild = true;
|
||||
} ''
|
||||
mkdir -p $out
|
||||
ln -s $graphiteLocalSettings $out/graphite_local_settings.py
|
||||
'';
|
||||
|
||||
graphiteLocalSettings = pkgs.writeText "graphite_local_settings.py" (
|
||||
"STATIC_ROOT = '${staticDir}'\n" +
|
||||
optionalString (config.time.timeZone != null) "TIME_ZONE = '${config.time.timeZone}'\n"
|
||||
+ cfg.web.extraConfig
|
||||
);
|
||||
|
||||
graphiteApiConfig = pkgs.writeText "graphite-api.yaml" ''
|
||||
search_index: ${dataDir}/index
|
||||
${optionalString (config.time.timeZone != null) "time_zone: ${config.time.timeZone}"}
|
||||
${optionalString (cfg.api.finders != []) "finders:"}
|
||||
${concatMapStringsSep "\n" (f: " - " + f.moduleName) cfg.api.finders}
|
||||
${optionalString (cfg.api.functions != []) "functions:"}
|
||||
${concatMapStringsSep "\n" (f: " - " + f) cfg.api.functions}
|
||||
${cfg.api.extraConfig}
|
||||
'';
|
||||
|
||||
seyrenConfig = {
|
||||
SEYREN_URL = cfg.seyren.seyrenUrl;
|
||||
MONGO_URL = cfg.seyren.mongoUrl;
|
||||
GRAPHITE_URL = cfg.seyren.graphiteUrl;
|
||||
} // cfg.seyren.extraConfig;
|
||||
|
||||
configDir = pkgs.buildEnv {
|
||||
name = "graphite-config";
|
||||
paths = lists.filter (el: el != null) [
|
||||
(writeTextOrNull "carbon.conf" cfg.carbon.config)
|
||||
(writeTextOrNull "storage-aggregation.conf" cfg.carbon.storageAggregation)
|
||||
(writeTextOrNull "storage-schemas.conf" cfg.carbon.storageSchemas)
|
||||
(writeTextOrNull "blacklist.conf" cfg.carbon.blacklist)
|
||||
(writeTextOrNull "whitelist.conf" cfg.carbon.whitelist)
|
||||
(writeTextOrNull "rewrite-rules.conf" cfg.carbon.rewriteRules)
|
||||
(writeTextOrNull "relay-rules.conf" cfg.carbon.relayRules)
|
||||
(writeTextOrNull "aggregation-rules.conf" cfg.carbon.aggregationRules)
|
||||
];
|
||||
};
|
||||
|
||||
carbonOpts = name: with config.ids; ''
|
||||
--nodaemon --syslog --prefix=${name} --pidfile /run/${name}/${name}.pid ${name}
|
||||
'';
|
||||
|
||||
carbonEnv = {
|
||||
PYTHONPATH = let
|
||||
cenv = pkgs.python3.buildEnv.override {
|
||||
extraLibs = [ pkgs.python3Packages.carbon ];
|
||||
};
|
||||
in "${cenv}/${pkgs.python3.sitePackages}";
|
||||
GRAPHITE_ROOT = dataDir;
|
||||
GRAPHITE_CONF_DIR = configDir;
|
||||
GRAPHITE_STORAGE_DIR = dataDir;
|
||||
};
|
||||
|
||||
in {
|
||||
|
||||
imports = [
|
||||
(mkRemovedOptionModule ["services" "graphite" "pager"] "")
|
||||
];
|
||||
|
||||
###### interface
|
||||
|
||||
options.services.graphite = {
|
||||
dataDir = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/db/graphite";
|
||||
description = ''
|
||||
Data directory for graphite.
|
||||
'';
|
||||
};
|
||||
|
||||
web = {
|
||||
enable = mkOption {
|
||||
description = "Whether to enable graphite web frontend.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
listenAddress = mkOption {
|
||||
description = "Graphite web frontend listen address.";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Graphite web frontend port.";
|
||||
default = 8080;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Graphite webapp settings. See:
|
||||
<link xlink:href="http://graphite.readthedocs.io/en/latest/config-local-settings.html"/>
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
api = {
|
||||
enable = mkOption {
|
||||
description = ''
|
||||
Whether to enable graphite api. Graphite api is lightweight alternative
|
||||
to graphite web, with api and without dashboard. It's advised to use
|
||||
grafana as alternative dashboard and influxdb as alternative to
|
||||
graphite carbon.
|
||||
|
||||
For more information visit
|
||||
<link xlink:href="https://graphite-api.readthedocs.org/en/latest/"/>
|
||||
'';
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
finders = mkOption {
|
||||
description = "List of finder plugins to load.";
|
||||
default = [];
|
||||
example = literalExpression "[ pkgs.python3Packages.influxgraph ]";
|
||||
type = types.listOf types.package;
|
||||
};
|
||||
|
||||
functions = mkOption {
|
||||
description = "List of functions to load.";
|
||||
default = [
|
||||
"graphite_api.functions.SeriesFunctions"
|
||||
"graphite_api.functions.PieFunctions"
|
||||
];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
listenAddress = mkOption {
|
||||
description = "Graphite web service listen address.";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Graphite api service port.";
|
||||
default = 8080;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
description = "Package to use for graphite api.";
|
||||
default = pkgs.python3Packages.graphite_api;
|
||||
defaultText = literalExpression "pkgs.python3Packages.graphite_api";
|
||||
type = types.package;
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
description = "Extra configuration for graphite api.";
|
||||
default = ''
|
||||
whisper:
|
||||
directories:
|
||||
- ${dataDir}/whisper
|
||||
'';
|
||||
defaultText = literalExpression ''
|
||||
'''
|
||||
whisper:
|
||||
directories:
|
||||
- ''${config.${opt.dataDir}}/whisper
|
||||
'''
|
||||
'';
|
||||
example = ''
|
||||
allowed_origins:
|
||||
- dashboard.example.com
|
||||
cheat_times: true
|
||||
influxdb:
|
||||
host: localhost
|
||||
port: 8086
|
||||
user: influxdb
|
||||
pass: influxdb
|
||||
db: metrics
|
||||
cache:
|
||||
CACHE_TYPE: 'filesystem'
|
||||
CACHE_DIR: '/tmp/graphite-api-cache'
|
||||
'';
|
||||
type = types.lines;
|
||||
};
|
||||
};
|
||||
|
||||
carbon = {
|
||||
config = mkOption {
|
||||
description = "Content of carbon configuration file.";
|
||||
default = ''
|
||||
[cache]
|
||||
# Listen on localhost by default for security reasons
|
||||
UDP_RECEIVER_INTERFACE = 127.0.0.1
|
||||
PICKLE_RECEIVER_INTERFACE = 127.0.0.1
|
||||
LINE_RECEIVER_INTERFACE = 127.0.0.1
|
||||
CACHE_QUERY_INTERFACE = 127.0.0.1
|
||||
# Do not log every update
|
||||
LOG_UPDATES = False
|
||||
LOG_CACHE_HITS = False
|
||||
'';
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
enableCache = mkOption {
|
||||
description = "Whether to enable carbon cache, the graphite storage daemon.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
storageAggregation = mkOption {
|
||||
description = "Defines how to aggregate data to lower-precision retentions.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
example = ''
|
||||
[all_min]
|
||||
pattern = \.min$
|
||||
xFilesFactor = 0.1
|
||||
aggregationMethod = min
|
||||
'';
|
||||
};
|
||||
|
||||
storageSchemas = mkOption {
|
||||
description = "Defines retention rates for storing metrics.";
|
||||
default = "";
|
||||
type = types.nullOr types.str;
|
||||
example = ''
|
||||
[apache_busyWorkers]
|
||||
pattern = ^servers\.www.*\.workers\.busyWorkers$
|
||||
retentions = 15s:7d,1m:21d,15m:5y
|
||||
'';
|
||||
};
|
||||
|
||||
blacklist = mkOption {
|
||||
description = "Any metrics received which match one of the experssions will be dropped.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
example = "^some\\.noisy\\.metric\\.prefix\\..*";
|
||||
};
|
||||
|
||||
whitelist = mkOption {
|
||||
description = "Only metrics received which match one of the experssions will be persisted.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
example = ".*";
|
||||
};
|
||||
|
||||
rewriteRules = mkOption {
|
||||
description = ''
|
||||
Regular expression patterns that can be used to rewrite metric names
|
||||
in a search and replace fashion.
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
example = ''
|
||||
[post]
|
||||
_sum$ =
|
||||
_avg$ =
|
||||
'';
|
||||
};
|
||||
|
||||
enableRelay = mkOption {
|
||||
description = "Whether to enable carbon relay, the carbon replication and sharding service.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
relayRules = mkOption {
|
||||
description = "Relay rules are used to send certain metrics to a certain backend.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
example = ''
|
||||
[example]
|
||||
pattern = ^mydata\.foo\..+
|
||||
servers = 10.1.2.3, 10.1.2.4:2004, myserver.mydomain.com
|
||||
'';
|
||||
};
|
||||
|
||||
enableAggregator = mkOption {
|
||||
description = "Whether to enable carbon aggregator, the carbon buffering service.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
aggregationRules = mkOption {
|
||||
description = "Defines if and how received metrics will be aggregated.";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
example = ''
|
||||
<env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
|
||||
<env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
seyren = {
|
||||
enable = mkOption {
|
||||
description = "Whether to enable seyren service.";
|
||||
default = false;
|
||||
type = types.bool;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Seyren listening port.";
|
||||
default = 8081;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
seyrenUrl = mkOption {
|
||||
default = "http://localhost:${toString cfg.seyren.port}/";
|
||||
defaultText = literalExpression ''"http://localhost:''${toString config.${opt.seyren.port}}/"'';
|
||||
description = "Host where seyren is accessible.";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
graphiteUrl = mkOption {
|
||||
default = "http://${cfg.web.listenAddress}:${toString cfg.web.port}";
|
||||
defaultText = literalExpression ''"http://''${config.${opt.web.listenAddress}}:''${toString config.${opt.web.port}}"'';
|
||||
description = "Host where graphite service runs.";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
mongoUrl = mkOption {
|
||||
default = "mongodb://${config.services.mongodb.bind_ip}:27017/seyren";
|
||||
defaultText = literalExpression ''"mongodb://''${config.services.mongodb.bind_ip}:27017/seyren"'';
|
||||
description = "Mongodb connection string.";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
default = {};
|
||||
description = ''
|
||||
Extra seyren configuration. See
|
||||
<link xlink:href='https://github.com/scobal/seyren#config' />
|
||||
'';
|
||||
type = types.attrsOf types.str;
|
||||
example = literalExpression ''
|
||||
{
|
||||
GRAPHITE_USERNAME = "user";
|
||||
GRAPHITE_PASSWORD = "pass";
|
||||
}
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
beacon = {
|
||||
enable = mkEnableOption "graphite beacon";
|
||||
|
||||
config = mkOption {
|
||||
description = "Graphite beacon configuration.";
|
||||
default = {};
|
||||
type = types.attrs;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkMerge [
|
||||
(mkIf cfg.carbon.enableCache {
|
||||
systemd.services.carbonCache = let name = "carbon-cache"; in {
|
||||
description = "Graphite Data Storage Backend";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
environment = carbonEnv;
|
||||
serviceConfig = {
|
||||
RuntimeDirectory = name;
|
||||
ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
|
||||
User = "graphite";
|
||||
Group = "graphite";
|
||||
PermissionsStartOnly = true;
|
||||
PIDFile="/run/${name}/${name}.pid";
|
||||
};
|
||||
preStart = ''
|
||||
install -dm0700 -o graphite -g graphite ${cfg.dataDir}
|
||||
install -dm0700 -o graphite -g graphite ${cfg.dataDir}/whisper
|
||||
'';
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.carbon.enableAggregator {
|
||||
systemd.services.carbonAggregator = let name = "carbon-aggregator"; in {
|
||||
enable = cfg.carbon.enableAggregator;
|
||||
description = "Carbon Data Aggregator";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
environment = carbonEnv;
|
||||
serviceConfig = {
|
||||
RuntimeDirectory = name;
|
||||
ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
|
||||
User = "graphite";
|
||||
Group = "graphite";
|
||||
PIDFile="/run/${name}/${name}.pid";
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.carbon.enableRelay {
|
||||
systemd.services.carbonRelay = let name = "carbon-relay"; in {
|
||||
description = "Carbon Data Relay";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
environment = carbonEnv;
|
||||
serviceConfig = {
|
||||
RuntimeDirectory = name;
|
||||
ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
|
||||
User = "graphite";
|
||||
Group = "graphite";
|
||||
PIDFile="/run/${name}/${name}.pid";
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf (cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay) {
|
||||
environment.systemPackages = [
|
||||
pkgs.python3Packages.carbon
|
||||
];
|
||||
})
|
||||
|
||||
(mkIf cfg.web.enable ({
|
||||
systemd.services.graphiteWeb = {
|
||||
description = "Graphite Web Interface";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
path = [ pkgs.perl ];
|
||||
environment = {
|
||||
PYTHONPATH = let
|
||||
penv = pkgs.python3.buildEnv.override {
|
||||
extraLibs = [
|
||||
pkgs.python3Packages.graphite-web
|
||||
];
|
||||
};
|
||||
penvPack = "${penv}/${pkgs.python3.sitePackages}";
|
||||
in concatStringsSep ":" [
|
||||
"${graphiteLocalSettingsDir}"
|
||||
"${penvPack}"
|
||||
# explicitly adding pycairo in path because it cannot be imported via buildEnv
|
||||
"${pkgs.python3Packages.pycairo}/${pkgs.python3.sitePackages}"
|
||||
];
|
||||
DJANGO_SETTINGS_MODULE = "graphite.settings";
|
||||
GRAPHITE_SETTINGS_MODULE = "graphite_local_settings";
|
||||
GRAPHITE_CONF_DIR = configDir;
|
||||
GRAPHITE_STORAGE_DIR = dataDir;
|
||||
LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib";
|
||||
};
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.python3Packages.waitress-django}/bin/waitress-serve-django \
|
||||
--host=${cfg.web.listenAddress} --port=${toString cfg.web.port}
|
||||
'';
|
||||
User = "graphite";
|
||||
Group = "graphite";
|
||||
PermissionsStartOnly = true;
|
||||
};
|
||||
preStart = ''
|
||||
if ! test -e ${dataDir}/db-created; then
|
||||
mkdir -p ${dataDir}/{whisper/,log/webapp/}
|
||||
chmod 0700 ${dataDir}/{whisper/,log/webapp/}
|
||||
|
||||
${pkgs.python3Packages.django}/bin/django-admin.py migrate --noinput
|
||||
|
||||
chown -R graphite:graphite ${dataDir}
|
||||
|
||||
touch ${dataDir}/db-created
|
||||
fi
|
||||
|
||||
# Only collect static files when graphite_web changes.
|
||||
if ! [ "${dataDir}/current_graphite_web" -ef "${pkgs.python3Packages.graphite-web}" ]; then
|
||||
mkdir -p ${staticDir}
|
||||
${pkgs.python3Packages.django}/bin/django-admin.py collectstatic --noinput --clear
|
||||
chown -R graphite:graphite ${staticDir}
|
||||
ln -sfT "${pkgs.python3Packages.graphite-web}" "${dataDir}/current_graphite_web"
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.python3Packages.graphite-web ];
|
||||
}))
|
||||
|
||||
(mkIf cfg.api.enable {
|
||||
systemd.services.graphiteApi = {
|
||||
description = "Graphite Api Interface";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
environment = {
|
||||
PYTHONPATH = let
|
||||
aenv = pkgs.python3.buildEnv.override {
|
||||
extraLibs = [ cfg.api.package pkgs.cairo pkgs.python3Packages.cffi ] ++ cfg.api.finders;
|
||||
};
|
||||
in "${aenv}/${pkgs.python3.sitePackages}";
|
||||
GRAPHITE_API_CONFIG = graphiteApiConfig;
|
||||
LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib";
|
||||
};
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.python3Packages.waitress}/bin/waitress-serve \
|
||||
--host=${cfg.api.listenAddress} --port=${toString cfg.api.port} \
|
||||
graphite_api.app:app
|
||||
'';
|
||||
User = "graphite";
|
||||
Group = "graphite";
|
||||
PermissionsStartOnly = true;
|
||||
};
|
||||
preStart = ''
|
||||
if ! test -e ${dataDir}/db-created; then
|
||||
mkdir -p ${dataDir}/cache/
|
||||
chmod 0700 ${dataDir}/cache/
|
||||
|
||||
chown graphite:graphite ${cfg.dataDir}
|
||||
chown -R graphite:graphite ${cfg.dataDir}/cache
|
||||
|
||||
touch ${dataDir}/db-created
|
||||
fi
|
||||
'';
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf cfg.seyren.enable {
|
||||
systemd.services.seyren = {
|
||||
description = "Graphite Alerting Dashboard";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "mongodb.service" ];
|
||||
environment = seyrenConfig;
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.seyren}/bin/seyren -httpPort ${toString cfg.seyren.port}";
|
||||
WorkingDirectory = dataDir;
|
||||
User = "graphite";
|
||||
Group = "graphite";
|
||||
};
|
||||
preStart = ''
|
||||
if ! test -e ${dataDir}/db-created; then
|
||||
mkdir -p ${dataDir}
|
||||
chown graphite:graphite ${dataDir}
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
services.mongodb.enable = mkDefault true;
|
||||
})
|
||||
|
||||
(mkIf cfg.beacon.enable {
|
||||
systemd.services.graphite-beacon = {
|
||||
description = "Grpahite Beacon Alerting Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.python3Packages.graphite_beacon}/bin/graphite-beacon \
|
||||
--config=${pkgs.writeText "graphite-beacon.json" (builtins.toJSON cfg.beacon.config)}
|
||||
'';
|
||||
User = "graphite";
|
||||
Group = "graphite";
|
||||
};
|
||||
};
|
||||
})
|
||||
|
||||
(mkIf (
|
||||
cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay ||
|
||||
cfg.web.enable || cfg.api.enable ||
|
||||
cfg.seyren.enable || cfg.beacon.enable
|
||||
) {
|
||||
users.users.graphite = {
|
||||
uid = config.ids.uids.graphite;
|
||||
group = "graphite";
|
||||
description = "Graphite daemon user";
|
||||
home = dataDir;
|
||||
};
|
||||
users.groups.graphite.gid = config.ids.gids.graphite;
|
||||
})
|
||||
];
|
||||
}
|
||||
23
nixos/modules/services/monitoring/hdaps.nix
Normal file
23
nixos/modules/services/monitoring/hdaps.nix
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.hdapsd;
  # hdapsd ships both udev rules and a systemd template unit; expose the same
  # package list to both subsystems.
  hdapsdPackages = [ pkgs.hdapsd ];
in
{
  options.services.hdapsd.enable = mkEnableOption
    ''
      Hard Drive Active Protection System Daemon,
      devices are detected and managed automatically by udev and systemd
    '';

  config = mkIf cfg.enable {
    # Load the kernel module; udev then instantiates hdapsd@ units per device.
    boot.kernelModules = [ "hdapsd" ];
    services.udev.packages = hdapsdPackages;
    systemd.packages = hdapsdPackages;
  };
}
|
||||
59
nixos/modules/services/monitoring/heapster.nix
Normal file
59
nixos/modules/services/monitoring/heapster.nix
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.heapster;
in {
  options.services.heapster = {
    enable = mkOption {
      description = "Whether to enable heapster monitoring";
      default = false;
      type = types.bool;
    };

    source = mkOption {
      description = "Heapster metric source";
      example = "kubernetes:https://kubernetes.default";
      type = types.str;
    };

    sink = mkOption {
      # FIX: description previously misspelled "metic".
      description = "Heapster metric sink";
      example = "influxdb:http://localhost:8086";
      type = types.str;
    };

    extraOpts = mkOption {
      description = "Heapster extra options";
      default = "";
      # Space-separated string so multiple definitions merge into one
      # command-line fragment.
      type = types.separatedString " ";
    };

    package = mkOption {
      description = "Package to use by heapster";
      default = pkgs.heapster;
      defaultText = literalExpression "pkgs.heapster";
      type = types.package;
    };
  };

  config = mkIf cfg.enable {
    systemd.services.heapster = {
      wantedBy = ["multi-user.target"];
      # Ordered after the local metric source and the kube API server, the two
      # services heapster typically scrapes on the same host.
      after = ["cadvisor.service" "kube-apiserver.service"];

      serviceConfig = {
        ExecStart = "${cfg.package}/bin/heapster --source=${cfg.source} --sink=${cfg.sink} ${cfg.extraOpts}";
        User = "heapster";
      };
    };

    # Dedicated unprivileged account for the daemon.
    users.users.heapster = {
      isSystemUser = true;
      group = "heapster";
      description = "Heapster user";
    };
    users.groups.heapster = {};
  };
}
|
||||
103
nixos/modules/services/monitoring/incron.nix
Normal file
103
nixos/modules/services/monitoring/incron.nix
Normal file
|
|
@ -0,0 +1,103 @@
|
|||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.incron;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
|
||||
services.incron = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to enable the incron daemon.
|
||||
|
||||
Note that commands run under incrontab only support common Nix profiles for the <envar>PATH</envar> provided variable.
|
||||
'';
|
||||
};
|
||||
|
||||
allow = mkOption {
|
||||
type = types.nullOr (types.listOf types.str);
|
||||
default = null;
|
||||
description = ''
|
||||
Users allowed to use incrontab.
|
||||
|
||||
If empty then no user will be allowed to have their own incrontab.
|
||||
If <literal>null</literal> then will defer to <option>deny</option>.
|
||||
If both <option>allow</option> and <option>deny</option> are null
|
||||
then all users will be allowed to have their own incrontab.
|
||||
'';
|
||||
};
|
||||
|
||||
deny = mkOption {
|
||||
type = types.nullOr (types.listOf types.str);
|
||||
default = null;
|
||||
description = "Users forbidden from using incrontab.";
|
||||
};
|
||||
|
||||
systab = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
description = "The system incrontab contents.";
|
||||
example = ''
|
||||
/var/mail IN_CLOSE_WRITE abc $@/$#
|
||||
/tmp IN_ALL_EVENTS efg $@/$# $&
|
||||
'';
|
||||
};
|
||||
|
||||
extraPackages = mkOption {
|
||||
type = types.listOf types.package;
|
||||
default = [];
|
||||
example = literalExpression "[ pkgs.rsync ]";
|
||||
description = "Extra packages available to the system incrontab.";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
warnings = optional (cfg.allow != null && cfg.deny != null)
|
||||
"If `services.incron.allow` is set then `services.incron.deny` will be ignored.";
|
||||
|
||||
environment.systemPackages = [ pkgs.incron ];
|
||||
|
||||
security.wrappers.incrontab =
|
||||
{ setuid = true;
|
||||
owner = "root";
|
||||
group = "root";
|
||||
source = "${pkgs.incron}/bin/incrontab";
|
||||
};
|
||||
|
||||
# incron won't read symlinks
|
||||
environment.etc."incron.d/system" = {
|
||||
mode = "0444";
|
||||
text = cfg.systab;
|
||||
};
|
||||
environment.etc."incron.allow" = mkIf (cfg.allow != null) {
|
||||
text = concatStringsSep "\n" cfg.allow;
|
||||
};
|
||||
environment.etc."incron.deny" = mkIf (cfg.deny != null) {
|
||||
text = concatStringsSep "\n" cfg.deny;
|
||||
};
|
||||
|
||||
systemd.services.incron = {
|
||||
description = "File System Events Scheduler";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = cfg.extraPackages;
|
||||
serviceConfig.PIDFile = "/run/incrond.pid";
|
||||
serviceConfig.ExecStartPre = "${pkgs.coreutils}/bin/mkdir -m 710 -p /var/spool/incron";
|
||||
serviceConfig.ExecStart = "${pkgs.incron}/bin/incrond --foreground";
|
||||
};
|
||||
};
|
||||
|
||||
}
|
||||
188
nixos/modules/services/monitoring/kapacitor.nix
Normal file
188
nixos/modules/services/monitoring/kapacitor.nix
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.kapacitor;
|
||||
|
||||
kapacitorConf = pkgs.writeTextFile {
|
||||
name = "kapacitord.conf";
|
||||
text = ''
|
||||
hostname="${config.networking.hostName}"
|
||||
data_dir="${cfg.dataDir}"
|
||||
|
||||
[http]
|
||||
bind-address = "${cfg.bind}:${toString cfg.port}"
|
||||
log-enabled = false
|
||||
auth-enabled = false
|
||||
|
||||
[task]
|
||||
dir = "${cfg.dataDir}/tasks"
|
||||
snapshot-interval = "${cfg.taskSnapshotInterval}"
|
||||
|
||||
[replay]
|
||||
dir = "${cfg.dataDir}/replay"
|
||||
|
||||
[storage]
|
||||
boltdb = "${cfg.dataDir}/kapacitor.db"
|
||||
|
||||
${optionalString (cfg.loadDirectory != null) ''
|
||||
[load]
|
||||
enabled = true
|
||||
dir = "${cfg.loadDirectory}"
|
||||
''}
|
||||
|
||||
${optionalString (cfg.defaultDatabase.enable) ''
|
||||
[[influxdb]]
|
||||
name = "default"
|
||||
enabled = true
|
||||
default = true
|
||||
urls = [ "${cfg.defaultDatabase.url}" ]
|
||||
username = "${cfg.defaultDatabase.username}"
|
||||
password = "${cfg.defaultDatabase.password}"
|
||||
''}
|
||||
|
||||
${optionalString (cfg.alerta.enable) ''
|
||||
[alerta]
|
||||
enabled = true
|
||||
url = "${cfg.alerta.url}"
|
||||
token = "${cfg.alerta.token}"
|
||||
environment = "${cfg.alerta.environment}"
|
||||
origin = "${cfg.alerta.origin}"
|
||||
''}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
};
|
||||
in
|
||||
{
|
||||
options.services.kapacitor = {
|
||||
enable = mkEnableOption "kapacitor";
|
||||
|
||||
dataDir = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/lib/kapacitor";
|
||||
description = "Location where Kapacitor stores its state";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 9092;
|
||||
description = "Port of Kapacitor";
|
||||
};
|
||||
|
||||
bind = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
example = "0.0.0.0";
|
||||
description = "Address to bind to. The default is to bind to all addresses";
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
description = "These lines go into kapacitord.conf verbatim.";
|
||||
default = "";
|
||||
type = types.lines;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "kapacitor";
|
||||
description = "User account under which Kapacitor runs";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "kapacitor";
|
||||
description = "Group under which Kapacitor runs";
|
||||
};
|
||||
|
||||
taskSnapshotInterval = mkOption {
|
||||
type = types.str;
|
||||
description = "Specifies how often to snapshot the task state (in InfluxDB time units)";
|
||||
default = "1m0s";
|
||||
};
|
||||
|
||||
loadDirectory = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
description = "Directory where to load services from, such as tasks, templates and handlers (or null to disable service loading on startup)";
|
||||
default = null;
|
||||
};
|
||||
|
||||
defaultDatabase = {
|
||||
enable = mkEnableOption "kapacitor.defaultDatabase";
|
||||
|
||||
url = mkOption {
|
||||
description = "The URL to an InfluxDB server that serves as the default database";
|
||||
example = "http://localhost:8086";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
description = "The username to connect to the remote InfluxDB server";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
description = "The password to connect to the remote InfluxDB server";
|
||||
type = types.str;
|
||||
};
|
||||
};
|
||||
|
||||
alerta = {
|
||||
enable = mkEnableOption "kapacitor alerta integration";
|
||||
|
||||
url = mkOption {
|
||||
description = "The URL to the Alerta REST API";
|
||||
default = "http://localhost:5000";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
token = mkOption {
|
||||
description = "Default Alerta authentication token";
|
||||
type = types.str;
|
||||
default = "";
|
||||
};
|
||||
|
||||
environment = mkOption {
|
||||
description = "Default Alerta environment";
|
||||
type = types.str;
|
||||
default = "Production";
|
||||
};
|
||||
|
||||
origin = mkOption {
|
||||
description = "Default origin of alert";
|
||||
type = types.str;
|
||||
default = "kapacitor";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
environment.systemPackages = [ pkgs.kapacitor ];
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
|
||||
];
|
||||
|
||||
systemd.services.kapacitor = {
|
||||
description = "Kapacitor Real-Time Stream Processing Engine";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "networking.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.kapacitor}/bin/kapacitord -config ${kapacitorConf}";
|
||||
User = "kapacitor";
|
||||
Group = "kapacitor";
|
||||
};
|
||||
};
|
||||
|
||||
users.users.kapacitor = {
|
||||
uid = config.ids.uids.kapacitor;
|
||||
description = "Kapacitor user";
|
||||
home = cfg.dataDir;
|
||||
};
|
||||
|
||||
users.groups.kapacitor = {
|
||||
gid = config.ids.gids.kapacitor;
|
||||
};
|
||||
};
|
||||
}
|
||||
114
nixos/modules/services/monitoring/loki.nix
Normal file
114
nixos/modules/services/monitoring/loki.nix
Normal file
|
|
@ -0,0 +1,114 @@
|
|||
# NixOS module for Grafana Loki, the log aggregation system.  Configuration
# comes either from Nix (services.loki.configuration, rendered to JSON) or
# from a user-supplied file (services.loki.configFile) — exactly one of the
# two, enforced by an assertion below.
{ config, lib, pkgs, ... }:

let
  inherit (lib) escapeShellArgs mkEnableOption mkIf mkOption types;

  cfg = config.services.loki;

  # Render the Nix attrset to JSON (valid YAML, which Loki accepts) and strip
  # the module-system's internal `_module` attribute with jq.
  prettyJSON = conf:
    pkgs.runCommand "loki-config.json" { } ''
      echo '${builtins.toJSON conf}' | ${pkgs.jq}/bin/jq 'del(._module)' > $out
    '';

in {
  options.services.loki = {
    enable = mkEnableOption "loki";

    user = mkOption {
      type = types.str;
      default = "loki";
      description = ''
        User under which the Loki service runs.
      '';
    };

    group = mkOption {
      type = types.str;
      default = "loki";
      description = ''
        Group under which the Loki service runs.
      '';
    };

    dataDir = mkOption {
      type = types.path;
      default = "/var/lib/loki";
      description = ''
        Specify the directory for Loki.
      '';
    };

    configuration = mkOption {
      type = (pkgs.formats.json {}).type;
      default = {};
      description = ''
        Specify the configuration for Loki in Nix.
      '';
    };

    configFile = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = ''
        Specify a configuration file that Loki should use.
      '';
    };

    extraFlags = mkOption {
      type = types.listOf types.str;
      default = [];
      example = [ "--server.http-listen-port=3101" ];
      description = ''
        Specify a list of additional command line flags,
        which get escaped and are then passed to Loki.
      '';
    };
  };

  config = mkIf cfg.enable {
    # Exactly one of configuration/configFile must be provided:
    # empty configuration implies configFile is set, and a set configFile
    # implies configuration is empty.
    assertions = [{
      assertion = (
        (cfg.configuration == {} -> cfg.configFile != null) &&
        (cfg.configFile != null -> cfg.configuration == {})
      );
      message = ''
        Please specify either
        'services.loki.configuration' or
        'services.loki.configFile'.
      '';
    }];

    environment.systemPackages = [ pkgs.grafana-loki ]; # logcli

    users.groups.${cfg.group} = { };
    users.users.${cfg.user} = {
      description = "Loki Service User";
      group = cfg.group;
      home = cfg.dataDir;
      createHome = true;
      isSystemUser = true;
    };

    systemd.services.loki = {
      description = "Loki Service Daemon";
      wantedBy = [ "multi-user.target" ];

      serviceConfig = let
        # Prefer the user-supplied file; otherwise render the Nix config.
        conf = if cfg.configFile == null
               then prettyJSON cfg.configuration
               else cfg.configFile;
      in
      {
        ExecStart = "${pkgs.grafana-loki}/bin/loki --config.file=${conf} ${escapeShellArgs cfg.extraFlags}";
        User = cfg.user;
        Restart = "always";
        # Sandboxing: private /tmp, no $HOME access, read-only system dirs,
        # no device access, no privilege escalation.
        PrivateTmp = true;
        ProtectHome = true;
        ProtectSystem = "full";
        DevicePolicy = "closed";
        NoNewPrivileges = true;
        WorkingDirectory = cfg.dataDir;
      };
    };
  };
}
|
||||
160
nixos/modules/services/monitoring/longview.nix
Normal file
160
nixos/modules/services/monitoring/longview.nix
Normal file
|
|
@ -0,0 +1,160 @@
|
|||
# NixOS module for Linode Longview, a metrics-collection agent.  Per-source
# configuration files (Apache, Nginx, MySQL) are generated at service start
# into a private runtime directory.
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.longview;

  runDir = "/run/longview";
  configsDir = "${runDir}/longview.d";

in {
  options = {

    services.longview = {

      enable = mkOption {
        type = types.bool;
        default = false;
        description = ''
          If enabled, system metrics will be sent to Linode LongView.
        '';
      };

      apiKey = mkOption {
        type = types.str;
        default = "";
        example = "01234567-89AB-CDEF-0123456789ABCDEF";
        description = ''
          Longview API key. To get this, look in Longview settings which
          are found at https://manager.linode.com/longview/.

          Warning: this secret is stored in the world-readable Nix store!
          Use <option>apiKeyFile</option> instead.
        '';
      };

      apiKeyFile = mkOption {
        type = types.nullOr types.path;
        default = null;
        example = "/run/keys/longview-api-key";
        description = ''
          A file containing the Longview API key.
          To get this, look in Longview settings which
          are found at https://manager.linode.com/longview/.

          <option>apiKeyFile</option> takes precedence over <option>apiKey</option>.
        '';
      };

      apacheStatusUrl = mkOption {
        type = types.str;
        default = "";
        example = "http://127.0.0.1/server-status";
        description = ''
          The Apache status page URL. If provided, Longview will
          gather statistics from this location. This requires Apache
          mod_status to be loaded and enabled.
        '';
      };

      nginxStatusUrl = mkOption {
        type = types.str;
        default = "";
        example = "http://127.0.0.1/nginx_status";
        description = ''
          The Nginx status page URL. Longview will gather statistics
          from this URL. This requires the Nginx stub_status module to
          be enabled and configured at the given location.
        '';
      };

      mysqlUser = mkOption {
        type = types.str;
        default = "";
        description = ''
          The user for connecting to the MySQL database. If provided,
          Longview will connect to MySQL and collect statistics about
          queries, etc. This user does not need to have been granted
          any extra privileges.
        '';
      };

      mysqlPassword = mkOption {
        type = types.str;
        default = "";
        description = ''
          The password corresponding to <option>mysqlUser</option>.
          Warning: this is stored in cleartext in the Nix store!
          Use <option>mysqlPasswordFile</option> instead.
        '';
      };

      mysqlPasswordFile = mkOption {
        type = types.nullOr types.path;
        default = null;
        example = "/run/keys/dbpassword";
        description = ''
          A file containing the password corresponding to <option>mysqlUser</option>.
        '';
      };

    };

  };

  config = mkIf cfg.enable {
    systemd.services.longview =
      { description = "Longview Metrics Collection";
        after = [ "network.target" ];
        wantedBy = [ "multi-user.target" ];
        # The upstream binary daemonizes itself, hence Type=forking + PIDFile.
        serviceConfig.Type = "forking";
        # Leading "-" on stop/reload: ignore kill failures (process may have
        # already exited).
        serviceConfig.ExecStop = "-${pkgs.coreutils}/bin/kill -TERM $MAINPID";
        serviceConfig.ExecReload = "-${pkgs.coreutils}/bin/kill -HUP $MAINPID";
        serviceConfig.PIDFile = "${runDir}/longview.pid";
        serviceConfig.ExecStart = "${pkgs.longview}/bin/longview";
        # Generate per-source config files under /run before start.  umask 077
        # keeps the copied API key and MySQL credentials private.
        preStart = ''
          umask 077
          mkdir -p ${configsDir}
        '' + (optionalString (cfg.apiKeyFile != null) ''
          cp --no-preserve=all "${cfg.apiKeyFile}" ${runDir}/longview.key
        '') + (optionalString (cfg.apacheStatusUrl != "") ''
          cat > ${configsDir}/Apache.conf <<EOF
          location ${cfg.apacheStatusUrl}?auto
          EOF
        '') + (optionalString (cfg.mysqlUser != "" && cfg.mysqlPasswordFile != null) ''
          cat > ${configsDir}/MySQL.conf <<EOF
          username ${cfg.mysqlUser}
          password `head -n1 "${cfg.mysqlPasswordFile}"`
          EOF
        '') + (optionalString (cfg.nginxStatusUrl != "") ''
          cat > ${configsDir}/Nginx.conf <<EOF
          location ${cfg.nginxStatusUrl}
          EOF
        '');
      };

    # Warn whenever the insecure in-store variants are used.
    warnings = let warn = k: optional (cfg.${k} != "")
      "config.services.longview.${k} is insecure. Use ${k}File instead.";
    in concatMap warn [ "apiKey" "mysqlPassword" ];

    assertions = [
      { assertion = cfg.apiKeyFile != null;
        message = "Longview needs an API key configured";
      }
    ];

    # Create API key file if not configured.
    services.longview.apiKeyFile = mkIf (cfg.apiKey != "")
      (mkDefault (toString (pkgs.writeTextFile {
        name = "longview.key";
        text = cfg.apiKey;
      })));

    # Create MySQL password file if not configured.
    # NOTE(review): unlike apiKeyFile, this default is set unconditionally,
    # so an (empty) password file is written to the store even when
    # mysqlPassword is "" — confirm this is intentional.
    services.longview.mysqlPasswordFile = mkDefault (toString (pkgs.writeTextFile {
      name = "mysql-password-file";
      text = cfg.mysqlPassword;
    }));
  };
}
|
||||
110
nixos/modules/services/monitoring/mackerel-agent.nix
Normal file
110
nixos/modules/services/monitoring/mackerel-agent.nix
Normal file
|
|
@ -0,0 +1,110 @@
|
|||
# NixOS module for the mackerel.io monitoring agent.  Settings are rendered
# to TOML; the API key is kept out of the main config by symlinking a
# user-provided file into conf.d.
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.mackerel-agent;
  settingsFmt = pkgs.formats.toml {};
in {
  options.services.mackerel-agent = {
    enable = mkEnableOption "mackerel.io agent";

    # the upstream package runs as root, but doesn't seem to be strictly
    # necessary for basic functionality
    runAsRoot = mkEnableOption "Whether to run as root.";

    autoRetirement = mkEnableOption ''
      Whether to automatically retire the host upon OS shutdown.
    '';

    apiKeyFile = mkOption {
      type = types.path;
      example = "/run/keys/mackerel-api-key";
      description = ''
        Path to file containing the Mackerel API key. The file should contain a
        single line of the following form:

        <literallayout>apikey = "EXAMPLE_API_KEY"</literallayout>
      '';
    };

    settings = mkOption {
      description = ''
        Options for mackerel-agent.conf.

        Documentation:
        <link xlink:href="https://mackerel.io/docs/entry/spec/agent"/>
      '';

      default = {};
      example = {
        verbose = false;
        silent = false;
      };

      type = types.submodule {
        freeformType = settingsFmt.type;

        # Host status reported to Mackerel on agent start/stop transitions.
        options.host_status = {
          on_start = mkOption {
            type = types.enum [ "working" "standby" "maintenance" "poweroff" ];
            description = "Host status after agent startup.";
            default = "working";
          };
          on_stop = mkOption {
            type = types.enum [ "working" "standby" "maintenance" "poweroff" ];
            description = "Host status after agent shutdown.";
            default = "poweroff";
          };
        };

        options.diagnostic =
          mkEnableOption "Collect memory usage for the agent itself";
      };
    };
  };

  config = mkIf cfg.enable {
    environment.systemPackages = with pkgs; [ mackerel-agent ];

    environment.etc = {
      "mackerel-agent/mackerel-agent.conf".source =
        settingsFmt.generate "mackerel-agent.conf" cfg.settings;
      "mackerel-agent/conf.d/api-key.conf".source = cfg.apiKeyFile;
    };

    services.mackerel-agent.settings = {
      root = mkDefault "/var/lib/mackerel-agent";
      pidfile = mkDefault "/run/mackerel-agent/mackerel-agent.pid";

      # conf.d stores the symlink to cfg.apiKeyFile
      include = mkDefault "/etc/mackerel-agent/conf.d/*.conf";
    };

    # upstream service file in https://git.io/JUt4Q
    systemd.services.mackerel-agent = {
      description = "mackerel.io agent";
      after = [ "network-online.target" "nss-lookup.target" ];
      wantedBy = [ "multi-user.target" ];
      environment = {
        MACKEREL_PLUGIN_WORKDIR = mkDefault "%C/mackerel-agent";
      };
      serviceConfig = {
        DynamicUser = !cfg.runAsRoot;
        PrivateTmp = mkDefault true;
        CacheDirectory = "mackerel-agent";
        ConfigurationDirectory = "mackerel-agent";
        RuntimeDirectory = "mackerel-agent";
        StateDirectory = "mackerel-agent";
        ExecStart = "${pkgs.mackerel-agent}/bin/mackerel-agent supervise";
        # FIX: was `${pkg.mackerel-agent}` — `pkg` is undefined, which made
        # evaluation fail whenever autoRetirement was enabled.
        ExecStopPost = mkIf cfg.autoRetirement "${pkgs.mackerel-agent}/bin/mackerel-agent retire -force";
        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
        LimitNOFILE = mkDefault 65536;
        LimitNPROC = mkDefault 65536;
      };
      # Restart the agent whenever the rendered config changes.
      restartTriggers = [
        config.environment.etc."mackerel-agent/mackerel-agent.conf".source
      ];
    };
  };
}
|
||||
151
nixos/modules/services/monitoring/metricbeat.nix
Normal file
151
nixos/modules/services/monitoring/metricbeat.nix
Normal file
|
|
@ -0,0 +1,151 @@
|
|||
# NixOS module for Metricbeat, Elastic's lightweight metrics shipper.
# Renders services.metricbeat.settings to a YAML metricbeat.yml and runs
# metricbeat as a sandboxed systemd service.
{ config, lib, pkgs, ... }:

let
  inherit (lib)
    attrValues
    literalExpression
    mkEnableOption
    mkIf
    mkOption
    types
    ;
  cfg = config.services.metricbeat;

  # metricbeat.yml is YAML; all settings are serialized through this format.
  settingsFormat = pkgs.formats.yaml {};

in
{
  options = {

    services.metricbeat = {

      enable = mkEnableOption "metricbeat";

      package = mkOption {
        type = types.package;
        default = pkgs.metricbeat;
        defaultText = literalExpression "pkgs.metricbeat";
        example = literalExpression "pkgs.metricbeat7";
        description = ''
          The metricbeat package to use
        '';
      };

      # Attribute-set view of the module list, merged into
      # settings.metricbeat.modules below via attrValues.
      modules = mkOption {
        description = ''
          Metricbeat modules are responsible for reading metrics from the various sources.

          This is like <literal>services.metricbeat.settings.metricbeat.modules</literal>,
          but structured as an attribute set. This has the benefit that multiple
          NixOS modules can contribute settings to a single metricbeat module.

          A module can be specified multiple times by choosing a different <literal><name></literal>
          for each, but setting <xref linkend="opt-services.metricbeat.modules._name_.module"/> to the same value.

          See <link xlink:href="https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html"/>.
        '';
        default = {};
        type = types.attrsOf (types.submodule ({ name, ... }: {
          freeformType = settingsFormat.type;
          options = {
            module = mkOption {
              type = types.str;
              # The attribute name doubles as the module name by default.
              default = name;
              description = ''
                The name of the module.

                Look for the value after <literal>module:</literal> on the individual
                module pages linked from <link xlink:href="https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html"/>.
              '';
            };
          };
        }));
        example = {
          system = {
            metricsets = ["cpu" "load" "memory" "network" "process" "process_summary" "uptime" "socket_summary"];
            enabled = true;
            period = "10s";
            processes = [".*"];
            cpu.metrics = ["percentages" "normalized_percentages"];
            core.metrics = ["percentages"];
          };
        };
      };

      # Free-form metricbeat.yml contents, with a few typed options.
      settings = mkOption {
        type = types.submodule {
          freeformType = settingsFormat.type;
          options = {

            name = mkOption {
              type = types.str;
              default = "";
              description = ''
                Name of the beat. Defaults to the hostname.
                See <link xlink:href="https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-general-options.html#_name"/>.
              '';
            };

            tags = mkOption {
              type = types.listOf types.str;
              default = [];
              description = ''
                Tags to place on the shipped metrics.
                See <link xlink:href="https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-general-options.html#_tags_2"/>.
              '';
            };

            metricbeat.modules = mkOption {
              type = types.listOf settingsFormat.type;
              default = [];
              # internal: users should configure services.metricbeat.modules
              # (the attrset form) instead of this raw list.
              internal = true;
              description = ''
                The metric collecting modules. Use <xref linkend="opt-services.metricbeat.modules"/> instead.

                See <link xlink:href="https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html"/>.
              '';
            };
          };
        };
        default = {};
        description = ''
          Configuration for metricbeat. See <link xlink:href="https://www.elastic.co/guide/en/beats/metricbeat/current/configuring-howto-metricbeat.html"/> for supported values.
        '';
      };

    };
  };

  config = mkIf cfg.enable {

    assertions = [
      {
        # empty modules would cause a failure at runtime
        assertion = cfg.settings.metricbeat.modules != [];
        message = "services.metricbeat: You must configure one or more modules.";
      }
    ];

    # Flatten the attrset form of modules into the raw list that metricbeat
    # actually reads.
    services.metricbeat.settings.metricbeat.modules = attrValues cfg.modules;

    systemd.services.metricbeat = {
      description = "metricbeat metrics shipper";
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        # STATE_DIRECTORY / LOGS_DIRECTORY are provided by systemd via
        # StateDirectory= / LogsDirectory= below.
        ExecStart = ''
          ${cfg.package}/bin/metricbeat \
            -c ${settingsFormat.generate "metricbeat.yml" cfg.settings} \
            --path.data $STATE_DIRECTORY \
            --path.logs $LOGS_DIRECTORY \
            ;
        '';
        Restart = "always";
        DynamicUser = true;
        ProtectSystem = "strict";
        ProtectHome = "tmpfs";
        StateDirectory = "metricbeat";
        LogsDirectory = "metricbeat";
      };
    };
  };
}
|
||||
67
nixos/modules/services/monitoring/mimir.nix
Normal file
67
nixos/modules/services/monitoring/mimir.nix
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
# NixOS module for Grafana Mimir, a horizontally scalable TSDB for Prometheus.
# Configuration may be given either as a Nix attrset (rendered to YAML) or as
# an external file; exactly one of the two must be used.
{ config, lib, pkgs, ... }:

let
  # Fix: dropped `escapeShellArgs` from the inherit — it was never used in
  # this module.
  inherit (lib) mkEnableOption mkIf mkOption types;

  cfg = config.services.mimir;

  # Mimir's configuration file is YAML.
  settingsFormat = pkgs.formats.yaml {};
in {
  options.services.mimir = {
    enable = mkEnableOption "mimir";

    configuration = mkOption {
      type = (pkgs.formats.json {}).type;
      default = {};
      description = ''
        Specify the configuration for Mimir in Nix.
      '';
    };

    configFile = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = ''
        Specify a configuration file that Mimir should use.
      '';
    };
  };

  config = mkIf cfg.enable {
    # for mimirtool
    environment.systemPackages = [ pkgs.mimir ];

    assertions = [{
      # Require that exactly one of `configuration` and `configFile` is set:
      # no config at all is rejected, and so is setting both.
      assertion = (
        (cfg.configuration == {} -> cfg.configFile != null) &&
        (cfg.configFile != null -> cfg.configuration == {})
      );
      message = ''
        Please specify either
        'services.mimir.configuration' or
        'services.mimir.configFile'.
      '';
    }];

    systemd.services.mimir = {
      description = "mimir Service Daemon";
      wantedBy = [ "multi-user.target" ];

      serviceConfig = let
        # Render the Nix attrset to YAML unless an external file was given.
        conf = if cfg.configFile == null
               then settingsFormat.generate "config.yaml" cfg.configuration
               else cfg.configFile;
      in
      {
        ExecStart = "${pkgs.mimir}/bin/mimir --config.file=${conf}";
        DynamicUser = true;
        Restart = "always";
        ProtectSystem = "full";
        DevicePolicy = "closed";
        NoNewPrivileges = true;
        WorkingDirectory = "/var/lib/mimir";
        StateDirectory = "mimir";
      };
    };
  };
}
|
||||
48
nixos/modules/services/monitoring/monit.nix
Normal file
48
nixos/modules/services/monitoring/monit.nix
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
# NixOS module for Monit, a pro-active monitoring utility.
# Installs the monit package, writes the user-supplied monitrc, and runs
# monit as a systemd service that is restarted whenever monitrc changes.
{ config, pkgs, lib, ... }:

let
  inherit (lib) mkEnableOption mkIf mkOption types;

  cfg = config.services.monit;
in
{
  meta.maintainers = [ lib.maintainers.ryantm ];

  options.services.monit = {

    enable = mkEnableOption "Monit";

    config = mkOption {
      type = types.lines;
      default = "";
      description = "monitrc content";
    };

  };

  config = mkIf cfg.enable {

    environment.systemPackages = [ pkgs.monit ];

    # monitrc may contain credentials; monit itself refuses to start when the
    # control file is readable by others, hence mode 0400.
    environment.etc.monitrc = {
      text = cfg.config;
      mode = "0400";
    };

    systemd.services.monit = {
      description = "Pro-active monitoring utility for unix systems";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      # Restart the daemon whenever the generated monitrc changes.
      restartTriggers = [ config.environment.etc.monitrc.source ];
      serviceConfig = {
        ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monitrc";
        ExecStop = "${pkgs.monit}/bin/monit -c /etc/monitrc quit";
        ExecReload = "${pkgs.monit}/bin/monit -c /etc/monitrc reload";
        KillMode = "process";
        Restart = "always";
      };
    };

  };
}
|
||||
404
nixos/modules/services/monitoring/munin.nix
Normal file
404
nixos/modules/services/monitoring/munin.nix
Normal file
|
|
@ -0,0 +1,404 @@
|
|||
# NixOS module for Munin: munin-node (the per-host metrics agent) and
# munin-cron (the master that polls nodes and renders HTML/graphs).
{ config, lib, pkgs, ... }:

# TODO: support munin-async
# TODO: LWP/Pg perl libs aren't recognized

# TODO: support fastcgi
# http://guide.munin-monitoring.org/en/latest/example/webserver/apache-cgi.html
# spawn-fcgi -s /run/munin/fastcgi-graph.sock -U www-data -u munin -g munin /usr/lib/munin/cgi/munin-cgi-graph
# spawn-fcgi -s /run/munin/fastcgi-html.sock -U www-data -u munin -g munin /usr/lib/munin/cgi/munin-cgi-html
# https://paste.sh/vofcctHP#-KbDSXVeWoifYncZmLfZzgum
# nginx https://munin.readthedocs.org/en/latest/example/webserver/nginx.html


with lib;

let
  nodeCfg = config.services.munin-node;
  cronCfg = config.services.munin-cron;

  # munin.conf for the master (munin-cron).
  muninConf = pkgs.writeText "munin.conf"
    ''
      dbdir /var/lib/munin
      htmldir /var/www/munin
      logdir /var/log/munin
      rundir /run/munin

      ${lib.optionalString (cronCfg.extraCSS != "") "staticdir ${customStaticDir}"}

      ${cronCfg.extraGlobalConfig}

      ${cronCfg.hosts}
    '';

  # munin-node.conf for the agent; listens on all interfaces but only allows
  # loopback connections by default.
  nodeConf = pkgs.writeText "munin-node.conf"
    ''
      log_level 3
      log_file Sys::Syslog
      port 4949
      host *
      background 0
      user root
      group root
      host_name ${config.networking.hostName}
      setsid 0

      # wrapped plugins by makeWrapper being with dots
      ignore_file ^\.

      allow ^::1$
      allow ^127\.0\.0\.1$

      ${nodeCfg.extraConfig}
    '';

  # Per-plugin configuration (plugin-conf.d); some plugins need root.
  pluginConf = pkgs.writeText "munin-plugin-conf"
    ''
      [hddtemp_smartctl]
      user root
      group root

      [meminfo]
      user root
      group root

      [ipmi*]
      user root
      group root

      [munin*]
      env.UPDATE_STATSFILE /var/lib/munin/munin-update.stats

      ${nodeCfg.extraPluginConfig}
    '';

  # munin-node expects a plugin-conf directory, so wrap the single generated
  # file into one.
  pluginConfDir = pkgs.stdenv.mkDerivation {
    name = "munin-plugin-conf.d";
    buildCommand = ''
      mkdir $out
      ln -s ${pluginConf} $out/nixos-config
    '';
  };

  # Copy one Munin plugin into the Nix store with a specific name.
  # This is suitable for use with plugins going directly into /etc/munin/plugins,
  # i.e. munin.extraPlugins.
  internOnePlugin = name: path:
    "cp -a '${path}' '${name}'";

  # Copy an entire tree of Munin plugins into a single directory in the Nix
  # store, with no renaming.
  # This is suitable for use with munin-node-configure --suggest, i.e.
  # munin.extraAutoPlugins.
  internManyPlugins = name: path:
    "find '${path}' -type f -perm /a+x -exec cp -a -t . '{}' '+'";

  # Use the appropriate intern-fn to copy the plugins into the store and patch
  # them afterwards in an attempt to get them to run on NixOS.
  # (The sed rewrites hardcoded /bin, /usr/bin, /sbin, /usr/sbin references.)
  internAndFixPlugins = name: intern-fn: paths:
    pkgs.runCommand name {} ''
      mkdir -p "$out"
      cd "$out"
      ${lib.concatStringsSep "\n"
        (lib.attrsets.mapAttrsToList intern-fn paths)}
      chmod -R u+w .
      find . -type f -exec sed -E -i '
        s,(/usr)?/s?bin/,/run/current-system/sw/bin/,g
      ' '{}' '+'
    '';

  # TODO: write a derivation for munin-contrib, so that for contrib plugins
  # you can just refer to them by name rather than needing to include a copy
  # of munin-contrib in your nixos configuration.
  extraPluginDir = internAndFixPlugins "munin-extra-plugins.d"
    internOnePlugin nodeCfg.extraPlugins;

  extraAutoPluginDir = internAndFixPlugins "munin-extra-auto-plugins.d"
    internManyPlugins
    (builtins.listToAttrs
      (map
        (path: { name = baseNameOf path; value = path; })
        nodeCfg.extraAutoPlugins));

  # Static web assets with the user's extra CSS appended to both stylesheets.
  customStaticDir = pkgs.runCommand "munin-custom-static-data" {} ''
    cp -a "${pkgs.munin}/etc/opt/munin/static" "$out"
    cd "$out"
    chmod -R u+w .
    echo "${cronCfg.extraCSS}" >> style.css
    echo "${cronCfg.extraCSS}" >> style-new.css
  '';
in

{

  options = {

    services.munin-node = {

      enable = mkOption {
        default = false;
        type = types.bool;
        description = ''
          Enable Munin Node agent. Munin node listens on 0.0.0.0 and
          by default accepts connections only from 127.0.0.1 for security reasons.

          See <link xlink:href='http://guide.munin-monitoring.org/en/latest/architecture/index.html' />.
        '';
      };

      extraConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          <filename>munin-node.conf</filename> extra configuration. See
          <link xlink:href='http://guide.munin-monitoring.org/en/latest/reference/munin-node.conf.html' />
        '';
      };

      extraPluginConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          <filename>plugin-conf.d</filename> extra plugin configuration. See
          <link xlink:href='http://guide.munin-monitoring.org/en/latest/plugin/use.html' />
        '';
        example = ''
          [fail2ban_*]
          user root
        '';
      };

      extraPlugins = mkOption {
        default = {};
        type = with types; attrsOf path;
        description = ''
          Additional Munin plugins to activate. Keys are the name of the plugin
          symlink, values are the path to the underlying plugin script. You
          can use the same plugin script multiple times (e.g. for wildcard
          plugins).

          Note that these plugins do not participate in autoconfiguration. If
          you want to autoconfigure additional plugins, use
          <option>services.munin-node.extraAutoPlugins</option>.

          Plugins enabled in this manner take precedence over autoconfigured
          plugins.

          Plugins will be copied into the Nix store, and it will attempt to
          modify them to run properly by fixing hardcoded references to
          <literal>/bin</literal>, <literal>/usr/bin</literal>,
          <literal>/sbin</literal>, and <literal>/usr/sbin</literal>.
        '';
        example = literalExpression ''
          {
            zfs_usage_bigpool = /src/munin-contrib/plugins/zfs/zfs_usage_;
            zfs_usage_smallpool = /src/munin-contrib/plugins/zfs/zfs_usage_;
            zfs_list = /src/munin-contrib/plugins/zfs/zfs_list;
          };
        '';
      };

      extraAutoPlugins = mkOption {
        default = [];
        type = with types; listOf path;
        description = ''
          Additional Munin plugins to autoconfigure, using
          <literal>munin-node-configure --suggest</literal>. These should be
          the actual paths to the plugin files (or directories containing them),
          not just their names.

          If you want to manually enable individual plugins instead, use
          <option>services.munin-node.extraPlugins</option>.

          Note that only plugins that have the 'autoconfig' capability will do
          anything if listed here, since plugins that cannot autoconfigure
          won't be automatically enabled by
          <literal>munin-node-configure</literal>.

          Plugins will be copied into the Nix store, and it will attempt to
          modify them to run properly by fixing hardcoded references to
          <literal>/bin</literal>, <literal>/usr/bin</literal>,
          <literal>/sbin</literal>, and <literal>/usr/sbin</literal>.
        '';
        example = literalExpression ''
          [
            /src/munin-contrib/plugins/zfs
            /src/munin-contrib/plugins/ssh
          ];
        '';
      };

      disabledPlugins = mkOption {
        # TODO: figure out why Munin isn't writing the log file and fix it.
        # In the meantime this at least suppresses a useless graph full of
        # NaNs in the output.
        default = [ "munin_stats" ];
        type = with types; listOf str;
        description = ''
          Munin plugins to disable, even if
          <literal>munin-node-configure --suggest</literal> tries to enable
          them. To disable a wildcard plugin, use an actual wildcard, as in
          the example.

          munin_stats is disabled by default as it tries to read
          <literal>/var/log/munin/munin-update.log</literal> for timing
          information, and the NixOS build of Munin does not write this file.
        '';
        example = [ "diskstats" "zfs_usage_*" ];
      };
    };

    services.munin-cron = {

      enable = mkOption {
        default = false;
        type = types.bool;
        description = ''
          Enable munin-cron. Takes care of all heavy lifting to collect data from
          nodes and draws graphs to html. Runs munin-update, munin-limits,
          munin-graphs and munin-html in that order.

          HTML output is in <filename>/var/www/munin/</filename>, configure your
          favourite webserver to serve static files.
        '';
      };

      extraGlobalConfig = mkOption {
        default = "";
        type = types.lines;
        description = ''
          <filename>munin.conf</filename> extra global configuration.
          See <link xlink:href='http://guide.munin-monitoring.org/en/latest/reference/munin.conf.html' />.
          Useful to setup notifications, see
          <link xlink:href='http://guide.munin-monitoring.org/en/latest/tutorial/alert.html' />
        '';
        example = ''
          contact.email.command mail -s "Munin notification for ''${var:host}" someone@example.com
        '';
      };

      hosts = mkOption {
        default = "";
        type = types.lines;
        description = ''
          Definitions of hosts of nodes to collect data from. Needs at least one
          host for cron to succeed. See
          <link xlink:href='http://guide.munin-monitoring.org/en/latest/reference/munin.conf.html' />
        '';
        example = literalExpression ''
          '''
            [''${config.networking.hostName}]
            address localhost
          '''
        '';
      };

      extraCSS = mkOption {
        default = "";
        type = types.lines;
        description = ''
          Custom styling for the HTML that munin-cron generates. This will be
          appended to the CSS files used by munin-cron and will thus take
          precedence over the builtin styles.
        '';
        example = ''
          /* A simple dark theme. */
          html, body { background: #222222; }
          #header, #footer { background: #333333; }
          img.i, img.iwarn, img.icrit, img.iunkn {
            filter: invert(100%) hue-rotate(-30deg);
          }
        '';
      };

    };

  };

  # Shared user/group when either service is enabled, then the per-service
  # units.
  config = mkMerge [ (mkIf (nodeCfg.enable || cronCfg.enable) {

    environment.systemPackages = [ pkgs.munin ];

    users.users.munin = {
      description = "Munin monitoring user";
      group = "munin";
      uid = config.ids.uids.munin;
      home = "/var/lib/munin";
    };

    users.groups.munin = {
      gid = config.ids.gids.munin;
    };

  }) (mkIf nodeCfg.enable {

    systemd.services.munin-node = {
      description = "Munin Node";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      path = with pkgs; [ munin smartmontools "/run/current-system/sw" "/run/wrappers" ];
      environment.MUNIN_LIBDIR = "${pkgs.munin}/lib";
      environment.MUNIN_PLUGSTATE = "/run/munin";
      environment.MUNIN_LOGDIR = "/var/log/munin";
      # Rebuild /etc/munin/plugins on every start: autoconfigure builtin and
      # extra plugins, link in manually enabled ones, then remove disabled ones.
      preStart = ''
        echo "Updating munin plugins..."

        mkdir -p /etc/munin/plugins
        rm -rf /etc/munin/plugins/*

        # Autoconfigure builtin plugins
        ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${pkgs.munin}/lib/plugins --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash

        # Autoconfigure extra plugins
        ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${extraAutoPluginDir} --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash

        ${lib.optionalString (nodeCfg.extraPlugins != {}) ''
          # Link in manually enabled plugins
          ln -f -s -t /etc/munin/plugins ${extraPluginDir}/*
        ''}

        ${lib.optionalString (nodeCfg.disabledPlugins != []) ''
          # Disable plugins
          cd /etc/munin/plugins
          rm -f ${toString nodeCfg.disabledPlugins}
        ''}
      '';
      serviceConfig = {
        ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/ --sconfdir=${pluginConfDir}";
      };
    };

    # munin_stats plugin breaks as of 2.0.33 when this doesn't exist
    systemd.tmpfiles.rules = [ "d /run/munin 0755 munin munin -" ];

  }) (mkIf cronCfg.enable {

    # Munin is hardcoded to use DejaVu Mono and the graphs come out wrong if
    # it's not available.
    fonts.fonts = [ pkgs.dejavu_fonts ];

    # Run the master batch every five minutes.
    systemd.timers.munin-cron = {
      description = "batch Munin master programs";
      wantedBy = [ "timers.target" ];
      timerConfig.OnCalendar = "*:0/5";
    };

    systemd.services.munin-cron = {
      description = "batch Munin master programs";
      unitConfig.Documentation = "man:munin-cron(8)";

      serviceConfig = {
        Type = "oneshot";
        User = "munin";
        ExecStart = "${pkgs.munin}/bin/munin-cron --config ${muninConf}";
      };
    };

    systemd.tmpfiles.rules = [
      "d /run/munin 0755 munin munin -"
      "d /var/log/munin 0755 munin munin -"
      "d /var/www/munin 0755 munin munin -"
      "d /var/lib/munin 0755 munin munin -"
    ];
  })];
}
|
||||
213
nixos/modules/services/monitoring/nagios.nix
Normal file
213
nixos/modules/services/monitoring/nagios.nix
Normal file
|
|
@ -0,0 +1,213 @@
|
|||
# Nagios system/network monitoring daemon.
# Generates (and optionally build-time-validates) nagios.cfg from object
# definition files, runs nagios as a systemd service, and can expose the CGI
# web interface through Apache.
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.nagios;

  nagiosState = "/var/lib/nagios";
  nagiosLogDir = "/var/log/nagios";
  urlPath = "/nagios";

  nagiosObjectDefs = cfg.objectDefs;

  # Directory of symlinks to all user-supplied object definition files.
  nagiosObjectDefsDir = pkgs.runCommand "nagios-objects" {
    inherit nagiosObjectDefs;
    preferLocalBuild = true;
  } "mkdir -p $out; ln -s $nagiosObjectDefs $out/";

  # Resolves to cfg.mainConfigFile when set, otherwise to a generated
  # nagios.cfg (validated at build time when cfg.validateConfig is true).
  nagiosCfgFile = let
    default = {
      log_file="${nagiosLogDir}/current";
      log_archive_path="${nagiosLogDir}/archive";
      status_file="${nagiosState}/status.dat";
      object_cache_file="${nagiosState}/objects.cache";
      temp_file="${nagiosState}/nagios.tmp";
      lock_file="/run/nagios.lock";
      state_retention_file="${nagiosState}/retention.dat";
      query_socket="${nagiosState}/nagios.qh";
      check_result_path="${nagiosState}";
      command_file="${nagiosState}/nagios.cmd";
      cfg_dir="${nagiosObjectDefsDir}";
      nagios_user="nagios";
      nagios_group="nagios";
      illegal_macro_output_chars="`~$&|'\"<>";
      retain_state_information="1";
    };
    # User-provided extraConfig entries override the defaults above.
    lines = mapAttrsToList (key: value: "${key}=${value}") (default // cfg.extraConfig);
    content = concatStringsSep "\n" lines;
    file = pkgs.writeText "nagios.cfg" content;
    validated = pkgs.runCommand "nagios-checked.cfg" {preferLocalBuild=true;} ''
      cp ${file} nagios.cfg
      # nagios checks the existence of /var/lib/nagios, but
      # it does not exist in the build sandbox, so we fake it
      mkdir lib
      lib=$(readlink -f lib)
      sed -i s@=${nagiosState}@=$lib@ nagios.cfg
      ${pkgs.nagios}/bin/nagios -v nagios.cfg && cp ${file} $out
    '';
    defaultCfgFile = if cfg.validateConfig then validated else file;
  in
  if cfg.mainConfigFile == null then defaultCfgFile else cfg.mainConfigFile;

  # Plain configuration for the Nagios web-interface with no
  # authentication.
  nagiosCGICfgFile = pkgs.writeText "nagios.cgi.conf"
    ''
      main_config_file=${nagiosCfgFile}
      use_authentication=0
      url_html_path=${urlPath}
    '';
  # Fix: interpolate the resolved nagiosCfgFile instead of cfg.mainConfigFile.
  # cfg.mainConfigFile defaults to null, and "${null}" is a Nix evaluation
  # error, so enabling the web interface with the default cgiConfigFile used
  # to fail unless mainConfigFile was explicitly set. nagiosCfgFile already
  # falls back to the generated config when mainConfigFile is null.

  extraHttpdConfig =
    ''
      ScriptAlias ${urlPath}/cgi-bin ${pkgs.nagios}/sbin

      <Directory "${pkgs.nagios}/sbin">
        Options ExecCGI
        Require all granted
        SetEnv NAGIOS_CGI_CONFIG ${cfg.cgiConfigFile}
      </Directory>

      Alias ${urlPath} ${pkgs.nagios}/share

      <Directory "${pkgs.nagios}/share">
        Options None
        Require all granted
      </Directory>
    '';

in
{
  imports = [
    (mkRemovedOptionModule [ "services" "nagios" "urlPath" ] "The urlPath option has been removed as it is hard coded to /nagios in the nagios package.")
  ];

  meta.maintainers = with lib.maintainers; [ symphorien ];

  options = {
    services.nagios = {
      enable = mkEnableOption "<link xlink:href='http://www.nagios.org/'>Nagios</link> to monitor your system or network.";

      objectDefs = mkOption {
        description = "
          A list of Nagios object configuration files that must define
          the hosts, host groups, services and contacts for the
          network that you want Nagios to monitor.
        ";
        type = types.listOf types.path;
        example = literalExpression "[ ./objects.cfg ]";
      };

      plugins = mkOption {
        type = types.listOf types.package;
        default = with pkgs; [ monitoring-plugins msmtp mailutils ];
        defaultText = literalExpression "[pkgs.monitoring-plugins pkgs.msmtp pkgs.mailutils]";
        description = "
          Packages to be added to the Nagios <envar>PATH</envar>.
          Typically used to add plugins, but can be anything.
        ";
      };

      mainConfigFile = mkOption {
        type = types.nullOr types.package;
        default = null;
        description = "
          If non-null, overrides the main configuration file of Nagios.
        ";
      };

      extraConfig = mkOption {
        type = types.attrsOf types.str;
        example = {
          debug_level = "-1";
          debug_file = "/var/log/nagios/debug.log";
        };
        default = {};
        description = "Configuration to add to /etc/nagios.cfg";
      };

      validateConfig = mkOption {
        type = types.bool;
        # Validation runs the nagios binary, which is only possible when
        # building natively.
        default = pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform;
        defaultText = literalExpression "pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform";
        description = "if true, the syntax of the nagios configuration file is checked at build time";
      };

      cgiConfigFile = mkOption {
        type = types.package;
        default = nagiosCGICfgFile;
        defaultText = literalExpression "nagiosCGICfgFile";
        description = "
          Derivation for the configuration file of Nagios CGI scripts
          that can be used in web servers for running the Nagios web interface.
        ";
      };

      enableWebInterface = mkOption {
        type = types.bool;
        default = false;
        description = "
          Whether to enable the Nagios web interface. You should also
          enable Apache (<option>services.httpd.enable</option>).
        ";
      };

      virtualHost = mkOption {
        type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
        example = literalExpression ''
          { hostName = "example.org";
            adminAddr = "webmaster@example.org";
            enableSSL = true;
            sslServerCert = "/var/lib/acme/example.org/full.pem";
            sslServerKey = "/var/lib/acme/example.org/key.pem";
          }
        '';
        description = ''
          Apache configuration can be done by adapting <option>services.httpd.virtualHosts</option>.
          See <xref linkend="opt-services.httpd.virtualHosts"/> for further information.
        '';
      };
    };
  };


  config = mkIf cfg.enable {
    users.users.nagios = {
      description = "Nagios user ";
      uid = config.ids.uids.nagios;
      home = nagiosState;
      group = "nagios";
    };

    users.groups.nagios = { };

    # This isn't needed, it's just so that the user can type "nagiostats
    # -c /etc/nagios.cfg".
    environment.etc."nagios.cfg".source = nagiosCfgFile;

    environment.systemPackages = [ pkgs.nagios ];
    systemd.services.nagios = {
      description = "Nagios monitoring daemon";
      path = [ pkgs.nagios ] ++ cfg.plugins;
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" ];
      restartTriggers = [ nagiosCfgFile ];

      serviceConfig = {
        User = "nagios";
        Group = "nagios";
        Restart = "always";
        RestartSec = 2;
        LogsDirectory = "nagios";
        StateDirectory = "nagios";
        ExecStart = "${pkgs.nagios}/bin/nagios /etc/nagios.cfg";
      };
    };

    services.httpd.virtualHosts = optionalAttrs cfg.enableWebInterface {
      ${cfg.virtualHost.hostName} = mkMerge [ cfg.virtualHost { extraConfig = extraHttpdConfig; } ];
    };
  };
}
|
||||
312
nixos/modules/services/monitoring/netdata.nix
Normal file
312
nixos/modules/services/monitoring/netdata.nix
Normal file
|
|
@ -0,0 +1,312 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.netdata;
|
||||
|
||||
wrappedPlugins = pkgs.runCommand "wrapped-plugins" { preferLocalBuild = true; } ''
|
||||
mkdir -p $out/libexec/netdata/plugins.d
|
||||
ln -s /run/wrappers/bin/apps.plugin $out/libexec/netdata/plugins.d/apps.plugin
|
||||
ln -s /run/wrappers/bin/cgroup-network $out/libexec/netdata/plugins.d/cgroup-network
|
||||
ln -s /run/wrappers/bin/perf.plugin $out/libexec/netdata/plugins.d/perf.plugin
|
||||
ln -s /run/wrappers/bin/slabinfo.plugin $out/libexec/netdata/plugins.d/slabinfo.plugin
|
||||
ln -s /run/wrappers/bin/freeipmi.plugin $out/libexec/netdata/plugins.d/freeipmi.plugin
|
||||
'';
|
||||
|
||||
plugins = [
|
||||
"${cfg.package}/libexec/netdata/plugins.d"
|
||||
"${wrappedPlugins}/libexec/netdata/plugins.d"
|
||||
] ++ cfg.extraPluginPaths;
|
||||
|
||||
configDirectory = pkgs.runCommand "netdata-config-d" { } ''
|
||||
mkdir $out
|
||||
${concatStringsSep "\n" (mapAttrsToList (path: file: ''
|
||||
mkdir -p "$out/$(dirname ${path})"
|
||||
ln -s "${file}" "$out/${path}"
|
||||
'') cfg.configDir)}
|
||||
'';
|
||||
|
||||
localConfig = {
|
||||
global = {
|
||||
"config directory" = "/etc/netdata/conf.d";
|
||||
"plugins directory" = concatStringsSep " " plugins;
|
||||
};
|
||||
web = {
|
||||
"web files owner" = "root";
|
||||
"web files group" = "root";
|
||||
};
|
||||
"plugin:cgroups" = {
|
||||
"script to get cgroup network interfaces" = "${wrappedPlugins}/libexec/netdata/plugins.d/cgroup-network";
|
||||
"use unified cgroups" = "yes";
|
||||
};
|
||||
};
|
||||
mkConfig = generators.toINI {} (recursiveUpdate localConfig cfg.config);
|
||||
configFile = pkgs.writeText "netdata.conf" (if cfg.configText != null then cfg.configText else mkConfig);
|
||||
|
||||
defaultUser = "netdata";
|
||||
|
||||
in {
|
||||
options = {
|
||||
services.netdata = {
|
||||
enable = mkEnableOption "netdata";
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.netdata;
|
||||
defaultText = literalExpression "pkgs.netdata";
|
||||
description = "Netdata package to use.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "netdata";
|
||||
description = "User account under which netdata runs.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "netdata";
|
||||
description = "Group under which netdata runs.";
|
||||
};
|
||||
|
||||
configText = mkOption {
|
||||
type = types.nullOr types.lines;
|
||||
description = "Verbatim netdata.conf, cannot be combined with config.";
|
||||
default = null;
|
||||
example = ''
|
||||
[global]
|
||||
debug log = syslog
|
||||
access log = syslog
|
||||
error log = syslog
|
||||
'';
|
||||
};
|
||||
|
||||
python = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to enable python-based plugins
|
||||
'';
|
||||
};
|
||||
extraPackages = mkOption {
|
||||
type = types.functionTo (types.listOf types.package);
|
||||
default = ps: [];
|
||||
defaultText = literalExpression "ps: []";
|
||||
example = literalExpression ''
|
||||
ps: [
|
||||
ps.psycopg2
|
||||
ps.docker
|
||||
ps.dnspython
|
||||
]
|
||||
'';
|
||||
description = ''
|
||||
Extra python packages available at runtime
|
||||
to enable additional python plugins.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
extraPluginPaths = mkOption {
|
||||
type = types.listOf types.path;
|
||||
default = [ ];
|
||||
example = literalExpression ''
|
||||
[ "/path/to/plugins.d" ]
|
||||
'';
|
||||
description = ''
|
||||
Extra paths to add to the netdata global "plugins directory"
|
||||
option. Useful for when you want to include your own
|
||||
collection scripts.
|
||||
</para><para>
|
||||
Details about writing a custom netdata plugin are available at:
|
||||
<link xlink:href="https://docs.netdata.cloud/collectors/plugins.d/"/>
|
||||
</para><para>
|
||||
Cannot be combined with configText.
|
||||
'';
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
type = types.attrsOf types.attrs;
|
||||
default = {};
|
||||
description = "netdata.conf configuration as nix attributes. cannot be combined with configText.";
|
||||
example = literalExpression ''
|
||||
global = {
|
||||
"debug log" = "syslog";
|
||||
"access log" = "syslog";
|
||||
"error log" = "syslog";
|
||||
};
|
||||
'';
|
||||
};
|
||||
|
||||
configDir = mkOption {
|
||||
type = types.attrsOf types.path;
|
||||
default = {};
|
||||
description = ''
|
||||
Complete netdata config directory except netdata.conf.
|
||||
The default configuration is merged with changes
|
||||
defined in this option.
|
||||
Each top-level attribute denotes a path in the configuration
|
||||
directory as in environment.etc.
|
||||
Its value is the absolute path and must be readable by netdata.
|
||||
Cannot be combined with configText.
|
||||
'';
|
||||
example = literalExpression ''
|
||||
"health_alarm_notify.conf" = pkgs.writeText "health_alarm_notify.conf" '''
|
||||
sendmail="/path/to/sendmail"
|
||||
''';
|
||||
"health.d" = "/run/secrets/netdata/health.d";
|
||||
'';
|
||||
};
|
||||
|
||||
enableAnalyticsReporting = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable reporting of anonymous usage statistics to Netdata Inc. via either
|
||||
Google Analytics (in versions prior to 1.29.4), or Netdata Inc.'s
|
||||
self-hosted PostHog (in versions 1.29.4 and later).
|
||||
See: <link xlink:href="https://learn.netdata.cloud/docs/agent/anonymous-statistics"/>
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions =
|
||||
[ { assertion = cfg.config != {} -> cfg.configText == null ;
|
||||
message = "Cannot specify both config and configText";
|
||||
}
|
||||
];
|
||||
|
||||
environment.etc."netdata/netdata.conf".source = configFile;
|
||||
environment.etc."netdata/conf.d".source = configDirectory;
|
||||
|
||||
systemd.services.netdata = {
|
||||
description = "Real time performance monitoring";
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = (with pkgs; [ curl gawk iproute2 which procps ])
|
||||
++ lib.optional cfg.python.enable (pkgs.python3.withPackages cfg.python.extraPackages)
|
||||
++ lib.optional config.virtualisation.libvirtd.enable (config.virtualisation.libvirtd.package);
|
||||
environment = {
|
||||
PYTHONPATH = "${cfg.package}/libexec/netdata/python.d/python_modules";
|
||||
} // lib.optionalAttrs (!cfg.enableAnalyticsReporting) {
|
||||
DO_NOT_TRACK = "1";
|
||||
};
|
||||
restartTriggers = [
|
||||
config.environment.etc."netdata/netdata.conf".source
|
||||
config.environment.etc."netdata/conf.d".source
|
||||
];
|
||||
serviceConfig = {
|
||||
ExecStart = "${cfg.package}/bin/netdata -P /run/netdata/netdata.pid -D -c /etc/netdata/netdata.conf";
|
||||
ExecReload = "${pkgs.util-linux}/bin/kill -s HUP -s USR1 -s USR2 $MAINPID";
|
||||
ExecPostStart = ''while [ "$(netdatacli ping)" != pong ]; do sleep 0.5; done'';
|
||||
|
||||
TimeoutStopSec = 60;
|
||||
Restart = "on-failure";
|
||||
# User and group
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
# Performance
|
||||
LimitNOFILE = "30000";
|
||||
# Runtime directory and mode
|
||||
RuntimeDirectory = "netdata";
|
||||
RuntimeDirectoryMode = "0750";
|
||||
# State directory and mode
|
||||
StateDirectory = "netdata";
|
||||
StateDirectoryMode = "0750";
|
||||
# Cache directory and mode
|
||||
CacheDirectory = "netdata";
|
||||
CacheDirectoryMode = "0750";
|
||||
# Logs directory and mode
|
||||
LogsDirectory = "netdata";
|
||||
LogsDirectoryMode = "0750";
|
||||
# Configuration directory and mode
|
||||
ConfigurationDirectory = "netdata";
|
||||
ConfigurationDirectoryMode = "0755";
|
||||
# Capabilities
|
||||
CapabilityBoundingSet = [
|
||||
"CAP_DAC_OVERRIDE" # is required for freeipmi and slabinfo plugins
|
||||
"CAP_DAC_READ_SEARCH" # is required for apps plugin
|
||||
"CAP_FOWNER" # is required for freeipmi plugin
|
||||
"CAP_SETPCAP" # is required for apps, perf and slabinfo plugins
|
||||
"CAP_SYS_ADMIN" # is required for perf plugin
|
||||
"CAP_SYS_PTRACE" # is required for apps plugin
|
||||
"CAP_SYS_RESOURCE" # is required for ebpf plugin
|
||||
"CAP_NET_RAW" # is required for fping app
|
||||
"CAP_SYS_CHROOT" # is required for cgroups plugin
|
||||
"CAP_SETUID" # is required for cgroups and cgroups-network plugins
|
||||
];
|
||||
# Sandboxing
|
||||
ProtectSystem = "full";
|
||||
ProtectHome = "read-only";
|
||||
PrivateTmp = true;
|
||||
ProtectControlGroups = true;
|
||||
PrivateMounts = true;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.enableCgroupAccounting = true;
|
||||
|
||||
security.wrappers = {
|
||||
"apps.plugin" = {
|
||||
source = "${cfg.package}/libexec/netdata/plugins.d/apps.plugin.org";
|
||||
capabilities = "cap_dac_read_search,cap_sys_ptrace+ep";
|
||||
owner = cfg.user;
|
||||
group = cfg.group;
|
||||
permissions = "u+rx,g+x,o-rwx";
|
||||
};
|
||||
|
||||
"cgroup-network" = {
|
||||
source = "${cfg.package}/libexec/netdata/plugins.d/cgroup-network.org";
|
||||
capabilities = "cap_setuid+ep";
|
||||
owner = cfg.user;
|
||||
group = cfg.group;
|
||||
permissions = "u+rx,g+x,o-rwx";
|
||||
};
|
||||
|
||||
"perf.plugin" = {
|
||||
source = "${cfg.package}/libexec/netdata/plugins.d/perf.plugin.org";
|
||||
capabilities = "cap_sys_admin+ep";
|
||||
owner = cfg.user;
|
||||
group = cfg.group;
|
||||
permissions = "u+rx,g+x,o-rwx";
|
||||
};
|
||||
|
||||
"slabinfo.plugin" = {
|
||||
source = "${cfg.package}/libexec/netdata/plugins.d/slabinfo.plugin.org";
|
||||
capabilities = "cap_dac_override+ep";
|
||||
owner = cfg.user;
|
||||
group = cfg.group;
|
||||
permissions = "u+rx,g+x,o-rwx";
|
||||
};
|
||||
|
||||
} // optionalAttrs (cfg.package.withIpmi) {
|
||||
"freeipmi.plugin" = {
|
||||
source = "${cfg.package}/libexec/netdata/plugins.d/freeipmi.plugin.org";
|
||||
capabilities = "cap_dac_override,cap_fowner+ep";
|
||||
owner = cfg.user;
|
||||
group = cfg.group;
|
||||
permissions = "u+rx,g+x,o-rwx";
|
||||
};
|
||||
};
|
||||
|
||||
security.pam.loginLimits = [
|
||||
{ domain = "netdata"; type = "soft"; item = "nofile"; value = "10000"; }
|
||||
{ domain = "netdata"; type = "hard"; item = "nofile"; value = "30000"; }
|
||||
];
|
||||
|
||||
users.users = optionalAttrs (cfg.user == defaultUser) {
|
||||
${defaultUser} = {
|
||||
group = defaultUser;
|
||||
isSystemUser = true;
|
||||
};
|
||||
};
|
||||
|
||||
users.groups = optionalAttrs (cfg.group == defaultUser) {
|
||||
${defaultUser} = { };
|
||||
};
|
||||
|
||||
};
|
||||
}
|
||||
113
nixos/modules/services/monitoring/parsedmarc.md
Normal file
113
nixos/modules/services/monitoring/parsedmarc.md
Normal file
|
|
@ -0,0 +1,113 @@
|
|||
# parsedmarc {#module-services-parsedmarc}
|
||||
[parsedmarc](https://domainaware.github.io/parsedmarc/) is a service
|
||||
which parses incoming [DMARC](https://dmarc.org/) reports and stores
|
||||
or sends them to a downstream service for further analysis. In
|
||||
combination with Elasticsearch, Grafana and the included Grafana
|
||||
dashboard, it provides a handy overview of DMARC reports over time.
|
||||
|
||||
## Basic usage {#module-services-parsedmarc-basic-usage}
|
||||
A very minimal setup which reads incoming reports from an external
|
||||
email address and saves them to a local Elasticsearch instance looks
|
||||
like this:
|
||||
|
||||
```nix
|
||||
services.parsedmarc = {
|
||||
enable = true;
|
||||
settings.imap = {
|
||||
host = "imap.example.com";
|
||||
user = "alice@example.com";
|
||||
password = "/path/to/imap_password_file";
|
||||
watch = true;
|
||||
};
|
||||
provision.geoIp = false; # Not recommended!
|
||||
};
|
||||
```
|
||||
|
||||
Note that GeoIP provisioning is disabled in the example for
|
||||
simplicity, but should be turned on for fully functional reports.
|
||||
|
||||
## Local mail
|
||||
Instead of watching an external inbox, a local inbox can be
|
||||
automatically provisioned. The recipient's name is by default set to
|
||||
`dmarc`, but can be configured in
|
||||
[services.parsedmarc.provision.localMail.recipientName](options.html#opt-services.parsedmarc.provision.localMail.recipientName). You
|
||||
need to add an MX record pointing to the host. More concretely: for
|
||||
the example to work, an MX record needs to be set up for
|
||||
`monitoring.example.com` and the complete email address that should be
|
||||
configured in the domain's dmarc policy is
|
||||
`dmarc@monitoring.example.com`.
|
||||
|
||||
```nix
|
||||
services.parsedmarc = {
|
||||
enable = true;
|
||||
provision = {
|
||||
localMail = {
|
||||
enable = true;
|
||||
hostname = monitoring.example.com;
|
||||
};
|
||||
geoIp = false; # Not recommended!
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
## Grafana and GeoIP
|
||||
The reports can be visualized and summarized with parsedmarc's
|
||||
official Grafana dashboard. For all views to work, and for the data to
|
||||
be complete, GeoIP databases are also required. The following example
|
||||
shows a basic deployment where the provisioned Elasticsearch instance
|
||||
is automatically added as a Grafana datasource, and the dashboard is
|
||||
added to Grafana as well.
|
||||
|
||||
```nix
|
||||
services.parsedmarc = {
|
||||
enable = true;
|
||||
provision = {
|
||||
localMail = {
|
||||
enable = true;
|
||||
hostname = url;
|
||||
};
|
||||
grafana = {
|
||||
datasource = true;
|
||||
dashboard = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Not required, but recommended for full functionality
|
||||
services.geoipupdate = {
|
||||
settings = {
|
||||
AccountID = 000000;
|
||||
LicenseKey = "/path/to/license_key_file";
|
||||
};
|
||||
};
|
||||
|
||||
services.grafana = {
|
||||
enable = true;
|
||||
addr = "0.0.0.0";
|
||||
domain = url;
|
||||
rootUrl = "https://" + url;
|
||||
protocol = "socket";
|
||||
security = {
|
||||
adminUser = "admin";
|
||||
adminPasswordFile = "/path/to/admin_password_file";
|
||||
secretKeyFile = "/path/to/secret_key_file";
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
recommendedTlsSettings = true;
|
||||
recommendedOptimisation = true;
|
||||
recommendedGzipSettings = true;
|
||||
recommendedProxySettings = true;
|
||||
upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {};
|
||||
virtualHosts.${url} = {
|
||||
root = config.services.grafana.staticRootPath;
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".tryFiles = "$uri @grafana";
|
||||
locations."@grafana".proxyPass = "http://grafana";
|
||||
};
|
||||
};
|
||||
users.users.nginx.extraGroups = [ "grafana" ];
|
||||
```
|
||||
542
nixos/modules/services/monitoring/parsedmarc.nix
Normal file
542
nixos/modules/services/monitoring/parsedmarc.nix
Normal file
|
|
@ -0,0 +1,542 @@
|
|||
{ config, lib, options, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.parsedmarc;
|
||||
opt = options.services.parsedmarc;
|
||||
ini = pkgs.formats.ini {};
|
||||
in
|
||||
{
|
||||
options.services.parsedmarc = {
|
||||
|
||||
enable = lib.mkEnableOption ''
|
||||
parsedmarc, a DMARC report monitoring service
|
||||
'';
|
||||
|
||||
provision = {
|
||||
localMail = {
|
||||
enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether Postfix and Dovecot should be set up to receive
|
||||
mail locally. parsedmarc will be configured to watch the
|
||||
local inbox as the automatically created user specified in
|
||||
<xref linkend="opt-services.parsedmarc.provision.localMail.recipientName" />
|
||||
'';
|
||||
};
|
||||
|
||||
recipientName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "dmarc";
|
||||
description = ''
|
||||
The DMARC mail recipient name, i.e. the name part of the
|
||||
email address which receives DMARC reports.
|
||||
|
||||
A local user with this name will be set up and assigned a
|
||||
randomized password on service start.
|
||||
'';
|
||||
};
|
||||
|
||||
hostname = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = config.networking.fqdn;
|
||||
defaultText = lib.literalExpression "config.networking.fqdn";
|
||||
example = "monitoring.example.com";
|
||||
description = ''
|
||||
The hostname to use when configuring Postfix.
|
||||
|
||||
Should correspond to the host's fully qualified domain
|
||||
name and the domain part of the email address which
|
||||
receives DMARC reports. You also have to set up an MX record
|
||||
pointing to this domain name.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
geoIp = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to enable and configure the <link
|
||||
linkend="opt-services.geoipupdate.enable">geoipupdate</link>
|
||||
service to automatically fetch GeoIP databases. Not crucial,
|
||||
but recommended for full functionality.
|
||||
|
||||
To finish the setup, you need to manually set the <xref
|
||||
linkend="opt-services.geoipupdate.settings.AccountID" /> and
|
||||
<xref linkend="opt-services.geoipupdate.settings.LicenseKey" />
|
||||
options.
|
||||
'';
|
||||
};
|
||||
|
||||
elasticsearch = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to set up and use a local instance of Elasticsearch.
|
||||
'';
|
||||
};
|
||||
|
||||
grafana = {
|
||||
datasource = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = cfg.provision.elasticsearch && config.services.grafana.enable;
|
||||
defaultText = lib.literalExpression ''
|
||||
config.${opt.provision.elasticsearch} && config.${options.services.grafana.enable}
|
||||
'';
|
||||
apply = x: x && cfg.provision.elasticsearch;
|
||||
description = ''
|
||||
Whether the automatically provisioned Elasticsearch
|
||||
instance should be added as a grafana datasource. Has no
|
||||
effect unless
|
||||
<xref linkend="opt-services.parsedmarc.provision.elasticsearch" />
|
||||
is also enabled.
|
||||
'';
|
||||
};
|
||||
|
||||
dashboard = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = config.services.grafana.enable;
|
||||
defaultText = lib.literalExpression "config.services.grafana.enable";
|
||||
description = ''
|
||||
Whether the official parsedmarc grafana dashboard should
|
||||
be provisioned to the local grafana instance.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
settings = lib.mkOption {
|
||||
description = ''
|
||||
Configuration parameters to set in
|
||||
<filename>parsedmarc.ini</filename>. For a full list of
|
||||
available parameters, see
|
||||
<link xlink:href="https://domainaware.github.io/parsedmarc/#configuration-file" />.
|
||||
'';
|
||||
|
||||
type = lib.types.submodule {
|
||||
freeformType = ini.type;
|
||||
|
||||
options = {
|
||||
general = {
|
||||
save_aggregate = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Save aggregate report data to Elasticsearch and/or Splunk.
|
||||
'';
|
||||
};
|
||||
|
||||
save_forensic = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Save forensic report data to Elasticsearch and/or Splunk.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
imap = {
|
||||
host = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "localhost";
|
||||
description = ''
|
||||
The IMAP server hostname or IP address.
|
||||
'';
|
||||
};
|
||||
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 993;
|
||||
description = ''
|
||||
The IMAP server port.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Use an encrypted SSL/TLS connection.
|
||||
'';
|
||||
};
|
||||
|
||||
user = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
The IMAP server username.
|
||||
'';
|
||||
};
|
||||
|
||||
password = lib.mkOption {
|
||||
type = with lib.types; nullOr path;
|
||||
default = null;
|
||||
description = ''
|
||||
The path to a file containing the IMAP server password.
|
||||
'';
|
||||
};
|
||||
|
||||
watch = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Use the IMAP IDLE command to process messages as they arrive.
|
||||
'';
|
||||
};
|
||||
|
||||
delete = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Delete messages after processing them, instead of archiving them.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
smtp = {
|
||||
host = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
The SMTP server hostname or IP address.
|
||||
'';
|
||||
};
|
||||
|
||||
port = lib.mkOption {
|
||||
type = with lib.types; nullOr port;
|
||||
default = null;
|
||||
description = ''
|
||||
The SMTP server port.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl = lib.mkOption {
|
||||
type = with lib.types; nullOr bool;
|
||||
default = null;
|
||||
description = ''
|
||||
Use an encrypted SSL/TLS connection.
|
||||
'';
|
||||
};
|
||||
|
||||
user = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
The SMTP server username.
|
||||
'';
|
||||
};
|
||||
|
||||
password = lib.mkOption {
|
||||
type = with lib.types; nullOr path;
|
||||
default = null;
|
||||
description = ''
|
||||
The path to a file containing the SMTP server password.
|
||||
'';
|
||||
};
|
||||
|
||||
from = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
The <literal>From</literal> address to use for the
|
||||
outgoing mail.
|
||||
'';
|
||||
};
|
||||
|
||||
to = lib.mkOption {
|
||||
type = with lib.types; nullOr (listOf str);
|
||||
default = null;
|
||||
description = ''
|
||||
The addresses to send outgoing mail to.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
elasticsearch = {
|
||||
hosts = lib.mkOption {
|
||||
default = [];
|
||||
type = with lib.types; listOf str;
|
||||
apply = x: if x == [] then null else lib.concatStringsSep "," x;
|
||||
description = ''
|
||||
A list of Elasticsearch hosts to push parsed reports
|
||||
to.
|
||||
'';
|
||||
};
|
||||
|
||||
user = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
Username to use when connecting to Elasticsearch, if
|
||||
required.
|
||||
'';
|
||||
};
|
||||
|
||||
password = lib.mkOption {
|
||||
type = with lib.types; nullOr path;
|
||||
default = null;
|
||||
description = ''
|
||||
The path to a file containing the password to use when
|
||||
connecting to Elasticsearch, if required.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to use an encrypted SSL/TLS connection.
|
||||
'';
|
||||
};
|
||||
|
||||
cert_path = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = "/etc/ssl/certs/ca-certificates.crt";
|
||||
description = ''
|
||||
The path to a TLS certificate bundle used to verify
|
||||
the server's certificate.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
kafka = {
|
||||
hosts = lib.mkOption {
|
||||
default = [];
|
||||
type = with lib.types; listOf str;
|
||||
apply = x: if x == [] then null else lib.concatStringsSep "," x;
|
||||
description = ''
|
||||
A list of Apache Kafka hosts to publish parsed reports
|
||||
to.
|
||||
'';
|
||||
};
|
||||
|
||||
user = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
description = ''
|
||||
Username to use when connecting to Kafka, if
|
||||
required.
|
||||
'';
|
||||
};
|
||||
|
||||
password = lib.mkOption {
|
||||
type = with lib.types; nullOr path;
|
||||
default = null;
|
||||
description = ''
|
||||
The path to a file containing the password to use when
|
||||
connecting to Kafka, if required.
|
||||
'';
|
||||
};
|
||||
|
||||
ssl = lib.mkOption {
|
||||
type = with lib.types; nullOr bool;
|
||||
default = null;
|
||||
description = ''
|
||||
Whether to use an encrypted SSL/TLS connection.
|
||||
'';
|
||||
};
|
||||
|
||||
aggregate_topic = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
example = "aggregate";
|
||||
description = ''
|
||||
The Kafka topic to publish aggregate reports on.
|
||||
'';
|
||||
};
|
||||
|
||||
forensic_topic = lib.mkOption {
|
||||
type = with lib.types; nullOr str;
|
||||
default = null;
|
||||
example = "forensic";
|
||||
description = ''
|
||||
The Kafka topic to publish forensic reports on.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
|
||||
services.elasticsearch.enable = lib.mkDefault cfg.provision.elasticsearch;
|
||||
|
||||
services.geoipupdate = lib.mkIf cfg.provision.geoIp {
|
||||
enable = true;
|
||||
settings = {
|
||||
EditionIDs = [
|
||||
"GeoLite2-ASN"
|
||||
"GeoLite2-City"
|
||||
"GeoLite2-Country"
|
||||
];
|
||||
DatabaseDirectory = "/var/lib/GeoIP";
|
||||
};
|
||||
};
|
||||
|
||||
services.dovecot2 = lib.mkIf cfg.provision.localMail.enable {
|
||||
enable = true;
|
||||
protocols = [ "imap" ];
|
||||
};
|
||||
|
||||
services.postfix = lib.mkIf cfg.provision.localMail.enable {
|
||||
enable = true;
|
||||
origin = cfg.provision.localMail.hostname;
|
||||
config = {
|
||||
myhostname = cfg.provision.localMail.hostname;
|
||||
mydestination = cfg.provision.localMail.hostname;
|
||||
};
|
||||
};
|
||||
|
||||
services.grafana = {
|
||||
declarativePlugins = with pkgs.grafanaPlugins;
|
||||
lib.mkIf cfg.provision.grafana.dashboard [
|
||||
grafana-worldmap-panel
|
||||
grafana-piechart-panel
|
||||
];
|
||||
|
||||
provision = {
|
||||
enable = cfg.provision.grafana.datasource || cfg.provision.grafana.dashboard;
|
||||
datasources =
|
||||
let
|
||||
pkgVer = lib.getVersion config.services.elasticsearch.package;
|
||||
esVersion =
|
||||
if lib.versionOlder pkgVer "7" then
|
||||
"60"
|
||||
else if lib.versionOlder pkgVer "8" then
|
||||
"70"
|
||||
else
|
||||
throw "When provisioning parsedmarc grafana datasources: unknown Elasticsearch version.";
|
||||
in
|
||||
lib.mkIf cfg.provision.grafana.datasource [
|
||||
{
|
||||
name = "dmarc-ag";
|
||||
type = "elasticsearch";
|
||||
access = "proxy";
|
||||
url = "localhost:9200";
|
||||
jsonData = {
|
||||
timeField = "date_range";
|
||||
inherit esVersion;
|
||||
};
|
||||
}
|
||||
{
|
||||
name = "dmarc-fo";
|
||||
type = "elasticsearch";
|
||||
access = "proxy";
|
||||
url = "localhost:9200";
|
||||
jsonData = {
|
||||
timeField = "date_range";
|
||||
inherit esVersion;
|
||||
};
|
||||
}
|
||||
];
|
||||
dashboards = lib.mkIf cfg.provision.grafana.dashboard [{
|
||||
name = "parsedmarc";
|
||||
options.path = "${pkgs.python3Packages.parsedmarc.dashboard}";
|
||||
}];
|
||||
};
|
||||
};
|
||||
|
||||
services.parsedmarc.settings = lib.mkMerge [
|
||||
(lib.mkIf cfg.provision.elasticsearch {
|
||||
elasticsearch = {
|
||||
hosts = [ "localhost:9200" ];
|
||||
ssl = false;
|
||||
};
|
||||
})
|
||||
(lib.mkIf cfg.provision.localMail.enable {
|
||||
imap = {
|
||||
host = "localhost";
|
||||
port = 143;
|
||||
ssl = false;
|
||||
user = cfg.provision.localMail.recipientName;
|
||||
password = "${pkgs.writeText "imap-password" "@imap-password@"}";
|
||||
watch = true;
|
||||
};
|
||||
})
|
||||
];
|
||||
|
||||
systemd.services.parsedmarc =
|
||||
let
|
||||
# Remove any empty attributes from the config, i.e. empty
|
||||
# lists, empty attrsets and null. This makes it possible to
|
||||
# list interesting options in `settings` without them always
|
||||
# ending up in the resulting config.
|
||||
filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! builtins.elem v [ null [] {} ])) cfg.settings;
|
||||
parsedmarcConfig = ini.generate "parsedmarc.ini" filteredConfig;
|
||||
mkSecretReplacement = file:
|
||||
lib.optionalString (file != null) ''
|
||||
replace-secret '${file}' '${file}' /run/parsedmarc/parsedmarc.ini
|
||||
'';
|
||||
in
|
||||
{
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "postfix.service" "dovecot2.service" "elasticsearch.service" ];
|
||||
path = with pkgs; [ replace-secret openssl shadow ];
|
||||
serviceConfig = {
|
||||
ExecStartPre = let
|
||||
startPreFullPrivileges = ''
|
||||
set -o errexit -o pipefail -o nounset -o errtrace
|
||||
shopt -s inherit_errexit
|
||||
|
||||
umask u=rwx,g=,o=
|
||||
cp ${parsedmarcConfig} /run/parsedmarc/parsedmarc.ini
|
||||
chown parsedmarc:parsedmarc /run/parsedmarc/parsedmarc.ini
|
||||
${mkSecretReplacement cfg.settings.smtp.password}
|
||||
${mkSecretReplacement cfg.settings.imap.password}
|
||||
${mkSecretReplacement cfg.settings.elasticsearch.password}
|
||||
${mkSecretReplacement cfg.settings.kafka.password}
|
||||
'' + lib.optionalString cfg.provision.localMail.enable ''
|
||||
openssl rand -hex 64 >/run/parsedmarc/dmarc_user_passwd
|
||||
replace-secret '@imap-password@' '/run/parsedmarc/dmarc_user_passwd' /run/parsedmarc/parsedmarc.ini
|
||||
echo "Setting new randomized password for user '${cfg.provision.localMail.recipientName}'."
|
||||
cat <(echo -n "${cfg.provision.localMail.recipientName}:") /run/parsedmarc/dmarc_user_passwd | chpasswd
|
||||
'';
|
||||
in
|
||||
"+${pkgs.writeShellScript "parsedmarc-start-pre-full-privileges" startPreFullPrivileges}";
|
||||
Type = "simple";
|
||||
User = "parsedmarc";
|
||||
Group = "parsedmarc";
|
||||
DynamicUser = true;
|
||||
RuntimeDirectory = "parsedmarc";
|
||||
RuntimeDirectoryMode = 0700;
|
||||
CapabilityBoundingSet = "";
|
||||
PrivateDevices = true;
|
||||
PrivateMounts = true;
|
||||
PrivateUsers = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectProc = "invisible";
|
||||
ProcSubset = "pid";
|
||||
SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
|
||||
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
|
||||
RestrictRealtime = true;
|
||||
RestrictNamespaces = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
LockPersonality = true;
|
||||
SystemCallArchitectures = "native";
|
||||
ExecStart = "${pkgs.python3Packages.parsedmarc}/bin/parsedmarc -c /run/parsedmarc/parsedmarc.ini";
|
||||
};
|
||||
};
|
||||
|
||||
users.users.${cfg.provision.localMail.recipientName} = lib.mkIf cfg.provision.localMail.enable {
|
||||
isNormalUser = true;
|
||||
description = "DMARC mail recipient";
|
||||
};
|
||||
};
|
||||
|
||||
# Don't edit the docbook xml directly, edit the md and generate it:
|
||||
# `pandoc parsedmarc.md -t docbook --top-level-division=chapter --extract-media=media -f markdown+smart > parsedmarc.xml`
|
||||
meta.doc = ./parsedmarc.xml;
|
||||
meta.maintainers = [ lib.maintainers.talyz ];
|
||||
}
|
||||
125
nixos/modules/services/monitoring/parsedmarc.xml
Normal file
125
nixos/modules/services/monitoring/parsedmarc.xml
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-parsedmarc">
|
||||
<title>parsedmarc</title>
|
||||
<para>
|
||||
<link xlink:href="https://domainaware.github.io/parsedmarc/">parsedmarc</link>
|
||||
is a service which parses incoming
|
||||
<link xlink:href="https://dmarc.org/">DMARC</link> reports and
|
||||
stores or sends them to a downstream service for further analysis.
|
||||
In combination with Elasticsearch, Grafana and the included Grafana
|
||||
dashboard, it provides a handy overview of DMARC reports over time.
|
||||
</para>
|
||||
<section xml:id="module-services-parsedmarc-basic-usage">
|
||||
<title>Basic usage</title>
|
||||
<para>
|
||||
A very minimal setup which reads incoming reports from an external
|
||||
email address and saves them to a local Elasticsearch instance
|
||||
looks like this:
|
||||
</para>
|
||||
<programlisting language="bash">
|
||||
services.parsedmarc = {
|
||||
enable = true;
|
||||
settings.imap = {
|
||||
host = "imap.example.com";
|
||||
user = "alice@example.com";
|
||||
password = "/path/to/imap_password_file";
|
||||
watch = true;
|
||||
};
|
||||
provision.geoIp = false; # Not recommended!
|
||||
};
|
||||
</programlisting>
|
||||
<para>
|
||||
Note that GeoIP provisioning is disabled in the example for
|
||||
simplicity, but should be turned on for fully functional reports.
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="local-mail">
|
||||
<title>Local mail</title>
|
||||
<para>
|
||||
Instead of watching an external inbox, a local inbox can be
|
||||
automatically provisioned. The recipient’s name is by default set
|
||||
to <literal>dmarc</literal>, but can be configured in
|
||||
<link xlink:href="options.html#opt-services.parsedmarc.provision.localMail.recipientName">services.parsedmarc.provision.localMail.recipientName</link>.
|
||||
You need to add an MX record pointing to the host. More
|
||||
concretely: for the example to work, an MX record needs to be set
|
||||
up for <literal>monitoring.example.com</literal> and the complete
|
||||
email address that should be configured in the domain’s dmarc
|
||||
policy is <literal>dmarc@monitoring.example.com</literal>.
|
||||
</para>
|
||||
<programlisting language="bash">
|
||||
services.parsedmarc = {
|
||||
enable = true;
|
||||
provision = {
|
||||
localMail = {
|
||||
enable = true;
|
||||
hostname = monitoring.example.com;
|
||||
};
|
||||
geoIp = false; # Not recommended!
|
||||
};
|
||||
};
|
||||
</programlisting>
|
||||
</section>
|
||||
<section xml:id="grafana-and-geoip">
|
||||
<title>Grafana and GeoIP</title>
|
||||
<para>
|
||||
The reports can be visualized and summarized with parsedmarc’s
|
||||
official Grafana dashboard. For all views to work, and for the
|
||||
data to be complete, GeoIP databases are also required. The
|
||||
following example shows a basic deployment where the provisioned
|
||||
Elasticsearch instance is automatically added as a Grafana
|
||||
datasource, and the dashboard is added to Grafana as well.
|
||||
</para>
|
||||
<programlisting language="bash">
|
||||
services.parsedmarc = {
|
||||
enable = true;
|
||||
provision = {
|
||||
localMail = {
|
||||
enable = true;
|
||||
hostname = url;
|
||||
};
|
||||
grafana = {
|
||||
datasource = true;
|
||||
dashboard = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
# Not required, but recommended for full functionality
|
||||
services.geoipupdate = {
|
||||
settings = {
|
||||
AccountID = 000000;
|
||||
LicenseKey = "/path/to/license_key_file";
|
||||
};
|
||||
};
|
||||
|
||||
services.grafana = {
|
||||
enable = true;
|
||||
addr = "0.0.0.0";
|
||||
domain = url;
|
||||
rootUrl = "https://" + url;
|
||||
protocol = "socket";
|
||||
security = {
|
||||
adminUser = "admin";
|
||||
adminPasswordFile = "/path/to/admin_password_file";
|
||||
secretKeyFile = "/path/to/secret_key_file";
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
recommendedTlsSettings = true;
|
||||
recommendedOptimisation = true;
|
||||
recommendedGzipSettings = true;
|
||||
recommendedProxySettings = true;
|
||||
upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {};
|
||||
virtualHosts.${url} = {
|
||||
root = config.services.grafana.staticRootPath;
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".tryFiles = "$uri @grafana";
|
||||
locations."@grafana".proxyPass = "http://grafana";
|
||||
};
|
||||
};
|
||||
users.users.nginx.extraGroups = [ "grafana" ];
|
||||
</programlisting>
|
||||
</section>
|
||||
</chapter>
|
||||
187
nixos/modules/services/monitoring/prometheus/alertmanager.nix
Normal file
187
nixos/modules/services/monitoring/prometheus/alertmanager.nix
Normal file
|
|
@ -0,0 +1,187 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.alertmanager;
|
||||
mkConfigFile = pkgs.writeText "alertmanager.yml" (builtins.toJSON cfg.configuration);
|
||||
|
||||
checkedConfig = file: pkgs.runCommand "checked-config" { buildInputs = [ cfg.package ]; } ''
|
||||
ln -s ${file} $out
|
||||
amtool check-config $out
|
||||
'';
|
||||
|
||||
alertmanagerYml = let
|
||||
yml = if cfg.configText != null then
|
||||
pkgs.writeText "alertmanager.yml" cfg.configText
|
||||
else mkConfigFile;
|
||||
in checkedConfig yml;
|
||||
|
||||
cmdlineArgs = cfg.extraFlags ++ [
|
||||
"--config.file /tmp/alert-manager-substituted.yaml"
|
||||
"--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
|
||||
"--log.level ${cfg.logLevel}"
|
||||
"--storage.path /var/lib/alertmanager"
|
||||
(toString (map (peer: "--cluster.peer ${peer}:9094") cfg.clusterPeers))
|
||||
] ++ (optional (cfg.webExternalUrl != null)
|
||||
"--web.external-url ${cfg.webExternalUrl}"
|
||||
) ++ (optional (cfg.logFormat != null)
|
||||
"--log.format ${cfg.logFormat}"
|
||||
);
|
||||
in {
|
||||
imports = [
|
||||
(mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "user" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a user setting.")
|
||||
(mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "group" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a group setting.")
|
||||
(mkRemovedOptionModule [ "services" "prometheus" "alertmanagerURL" ] ''
|
||||
Due to incompatibility, the alertmanagerURL option has been removed,
|
||||
please use 'services.prometheus2.alertmanagers' instead.
|
||||
'')
|
||||
];
|
||||
|
||||
options = {
|
||||
services.prometheus.alertmanager = {
|
||||
enable = mkEnableOption "Prometheus Alertmanager";
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.prometheus-alertmanager;
|
||||
defaultText = literalExpression "pkgs.alertmanager";
|
||||
description = ''
|
||||
Package that should be used for alertmanager.
|
||||
'';
|
||||
};
|
||||
|
||||
configuration = mkOption {
|
||||
type = types.nullOr types.attrs;
|
||||
default = null;
|
||||
description = ''
|
||||
Alertmanager configuration as nix attribute set.
|
||||
'';
|
||||
};
|
||||
|
||||
configText = mkOption {
|
||||
type = types.nullOr types.lines;
|
||||
default = null;
|
||||
description = ''
|
||||
Alertmanager configuration as YAML text. If non-null, this option
|
||||
defines the text that is written to alertmanager.yml. If null, the
|
||||
contents of alertmanager.yml is generated from the structured config
|
||||
options.
|
||||
'';
|
||||
};
|
||||
|
||||
logFormat = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
If set use a syslog logger or JSON logging.
|
||||
'';
|
||||
};
|
||||
|
||||
logLevel = mkOption {
|
||||
type = types.enum ["debug" "info" "warn" "error" "fatal"];
|
||||
default = "warn";
|
||||
description = ''
|
||||
Only log messages with the given severity or above.
|
||||
'';
|
||||
};
|
||||
|
||||
webExternalUrl = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy).
|
||||
Used for generating relative and absolute links back to Alertmanager itself.
|
||||
If the URL has a path portion, it will be used to prefix all HTTP endoints served by Alertmanager.
|
||||
If omitted, relevant URL components will be derived automatically.
|
||||
'';
|
||||
};
|
||||
|
||||
listenAddress = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Address to listen on for the web interface and API. Empty string will listen on all interfaces.
|
||||
"localhost" will listen on 127.0.0.1 (but not ::1).
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 9093;
|
||||
description = ''
|
||||
Port to listen on for the web interface and API.
|
||||
'';
|
||||
};
|
||||
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Open port in firewall for incoming connections.
|
||||
'';
|
||||
};
|
||||
|
||||
clusterPeers = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
Initial peers for HA cluster.
|
||||
'';
|
||||
};
|
||||
|
||||
extraFlags = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra commandline options when launching the Alertmanager.
|
||||
'';
|
||||
};
|
||||
|
||||
environmentFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
example = "/root/alertmanager.env";
|
||||
description = ''
|
||||
File to load as environment file. Environment variables
|
||||
from this file will be interpolated into the config file
|
||||
using envsubst with this syntax:
|
||||
<literal>$ENVIRONMENT ''${VARIABLE}</literal>
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkMerge [
|
||||
(mkIf cfg.enable {
|
||||
assertions = singleton {
|
||||
assertion = cfg.configuration != null || cfg.configText != null;
|
||||
message = "Can not enable alertmanager without a configuration. "
|
||||
+ "Set either the `configuration` or `configText` attribute.";
|
||||
};
|
||||
})
|
||||
(mkIf cfg.enable {
|
||||
networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
|
||||
|
||||
systemd.services.alertmanager = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network-online.target" ];
|
||||
preStart = ''
|
||||
${lib.getBin pkgs.envsubst}/bin/envsubst -o "/tmp/alert-manager-substituted.yaml" \
|
||||
-i "${alertmanagerYml}"
|
||||
'';
|
||||
serviceConfig = {
|
||||
Restart = "always";
|
||||
StateDirectory = "alertmanager";
|
||||
DynamicUser = true; # implies PrivateTmp
|
||||
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
|
||||
WorkingDirectory = "/tmp";
|
||||
ExecStart = "${cfg.package}/bin/alertmanager" +
|
||||
optionalString (length cmdlineArgs != 0) (" \\\n " +
|
||||
concatStringsSep " \\\n " cmdlineArgs);
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
};
|
||||
})
|
||||
];
|
||||
}
|
||||
1830
nixos/modules/services/monitoring/prometheus/default.nix
Normal file
1830
nixos/modules/services/monitoring/prometheus/default.nix
Normal file
File diff suppressed because it is too large
Load diff
303
nixos/modules/services/monitoring/prometheus/exporters.nix
Normal file
303
nixos/modules/services/monitoring/prometheus/exporters.nix
Normal file
|
|
@ -0,0 +1,303 @@
|
|||
{ config, pkgs, lib, options, ... }:
|
||||
|
||||
let
|
||||
inherit (lib) concatStrings foldl foldl' genAttrs literalExpression maintainers
|
||||
mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
|
||||
optional types mkOptionDefault flip attrNames;
|
||||
|
||||
cfg = config.services.prometheus.exporters;
|
||||
|
||||
# each attribute in `exporterOpts` is expected to have specified:
|
||||
# - port (types.int): port on which the exporter listens
|
||||
# - serviceOpts (types.attrs): config that is merged with the
|
||||
# default definition of the exporter's
|
||||
# systemd service
|
||||
# - extraOpts (types.attrs): extra configuration options to
|
||||
# configure the exporter with, which
|
||||
# are appended to the default options
|
||||
#
|
||||
# Note that `extraOpts` is optional, but a script for the exporter's
|
||||
# systemd service must be provided by specifying either
|
||||
# `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart`
|
||||
|
||||
exporterOpts = genAttrs [
|
||||
"apcupsd"
|
||||
"artifactory"
|
||||
"bind"
|
||||
"bird"
|
||||
"bitcoin"
|
||||
"blackbox"
|
||||
"buildkite-agent"
|
||||
"collectd"
|
||||
"dmarc"
|
||||
"dnsmasq"
|
||||
"domain"
|
||||
"dovecot"
|
||||
"fastly"
|
||||
"fritzbox"
|
||||
"influxdb"
|
||||
"json"
|
||||
"jitsi"
|
||||
"kea"
|
||||
"keylight"
|
||||
"knot"
|
||||
"lnd"
|
||||
"mail"
|
||||
"mikrotik"
|
||||
"minio"
|
||||
"modemmanager"
|
||||
"nextcloud"
|
||||
"nginx"
|
||||
"nginxlog"
|
||||
"node"
|
||||
"openldap"
|
||||
"openvpn"
|
||||
"pihole"
|
||||
"postfix"
|
||||
"postgres"
|
||||
"process"
|
||||
"pve"
|
||||
"py-air-control"
|
||||
"redis"
|
||||
"rspamd"
|
||||
"rtl_433"
|
||||
"script"
|
||||
"snmp"
|
||||
"smartctl"
|
||||
"smokeping"
|
||||
"sql"
|
||||
"surfboard"
|
||||
"systemd"
|
||||
"tor"
|
||||
"unbound"
|
||||
"unifi"
|
||||
"unifi-poller"
|
||||
"varnish"
|
||||
"wireguard"
|
||||
"flow"
|
||||
] (name:
|
||||
import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
|
||||
);
|
||||
|
||||
mkExporterOpts = ({ name, port }: {
|
||||
enable = mkEnableOption "the prometheus ${name} exporter";
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = port;
|
||||
description = ''
|
||||
Port to listen on.
|
||||
'';
|
||||
};
|
||||
listenAddress = mkOption {
|
||||
type = types.str;
|
||||
default = "0.0.0.0";
|
||||
description = ''
|
||||
Address to listen on.
|
||||
'';
|
||||
};
|
||||
extraFlags = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra commandline options to pass to the ${name} exporter.
|
||||
'';
|
||||
};
|
||||
openFirewall = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Open port in firewall for incoming connections.
|
||||
'';
|
||||
};
|
||||
firewallFilter = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = literalExpression ''
|
||||
"-i eth0 -p tcp -m tcp --dport ${toString port}"
|
||||
'';
|
||||
description = ''
|
||||
Specify a filter for iptables to use when
|
||||
<option>services.prometheus.exporters.${name}.openFirewall</option>
|
||||
is true. It is used as `ip46tables -I nixos-fw <option>firewallFilter</option> -j nixos-fw-accept`.
|
||||
'';
|
||||
};
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "${name}-exporter";
|
||||
description = ''
|
||||
User name under which the ${name} exporter shall be run.
|
||||
'';
|
||||
};
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "${name}-exporter";
|
||||
description = ''
|
||||
Group under which the ${name} exporter shall be run.
|
||||
'';
|
||||
};
|
||||
});
|
||||
|
||||
mkSubModule = { name, port, extraOpts, imports }: {
|
||||
${name} = mkOption {
|
||||
type = types.submodule [{
|
||||
inherit imports;
|
||||
options = (mkExporterOpts {
|
||||
inherit name port;
|
||||
} // extraOpts);
|
||||
} ({ config, ... }: mkIf config.openFirewall {
|
||||
firewallFilter = mkDefault "-p tcp -m tcp --dport ${toString config.port}";
|
||||
})];
|
||||
internal = true;
|
||||
default = {};
|
||||
};
|
||||
};
|
||||
|
||||
mkSubModules = (foldl' (a: b: a//b) {}
|
||||
(mapAttrsToList (name: opts: mkSubModule {
|
||||
inherit name;
|
||||
inherit (opts) port;
|
||||
extraOpts = opts.extraOpts or {};
|
||||
imports = opts.imports or [];
|
||||
}) exporterOpts)
|
||||
);
|
||||
|
||||
mkExporterConf = { name, conf, serviceOpts }:
|
||||
let
|
||||
enableDynamicUser = serviceOpts.serviceConfig.DynamicUser or true;
|
||||
in
|
||||
mkIf conf.enable {
|
||||
warnings = conf.warnings or [];
|
||||
users.users."${name}-exporter" = (mkIf (conf.user == "${name}-exporter" && !enableDynamicUser) {
|
||||
description = "Prometheus ${name} exporter service user";
|
||||
isSystemUser = true;
|
||||
inherit (conf) group;
|
||||
});
|
||||
users.groups = (mkIf (conf.group == "${name}-exporter" && !enableDynamicUser) {
|
||||
"${name}-exporter" = {};
|
||||
});
|
||||
networking.firewall.extraCommands = mkIf conf.openFirewall (concatStrings [
|
||||
"ip46tables -A nixos-fw ${conf.firewallFilter} "
|
||||
"-m comment --comment ${name}-exporter -j nixos-fw-accept"
|
||||
]);
|
||||
systemd.services."prometheus-${name}-exporter" = mkMerge ([{
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig.Restart = mkDefault "always";
|
||||
serviceConfig.PrivateTmp = mkDefault true;
|
||||
serviceConfig.WorkingDirectory = mkDefault /tmp;
|
||||
serviceConfig.DynamicUser = mkDefault enableDynamicUser;
|
||||
serviceConfig.User = mkDefault conf.user;
|
||||
serviceConfig.Group = conf.group;
|
||||
# Hardening
|
||||
serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
|
||||
serviceConfig.DeviceAllow = [ "" ];
|
||||
serviceConfig.LockPersonality = true;
|
||||
serviceConfig.MemoryDenyWriteExecute = true;
|
||||
serviceConfig.NoNewPrivileges = true;
|
||||
serviceConfig.PrivateDevices = true;
|
||||
serviceConfig.ProtectClock = mkDefault true;
|
||||
serviceConfig.ProtectControlGroups = true;
|
||||
serviceConfig.ProtectHome = true;
|
||||
serviceConfig.ProtectHostname = true;
|
||||
serviceConfig.ProtectKernelLogs = true;
|
||||
serviceConfig.ProtectKernelModules = true;
|
||||
serviceConfig.ProtectKernelTunables = true;
|
||||
serviceConfig.ProtectSystem = mkDefault "strict";
|
||||
serviceConfig.RemoveIPC = true;
|
||||
serviceConfig.RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
serviceConfig.RestrictNamespaces = true;
|
||||
serviceConfig.RestrictRealtime = true;
|
||||
serviceConfig.RestrictSUIDSGID = true;
|
||||
serviceConfig.SystemCallArchitectures = "native";
|
||||
serviceConfig.UMask = "0077";
|
||||
} serviceOpts ]);
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
imports = (lib.forEach [ "blackboxExporter" "collectdExporter" "fritzboxExporter"
|
||||
"jsonExporter" "minioExporter" "nginxExporter" "nodeExporter"
|
||||
"snmpExporter" "unifiExporter" "varnishExporter" ]
|
||||
(opt: lib.mkRemovedOptionModule [ "services" "prometheus" "${opt}" ] ''
|
||||
The prometheus exporters are now configured using `services.prometheus.exporters'.
|
||||
See the 18.03 release notes for more information.
|
||||
'' ));
|
||||
|
||||
options.services.prometheus.exporters = mkOption {
|
||||
type = types.submodule {
|
||||
options = (mkSubModules);
|
||||
};
|
||||
description = "Prometheus exporter configuration";
|
||||
default = {};
|
||||
example = literalExpression ''
|
||||
{
|
||||
node = {
|
||||
enable = true;
|
||||
enabledCollectors = [ "systemd" ];
|
||||
};
|
||||
varnish.enable = true;
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
config = mkMerge ([{
|
||||
assertions = [ {
|
||||
assertion = cfg.snmp.enable -> (
|
||||
(cfg.snmp.configurationPath == null) != (cfg.snmp.configuration == null)
|
||||
);
|
||||
message = ''
|
||||
Please ensure you have either `services.prometheus.exporters.snmp.configuration'
|
||||
or `services.prometheus.exporters.snmp.configurationPath' set!
|
||||
'';
|
||||
} {
|
||||
assertion = cfg.mikrotik.enable -> (
|
||||
(cfg.mikrotik.configFile == null) != (cfg.mikrotik.configuration == null)
|
||||
);
|
||||
message = ''
|
||||
Please specify either `services.prometheus.exporters.mikrotik.configuration'
|
||||
or `services.prometheus.exporters.mikrotik.configFile'.
|
||||
'';
|
||||
} {
|
||||
assertion = cfg.mail.enable -> (
|
||||
(cfg.mail.configFile == null) != (cfg.mail.configuration == null)
|
||||
);
|
||||
message = ''
|
||||
Please specify either 'services.prometheus.exporters.mail.configuration'
|
||||
or 'services.prometheus.exporters.mail.configFile'.
|
||||
'';
|
||||
} {
|
||||
assertion = cfg.sql.enable -> (
|
||||
(cfg.sql.configFile == null) != (cfg.sql.configuration == null)
|
||||
);
|
||||
message = ''
|
||||
Please specify either 'services.prometheus.exporters.sql.configuration' or
|
||||
'services.prometheus.exporters.sql.configFile'
|
||||
'';
|
||||
} ] ++ (flip map (attrNames cfg) (exporter: {
|
||||
assertion = cfg.${exporter}.firewallFilter != null -> cfg.${exporter}.openFirewall;
|
||||
message = ''
|
||||
The `firewallFilter'-option of exporter ${exporter} doesn't have any effect unless
|
||||
`openFirewall' is set to `true'!
|
||||
'';
|
||||
}));
|
||||
}] ++ [(mkIf config.services.minio.enable {
|
||||
services.prometheus.exporters.minio.minioAddress = mkDefault "http://localhost:9000";
|
||||
services.prometheus.exporters.minio.minioAccessKey = mkDefault config.services.minio.accessKey;
|
||||
services.prometheus.exporters.minio.minioAccessSecret = mkDefault config.services.minio.secretKey;
|
||||
})] ++ [(mkIf config.services.prometheus.exporters.rtl_433.enable {
|
||||
hardware.rtl-sdr.enable = mkDefault true;
|
||||
})] ++ [(mkIf config.services.postfix.enable {
|
||||
services.prometheus.exporters.postfix.group = mkDefault config.services.postfix.setgidGroup;
|
||||
})] ++ (mapAttrsToList (name: conf:
|
||||
mkExporterConf {
|
||||
inherit name;
|
||||
inherit (conf) serviceOpts;
|
||||
conf = cfg.${name};
|
||||
}) exporterOpts)
|
||||
);
|
||||
|
||||
meta = {
|
||||
doc = ./exporters.xml;
|
||||
maintainers = [ maintainers.willibutz ];
|
||||
};
|
||||
}
|
||||
248
nixos/modules/services/monitoring/prometheus/exporters.xml
Normal file
248
nixos/modules/services/monitoring/prometheus/exporters.xml
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||
version="5.0"
|
||||
xml:id="module-services-prometheus-exporters">
|
||||
<title>Prometheus exporters</title>
|
||||
<para>
|
||||
Prometheus exporters provide metrics for the
|
||||
<link xlink:href="https://prometheus.io">prometheus monitoring system</link>.
|
||||
</para>
|
||||
<section xml:id="module-services-prometheus-exporters-configuration">
|
||||
<title>Configuration</title>
|
||||
|
||||
<para>
|
||||
One of the most common exporters is the
|
||||
<link xlink:href="https://github.com/prometheus/node_exporter">node
|
||||
exporter</link>, it provides hardware and OS metrics from the host it's
|
||||
running on. The exporter could be configured as follows:
|
||||
<programlisting>
|
||||
services.prometheus.exporters.node = {
|
||||
enable = true;
|
||||
port = 9100;
|
||||
enabledCollectors = [
|
||||
"logind"
|
||||
"systemd"
|
||||
];
|
||||
disabledCollectors = [
|
||||
"textfile"
|
||||
];
|
||||
openFirewall = true;
|
||||
firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
|
||||
};
|
||||
</programlisting>
|
||||
It should now serve all metrics from the collectors that are explicitly
|
||||
enabled and the ones that are
|
||||
<link xlink:href="https://github.com/prometheus/node_exporter#enabled-by-default">enabled
|
||||
by default</link>, via http under <literal>/metrics</literal>. In this
|
||||
example the firewall should just allow incoming connections to the
|
||||
exporter's port on the bridge interface <literal>br0</literal> (this would
|
||||
have to be configured seperately of course). For more information about
|
||||
configuration see <literal>man configuration.nix</literal> or search through
|
||||
the
|
||||
<link xlink:href="https://nixos.org/nixos/options.html#prometheus.exporters">available
|
||||
options</link>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Prometheus can now be configured to consume the metrics produced by the exporter:
|
||||
<programlisting>
|
||||
services.prometheus = {
|
||||
# ...
|
||||
|
||||
scrapeConfigs = [
|
||||
{
|
||||
job_name = "node";
|
||||
static_configs = [{
|
||||
targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
|
||||
}];
|
||||
}
|
||||
];
|
||||
|
||||
# ...
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
</section>
|
||||
<section xml:id="module-services-prometheus-exporters-new-exporter">
|
||||
<title>Adding a new exporter</title>
|
||||
|
||||
<para>
|
||||
To add a new exporter, it has to be packaged first (see
|
||||
<literal>nixpkgs/pkgs/servers/monitoring/prometheus/</literal> for
|
||||
examples), then a module can be added. The postfix exporter is used in this
|
||||
example:
|
||||
</para>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Some default options for all exporters are provided by
|
||||
<literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>:
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem override='none'>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>enable</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>port</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>listenAddress</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>extraFlags</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>openFirewall</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>firewallFilter</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>user</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>group</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
As there is already a package available, the module can now be added. This
|
||||
is accomplished by adding a new file to the
|
||||
<literal>nixos/modules/services/monitoring/prometheus/exporters/</literal>
|
||||
directory, which will be called postfix.nix and contains all exporter
|
||||
specific options and configuration:
|
||||
<programlisting>
|
||||
# nixpgs/nixos/modules/services/prometheus/exporters/postfix.nix
|
||||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
# for convenience we define cfg here
|
||||
cfg = config.services.prometheus.exporters.postfix;
|
||||
in
|
||||
{
|
||||
port = 9154; # The postfix exporter listens on this port by default
|
||||
|
||||
# `extraOpts` is an attribute set which contains additional options
|
||||
# (and optional overrides for default options).
|
||||
# Note that this attribute is optional.
|
||||
extraOpts = {
|
||||
telemetryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
logfilePath = mkOption {
|
||||
type = types.path;
|
||||
default = /var/log/postfix_exporter_input.log;
|
||||
example = /var/log/mail.log;
|
||||
description = ''
|
||||
Path where Postfix writes log entries.
|
||||
This file will be truncated by this exporter!
|
||||
'';
|
||||
};
|
||||
showqPath = mkOption {
|
||||
type = types.path;
|
||||
default = /var/spool/postfix/public/showq;
|
||||
example = /var/lib/postfix/queue/public/showq;
|
||||
description = ''
|
||||
Path at which Postfix places its showq socket.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
# `serviceOpts` is an attribute set which contains configuration
|
||||
# for the exporter's systemd service. One of
|
||||
# `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart`
|
||||
# has to be specified here. This will be merged with the default
|
||||
# service confiuration.
|
||||
# Note that by default 'DynamicUser' is 'true'.
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--web.telemetry-path ${cfg.telemetryPath} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
This should already be enough for the postfix exporter. Additionally one
|
||||
could now add assertions and conditional default values. This can be done
|
||||
in the 'meta-module' that combines all exporter definitions and generates
|
||||
the submodules:
|
||||
<literal>nixpkgs/nixos/modules/services/prometheus/exporters.nix</literal>
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
<section xml:id="module-services-prometheus-exporters-update-exporter-module">
|
||||
<title>Updating an exporter module</title>
|
||||
<para>
|
||||
Should an exporter option change at some point, it is possible to add
|
||||
information about the change to the exporter definition similar to
|
||||
<literal>nixpkgs/nixos/modules/rename.nix</literal>:
|
||||
<programlisting>
|
||||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.nginx;
|
||||
in
|
||||
{
|
||||
port = 9113;
|
||||
extraOpts = {
|
||||
# additional module options
|
||||
# ...
|
||||
};
|
||||
serviceOpts = {
|
||||
# service configuration
|
||||
# ...
|
||||
};
|
||||
imports = [
|
||||
# 'services.prometheus.exporters.nginx.telemetryEndpoint' -> 'services.prometheus.exporters.nginx.telemetryPath'
|
||||
(mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ])
|
||||
|
||||
# removed option 'services.prometheus.exporters.nginx.insecure'
|
||||
(mkRemovedOptionModule [ "insecure" ] ''
|
||||
This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true.
|
||||
'')
|
||||
({ options.warnings = options.warnings; })
|
||||
];
|
||||
}
|
||||
</programlisting>
|
||||
</para>
|
||||
</section>
|
||||
</chapter>
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.apcupsd;
|
||||
in
|
||||
{
|
||||
port = 9162;
|
||||
extraOpts = {
|
||||
apcupsdAddress = mkOption {
|
||||
type = types.str;
|
||||
default = ":3551";
|
||||
description = ''
|
||||
Address of the apcupsd Network Information Server (NIS).
|
||||
'';
|
||||
};
|
||||
|
||||
apcupsdNetwork = mkOption {
|
||||
type = types.enum ["tcp" "tcp4" "tcp6"];
|
||||
default = "tcp";
|
||||
description = ''
|
||||
Network of the apcupsd Network Information Server (NIS): one of "tcp", "tcp4", or "tcp6".
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-apcupsd-exporter}/bin/apcupsd_exporter \
|
||||
-telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \
|
||||
-apcupsd.addr ${cfg.apcupsdAddress} \
|
||||
-apcupsd.network ${cfg.apcupsdNetwork} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.artifactory;
|
||||
in
|
||||
{
|
||||
port = 9531;
|
||||
extraOpts = {
|
||||
scrapeUri = mkOption {
|
||||
type = types.str;
|
||||
default = "http://localhost:8081/artifactory";
|
||||
description = ''
|
||||
URI on which to scrape JFrog Artifactory.
|
||||
'';
|
||||
};
|
||||
|
||||
artiUsername = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
Username for authentication against JFrog Artifactory API.
|
||||
'';
|
||||
};
|
||||
|
||||
artiPassword = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Password for authentication against JFrog Artifactory API.
|
||||
One of the password or access token needs to be set.
|
||||
'';
|
||||
};
|
||||
|
||||
artiAccessToken = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
Access token for authentication against JFrog Artifactory API.
|
||||
One of the password or access token needs to be set.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-artifactory-exporter}/bin/artifactory_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--artifactory.scrape-uri ${cfg.scrapeUri} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
Environment = [
|
||||
"ARTI_USERNAME=${cfg.artiUsername}"
|
||||
"ARTI_PASSWORD=${cfg.artiPassword}"
|
||||
"ARTI_ACCESS_TOKEN=${cfg.artiAccessToken}"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.bind;
|
||||
in
|
||||
{
|
||||
port = 9119;
|
||||
extraOpts = {
|
||||
bindURI = mkOption {
|
||||
type = types.str;
|
||||
default = "http://localhost:8053/";
|
||||
description = ''
|
||||
HTTP XML API address of an Bind server.
|
||||
'';
|
||||
};
|
||||
bindTimeout = mkOption {
|
||||
type = types.str;
|
||||
default = "10s";
|
||||
description = ''
|
||||
Timeout for trying to get stats from Bind.
|
||||
'';
|
||||
};
|
||||
bindVersion = mkOption {
|
||||
type = types.enum [ "xml.v2" "xml.v3" "auto" ];
|
||||
default = "auto";
|
||||
description = ''
|
||||
BIND statistics version. Can be detected automatically.
|
||||
'';
|
||||
};
|
||||
bindGroups = mkOption {
|
||||
type = types.listOf (types.enum [ "server" "view" "tasks" ]);
|
||||
default = [ "server" "view" ];
|
||||
description = ''
|
||||
List of statistics to collect. Available: [server, view, tasks]
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-bind-exporter}/bin/bind_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--bind.pid-file /var/run/named/named.pid \
|
||||
--bind.timeout ${toString cfg.bindTimeout} \
|
||||
--bind.stats-url ${cfg.bindURI} \
|
||||
--bind.stats-version ${cfg.bindVersion} \
|
||||
--bind.stats-groups ${concatStringsSep "," cfg.bindGroups} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.bird;
|
||||
in
|
||||
{
|
||||
port = 9324;
|
||||
extraOpts = {
|
||||
birdVersion = mkOption {
|
||||
type = types.enum [ 1 2 ];
|
||||
default = 2;
|
||||
description = ''
|
||||
Specifies whether BIRD1 or BIRD2 is in use.
|
||||
'';
|
||||
};
|
||||
birdSocket = mkOption {
|
||||
type = types.path;
|
||||
default = "/run/bird/bird.ctl";
|
||||
description = ''
|
||||
Path to BIRD2 (or BIRD1 v4) socket.
|
||||
'';
|
||||
};
|
||||
newMetricFormat = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Enable the new more-generic metric format.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
SupplementaryGroups = singleton (if cfg.birdVersion == 1 then "bird" else "bird2");
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-bird-exporter}/bin/bird_exporter \
|
||||
-web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
-bird.socket ${cfg.birdSocket} \
|
||||
-bird.v2=${if cfg.birdVersion == 2 then "true" else "false"} \
|
||||
-format.new=${if cfg.newMetricFormat then "true" else "false"} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
RestrictAddressFamilies = [
|
||||
# Need AF_UNIX to collect data
|
||||
"AF_UNIX"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,82 @@
|
|||
# Prometheus exporter polling a bitcoind instance over JSON-RPC.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.bitcoin;
in
{
  port = 9332;
  extraOpts = {
    rpcUser = mkOption {
      type = types.str;
      default = "bitcoinrpc";
      description = ''
        RPC user name.
      '';
    };

    rpcPasswordFile = mkOption {
      type = types.path;
      description = ''
        File containing RPC password.
      '';
    };

    rpcScheme = mkOption {
      type = types.enum [ "http" "https" ];
      default = "http";
      description = ''
        Whether to connect to bitcoind over http or https.
      '';
    };

    rpcHost = mkOption {
      type = types.str;
      default = "localhost";
      description = ''
        RPC host.
      '';
    };

    rpcPort = mkOption {
      type = types.port;
      default = 8332;
      description = ''
        RPC port number.
      '';
    };

    refreshSeconds = mkOption {
      type = types.ints.unsigned;
      default = 300;
      description = ''
        How often to ask bitcoind for metrics.
      '';
    };

    extraEnv = mkOption {
      type = types.attrsOf types.str;
      default = {};
      description = ''
        Extra environment variables for the exporter.
      '';
    };
  };
  serviceOpts = {
    # The password is read at service start so it stays out of the unit file
    # and the Nix store (assuming rpcPasswordFile points outside the store).
    script = ''
      export BITCOIN_RPC_PASSWORD=$(cat ${cfg.rpcPasswordFile})
      exec ${pkgs.prometheus-bitcoin-exporter}/bin/bitcoind-monitor.py
    '';

    environment = {
      BITCOIN_RPC_USER = cfg.rpcUser;
      BITCOIN_RPC_SCHEME = cfg.rpcScheme;
      BITCOIN_RPC_HOST = cfg.rpcHost;
      BITCOIN_RPC_PORT = toString cfg.rpcPort;
      METRICS_ADDR = cfg.listenAddress;
      METRICS_PORT = toString cfg.port;
      REFRESH_SECONDS = toString cfg.refreshSeconds;
    } // cfg.extraEnv;
  };
}
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
logPrefix = "services.prometheus.exporter.blackbox";
|
||||
cfg = config.services.prometheus.exporters.blackbox;
|
||||
|
||||
# This ensures that we can deal with string paths, path types and
|
||||
# store-path strings with context.
|
||||
coerceConfigFile = file:
|
||||
if (builtins.isPath file) || (lib.isStorePath file) then
|
||||
file
|
||||
else
|
||||
(lib.warn ''
|
||||
${logPrefix}: configuration file "${file}" is being copied to the nix-store.
|
||||
If you would like to avoid that, please set enableConfigCheck to false.
|
||||
'' /. + file);
|
||||
checkConfigLocation = file:
|
||||
if lib.hasPrefix "/tmp/" file then
|
||||
throw
|
||||
"${logPrefix}: configuration file must not reside within /tmp - it won't be visible to the systemd service."
|
||||
else
|
||||
true;
|
||||
checkConfig = file:
|
||||
pkgs.runCommand "checked-blackbox-exporter.conf" {
|
||||
preferLocalBuild = true;
|
||||
buildInputs = [ pkgs.buildPackages.prometheus-blackbox-exporter ];
|
||||
} ''
|
||||
ln -s ${coerceConfigFile file} $out
|
||||
blackbox_exporter --config.check --config.file $out
|
||||
'';
|
||||
in {
|
||||
port = 9115;
|
||||
extraOpts = {
|
||||
configFile = mkOption {
|
||||
type = types.path;
|
||||
description = ''
|
||||
Path to configuration file.
|
||||
'';
|
||||
};
|
||||
enableConfigCheck = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to run a correctness check for the configuration file. This depends
|
||||
on the configuration file residing in the nix-store. Paths passed as string will
|
||||
be copied to the store.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
serviceOpts = let
|
||||
adjustedConfigFile = if cfg.enableConfigCheck then
|
||||
checkConfig cfg.configFile
|
||||
else
|
||||
checkConfigLocation cfg.configFile;
|
||||
in {
|
||||
serviceConfig = {
|
||||
AmbientCapabilities = [ "CAP_NET_RAW" ]; # for ping probes
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-blackbox-exporter}/bin/blackbox_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--config.file ${escapeShellArg adjustedConfigFile} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
# Prometheus exporter for Buildkite agent/queue metrics.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.buildkite-agent;
in
{
  port = 9876;
  extraOpts = {
    tokenPath = mkOption {
      type = types.nullOr types.path;
      # Coerce to a string so a path value is NOT copied into the Nix store.
      apply = final: if final == null then null else toString final;
      description = ''
        The token from your Buildkite "Agents" page.

        A run-time path to the token file, which is supposed to be provisioned
        outside of Nix store.
      '';
    };
    interval = mkOption {
      type = types.str;
      default = "30s";
      example = "1min";
      description = ''
        How often to update metrics.
      '';
    };
    endpoint = mkOption {
      type = types.str;
      default = "https://agent.buildkite.com/v3";
      description = ''
        The Buildkite Agent API endpoint.
      '';
    };
    queues = mkOption {
      type = with types; nullOr (listOf str);
      default = null;
      example = literalExpression ''[ "my-queue1" "my-queue2" ]'';
      description = ''
        Which specific queues to process.
      '';
    };
  };
  serviceOpts = {
    script =
      let
        # Lazy binding: only forced by optionalString when cfg.queues != null,
        # so a null queues list never reaches `map`.
        queues = concatStringsSep " " (map (q: "-queue ${q}") cfg.queues);
      in
      ''
        export BUILDKITE_AGENT_TOKEN="$(cat ${toString cfg.tokenPath})"
        exec ${pkgs.buildkite-agent-metrics}/bin/buildkite-agent-metrics \
          -backend prometheus \
          -interval ${cfg.interval} \
          -endpoint ${cfg.endpoint} \
          ${optionalString (cfg.queues != null) queues} \
          -prometheus-addr "${cfg.listenAddress}:${toString cfg.port}" ${concatStringsSep " " cfg.extraFlags}
      '';
    serviceConfig = {
      DynamicUser = false;
      RuntimeDirectory = "buildkite-agent-metrics";
    };
  };
}
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
# Prometheus exporter relaying collectd metrics (push or binary protocol).
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.collectd;
in
{
  port = 9103;
  extraOpts = {
    collectdBinary = {
      enable = mkEnableOption "collectd binary protocol receiver";

      # NOTE(review): authFile is declared but never passed on the command
      # line below — confirm whether the exporter should receive it.
      authFile = mkOption {
        default = null;
        type = types.nullOr types.path;
        description = "File mapping user names to pre-shared keys (passwords).";
      };

      port = mkOption {
        type = types.int;
        default = 25826;
        description = "Network address on which to accept collectd binary network packets.";
      };

      listenAddress = mkOption {
        type = types.str;
        default = "0.0.0.0";
        description = ''
          Address to listen on for binary network packets.
        '';
      };

      securityLevel = mkOption {
        type = types.enum ["None" "Sign" "Encrypt"];
        default = "None";
        description = ''
          Minimum required security level for accepted packets.
        '';
      };
    };

    logFormat = mkOption {
      type = types.enum [ "logfmt" "json" ];
      default = "logfmt";
      example = "json";
      description = ''
        Set the log format.
      '';
    };

    logLevel = mkOption {
      type = types.enum ["debug" "info" "warn" "error" "fatal"];
      default = "info";
      description = ''
        Only log messages with the given severity or above.
      '';
    };
  };
  serviceOpts = let
    # Binary-protocol flags are only emitted when the receiver is enabled.
    collectSettingsArgs = if (cfg.collectdBinary.enable) then ''
      --collectd.listen-address ${cfg.collectdBinary.listenAddress}:${toString cfg.collectdBinary.port} \
      --collectd.security-level ${cfg.collectdBinary.securityLevel} \
    '' else "";
  in {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-collectd-exporter}/bin/collectd_exporter \
          --log.format ${escapeShellArg cfg.logFormat} \
          --log.level ${cfg.logLevel} \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          ${collectSettingsArgs} \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
    };
  };
}
|
||||
117
nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix
Normal file
117
nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix
Normal file
|
|
@ -0,0 +1,117 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.dmarc;
|
||||
|
||||
json = builtins.toJSON {
|
||||
inherit (cfg) folders port;
|
||||
listen_addr = cfg.listenAddress;
|
||||
storage_path = "$STATE_DIRECTORY";
|
||||
imap = (builtins.removeAttrs cfg.imap [ "passwordFile" ]) // { password = "$IMAP_PASSWORD"; use_ssl = true; };
|
||||
poll_interval_seconds = cfg.pollIntervalSeconds;
|
||||
deduplication_max_seconds = cfg.deduplicationMaxSeconds;
|
||||
logging = {
|
||||
version = 1;
|
||||
disable_existing_loggers = false;
|
||||
};
|
||||
};
|
||||
in {
|
||||
port = 9797;
|
||||
extraOpts = {
|
||||
imap = {
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "localhost";
|
||||
description = ''
|
||||
Hostname of IMAP server to connect to.
|
||||
'';
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 993;
|
||||
description = ''
|
||||
Port of the IMAP server to connect to.
|
||||
'';
|
||||
};
|
||||
username = mkOption {
|
||||
type = types.str;
|
||||
example = "postmaster@example.org";
|
||||
description = ''
|
||||
Login username for the IMAP connection.
|
||||
'';
|
||||
};
|
||||
passwordFile = mkOption {
|
||||
type = types.str;
|
||||
example = "/run/secrets/dovecot_pw";
|
||||
description = ''
|
||||
File containing the login password for the IMAP connection.
|
||||
'';
|
||||
};
|
||||
};
|
||||
folders = {
|
||||
inbox = mkOption {
|
||||
type = types.str;
|
||||
default = "INBOX";
|
||||
description = ''
|
||||
IMAP mailbox that is checked for incoming DMARC aggregate reports
|
||||
'';
|
||||
};
|
||||
done = mkOption {
|
||||
type = types.str;
|
||||
default = "Archive";
|
||||
description = ''
|
||||
IMAP mailbox that successfully processed reports are moved to.
|
||||
'';
|
||||
};
|
||||
error = mkOption {
|
||||
type = types.str;
|
||||
default = "Invalid";
|
||||
description = ''
|
||||
IMAP mailbox that emails are moved to that could not be processed.
|
||||
'';
|
||||
};
|
||||
};
|
||||
pollIntervalSeconds = mkOption {
|
||||
type = types.ints.unsigned;
|
||||
default = 60;
|
||||
description = ''
|
||||
How often to poll the IMAP server in seconds.
|
||||
'';
|
||||
};
|
||||
deduplicationMaxSeconds = mkOption {
|
||||
type = types.ints.unsigned;
|
||||
default = 604800;
|
||||
defaultText = "7 days (in seconds)";
|
||||
description = ''
|
||||
How long individual report IDs will be remembered to avoid
|
||||
counting double delivered reports twice.
|
||||
'';
|
||||
};
|
||||
debug = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to declare enable <literal>--debug</literal>.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
path = with pkgs; [ envsubst coreutils ];
|
||||
serviceConfig = {
|
||||
StateDirectory = "prometheus-dmarc-exporter";
|
||||
WorkingDirectory = "/var/lib/prometheus-dmarc-exporter";
|
||||
ExecStart = "${pkgs.writeShellScript "setup-cfg" ''
|
||||
export IMAP_PASSWORD="$(<${cfg.imap.passwordFile})"
|
||||
envsubst \
|
||||
-i ${pkgs.writeText "dmarc-exporter.json.template" json} \
|
||||
-o ''${STATE_DIRECTORY}/dmarc-exporter.json
|
||||
|
||||
exec ${pkgs.dmarc-metrics-exporter}/bin/dmarc-metrics-exporter \
|
||||
--configuration /var/lib/prometheus-dmarc-exporter/dmarc-exporter.json \
|
||||
${optionalString cfg.debug "--debug"}
|
||||
''}";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
# Prometheus exporter scraping a local dnsmasq instance and its leases file.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.dnsmasq;
in
{
  port = 9153;
  extraOpts = {
    dnsmasqListenAddress = mkOption {
      type = types.str;
      default = "localhost:53";
      description = ''
        Address on which dnsmasq listens.
      '';
    };
    leasesPath = mkOption {
      type = types.path;
      default = "/var/lib/misc/dnsmasq.leases";
      example = "/var/lib/dnsmasq/dnsmasq.leases";
      description = ''
        Path to the <literal>dnsmasq.leases</literal> file.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-dnsmasq-exporter}/bin/dnsmasq_exporter \
          --listen ${cfg.listenAddress}:${toString cfg.port} \
          --dnsmasq ${cfg.dnsmasqListenAddress} \
          --leases_path ${escapeShellArg cfg.leasesPath} \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
# Prometheus exporter for domain (WHOIS expiry) probing.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.domain;
in
{
  port = 9222;
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-domain-exporter}/bin/domain_exporter \
          --bind ${cfg.listenAddress}:${toString cfg.port} \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.dovecot;
|
||||
in
|
||||
{
|
||||
port = 9166;
|
||||
extraOpts = {
|
||||
telemetryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
socketPath = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/run/dovecot/stats";
|
||||
example = "/var/run/dovecot2/old-stats";
|
||||
description = ''
|
||||
Path under which the stats socket is placed.
|
||||
The user/group under which the exporter runs,
|
||||
should be able to access the socket in order
|
||||
to scrape the metrics successfully.
|
||||
|
||||
Please keep in mind that the stats module has changed in
|
||||
<link xlink:href="https://wiki2.dovecot.org/Upgrading/2.3">Dovecot 2.3+</link> which
|
||||
is not <link xlink:href="https://github.com/kumina/dovecot_exporter/issues/8">compatible with this exporter</link>.
|
||||
|
||||
The following extra config has to be passed to Dovecot to ensure that recent versions
|
||||
work with this exporter:
|
||||
<programlisting>
|
||||
{
|
||||
<xref linkend="opt-services.prometheus.exporters.dovecot.enable" /> = true;
|
||||
<xref linkend="opt-services.prometheus.exporters.dovecot.socketPath" /> = "/var/run/dovecot2/old-stats";
|
||||
<xref linkend="opt-services.dovecot2.mailPlugins.globally.enable" /> = [ "old_stats" ];
|
||||
<xref linkend="opt-services.dovecot2.extraConfig" /> = '''
|
||||
service old-stats {
|
||||
unix_listener old-stats {
|
||||
user = dovecot-exporter
|
||||
group = dovecot-exporter
|
||||
mode = 0660
|
||||
}
|
||||
fifo_listener old-stats-mail {
|
||||
mode = 0660
|
||||
user = dovecot
|
||||
group = dovecot
|
||||
}
|
||||
fifo_listener old-stats-user {
|
||||
mode = 0660
|
||||
user = dovecot
|
||||
group = dovecot
|
||||
}
|
||||
}
|
||||
plugin {
|
||||
old_stats_refresh = 30 secs
|
||||
old_stats_track_cmds = yes
|
||||
}
|
||||
''';
|
||||
}
|
||||
</programlisting>
|
||||
'';
|
||||
};
|
||||
scopes = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "user" ];
|
||||
example = [ "user" "global" ];
|
||||
description = ''
|
||||
Stats scopes to query.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-dovecot-exporter}/bin/dovecot_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--web.telemetry-path ${cfg.telemetryPath} \
|
||||
--dovecot.socket-path ${escapeShellArg cfg.socketPath} \
|
||||
--dovecot.scopes ${concatStringsSep "," cfg.scopes} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
RestrictAddressFamilies = [
|
||||
# Need AF_UNIX to collect data
|
||||
"AF_UNIX"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
# Prometheus exporter for Fastly real-time analytics.
{ config, lib, pkgs, options }:

with lib;

let cfg = config.services.prometheus.exporters.fastly;
in
{
  port = 9118;
  extraOpts = {
    debug = mkEnableOption "Debug logging mode for fastly-exporter";

    configFile = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = ''
        Path to a fastly-exporter configuration file.
        Example one can be generated with <literal>fastly-exporter --config-file-example</literal>.
      '';
      example = "./fastly-exporter-config.txt";
    };

    tokenPath = mkOption {
      type = types.nullOr types.path;
      # Coerce to a string so the secret file is not copied to the Nix store.
      apply = final: if final == null then null else toString final;
      description = ''
        A run-time path to the token file, which is supposed to be provisioned
        outside of Nix store.
      '';
    };
  };
  serviceOpts = {
    script = ''
      ${optionalString (cfg.tokenPath != null)
        "export FASTLY_API_TOKEN=$(cat ${toString cfg.tokenPath})"}
      ${pkgs.prometheus-fastly-exporter}/bin/fastly-exporter \
        -listen http://${cfg.listenAddress}:${toString cfg.port} \
        ${optionalString cfg.debug "-debug true"} \
        ${optionalString (cfg.configFile != null) "-config-file ${cfg.configFile}"}
    '';
    # BUG FIX: the original omitted the trailing `\` after the -listen line,
    # so the shell treated "-debug true"/"-config-file ..." as a separate
    # command after the exporter was launched; the flags were never applied.
  };
}
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
# Prometheus exporter consuming pmacct flow records from Kafka.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.flow;
in {
  port = 9590;
  extraOpts = {
    brokers = mkOption {
      type = types.listOf types.str;
      example = literalExpression ''[ "kafka.example.org:19092" ]'';
      description = "List of Kafka brokers to connect to.";
    };

    asn = mkOption {
      type = types.ints.positive;
      example = 65542;
      description = "The ASN being monitored.";
    };

    partitions = mkOption {
      type = types.listOf types.int;
      default = [];
      description = ''
        The number of the partitions to consume, none means all.
      '';
    };

    topic = mkOption {
      type = types.str;
      example = "pmacct.acct";
      description = "The Kafka topic to consume from.";
    };
  };

  serviceOpts = {
    serviceConfig = {
      DynamicUser = true;
      ExecStart = ''
        ${pkgs.prometheus-flow-exporter}/bin/flow-exporter \
          -asn ${toString cfg.asn} \
          -topic ${cfg.topic} \
          -brokers ${concatStringsSep "," cfg.brokers} \
          ${optionalString (cfg.partitions != []) "-partitions ${concatMapStringsSep "," toString cfg.partitions}"} \
          -addr ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
      '';
      # BUG FIX: cfg.partitions is a listOf int; the original passed it
      # straight to concatStringsSep, which requires strings and fails at
      # evaluation time whenever partitions is non-empty. Each element is
      # now converted with toString via concatMapStringsSep.
    };
  };
}
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
# Prometheus exporter querying a FRITZ!Box router via UPnP.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.fritzbox;
in
{
  port = 9133;
  extraOpts = {
    gatewayAddress = mkOption {
      type = types.str;
      default = "fritz.box";
      description = ''
        The hostname or IP of the FRITZ!Box.
      '';
    };

    gatewayPort = mkOption {
      type = types.int;
      default = 49000;
      description = ''
        The port of the FRITZ!Box UPnP service.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-fritzbox-exporter}/bin/exporter \
          -listen-address ${cfg.listenAddress}:${toString cfg.port} \
          -gateway-address ${cfg.gatewayAddress} \
          -gateway-port ${toString cfg.gatewayPort} \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
# Prometheus exporter accepting InfluxDB line-protocol writes and
# re-exposing them as Prometheus metrics.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.influxdb;
in
{
  port = 9122;
  extraOpts = {
    sampleExpiry = mkOption {
      type = types.str;
      default = "5m";
      example = "10m";
      description = "How long a sample is valid for";
    };
    udpBindAddress = mkOption {
      type = types.str;
      default = ":9122";
      example = "192.0.2.1:9122";
      description = "Address on which to listen for udp packets";
    };
  };
  serviceOpts = {
    serviceConfig = {
      RuntimeDirectory = "prometheus-influxdb-exporter";
      ExecStart = ''
        ${pkgs.prometheus-influxdb-exporter}/bin/influxdb_exporter \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          --influxdb.sample-expiry ${cfg.sampleExpiry} ${concatStringsSep " " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
# Prometheus exporter scraping Jitsi Videobridge colibri statistics.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.jitsi;
in
{
  port = 9700;
  extraOpts = {
    url = mkOption {
      type = types.str;
      default = "http://localhost:8080/colibri/stats";
      description = ''
        Jitsi Videobridge metrics URL to monitor.
        This is usually /colibri/stats on port 8080 of the jitsi videobridge host.
      '';
    };
    interval = mkOption {
      type = types.str;
      default = "30s";
      example = "1min";
      description = ''
        How often to scrape new data
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-jitsi-exporter}/bin/jitsiexporter \
          -url ${escapeShellArg cfg.url} \
          -host ${cfg.listenAddress} \
          -port ${toString cfg.port} \
          -interval ${toString cfg.interval} \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
# Prometheus exporter probing arbitrary JSON endpoints via JSONPath config.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.json;
in
{
  port = 7979;
  extraOpts = {
    configFile = mkOption {
      type = types.path;
      description = ''
        Path to configuration file.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-json-exporter}/bin/json_exporter \
          --config.file ${escapeShellArg cfg.configFile} \
          --web.listen-address="${cfg.listenAddress}:${toString cfg.port}" \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
    };
  };
  imports = [
    (mkRemovedOptionModule [ "url" ] ''
      This option was removed. The URL of the endpoint serving JSON
      must now be provided to the exporter by prometheus via the url
      parameter `target'.

      In prometheus a scrape URL would look like this:

        http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/endpoint

      For more information, take a look at the official documentation
      (https://github.com/prometheus-community/json_exporter) of the json_exporter.
    '')
    # Re-export warnings/assertions so mkRemovedOptionModule can emit them
    # from within this submodule evaluation.
    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
  ];
}
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
# Prometheus exporter reading DHCP statistics from Kea control sockets.
{ config
, lib
, pkgs
, options
}:

with lib;

let
  cfg = config.services.prometheus.exporters.kea;
in {
  port = 9547;
  extraOpts = {
    controlSocketPaths = mkOption {
      type = types.listOf types.str;
      example = literalExpression ''
        [
          "/run/kea/kea-dhcp4.socket"
          "/run/kea/kea-dhcp6.socket"
        ]
      '';
      description = ''
        Paths to kea control sockets
      '';
    };
  };
  serviceOpts = {
    after = [
      "kea-dhcp4-server.service"
      "kea-dhcp6-server.service"
    ];
    serviceConfig = {
      User = "kea";
      ExecStart = ''
        ${pkgs.prometheus-kea-exporter}/bin/kea-exporter \
          --address ${cfg.listenAddress} \
          --port ${toString cfg.port} \
          ${concatStringsSep " \\\n  " cfg.controlSocketPaths}
      '';
      # BUG FIX: the original separator was " \\n" (a literal backslash
      # followed by the character 'n'), which glued socket paths together
      # on one shell line as e.g. `a \nb`. It is now " \\\n " — backslash +
      # newline, i.e. a proper shell line continuation, matching the
      # extraFlags handling in every other exporter module.
      SupplementaryGroups = [ "kea" ];
      RestrictAddressFamilies = [
        # Need AF_UNIX to collect data
        "AF_UNIX"
      ];
    };
  };
}
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
# Prometheus exporter for Elgato Key Light devices.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.keylight;
in
{
  port = 9288;
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-keylight-exporter}/bin/keylight_exporter \
          -metrics.addr ${cfg.listenAddress}:${toString cfg.port} \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
# Prometheus exporter reading statistics from a local knotd control socket.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.knot;
in {
  port = 9433;
  extraOpts = {
    knotLibraryPath = mkOption {
      type = types.str;
      default = "${pkgs.knot-dns.out}/lib/libknot.so";
      defaultText = literalExpression ''"''${pkgs.knot-dns.out}/lib/libknot.so"'';
      description = ''
        Path to the library of <package>knot-dns</package>.
      '';
    };

    knotSocketPath = mkOption {
      type = types.str;
      default = "/run/knot/knot.sock";
      description = ''
        Socket path of <citerefentry><refentrytitle>knotd</refentrytitle>
        <manvolnum>8</manvolnum></citerefentry>.
      '';
    };

    knotSocketTimeout = mkOption {
      type = types.int;
      default = 2000;
      description = ''
        Timeout in seconds.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-knot-exporter}/bin/knot_exporter \
          --web-listen-addr ${cfg.listenAddress} \
          --web-listen-port ${toString cfg.port} \
          --knot-library-path ${cfg.knotLibraryPath} \
          --knot-socket-path ${cfg.knotSocketPath} \
          --knot-socket-timeout ${toString cfg.knotSocketTimeout} \
          ${concatStringsSep " \\\n  " cfg.extraFlags}
      '';
      # Membership in the knot group grants read access to the control socket.
      SupplementaryGroups = [ "knot" ];
      RestrictAddressFamilies = [
        # Need AF_UNIX to collect data
        "AF_UNIX"
      ];
    };
  };
}
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
# Prometheus exporter (lndmon) scraping a Lightning Network Daemon over gRPC.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.lnd;
in
{
  port = 9092;
  extraOpts = {
    lndHost = mkOption {
      type = types.str;
      default = "localhost:10009";
      description = ''
        lnd instance gRPC address:port.
      '';
    };

    lndTlsPath = mkOption {
      type = types.path;
      description = ''
        Path to lnd TLS certificate.
      '';
    };

    lndMacaroonDir = mkOption {
      type = types.path;
      description = ''
        Path to lnd macaroons.
      '';
    };
  };
  serviceOpts.serviceConfig = {
    ExecStart = ''
      ${pkgs.prometheus-lnd-exporter}/bin/lndmon \
        --prometheus.listenaddr=${cfg.listenAddress}:${toString cfg.port} \
        --prometheus.logdir=/var/log/prometheus-lnd-exporter \
        --lnd.host=${cfg.lndHost} \
        --lnd.tlspath=${cfg.lndTlsPath} \
        --lnd.macaroondir=${cfg.lndMacaroonDir} \
        ${concatStringsSep " \\\n  " cfg.extraFlags}
    '';
    LogsDirectory = "prometheus-lnd-exporter";
    # Credentials are only ever read, never written, by the exporter.
    ReadOnlyPaths = [ cfg.lndTlsPath cfg.lndMacaroonDir ];
  };
}
|
||||
176
nixos/modules/services/monitoring/prometheus/exporters/mail.nix
Normal file
176
nixos/modules/services/monitoring/prometheus/exporters/mail.nix
Normal file
|
|
@ -0,0 +1,176 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.mail;
|
||||
|
||||
configurationFile = pkgs.writeText "prometheus-mail-exporter.conf" (builtins.toJSON (
|
||||
# removes the _module attribute, null values and converts attrNames to lowercase
|
||||
mapAttrs' (name: value:
|
||||
if name == "servers"
|
||||
then nameValuePair (toLower name)
|
||||
((map (srv: (mapAttrs' (n: v: nameValuePair (toLower n) v)
|
||||
(filterAttrs (n: v: !(n == "_module" || v == null)) srv)
|
||||
))) value)
|
||||
else nameValuePair (toLower name) value
|
||||
) (filterAttrs (n: _: !(n == "_module")) cfg.configuration)
|
||||
));
|
||||
|
||||
serverOptions.options = {
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
Value for label 'configname' which will be added to all metrics.
|
||||
'';
|
||||
};
|
||||
server = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
Hostname of the server that should be probed.
|
||||
'';
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
example = 587;
|
||||
description = ''
|
||||
Port to use for SMTP.
|
||||
'';
|
||||
};
|
||||
from = mkOption {
|
||||
type = types.str;
|
||||
example = "exporteruser@domain.tld";
|
||||
description = ''
|
||||
Content of 'From' Header for probing mails.
|
||||
'';
|
||||
};
|
||||
to = mkOption {
|
||||
type = types.str;
|
||||
example = "exporteruser@domain.tld";
|
||||
description = ''
|
||||
Content of 'To' Header for probing mails.
|
||||
'';
|
||||
};
|
||||
detectionDir = mkOption {
|
||||
type = types.path;
|
||||
example = "/var/spool/mail/exporteruser/new";
|
||||
description = ''
|
||||
Directory in which new mails for the exporter user are placed.
|
||||
Note that this needs to exist when the exporter starts.
|
||||
'';
|
||||
};
|
||||
login = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "exporteruser@domain.tld";
|
||||
description = ''
|
||||
Username to use for SMTP authentication.
|
||||
'';
|
||||
};
|
||||
passphrase = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Password to use for SMTP authentication.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
exporterOptions.options = {
|
||||
monitoringInterval = mkOption {
|
||||
type = types.str;
|
||||
example = "10s";
|
||||
description = ''
|
||||
Time interval between two probe attempts.
|
||||
'';
|
||||
};
|
||||
mailCheckTimeout = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
Timeout until mails are considered "didn't make it".
|
||||
'';
|
||||
};
|
||||
disableFileDeletion = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Disables the exporter's function to delete probing mails.
|
||||
'';
|
||||
};
|
||||
servers = mkOption {
|
||||
type = types.listOf (types.submodule serverOptions);
|
||||
default = [];
|
||||
example = literalExpression ''
|
||||
[ {
|
||||
name = "testserver";
|
||||
server = "smtp.domain.tld";
|
||||
port = 587;
|
||||
from = "exporteruser@domain.tld";
|
||||
to = "exporteruser@domain.tld";
|
||||
detectionDir = "/path/to/Maildir/new";
|
||||
} ]
|
||||
'';
|
||||
description = ''
|
||||
List of servers that should be probed.
|
||||
|
||||
<emphasis>Note:</emphasis> if your mailserver has <citerefentry>
|
||||
<refentrytitle>rspamd</refentrytitle><manvolnum>8</manvolnum></citerefentry> configured,
|
||||
it can happen that emails from this exporter are marked as spam.
|
||||
|
||||
It's possible to work around the issue with a config like this:
|
||||
<programlisting>
|
||||
{
|
||||
<link linkend="opt-services.rspamd.locals._name_.text">services.rspamd.locals."multimap.conf".text</link> = '''
|
||||
ALLOWLIST_PROMETHEUS {
|
||||
filter = "email:domain:tld";
|
||||
type = "from";
|
||||
map = "''${pkgs.writeText "allowmap" "domain.tld"}";
|
||||
score = -100.0;
|
||||
}
|
||||
''';
|
||||
}
|
||||
</programlisting>
|
||||
'';
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
port = 9225;
|
||||
extraOpts = {
|
||||
configFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
Specify the mailexporter configuration file to use.
|
||||
'';
|
||||
};
|
||||
configuration = mkOption {
|
||||
type = types.nullOr (types.submodule exporterOptions);
|
||||
default = null;
|
||||
description = ''
|
||||
Specify the mailexporter configuration file to use.
|
||||
'';
|
||||
};
|
||||
telemetryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-mail-exporter}/bin/mailexporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--web.telemetry-path ${cfg.telemetryPath} \
|
||||
--config.file ${
|
||||
if cfg.configuration != null then configurationFile else (escapeShellArg cfg.configFile)
|
||||
} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.mikrotik;
|
||||
in
|
||||
{
|
||||
port = 9436;
|
||||
extraOpts = {
|
||||
configFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
Path to a mikrotik exporter configuration file. Mutually exclusive with
|
||||
<option>configuration</option> option.
|
||||
'';
|
||||
example = literalExpression "./mikrotik.yml";
|
||||
};
|
||||
|
||||
configuration = mkOption {
|
||||
type = types.nullOr types.attrs;
|
||||
default = null;
|
||||
description = ''
|
||||
Mikrotik exporter configuration as nix attribute set. Mutually exclusive with
|
||||
<option>configFile</option> option.
|
||||
|
||||
See <link xlink:href="https://github.com/nshttpd/mikrotik-exporter/blob/master/README.md"/>
|
||||
for the description of the configuration file format.
|
||||
'';
|
||||
example = literalExpression ''
|
||||
{
|
||||
devices = [
|
||||
{
|
||||
name = "my_router";
|
||||
address = "10.10.0.1";
|
||||
user = "prometheus";
|
||||
password = "changeme";
|
||||
}
|
||||
];
|
||||
features = {
|
||||
bgp = true;
|
||||
dhcp = true;
|
||||
routes = true;
|
||||
optics = true;
|
||||
};
|
||||
}
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = let
|
||||
configFile = if cfg.configFile != null
|
||||
then cfg.configFile
|
||||
else "${pkgs.writeText "mikrotik-exporter.yml" (builtins.toJSON cfg.configuration)}";
|
||||
in {
|
||||
serviceConfig = {
|
||||
# -port is misleading name, it actually accepts address too
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-mikrotik-exporter}/bin/mikrotik-exporter \
|
||||
-config-file=${escapeShellArg configFile} \
|
||||
-port=${cfg.listenAddress}:${toString cfg.port} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.minio;
|
||||
in
|
||||
{
|
||||
port = 9290;
|
||||
extraOpts = {
|
||||
minioAddress = mkOption {
|
||||
type = types.str;
|
||||
example = "https://10.0.0.1:9000";
|
||||
description = ''
|
||||
The URL of the minio server.
|
||||
Use HTTPS if Minio accepts secure connections only.
|
||||
By default this connects to the local minio server if enabled.
|
||||
'';
|
||||
};
|
||||
|
||||
minioAccessKey = mkOption {
|
||||
type = types.str;
|
||||
example = "yourMinioAccessKey";
|
||||
description = ''
|
||||
The value of the Minio access key.
|
||||
It is required in order to connect to the server.
|
||||
By default this uses the one from the local minio server if enabled
|
||||
and <literal>config.services.minio.accessKey</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
minioAccessSecret = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
The value of the Minio access secret.
|
||||
It is required in order to connect to the server.
|
||||
By default this uses the one from the local minio server if enabled
|
||||
and <literal>config.services.minio.secretKey</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
minioBucketStats = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Collect statistics about the buckets and files in buckets.
|
||||
It requires more computation, use it carefully in case of large buckets..
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-minio-exporter}/bin/minio-exporter \
|
||||
-web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
-minio.server ${cfg.minioAddress} \
|
||||
-minio.access-key ${escapeShellArg cfg.minioAccessKey} \
|
||||
-minio.access-secret ${escapeShellArg cfg.minioAccessSecret} \
|
||||
${optionalString cfg.minioBucketStats "-minio.bucket-stats"} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.modemmanager;
|
||||
in
|
||||
{
|
||||
port = 9539;
|
||||
extraOpts = {
|
||||
refreshRate = mkOption {
|
||||
type = types.str;
|
||||
default = "5s";
|
||||
description = ''
|
||||
How frequently ModemManager will refresh the extended signal quality
|
||||
information for each modem. The duration should be specified in seconds
|
||||
("5s"), minutes ("1m"), or hours ("1h").
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
# Required in order to authenticate with ModemManager via D-Bus.
|
||||
SupplementaryGroups = "networkmanager";
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-modemmanager-exporter}/bin/modemmanager_exporter \
|
||||
-addr ${cfg.listenAddress}:${toString cfg.port} \
|
||||
-rate ${cfg.refreshRate} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
RestrictAddressFamilies = [
|
||||
# Need AF_UNIX to collect data
|
||||
"AF_UNIX"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.nextcloud;
|
||||
in
|
||||
{
|
||||
port = 9205;
|
||||
extraOpts = {
|
||||
url = mkOption {
|
||||
type = types.str;
|
||||
example = "https://domain.tld";
|
||||
description = ''
|
||||
URL to the Nextcloud serverinfo page.
|
||||
Adding the path to the serverinfo API is optional, it defaults
|
||||
to <literal>/ocs/v2.php/apps/serverinfo/api/v1/info</literal>.
|
||||
'';
|
||||
};
|
||||
username = mkOption {
|
||||
type = types.str;
|
||||
default = "nextcloud-exporter";
|
||||
description = ''
|
||||
Username for connecting to Nextcloud.
|
||||
Note that this account needs to have admin privileges in Nextcloud.
|
||||
'';
|
||||
};
|
||||
passwordFile = mkOption {
|
||||
type = types.path;
|
||||
example = "/path/to/password-file";
|
||||
description = ''
|
||||
File containing the password for connecting to Nextcloud.
|
||||
Make sure that this file is readable by the exporter user.
|
||||
'';
|
||||
};
|
||||
timeout = mkOption {
|
||||
type = types.str;
|
||||
default = "5s";
|
||||
description = ''
|
||||
Timeout for getting server info document.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-nextcloud-exporter}/bin/nextcloud-exporter \
|
||||
--addr ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--username ${cfg.username} \
|
||||
--timeout ${cfg.timeout} \
|
||||
--server ${cfg.url} \
|
||||
--password ${escapeShellArg "@${cfg.passwordFile}"} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.nginx;
|
||||
in
|
||||
{
|
||||
port = 9113;
|
||||
extraOpts = {
|
||||
scrapeUri = mkOption {
|
||||
type = types.str;
|
||||
default = "http://localhost/nginx_status";
|
||||
description = ''
|
||||
Address to access the nginx status page.
|
||||
Can be enabled with services.nginx.statusPage = true.
|
||||
'';
|
||||
};
|
||||
telemetryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
sslVerify = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to perform certificate verification for https.
|
||||
'';
|
||||
};
|
||||
constLabels = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [
|
||||
"label1=value1"
|
||||
"label2=value2"
|
||||
];
|
||||
description = ''
|
||||
A list of constant labels that will be used in every metric.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = mkMerge ([{
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-nginx-exporter}/bin/nginx-prometheus-exporter \
|
||||
--nginx.scrape-uri='${cfg.scrapeUri}' \
|
||||
--nginx.ssl-verify=${boolToString cfg.sslVerify} \
|
||||
--web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
|
||||
--web.telemetry-path=${cfg.telemetryPath} \
|
||||
--prometheus.const-labels=${concatStringsSep "," cfg.constLabels} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
}] ++ [(mkIf config.services.nginx.enable {
|
||||
after = [ "nginx.service" ];
|
||||
requires = [ "nginx.service" ];
|
||||
})]);
|
||||
imports = [
|
||||
(mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ])
|
||||
(mkRemovedOptionModule [ "insecure" ] ''
|
||||
This option was replaced by 'prometheus.exporters.nginx.sslVerify'.
|
||||
'')
|
||||
({ options.warnings = options.warnings; options.assertions = options.assertions; })
|
||||
];
|
||||
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.nginxlog;
|
||||
in {
|
||||
port = 9117;
|
||||
extraOpts = {
|
||||
settings = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
description = ''
|
||||
All settings of nginxlog expressed as an Nix attrset.
|
||||
|
||||
Check the official documentation for the corresponding YAML
|
||||
settings that can all be used here: https://github.com/martin-helmich/prometheus-nginxlog-exporter
|
||||
|
||||
The `listen` object is already generated by `port`, `listenAddress` and `metricsEndpoint` and
|
||||
will be merged with the value of `settings` before writting it as JSON.
|
||||
'';
|
||||
};
|
||||
|
||||
metricsEndpoint = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
serviceOpts = let
|
||||
listenConfig = {
|
||||
listen = {
|
||||
port = cfg.port;
|
||||
address = cfg.listenAddress;
|
||||
metrics_endpoint = cfg.metricsEndpoint;
|
||||
};
|
||||
};
|
||||
completeConfig = pkgs.writeText "nginxlog-exporter.yaml" (builtins.toJSON (lib.recursiveUpdate listenConfig cfg.settings));
|
||||
in {
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-nginxlog-exporter}/bin/prometheus-nginxlog-exporter -config-file ${completeConfig}
|
||||
'';
|
||||
Restart="always";
|
||||
ProtectSystem="full";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.node;
|
||||
in
|
||||
{
|
||||
port = 9100;
|
||||
extraOpts = {
|
||||
enabledCollectors = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [ "systemd" ];
|
||||
description = ''
|
||||
Collectors to enable. The collectors listed here are enabled in addition to the default ones.
|
||||
'';
|
||||
};
|
||||
disabledCollectors = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [ "timex" ];
|
||||
description = ''
|
||||
Collectors to disable which are enabled by default.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
RuntimeDirectory = "prometheus-node-exporter";
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-node-exporter}/bin/node_exporter \
|
||||
${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
|
||||
${concatMapStringsSep " " (x: "--no-collector." + x) cfg.disabledCollectors} \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
|
||||
'';
|
||||
RestrictAddressFamilies = optionals (any (collector: (collector == "logind" || collector == "systemd")) cfg.enabledCollectors) [
|
||||
# needs access to dbus via unix sockets (logind/systemd)
|
||||
"AF_UNIX"
|
||||
] ++ optionals (any (collector: (collector == "network_route" || collector == "wifi")) cfg.enabledCollectors) [
|
||||
# needs netlink sockets for wireless collector
|
||||
"AF_NETLINK"
|
||||
];
|
||||
# The timex collector needs to access clock APIs
|
||||
ProtectClock = any (collector: collector == "timex") cfg.disabledCollectors;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.openldap;
|
||||
in {
|
||||
port = 9330;
|
||||
extraOpts = {
|
||||
ldapCredentialFile = mkOption {
|
||||
type = types.path;
|
||||
example = "/run/keys/ldap_pass";
|
||||
description = ''
|
||||
Environment file to contain the credentials to authenticate against
|
||||
<package>openldap</package>.
|
||||
|
||||
The file should look like this:
|
||||
<programlisting>
|
||||
---
|
||||
ldapUser: "cn=monitoring,cn=Monitor"
|
||||
ldapPass: "secret"
|
||||
</programlisting>
|
||||
'';
|
||||
};
|
||||
protocol = mkOption {
|
||||
default = "tcp";
|
||||
example = "udp";
|
||||
type = types.str;
|
||||
description = ''
|
||||
Which protocol to use to connect against <package>openldap</package>.
|
||||
'';
|
||||
};
|
||||
ldapAddr = mkOption {
|
||||
default = "localhost:389";
|
||||
type = types.str;
|
||||
description = ''
|
||||
Address of the <package>openldap</package>-instance.
|
||||
'';
|
||||
};
|
||||
metricsPath = mkOption {
|
||||
default = "/metrics";
|
||||
type = types.str;
|
||||
description = ''
|
||||
URL path where metrics should be exposed.
|
||||
'';
|
||||
};
|
||||
interval = mkOption {
|
||||
default = "30s";
|
||||
type = types.str;
|
||||
example = "1m";
|
||||
description = ''
|
||||
Scrape interval of the exporter.
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts.serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-openldap-exporter}/bin/openldap_exporter \
|
||||
--promAddr ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--metrPath ${cfg.metricsPath} \
|
||||
--ldapNet ${cfg.protocol} \
|
||||
--interval ${cfg.interval} \
|
||||
--config ${cfg.ldapCredentialFile} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.openvpn;
|
||||
in {
|
||||
port = 9176;
|
||||
extraOpts = {
|
||||
statusPaths = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = ''
|
||||
Paths to OpenVPN status files. Please configure the OpenVPN option
|
||||
<literal>status</literal> accordingly.
|
||||
'';
|
||||
};
|
||||
telemetryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
PrivateDevices = true;
|
||||
ProtectKernelModules = true;
|
||||
NoNewPrivileges = true;
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-openvpn-exporter}/bin/openvpn_exporter \
|
||||
-openvpn.status_paths "${concatStringsSep "," cfg.statusPaths}" \
|
||||
-web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
-web.telemetry-path ${cfg.telemetryPath}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.pihole;
|
||||
in
|
||||
{
|
||||
port = 9617;
|
||||
extraOpts = {
|
||||
apiToken = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
example = "580a770cb40511eb85290242ac130003580a770cb40511eb85290242ac130003";
|
||||
description = ''
|
||||
pi-hole API token which can be used instead of a password
|
||||
'';
|
||||
};
|
||||
interval = mkOption {
|
||||
type = types.str;
|
||||
default = "10s";
|
||||
example = "30s";
|
||||
description = ''
|
||||
How often to scrape new data
|
||||
'';
|
||||
};
|
||||
password = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
example = "password";
|
||||
description = ''
|
||||
The password to login into pihole. An api token can be used instead.
|
||||
'';
|
||||
};
|
||||
piholeHostname = mkOption {
|
||||
type = types.str;
|
||||
default = "pihole";
|
||||
example = "127.0.0.1";
|
||||
description = ''
|
||||
Hostname or address where to find the pihole webinterface
|
||||
'';
|
||||
};
|
||||
piholePort = mkOption {
|
||||
type = types.port;
|
||||
default = 80;
|
||||
example = 443;
|
||||
description = ''
|
||||
The port pihole webinterface is reachable on
|
||||
'';
|
||||
};
|
||||
protocol = mkOption {
|
||||
type = types.enum [ "http" "https" ];
|
||||
default = "http";
|
||||
example = "https";
|
||||
description = ''
|
||||
The protocol which is used to connect to pihole
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.bash}/bin/bash -c "${pkgs.prometheus-pihole-exporter}/bin/pihole-exporter \
|
||||
-interval ${cfg.interval} \
|
||||
${optionalString (cfg.apiToken != "") "-pihole_api_token ${cfg.apiToken}"} \
|
||||
-pihole_hostname ${cfg.piholeHostname} \
|
||||
${optionalString (cfg.password != "") "-pihole_password ${cfg.password}"} \
|
||||
-pihole_port ${toString cfg.piholePort} \
|
||||
-pihole_protocol ${cfg.protocol} \
|
||||
-port ${toString cfg.port}"
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.postfix;
|
||||
in
|
||||
{
|
||||
port = 9154;
|
||||
extraOpts = {
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
Group under which the postfix exporter shall be run.
|
||||
It should match the group that is allowed to access the
|
||||
<literal>showq</literal> socket in the <literal>queue/public/</literal> directory.
|
||||
Defaults to <literal>services.postfix.setgidGroup</literal> when postfix is enabled.
|
||||
'';
|
||||
};
|
||||
telemetryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
logfilePath = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/log/postfix_exporter_input.log";
|
||||
example = "/var/log/mail.log";
|
||||
description = ''
|
||||
Path where Postfix writes log entries.
|
||||
This file will be truncated by this exporter!
|
||||
'';
|
||||
};
|
||||
showqPath = mkOption {
|
||||
type = types.path;
|
||||
default = "/var/lib/postfix/queue/public/showq";
|
||||
example = "/var/spool/postfix/public/showq";
|
||||
description = ''
|
||||
Path where Postfix places its showq socket.
|
||||
'';
|
||||
};
|
||||
systemd = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Whether to enable reading metrics from the systemd journal instead of from a logfile
|
||||
'';
|
||||
};
|
||||
unit = mkOption {
|
||||
type = types.str;
|
||||
default = "postfix.service";
|
||||
description = ''
|
||||
Name of the postfix systemd unit.
|
||||
'';
|
||||
};
|
||||
slice = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Name of the postfix systemd slice.
|
||||
This overrides the <option>systemd.unit</option>.
|
||||
'';
|
||||
};
|
||||
journalPath = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
description = ''
|
||||
Path to the systemd journal.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
# By default, each prometheus exporter only gets AF_INET & AF_INET6,
|
||||
# but AF_UNIX is needed to read from the `showq`-socket.
|
||||
RestrictAddressFamilies = [ "AF_UNIX" ];
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--web.telemetry-path ${cfg.telemetryPath} \
|
||||
--postfix.showq_path ${escapeShellArg cfg.showqPath} \
|
||||
${concatStringsSep " \\\n " (cfg.extraFlags
|
||||
++ optional cfg.systemd.enable "--systemd.enable"
|
||||
++ optional cfg.systemd.enable (if cfg.systemd.slice != null
|
||||
then "--systemd.slice ${cfg.systemd.slice}"
|
||||
else "--systemd.unit ${cfg.systemd.unit}")
|
||||
++ optional (cfg.systemd.enable && (cfg.systemd.journalPath != null))
|
||||
"--systemd.journal_path ${escapeShellArg cfg.systemd.journalPath}"
|
||||
++ optional (!cfg.systemd.enable) "--postfix.logfile_path ${escapeShellArg cfg.logfilePath}")}
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.postgres;
|
||||
in
|
||||
{
|
||||
port = 9187;
|
||||
extraOpts = {
|
||||
telemetryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/metrics";
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
'';
|
||||
};
|
||||
dataSourceName = mkOption {
|
||||
type = types.str;
|
||||
default = "user=postgres database=postgres host=/run/postgresql sslmode=disable";
|
||||
example = "postgresql://username:password@localhost:5432/postgres?sslmode=disable";
|
||||
description = ''
|
||||
Accepts PostgreSQL URI form and key=value form arguments.
|
||||
'';
|
||||
};
|
||||
runAsLocalSuperUser = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to run the exporter as the local 'postgres' super user.
|
||||
'';
|
||||
};
|
||||
|
||||
# TODO perhaps LoadCredential would be more appropriate
|
||||
environmentFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
example = "/root/prometheus-postgres-exporter.env";
|
||||
description = ''
|
||||
Environment file as defined in <citerefentry>
|
||||
<refentrytitle>systemd.exec</refentrytitle><manvolnum>5</manvolnum>
|
||||
</citerefentry>.
|
||||
|
||||
Secrets may be passed to the service without adding them to the
|
||||
world-readable Nix store, by specifying placeholder variables as
|
||||
the option value in Nix and setting these variables accordingly in the
|
||||
environment file.
|
||||
|
||||
Environment variables from this file will be interpolated into the
|
||||
config file using envsubst with this syntax:
|
||||
<literal>$ENVIRONMENT ''${VARIABLE}</literal>
|
||||
|
||||
The main use is to set the DATA_SOURCE_NAME that contains the
|
||||
postgres password
|
||||
|
||||
note that contents from this file will override dataSourceName
|
||||
if you have set it from nix.
|
||||
|
||||
<programlisting>
|
||||
# Content of the environment file
|
||||
DATA_SOURCE_NAME=postgresql://username:password@localhost:5432/postgres?sslmode=disable
|
||||
</programlisting>
|
||||
|
||||
Note that this file needs to be available on the host on which
|
||||
this exporter is running.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
serviceOpts = {
|
||||
environment.DATA_SOURCE_NAME = cfg.dataSourceName;
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
User = mkIf cfg.runAsLocalSuperUser (mkForce "postgres");
|
||||
EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--web.telemetry-path ${cfg.telemetryPath} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
RestrictAddressFamilies = [
|
||||
# Need AF_UNIX to collect data
|
||||
"AF_UNIX"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.process;
|
||||
configFile = pkgs.writeText "process-exporter.yaml" (builtins.toJSON cfg.settings);
|
||||
in
|
||||
{
|
||||
port = 9256;
|
||||
extraOpts = {
|
||||
settings.process_names = mkOption {
|
||||
type = types.listOf types.anything;
|
||||
default = [];
|
||||
example = literalExpression ''
|
||||
[
|
||||
# Remove nix store path from process name
|
||||
{ name = "{{.Matches.Wrapped}} {{ .Matches.Args }}"; cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ]; }
|
||||
]
|
||||
'';
|
||||
description = ''
|
||||
All settings expressed as an Nix attrset.
|
||||
|
||||
Check the official documentation for the corresponding YAML
|
||||
settings that can all be used here: <link xlink:href="https://github.com/ncabatoff/process-exporter" />
|
||||
'';
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
DynamicUser = false;
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-process-exporter}/bin/process-exporter \
|
||||
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||||
--config.path ${configFile} \
|
||||
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||
'';
|
||||
NoNewPrivileges = true;
|
||||
ProtectHome = true;
|
||||
ProtectSystem = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectControlGroups = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
118
nixos/modules/services/monitoring/prometheus/exporters/pve.nix
Normal file
118
nixos/modules/services/monitoring/prometheus/exporters/pve.nix
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
{ config, lib, pkgs, options }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.prometheus.exporters.pve;
|
||||
|
||||
# pve exporter requires a config file so create an empty one if configFile is not provided
|
||||
emptyConfigFile = pkgs.writeTextFile {
|
||||
name = "pve.yml";
|
||||
text = "default:";
|
||||
};
|
||||
|
||||
computedConfigFile = "${if cfg.configFile == null then emptyConfigFile else cfg.configFile}";
|
||||
in
|
||||
{
|
||||
port = 9221;
|
||||
extraOpts = {
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.prometheus-pve-exporter;
|
||||
defaultText = literalExpression "pkgs.prometheus-pve-exporter";
|
||||
example = literalExpression "pkgs.prometheus-pve-exporter";
|
||||
description = ''
|
||||
The package to use for prometheus-pve-exporter
|
||||
'';
|
||||
};
|
||||
|
||||
environmentFile = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
example = "/etc/prometheus-pve-exporter/pve.env";
|
||||
description = ''
|
||||
Path to the service's environment file. This path can either be a computed path in /nix/store or a path in the local filesystem.
|
||||
|
||||
The environment file should NOT be stored in /nix/store as it contains passwords and/or keys in plain text.
|
||||
|
||||
Environment reference: https://github.com/prometheus-pve/prometheus-pve-exporter#authentication
|
||||
'';
|
||||
};
|
||||
|
||||
configFile = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
example = "/etc/prometheus-pve-exporter/pve.yml";
|
||||
description = ''
|
||||
Path to the service's config file. This path can either be a computed path in /nix/store or a path in the local filesystem.
|
||||
|
||||
The config file should NOT be stored in /nix/store as it will contain passwords and/or keys in plain text.
|
||||
|
||||
If both configFile and environmentFile are provided, the configFile option will be ignored.
|
||||
|
||||
Configuration reference: https://github.com/prometheus-pve/prometheus-pve-exporter/#authentication
|
||||
'';
|
||||
};
|
||||
|
||||
collectors = {
|
||||
status = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Collect Node/VM/CT status
|
||||
'';
|
||||
};
|
||||
version = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Collect PVE version info
|
||||
'';
|
||||
};
|
||||
node = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Collect PVE node info
|
||||
'';
|
||||
};
|
||||
cluster = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Collect PVE cluster info
|
||||
'';
|
||||
};
|
||||
resources = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Collect PVE resources info
|
||||
'';
|
||||
};
|
||||
config = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
Collect PVE onboot status
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${cfg.package}/bin/pve_exporter \
|
||||
--${if cfg.collectors.status == true then "" else "no-"}collector.status \
|
||||
--${if cfg.collectors.version == true then "" else "no-"}collector.version \
|
||||
--${if cfg.collectors.node == true then "" else "no-"}collector.node \
|
||||
--${if cfg.collectors.cluster == true then "" else "no-"}collector.cluster \
|
||||
--${if cfg.collectors.resources == true then "" else "no-"}collector.resources \
|
||||
--${if cfg.collectors.config == true then "" else "no-"}collector.config \
|
||||
${computedConfigFile} \
|
||||
${toString cfg.port} ${cfg.listenAddress}
|
||||
'';
|
||||
} // optionalAttrs (cfg.environmentFile != null) {
|
||||
EnvironmentFile = cfg.environmentFile;
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# Prometheus exporter submodule: py-air-control-exporter.
# Declares the default port, exporter-specific options, and the systemd
# service configuration used by the shared prometheus-exporters framework.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.py-air-control;

  # Runtime data directory; created by systemd via StateDirectory below.
  workingDir = "/var/lib/${cfg.stateDir}";

in
{
  port = 9896;
  extraOpts = {
    deviceHostname = mkOption {
      type = types.str;
      example = "192.168.1.123";
      description = ''
        The hostname of the air purification device from which to scrape the metrics.
      '';
    };
    protocol = mkOption {
      type = types.str;
      default = "http";
      description = ''
        The protocol to use when communicating with the air purification device.
        Available: [http, coap, plain_coap]
      '';
    };
    stateDir = mkOption {
      type = types.str;
      default = "prometheus-py-air-control-exporter";
      description = ''
        Directory below <literal>/var/lib</literal> to store runtime data.
        This directory will be created automatically using systemd's StateDirectory mechanism.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      # NOTE(review): DynamicUser is explicitly disabled here — presumably so
      # the state directory keeps a stable owner across restarts; confirm.
      DynamicUser = false;
      StateDirectory = cfg.stateDir;
      WorkingDirectory = workingDir;
      ExecStart = ''
        ${pkgs.python3Packages.py-air-control-exporter}/bin/py-air-control-exporter \
          --host ${cfg.deviceHostname} \
          --protocol ${cfg.protocol} \
          --listen-port ${toString cfg.port} \
          --listen-address ${cfg.listenAddress}
      '';
      # Point $HOME at the writable state directory for the exporter process.
      Environment = [ "HOME=${workingDir}" ];
    };
  };
}
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
# Prometheus exporter submodule: redis_exporter.
# Only the default port and the service ExecStart are defined; listen
# address, port and extraFlags come from the shared exporter options.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.redis;
in
{
  port = 9121;
  serviceOpts = {
    serviceConfig = {
      # redis_exporter uses single-dash (Go flag style) options.
      ExecStart = ''
        ${pkgs.prometheus-redis-exporter}/bin/redis_exporter \
          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,97 @@
|
|||
# Prometheus exporter submodule: rspamd metrics via the generic json_exporter.
# A json_exporter configuration is generated at build time that maps JSONPath
# expressions over rspamd's /stat output to Prometheus metric names.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.rspamd;

  # Serialize a generated configuration attrset to a YAML(-compatible JSON) file.
  mkFile = conf:
    pkgs.writeText "rspamd-exporter-config.yml" (builtins.toJSON conf);

  # Build the json_exporter config: one scalar metric per listed JSONPath,
  # plus an object-typed metric for the per-statfile values.
  generateConfig = extraLabels: {
    metrics = (map (path: {
      # Sanitize the JSONPath into a valid Prometheus metric name suffix.
      name = "rspamd_${replaceStrings [ "[" "." " " "]" "\\" "'" ] [ "_" "_" "_" "" "" "" ] path}";
      path = "{ .${path} }";
      labels = extraLabels;
    }) [
      "actions['add\\ header']"
      "actions['no\\ action']"
      "actions['rewrite\\ subject']"
      "actions['soft\\ reject']"
      "actions.greylist"
      "actions.reject"
      "bytes_allocated"
      "chunks_allocated"
      "chunks_freed"
      "chunks_oversized"
      "connections"
      "control_connections"
      "ham_count"
      "learned"
      "pools_allocated"
      "pools_freed"
      "read_only"
      "scanned"
      "shared_chunks_allocated"
      "spam_count"
      "total_learns"
    ]) ++ [{
      name = "rspamd_statfiles";
      type = "object";
      path = "{.statfiles[*]}";
      # Per-statfile labels are merged with the user-supplied extra labels.
      labels = recursiveUpdate {
        symbol = "{.symbol}";
        type = "{.type}";
      } extraLabels;
      values = {
        revision = "{.revision}";
        size = "{.size}";
        total = "{.total}";
        used = "{.used}";
        languages = "{.languages}";
        users = "{.users}";
      };
    }];
  };
in
{
  port = 7980;
  extraOpts = {
    extraLabels = mkOption {
      type = types.attrsOf types.str;
      default = {
        host = config.networking.hostName;
      };
      defaultText = literalExpression "{ host = config.networking.hostName; }";
      example = literalExpression ''
        {
          host = config.networking.hostName;
          custom_label = "some_value";
        }
      '';
      description = "Set of labels added to each metric.";
    };
  };
  serviceOpts.serviceConfig.ExecStart = ''
    ${pkgs.prometheus-json-exporter}/bin/json_exporter \
      --config.file ${mkFile (generateConfig cfg.extraLabels)} \
      --web.listen-address "${cfg.listenAddress}:${toString cfg.port}" \
      ${concatStringsSep " \\\n " cfg.extraFlags}
  '';

  imports = [
    # The exporter used to scrape a fixed URL; the target is now passed by
    # Prometheus per scrape, so the old option is rejected with guidance.
    (mkRemovedOptionModule [ "url" ] ''
      This option was removed. The URL of the rspamd metrics endpoint
      must now be provided to the exporter by prometheus via the url
      parameter `target'.

      In prometheus a scrape URL would look like this:

        http://some.rspamd-exporter.host:7980/probe?target=http://some.rspamd.host:11334/stat

      For more information, take a look at the official documentation
      (https://github.com/prometheus-community/json_exporter) of the json_exporter.
    '')
    # Forward warnings/assertions options so mkRemovedOptionModule can emit them.
    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
  ];
}
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
# Prometheus exporter submodule: rtl_433 (software-defined radio sensor
# receiver). The exporter spawns rtl_433 as a subprocess and exposes matched
# channel/ID readings as Prometheus metrics.
{ config, lib, pkgs, options }:

let
  cfg = config.services.prometheus.exporters.rtl_433;
in
{
  port = 9550;

  extraOpts = let
    # Shared submodule shape for channel/id matchers; `field` is the
    # attribute name that differs between the two matcher kinds.
    mkMatcherOptionType = field: description: with lib.types;
      listOf (submodule {
        options = {
          name = lib.mkOption {
            type = str;
            description = "Name to match.";
          };
          "${field}" = lib.mkOption {
            type = int;
            inherit description;
          };
          location = lib.mkOption {
            type = str;
            description = "Location to match.";
          };
        };
      });
  in
  {
    rtl433Flags = lib.mkOption {
      type = lib.types.str;
      default = "-C si";
      example = "-C si -R 19";
      description = ''
        Flags passed verbatim to rtl_433 binary.
        Having <literal>-C si</literal> (the default) is recommended since only Celsius temperatures are parsed.
      '';
    };
    channels = lib.mkOption {
      type = mkMatcherOptionType "channel" "Channel to match.";
      default = [];
      example = [
        { name = "Acurite"; channel = 6543; location = "Kitchen"; }
      ];
      description = ''
        List of channel matchers to export.
      '';
    };
    ids = lib.mkOption {
      type = mkMatcherOptionType "id" "ID to match.";
      default = [];
      example = [
        { name = "Nexus"; id = 1; location = "Bedroom"; }
      ];
      description = ''
        List of ID matchers to export.
      '';
    };
  };

  serviceOpts = {
    serviceConfig = {
      # rtl-sdr udev rules make supported USB devices +rw by plugdev.
      SupplementaryGroups = "plugdev";
      # rtl_433 needs rw access to the USB radio.
      PrivateDevices = lib.mkForce false;
      DeviceAllow = lib.mkForce "char-usb_device rw";
      RestrictAddressFamilies = [ "AF_NETLINK" ];

      # Translate the declared matchers into exporter CLI flags.
      ExecStart = let
        matchers = (map (m:
          "--channel_matcher '${m.name},${toString m.channel},${m.location}'"
        ) cfg.channels) ++ (map (m:
          "--id_matcher '${m.name},${toString m.id},${m.location}'"
        ) cfg.ids); in ''
        ${pkgs.prometheus-rtl_433-exporter}/bin/rtl_433_prometheus \
          -listen ${cfg.listenAddress}:${toString cfg.port} \
          -subprocess "${pkgs.rtl_433}/bin/rtl_433 -F json ${cfg.rtl433Flags}" \
          ${lib.concatStringsSep " \\\n " matchers} \
          ${lib.concatStringsSep " \\\n " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
# Prometheus exporter submodule: script_exporter.
# Runs user-declared shell scripts on demand and exposes their results as
# metrics. The declared settings are serialized to the exporter's YAML
# config (YAML is a superset of JSON, so toJSON output is valid).
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.script;
  # JSON is valid YAML, so builtins.toJSON can produce the config file.
  configFile = pkgs.writeText "script-exporter.yaml" (builtins.toJSON cfg.settings);
in
{
  port = 9172;
  extraOpts = {
    settings.scripts = mkOption {
      type = with types; listOf (submodule {
        options = {
          name = mkOption {
            type = str;
            example = "sleep";
            description = "Name of the script.";
          };
          script = mkOption {
            type = str;
            example = "sleep 5";
            description = "Shell script to execute when metrics are requested.";
          };
          timeout = mkOption {
            type = nullOr int;
            default = null;
            example = 60;
            description = "Optional timeout for the script in seconds.";
          };
        };
      });
      example = literalExpression ''
        {
          scripts = [
            { name = "sleep"; script = "sleep 5"; }
          ];
        }
      '';
      # Fixed grammar: "as an Nix attrset" -> "as a Nix attrset".
      description = ''
        All settings expressed as a Nix attrset.

        Check the official documentation for the corresponding YAML
        settings that can all be used here: <link xlink:href="https://github.com/adhocteam/script_exporter#sample-configuration" />
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-script-exporter}/bin/script_exporter \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          --config.file ${configFile} \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
      # Sandboxing: the scripts run with a read-only view of the system.
      NoNewPrivileges = true;
      ProtectHome = true;
      ProtectSystem = "strict";
      ProtectKernelTunables = true;
      ProtectKernelModules = true;
      ProtectControlGroups = true;
    };
  };
}
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
# Prometheus exporter submodule: smartctl_exporter.
# Generates a YAML configuration pointing the exporter at smartctl and the
# monitored devices, and grants the capabilities/device access it needs.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.smartctl;
  format = pkgs.formats.yaml {};
  configFile = format.generate "smartctl-exporter.yml" {
    smartctl_exporter = {
      bind_to = "${cfg.listenAddress}:${toString cfg.port}";
      url_path = "/metrics";
      smartctl_location = "${pkgs.smartmontools}/bin/smartctl";
      collect_not_more_than_period = cfg.maxInterval;
      devices = cfg.devices;
    };
  };
in {
  port = 9633;

  extraOpts = {
    devices = mkOption {
      type = types.listOf types.str;
      default = [];
      # Fixed example: Nix lists are whitespace-separated — the previous
      # example showed invalid syntax ([ "..." , "..." ];).
      example = literalExpression ''
        [ "/dev/sda" "/dev/nvme0n1" ]
      '';
      description = ''
        Paths to the disks that will be monitored. Will autodiscover
        all disks if none given.
      '';
    };
    maxInterval = mkOption {
      type = types.str;
      default = "60s";
      example = "2m";
      description = ''
        Interval that limits how often a disk can be queried.
      '';
    };
  };

  serviceOpts = {
    serviceConfig = {
      # Raw device access is required to issue SMART commands.
      AmbientCapabilities = [
        "CAP_SYS_RAWIO"
        "CAP_SYS_ADMIN"
      ];
      CapabilityBoundingSet = [
        "CAP_SYS_RAWIO"
        "CAP_SYS_ADMIN"
      ];
      DevicePolicy = "closed";
      # Allow only the configured devices, or common disk classes when
      # autodiscovering. mkOverride 100 lets users still override this.
      DeviceAllow = lib.mkOverride 100 (
        if cfg.devices != [] then
          cfg.devices
        else [
          "block-blkext rw"
          "block-sd rw"
          "char-nvme rw"
        ]
      );
      ExecStart = ''
        ${pkgs.prometheus-smartctl-exporter}/bin/smartctl_exporter -config ${configFile}
      '';
      PrivateDevices = lib.mkForce false;
      ProtectProc = "invisible";
      ProcSubset = "pid";
      SupplementaryGroups = [ "disk" ];
      SystemCallFilter = [
        "@system-service"
        "~@privileged @resources"
      ];
    };
  };
}
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
# Prometheus exporter submodule: smokeping_prober.
# Continuously pings a list of hosts and exposes latency histograms.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.smokeping;
  # Custom option type validating Go time.ParseDuration strings (e.g. "1s", "1.5m").
  goDuration = types.mkOptionType {
    name = "goDuration";
    description = "Go duration (https://golang.org/pkg/time/#ParseDuration)";
    check = x: types.str.check x && builtins.match "(-?[0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+" x != null;
    inherit (types.str) merge;
  };
in
{
  port = 9374;
  extraOpts = {
    telemetryPath = mkOption {
      type = types.str;
      default = "/metrics";
      description = ''
        Path under which to expose metrics.
      '';
    };
    pingInterval = mkOption {
      type = goDuration;
      default = "1s";
      description = ''
        Interval between pings.
      '';
    };
    buckets = mkOption {
      type = types.commas;
      default = "5e-05,0.0001,0.0002,0.0004,0.0008,0.0016,0.0032,0.0064,0.0128,0.0256,0.0512,0.1024,0.2048,0.4096,0.8192,1.6384,3.2768,6.5536,13.1072,26.2144";
      description = ''
        List of buckets to use for the response duration histogram.
      '';
    };
    hosts = mkOption {
      type = with types; listOf str;
      description = ''
        List of endpoints to probe.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      # CAP_NET_RAW allows sending ICMP without running as root.
      AmbientCapabilities = [ "CAP_NET_RAW" ];
      CapabilityBoundingSet = [ "CAP_NET_RAW" ];
      ExecStart = ''
        ${pkgs.prometheus-smokeping-prober}/bin/smokeping_prober \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          --web.telemetry-path ${cfg.telemetryPath} \
          --buckets ${cfg.buckets} \
          --ping.interval ${cfg.pingInterval} \
          --privileged \
          ${concatStringsSep " \\\n " cfg.extraFlags} \
          ${concatStringsSep " " cfg.hosts}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
# Prometheus exporter submodule: snmp_exporter.
# The exporter configuration can be supplied either as a pre-existing file
# (configurationPath) or as a Nix attrset (configuration); the two options
# are mutually exclusive per their descriptions.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.snmp;
in
{
  port = 9116;
  extraOpts = {
    configurationPath = mkOption {
      type = types.nullOr types.path;
      default = null;
      description = ''
        Path to a snmp exporter configuration file. Mutually exclusive with 'configuration' option.
      '';
      example = literalExpression "./snmp.yml";
    };

    configuration = mkOption {
      type = types.nullOr types.attrs;
      default = null;
      description = ''
        Snmp exporter configuration as nix attribute set. Mutually exclusive with 'configurationPath' option.
      '';
      example = {
        "default" = {
          "version" = 2;
          "auth" = {
            "community" = "public";
          };
        };
      };
    };

    logFormat = mkOption {
      type = types.enum ["logfmt" "json"];
      default = "logfmt";
      description = ''
        Output format of log messages.
      '';
    };

    logLevel = mkOption {
      type = types.enum ["debug" "info" "warn" "error"];
      default = "info";
      description = ''
        Only log messages with the given severity or above.
      '';
    };
  };
  serviceOpts = let
    # Prefer the explicit file; otherwise serialize the attrset (JSON is
    # valid YAML, so toJSON output is accepted by the exporter).
    configFile = if cfg.configurationPath != null
                 then cfg.configurationPath
                 else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
  in {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-snmp-exporter}/bin/snmp_exporter \
          --config.file=${escapeShellArg configFile} \
          --log.format=${escapeShellArg cfg.logFormat} \
          --log.level=${cfg.logLevel} \
          --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
    };
  };
}
|
||||
108
nixos/modules/services/monitoring/prometheus/exporters/sql.nix
Normal file
108
nixos/modules/services/monitoring/prometheus/exporters/sql.nix
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
# Prometheus exporter submodule: sql_exporter.
# Runs declared SQL queries against configured databases on an interval and
# exposes the results as metrics. Configuration comes either from a file
# (configFile) or from a structured Nix attrset (configuration).
{ config, lib, pkgs, options }:
with lib;
let
  cfg = config.services.prometheus.exporters.sql;
  cfgOptions = {
    options = with types; {
      jobs = mkOption {
        type = attrsOf (submodule jobOptions);
        default = { };
        description = "An attrset of metrics scraping jobs to run.";
      };
    };
  };
  jobOptions = {
    options = with types; {
      interval = mkOption {
        type = str;
        description = ''
          How often to run this job, specified in
          <link xlink:href="https://golang.org/pkg/time/#ParseDuration">Go duration</link> format.
        '';
      };
      connections = mkOption {
        type = listOf str;
        description = "A list of connection strings of the SQL servers to scrape metrics from";
      };
      startupSql = mkOption {
        type = listOf str;
        default = [];
        description = "A list of SQL statements to execute once after making a connection.";
      };
      queries = mkOption {
        type = attrsOf (submodule queryOptions);
        description = "SQL queries to run.";
      };
    };
  };
  queryOptions = {
    options = with types; {
      help = mkOption {
        type = nullOr str;
        default = null;
        description = "A human-readable description of this metric.";
      };
      labels = mkOption {
        type = listOf str;
        default = [ ];
        description = "A set of columns that will be used as Prometheus labels.";
      };
      query = mkOption {
        type = str;
        description = "The SQL query to run.";
      };
      values = mkOption {
        type = listOf str;
        description = "A set of columns that will be used as values of this metric.";
      };
    };
  };

  # Resolve the configuration file: an explicit file wins; otherwise the
  # structured configuration is serialized. Previously, leaving both options
  # null failed with an opaque "attribute 'jobs' of null" evaluation error;
  # now a clear message is raised instead.
  configFile =
    if cfg.configFile != null
    then cfg.configFile
    else if cfg.configuration != null
    then
      let
        # Fold attrset keys into `name` fields, as the exporter expects lists.
        nameInline = mapAttrsToList (k: v: v // { name = k; });
        # The exporter's YAML key is snake_case; rename startupSql accordingly.
        renameStartupSql = j: removeAttrs (j // { startup_sql = j.startupSql; }) [ "startupSql" ];
        configuration = {
          jobs = map renameStartupSql
            (nameInline (mapAttrs (k: v: (v // { queries = nameInline v.queries; })) cfg.configuration.jobs));
        };
      in
        builtins.toFile "config.yaml" (builtins.toJSON configuration)
    else
      throw "services.prometheus.exporters.sql: one of 'configFile' or 'configuration' must be set";
in
{
  extraOpts = {
    configFile = mkOption {
      type = with types; nullOr path;
      default = null;
      description = ''
        Path to configuration file.
      '';
    };
    configuration = mkOption {
      type = with types; nullOr (submodule cfgOptions);
      default = null;
      description = ''
        Exporter configuration as nix attribute set. Mutually exclusive with 'configFile' option.
      '';
    };
  };

  port = 9237;
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-sql-exporter}/bin/sql_exporter \
          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          -config.file ${configFile} \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
      RestrictAddressFamilies = [
        # Need AF_UNIX to collect data
        "AF_UNIX"
      ];
    };
  };
}
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
# Prometheus exporter submodule: surfboard_exporter.
# Scrapes signal statistics from a Surfboard cable modem's web interface.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.surfboard;
in
{
  port = 9239;
  extraOpts = {
    modemAddress = mkOption {
      type = types.str;
      default = "192.168.100.1";
      description = ''
        The hostname or IP of the cable modem.
      '';
    };
  };
  serviceOpts = {
    description = "Prometheus exporter for surfboard cable modem";
    unitConfig.Documentation = "https://github.com/ipstatic/surfboard_exporter";
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-surfboard-exporter}/bin/surfboard_exporter \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          --modem-address ${cfg.modemAddress} \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
# Prometheus exporter submodule: systemd_exporter.
# Exposes systemd unit/service state metrics.
{ config, pkgs, lib, ... }:

with lib;

let cfg = config.services.prometheus.exporters.systemd;

in {
  port = 9558;

  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-systemd-exporter}/bin/systemd_exporter \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
      '';
      RestrictAddressFamilies = [
        # Need AF_UNIX to collect data
        "AF_UNIX"
      ];
    };
  };
}
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
# Prometheus exporter submodule: prometheus-tor-exporter.
# Connects to a Tor daemon's control port and exposes relay statistics.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.tor;
in
{
  port = 9130;
  extraOpts = {
    torControlAddress = mkOption {
      type = types.str;
      default = "127.0.0.1";
      description = ''
        Tor control IP address or hostname.
      '';
    };

    torControlPort = mkOption {
      type = types.int;
      default = 9051;
      description = ''
        Tor control port.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-tor-exporter}/bin/prometheus-tor-exporter \
          -b ${cfg.listenAddress} \
          -p ${toString cfg.port} \
          -a ${cfg.torControlAddress} \
          -c ${toString cfg.torControlPort} \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
    };

    # CPython requires a process to either have $HOME defined or run as a UID
    # defined in /etc/passwd. The latter is false with DynamicUser, so define a
    # dummy $HOME. https://bugs.python.org/issue10496
    environment = { HOME = "/var/empty"; };
  };
}
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
# Prometheus exporter submodule: unbound-telemetry.
# Collects statistics from an Unbound DNS resolver via its control socket
# (uds) or TCP control interface.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.unbound;
in
{
  port = 9167;
  extraOpts = {
    fetchType = mkOption {
      # TODO: add shm when upstream implemented it
      type = types.enum [ "tcp" "uds" ];
      default = "uds";
      description = ''
        Which methods the exporter uses to get the information from unbound.
      '';
    };

    telemetryPath = mkOption {
      type = types.str;
      default = "/metrics";
      description = ''
        Path under which to expose metrics.
      '';
    };

    controlInterface = mkOption {
      type = types.nullOr types.str;
      default = null;
      example = "/run/unbound/unbound.socket";
      description = ''
        Path to the unbound socket for uds mode or the control interface port for tcp mode.

        Example:
          uds-mode: /run/unbound/unbound.socket
          tcp-mode: 127.0.0.1:8953
      '';
    };
  };

  serviceOpts = mkMerge ([{
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-unbound-exporter}/bin/unbound-telemetry \
          ${cfg.fetchType} \
          --bind ${cfg.listenAddress}:${toString cfg.port} \
          --path ${cfg.telemetryPath} \
          ${optionalString (cfg.controlInterface != null) "--control-interface ${cfg.controlInterface}"} \
          ${toString cfg.extraFlags}
      '';
      RestrictAddressFamilies = [
        # Need AF_UNIX to collect data
        "AF_UNIX"
      ];
    };
  }] ++ [
    # When unbound itself is managed on this host, start after it and require it.
    (mkIf config.services.unbound.enable {
      after = [ "unbound.service" ];
      requires = [ "unbound.service" ];
    })
  ]);
}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
# Prometheus exporter submodule: unifi-poller.
# Generates a unifi-poller JSON config with InfluxDB output disabled and
# the built-in Prometheus endpoint enabled.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.unifi-poller;

  configFile = pkgs.writeText "prometheus-unifi-poller-exporter.json" (generators.toJSON {} {
    poller = { inherit (cfg.log) debug quiet; };
    unifi = { inherit (cfg) controllers; };
    # Only the Prometheus output is used by this exporter module.
    influxdb.disable = true;
    prometheus = {
      http_listen = "${cfg.listenAddress}:${toString cfg.port}";
      report_errors = cfg.log.prometheusErrors;
    };
  });

in {
  port = 9130;

  extraOpts = {
    # Reuse the controllers option declared by the unifi-poller service module.
    inherit (options.services.unifi-poller.unifi) controllers;
    log = {
      debug = mkEnableOption "debug logging including line numbers, high resolution timestamps, per-device logs.";
      quiet = mkEnableOption "startup and error logs only.";
      prometheusErrors = mkEnableOption "emitting errors to prometheus.";
    };
  };

  serviceOpts.serviceConfig = {
    ExecStart = "${pkgs.unifi-poller}/bin/unifi-poller --config ${configFile}";
    DynamicUser = false;
  };
}
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
# Prometheus exporter submodule: unifi_exporter.
# Scrapes device/client statistics from a UniFi Controller's API.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.unifi;
in
{
  port = 9130;
  extraOpts = {
    unifiAddress = mkOption {
      type = types.str;
      example = "https://10.0.0.1:8443";
      description = ''
        URL of the UniFi Controller API.
      '';
    };

    unifiInsecure = mkOption {
      type = types.bool;
      default = false;
      description = ''
        If enabled skip the verification of the TLS certificate of the UniFi Controller API.
        Use with caution.
      '';
    };

    unifiUsername = mkOption {
      type = types.str;
      example = "ReadOnlyUser";
      description = ''
        username for authentication against UniFi Controller API.
      '';
    };

    # NOTE(review): the password ends up in the Nix store and on the service
    # command line; consider an EnvironmentFile-based credential instead.
    unifiPassword = mkOption {
      type = types.str;
      description = ''
        Password for authentication against UniFi Controller API.
      '';
    };

    unifiTimeout = mkOption {
      type = types.str;
      default = "5s";
      example = "2m";
      description = ''
        Timeout including unit for UniFi Controller API requests.
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-unifi-exporter}/bin/unifi_exporter \
          -telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \
          -unifi.addr ${cfg.unifiAddress} \
          -unifi.username ${escapeShellArg cfg.unifiUsername} \
          -unifi.password ${escapeShellArg cfg.unifiPassword} \
          -unifi.timeout ${cfg.unifiTimeout} \
          ${optionalString cfg.unifiInsecure "-unifi.insecure" } \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,89 @@
|
|||
# Prometheus exporter submodule: prometheus_varnish_exporter.
# Wraps varnishstat; boolean options are translated into CLI flags.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.varnish;
in
{
  port = 9131;
  extraOpts = {
    noExit = mkOption {
      type = types.bool;
      default = false;
      description = ''
        Do not exit server on Varnish scrape errors.
      '';
    };
    withGoMetrics = mkOption {
      type = types.bool;
      default = false;
      description = ''
        Export go runtime and http handler metrics.
      '';
    };
    verbose = mkOption {
      type = types.bool;
      default = false;
      description = ''
        Enable verbose logging.
      '';
    };
    raw = mkOption {
      type = types.bool;
      default = false;
      description = ''
        Enable raw stdout logging without timestamps.
      '';
    };
    varnishStatPath = mkOption {
      type = types.str;
      default = "varnishstat";
      description = ''
        Path to varnishstat.
      '';
    };
    instance = mkOption {
      type = types.nullOr types.str;
      default = config.services.varnish.stateDir;
      defaultText = lib.literalExpression "config.services.varnish.stateDir";
      description = ''
        varnishstat -n value.
      '';
    };
    healthPath = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = ''
        Path under which to expose healthcheck. Disabled unless configured.
      '';
    };
    telemetryPath = mkOption {
      type = types.str;
      default = "/metrics";
      description = ''
        Path under which to expose metrics.
      '';
    };
  };
  serviceOpts = {
    # varnishstat must be on PATH for the exporter's default varnishStatPath.
    path = [ config.services.varnish.package ];
    serviceConfig = {
      RestartSec = mkDefault 1;
      DynamicUser = false;
      ExecStart = ''
        ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
          --web.telemetry-path ${cfg.telemetryPath} \
          --varnishstat-path ${escapeShellArg cfg.varnishStatPath} \
          ${concatStringsSep " \\\n " (cfg.extraFlags
            ++ optional (cfg.healthPath != null) "--web.health-path ${cfg.healthPath}"
            ++ optional (cfg.instance != null) "-n ${escapeShellArg cfg.instance}"
            ++ optional cfg.noExit "--no-exit"
            ++ optional cfg.withGoMetrics "--with-go-metrics"
            ++ optional cfg.verbose "--verbose"
            ++ optional cfg.raw "--raw")}
      '';
    };
  };
}
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
# Prometheus exporter submodule: prometheus_wireguard_exporter.
# Exposes per-peer WireGuard statistics gathered via wg(8).
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.wireguard;
in {
  port = 9586;
  imports = [
    # Old option name `addr` maps onto the shared listenAddress option.
    (mkRenamedOptionModule [ "addr" ] [ "listenAddress" ])
    # Forward warnings/assertions options so the rename module can emit them.
    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
  ];
  extraOpts = {
    verbose = mkEnableOption "Verbose logging mode for prometheus-wireguard-exporter";

    wireguardConfig = mkOption {
      type = with types; nullOr (either path str);
      default = null;

      description = ''
        Path to the Wireguard Config to
        <link xlink:href="https://github.com/MindFlavor/prometheus_wireguard_exporter/tree/2.0.0#usage">add the peer's name to the stats of a peer</link>.

        Please note that <literal>networking.wg-quick</literal> is required for this feature
        as <literal>networking.wireguard</literal> uses
        <citerefentry><refentrytitle>wg</refentrytitle><manvolnum>8</manvolnum></citerefentry>
        to set the peers up.
      '';
    };

    singleSubnetPerField = mkOption {
      type = types.bool;
      default = false;
      description = ''
        By default, all allowed IPs and subnets are comma-separated in the
        <literal>allowed_ips</literal> field. With this option enabled,
        a single IP and subnet will be listed in fields like <literal>allowed_ip_0</literal>,
        <literal>allowed_ip_1</literal> and so on.
      '';
    };

    withRemoteIp = mkOption {
      type = types.bool;
      default = false;
      description = ''
        Whether or not the remote IP of a WireGuard peer should be exposed via prometheus.
      '';
    };
  };
  serviceOpts = {
    # The exporter shells out to wg(8); make it available on PATH.
    path = [ pkgs.wireguard-tools ];

    serviceConfig = {
      # CAP_NET_ADMIN is needed to query WireGuard interface state.
      AmbientCapabilities = [ "CAP_NET_ADMIN" ];
      CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
      ExecStart = ''
        ${pkgs.prometheus-wireguard-exporter}/bin/prometheus_wireguard_exporter \
          -p ${toString cfg.port} \
          -l ${cfg.listenAddress} \
          ${optionalString cfg.verbose "-v"} \
          ${optionalString cfg.singleSubnetPerField "-s"} \
          ${optionalString cfg.withRemoteIp "-r"} \
          ${optionalString (cfg.wireguardConfig != null) "-n ${escapeShellArg cfg.wireguardConfig}"}
      '';
      RestrictAddressFamilies = [
        # Need AF_NETLINK to collect data
        "AF_NETLINK"
      ];
    };
  };
}
|
||||
166
nixos/modules/services/monitoring/prometheus/pushgateway.nix
Normal file
166
nixos/modules/services/monitoring/prometheus/pushgateway.nix
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.pushgateway;
|
||||
|
||||
cmdlineArgs =
|
||||
opt "web.listen-address" cfg.web.listen-address
|
||||
++ opt "web.telemetry-path" cfg.web.telemetry-path
|
||||
++ opt "web.external-url" cfg.web.external-url
|
||||
++ opt "web.route-prefix" cfg.web.route-prefix
|
||||
++ optional cfg.persistMetrics ''--persistence.file="/var/lib/${cfg.stateDir}/metrics"''
|
||||
++ opt "persistence.interval" cfg.persistence.interval
|
||||
++ opt "log.level" cfg.log.level
|
||||
++ opt "log.format" cfg.log.format
|
||||
++ cfg.extraFlags;
|
||||
|
||||
opt = k : v : optional (v != null) ''--${k}="${v}"'';
|
||||
|
||||
in {
|
||||
options = {
|
||||
services.prometheus.pushgateway = {
|
||||
enable = mkEnableOption "Prometheus Pushgateway";
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.prometheus-pushgateway;
|
||||
defaultText = literalExpression "pkgs.prometheus-pushgateway";
|
||||
description = ''
|
||||
Package that should be used for the prometheus pushgateway.
|
||||
'';
|
||||
};
|
||||
|
||||
web.listen-address = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Address to listen on for the web interface, API and telemetry.
|
||||
|
||||
<literal>null</literal> will default to <literal>:9091</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
web.telemetry-path = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Path under which to expose metrics.
|
||||
|
||||
<literal>null</literal> will default to <literal>/metrics</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
web.external-url = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
The URL under which Pushgateway is externally reachable.
|
||||
'';
|
||||
};
|
||||
|
||||
web.route-prefix = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
Prefix for the internal routes of web endpoints.
|
||||
|
||||
Defaults to the path of
|
||||
<option>services.prometheus.pushgateway.web.external-url</option>.
|
||||
'';
|
||||
};
|
||||
|
||||
persistence.interval = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "10m";
|
||||
description = ''
|
||||
The minimum interval at which to write out the persistence file.
|
||||
|
||||
<literal>null</literal> will default to <literal>5m</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
log.level = mkOption {
|
||||
type = types.nullOr (types.enum ["debug" "info" "warn" "error" "fatal"]);
|
||||
default = null;
|
||||
description = ''
|
||||
Only log messages with the given severity or above.
|
||||
|
||||
<literal>null</literal> will default to <literal>info</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
log.format = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
example = "logger:syslog?appname=bob&local=7";
|
||||
description = ''
|
||||
Set the log target and format.
|
||||
|
||||
<literal>null</literal> will default to <literal>logger:stderr</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
extraFlags = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra commandline options when launching the Pushgateway.
|
||||
'';
|
||||
};
|
||||
|
||||
persistMetrics = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to persist metrics to a file.
|
||||
|
||||
When enabled metrics will be saved to a file called
|
||||
<literal>metrics</literal> in the directory
|
||||
<literal>/var/lib/pushgateway</literal>. The directory below
|
||||
<literal>/var/lib</literal> can be set using
|
||||
<option>services.prometheus.pushgateway.stateDir</option>.
|
||||
'';
|
||||
};
|
||||
|
||||
stateDir = mkOption {
|
||||
type = types.str;
|
||||
default = "pushgateway";
|
||||
description = ''
|
||||
Directory below <literal>/var/lib</literal> to store metrics.
|
||||
|
||||
This directory will be created automatically using systemd's
|
||||
StateDirectory mechanism when
|
||||
<option>services.prometheus.pushgateway.persistMetrics</option>
|
||||
is enabled.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [
|
||||
{
|
||||
assertion = !hasPrefix "/" cfg.stateDir;
|
||||
message =
|
||||
"The option services.prometheus.pushgateway.stateDir" +
|
||||
" shouldn't be an absolute directory." +
|
||||
" It should be a directory relative to /var/lib.";
|
||||
}
|
||||
];
|
||||
systemd.services.pushgateway = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
Restart = "always";
|
||||
DynamicUser = true;
|
||||
ExecStart = "${cfg.package}/bin/pushgateway" +
|
||||
optionalString (length cmdlineArgs != 0) (" \\\n " +
|
||||
concatStringsSep " \\\n " cmdlineArgs);
|
||||
StateDirectory = if cfg.persistMetrics then cfg.stateDir else null;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
55
nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix
Normal file
55
nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.prometheus.xmpp-alerts;
|
||||
settingsFormat = pkgs.formats.yaml {};
|
||||
configFile = settingsFormat.generate "prometheus-xmpp-alerts.yml" cfg.settings;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "prometheus" "xmpp-alerts" "configuration" ]
|
||||
[ "services" "prometheus" "xmpp-alerts" "settings" ])
|
||||
];
|
||||
|
||||
options.services.prometheus.xmpp-alerts = {
|
||||
enable = mkEnableOption "XMPP Web hook service for Alertmanager";
|
||||
|
||||
settings = mkOption {
|
||||
type = settingsFormat.type;
|
||||
default = {};
|
||||
|
||||
description = ''
|
||||
Configuration for prometheus xmpp-alerts, see
|
||||
<link xlink:href="https://github.com/jelmer/prometheus-xmpp-alerts/blob/master/xmpp-alerts.yml.example"/>
|
||||
for supported values.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.prometheus-xmpp-alerts = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network-online.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.prometheus-xmpp-alerts}/bin/prometheus-xmpp-alerts --config ${configFile}";
|
||||
Restart = "on-failure";
|
||||
DynamicUser = true;
|
||||
PrivateTmp = true;
|
||||
PrivateDevices = true;
|
||||
ProtectHome = true;
|
||||
ProtectSystem = "strict";
|
||||
ProtectKernelTunables = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectControlGroups = true;
|
||||
NoNewPrivileges = true;
|
||||
SystemCallArchitectures = "native";
|
||||
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||
SystemCallFilter = [ "@system-service" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
81
nixos/modules/services/monitoring/riemann-dash.nix
Normal file
81
nixos/modules/services/monitoring/riemann-dash.nix
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with pkgs;
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.riemann-dash;
|
||||
|
||||
conf = writeText "config.rb" ''
|
||||
riemann_base = "${cfg.dataDir}"
|
||||
config.store[:ws_config] = "#{riemann_base}/config/config.json"
|
||||
${cfg.config}
|
||||
'';
|
||||
|
||||
launcher = writeScriptBin "riemann-dash" ''
|
||||
#!/bin/sh
|
||||
exec ${pkgs.riemann-dash}/bin/riemann-dash ${conf}
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
options = {
|
||||
|
||||
services.riemann-dash = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable the riemann-dash dashboard daemon.
|
||||
'';
|
||||
};
|
||||
config = mkOption {
|
||||
type = types.lines;
|
||||
description = ''
|
||||
Contents added to the end of the riemann-dash configuration file.
|
||||
'';
|
||||
};
|
||||
dataDir = mkOption {
|
||||
type = types.str;
|
||||
default = "/var/riemann-dash";
|
||||
description = ''
|
||||
Location of the riemann-base dir. The dashboard configuration file is
|
||||
is stored to this directory. The directory is created automatically on
|
||||
service start, and owner is set to the riemanndash user.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
users.groups.riemanndash.gid = config.ids.gids.riemanndash;
|
||||
|
||||
users.users.riemanndash = {
|
||||
description = "riemann-dash daemon user";
|
||||
uid = config.ids.uids.riemanndash;
|
||||
group = "riemanndash";
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d '${cfg.dataDir}' - riemanndash riemanndash - -"
|
||||
];
|
||||
|
||||
systemd.services.riemann-dash = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wants = [ "riemann.service" ];
|
||||
after = [ "riemann.service" ];
|
||||
preStart = ''
|
||||
mkdir -p '${cfg.dataDir}/config'
|
||||
'';
|
||||
serviceConfig = {
|
||||
User = "riemanndash";
|
||||
ExecStart = "${launcher}/bin/riemann-dash";
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
70
nixos/modules/services/monitoring/riemann-tools.nix
Normal file
70
nixos/modules/services/monitoring/riemann-tools.nix
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with pkgs;
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.riemann-tools;
|
||||
|
||||
riemannHost = "${cfg.riemannHost}";
|
||||
|
||||
healthLauncher = writeScriptBin "riemann-health" ''
|
||||
#!/bin/sh
|
||||
exec ${pkgs.riemann-tools}/bin/riemann-health ${builtins.concatStringsSep " " cfg.extraArgs} --host ${riemannHost}
|
||||
'';
|
||||
|
||||
|
||||
in {
|
||||
|
||||
options = {
|
||||
|
||||
services.riemann-tools = {
|
||||
enableHealth = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable the riemann-health daemon.
|
||||
'';
|
||||
};
|
||||
riemannHost = mkOption {
|
||||
type = types.str;
|
||||
default = "127.0.0.1";
|
||||
description = ''
|
||||
Address of the host riemann node. Defaults to localhost.
|
||||
'';
|
||||
};
|
||||
extraArgs = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = ''
|
||||
A list of commandline-switches forwarded to a riemann-tool.
|
||||
See for example `riemann-health --help` for available options.
|
||||
'';
|
||||
example = ["-p 5555" "--timeout=30" "--attribute=myattribute=42"];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enableHealth {
|
||||
|
||||
users.groups.riemanntools.gid = config.ids.gids.riemanntools;
|
||||
|
||||
users.users.riemanntools = {
|
||||
description = "riemann-tools daemon user";
|
||||
uid = config.ids.uids.riemanntools;
|
||||
group = "riemanntools";
|
||||
};
|
||||
|
||||
systemd.services.riemann-health = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ procps ];
|
||||
serviceConfig = {
|
||||
User = "riemanntools";
|
||||
ExecStart = "${healthLauncher}/bin/riemann-health";
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
105
nixos/modules/services/monitoring/riemann.nix
Normal file
105
nixos/modules/services/monitoring/riemann.nix
Normal file
|
|
@ -0,0 +1,105 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with pkgs;
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.riemann;
|
||||
|
||||
classpath = concatStringsSep ":" (
|
||||
cfg.extraClasspathEntries ++ [ "${riemann}/share/java/riemann.jar" ]
|
||||
);
|
||||
|
||||
riemannConfig = concatStringsSep "\n" (
|
||||
[cfg.config] ++ (map (f: ''(load-file "${f}")'') cfg.configFiles)
|
||||
);
|
||||
|
||||
launcher = writeScriptBin "riemann" ''
|
||||
#!/bin/sh
|
||||
exec ${jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOpts} \
|
||||
-cp ${classpath} \
|
||||
riemann.bin ${cfg.configFile}
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
options = {
|
||||
|
||||
services.riemann = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Enable the Riemann network monitoring daemon.
|
||||
'';
|
||||
};
|
||||
config = mkOption {
|
||||
type = types.lines;
|
||||
description = ''
|
||||
Contents of the Riemann configuration file. For more complicated
|
||||
config you should use configFile.
|
||||
'';
|
||||
};
|
||||
configFiles = mkOption {
|
||||
type = with types; listOf path;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra files containing Riemann configuration. These files will be
|
||||
loaded at runtime by Riemann (with Clojure's
|
||||
<literal>load-file</literal> function) at the end of the
|
||||
configuration if you use the config option, this is ignored if you
|
||||
use configFile.
|
||||
'';
|
||||
};
|
||||
configFile = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
A Riemann config file. Any files in the same directory as this file
|
||||
will be added to the classpath by Riemann.
|
||||
'';
|
||||
};
|
||||
extraClasspathEntries = mkOption {
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra entries added to the Java classpath when running Riemann.
|
||||
'';
|
||||
};
|
||||
extraJavaOpts = mkOption {
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
description = ''
|
||||
Extra Java options used when launching Riemann.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
users.groups.riemann.gid = config.ids.gids.riemann;
|
||||
|
||||
users.users.riemann = {
|
||||
description = "riemann daemon user";
|
||||
uid = config.ids.uids.riemann;
|
||||
group = "riemann";
|
||||
};
|
||||
|
||||
services.riemann.configFile = mkDefault (
|
||||
writeText "riemann-config.clj" riemannConfig
|
||||
);
|
||||
|
||||
systemd.services.riemann = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ inetutils ];
|
||||
serviceConfig = {
|
||||
User = "riemann";
|
||||
ExecStart = "${launcher}/bin/riemann";
|
||||
};
|
||||
serviceConfig.LimitNOFILE = 65536;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
134
nixos/modules/services/monitoring/scollector.nix
Normal file
134
nixos/modules/services/monitoring/scollector.nix
Normal file
|
|
@ -0,0 +1,134 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.scollector;
|
||||
|
||||
collectors = pkgs.runCommand "collectors" { preferLocalBuild = true; }
|
||||
''
|
||||
mkdir -p $out
|
||||
${lib.concatStringsSep
|
||||
"\n"
|
||||
(lib.mapAttrsToList
|
||||
(frequency: binaries:
|
||||
"mkdir -p $out/${frequency}\n" +
|
||||
(lib.concatStringsSep
|
||||
"\n"
|
||||
(map (path: "ln -s ${path} $out/${frequency}/$(basename ${path})")
|
||||
binaries)))
|
||||
cfg.collectors)}
|
||||
'';
|
||||
|
||||
conf = pkgs.writeText "scollector.toml" ''
|
||||
Host = "${cfg.bosunHost}"
|
||||
ColDir = "${collectors}"
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
in {
|
||||
|
||||
options = {
|
||||
|
||||
services.scollector = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to run scollector.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.scollector;
|
||||
defaultText = literalExpression "pkgs.scollector";
|
||||
description = ''
|
||||
scollector binary to use.
|
||||
'';
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "scollector";
|
||||
description = ''
|
||||
User account under which scollector runs.
|
||||
'';
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "scollector";
|
||||
description = ''
|
||||
Group account under which scollector runs.
|
||||
'';
|
||||
};
|
||||
|
||||
bosunHost = mkOption {
|
||||
type = types.str;
|
||||
default = "localhost:8070";
|
||||
description = ''
|
||||
Host and port of the bosun server that will store the collected
|
||||
data.
|
||||
'';
|
||||
};
|
||||
|
||||
collectors = mkOption {
|
||||
type = with types; attrsOf (listOf path);
|
||||
default = {};
|
||||
example = literalExpression ''{ "0" = [ "''${postgresStats}/bin/collect-stats" ]; }'';
|
||||
description = ''
|
||||
An attribute set mapping the frequency of collection to a list of
|
||||
binaries that should be executed at that frequency. You can use "0"
|
||||
to run a binary forever.
|
||||
'';
|
||||
};
|
||||
|
||||
extraOpts = mkOption {
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
example = [ "-d" ];
|
||||
description = ''
|
||||
Extra scollector command line options
|
||||
'';
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
description = ''
|
||||
Extra scollector configuration added to the end of scollector.toml
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
config = mkIf config.services.scollector.enable {
|
||||
|
||||
systemd.services.scollector = {
|
||||
description = "scollector metrics collector (part of Bosun)";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
path = [ pkgs.coreutils pkgs.iproute2 ];
|
||||
|
||||
serviceConfig = {
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
ExecStart = "${cfg.package}/bin/scollector -conf=${conf} ${lib.concatStringsSep " " cfg.extraOpts}";
|
||||
};
|
||||
};
|
||||
|
||||
users.users.scollector = {
|
||||
description = "scollector user";
|
||||
group = "scollector";
|
||||
uid = config.ids.uids.scollector;
|
||||
};
|
||||
|
||||
users.groups.scollector.gid = config.ids.gids.scollector;
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
253
nixos/modules/services/monitoring/smartd.nix
Normal file
253
nixos/modules/services/monitoring/smartd.nix
Normal file
|
|
@ -0,0 +1,253 @@
|
|||
{ config, lib, options, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
host = config.networking.hostName or "unknown"
|
||||
+ optionalString (config.networking.domain != null) ".${config.networking.domain}";
|
||||
|
||||
cfg = config.services.smartd;
|
||||
opt = options.services.smartd;
|
||||
|
||||
nm = cfg.notifications.mail;
|
||||
nw = cfg.notifications.wall;
|
||||
nx = cfg.notifications.x11;
|
||||
|
||||
smartdNotify = pkgs.writeScript "smartd-notify.sh" ''
|
||||
#! ${pkgs.runtimeShell}
|
||||
${optionalString nm.enable ''
|
||||
{
|
||||
${pkgs.coreutils}/bin/cat << EOF
|
||||
From: smartd on ${host} <${nm.sender}>
|
||||
To: undisclosed-recipients:;
|
||||
Subject: $SMARTD_SUBJECT
|
||||
|
||||
$SMARTD_FULLMESSAGE
|
||||
EOF
|
||||
|
||||
${pkgs.smartmontools}/sbin/smartctl -a -d "$SMARTD_DEVICETYPE" "$SMARTD_DEVICE"
|
||||
} | ${nm.mailer} -i "${nm.recipient}"
|
||||
''}
|
||||
${optionalString nw.enable ''
|
||||
{
|
||||
${pkgs.coreutils}/bin/cat << EOF
|
||||
Problem detected with disk: $SMARTD_DEVICESTRING
|
||||
Warning message from smartd is:
|
||||
|
||||
$SMARTD_MESSAGE
|
||||
EOF
|
||||
} | ${pkgs.util-linux}/bin/wall 2>/dev/null
|
||||
''}
|
||||
${optionalString nx.enable ''
|
||||
export DISPLAY=${nx.display}
|
||||
{
|
||||
${pkgs.coreutils}/bin/cat << EOF
|
||||
Problem detected with disk: $SMARTD_DEVICESTRING
|
||||
Warning message from smartd is:
|
||||
|
||||
$SMARTD_FULLMESSAGE
|
||||
EOF
|
||||
} | ${pkgs.xorg.xmessage}/bin/xmessage -file - 2>/dev/null &
|
||||
''}
|
||||
'';
|
||||
|
||||
notifyOpts = optionalString (nm.enable || nw.enable || nx.enable)
|
||||
("-m <nomailer> -M exec ${smartdNotify} " + optionalString cfg.notifications.test "-M test ");
|
||||
|
||||
smartdConf = pkgs.writeText "smartd.conf" ''
|
||||
# Autogenerated smartd startup config file
|
||||
DEFAULT ${notifyOpts}${cfg.defaults.monitored}
|
||||
|
||||
${concatMapStringsSep "\n" (d: "${d.device} ${d.options}") cfg.devices}
|
||||
|
||||
${optionalString cfg.autodetect
|
||||
"DEVICESCAN ${notifyOpts}${cfg.defaults.autodetected}"}
|
||||
'';
|
||||
|
||||
smartdDeviceOpts = { ... }: {
|
||||
|
||||
options = {
|
||||
|
||||
device = mkOption {
|
||||
example = "/dev/sda";
|
||||
type = types.str;
|
||||
description = "Location of the device.";
|
||||
};
|
||||
|
||||
options = mkOption {
|
||||
default = "";
|
||||
example = "-d sat";
|
||||
type = types.separatedString " ";
|
||||
description = "Options that determine how smartd monitors the device.";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
|
||||
services.smartd = {
|
||||
|
||||
enable = mkEnableOption "smartd daemon from <literal>smartmontools</literal> package";
|
||||
|
||||
autodetect = mkOption {
|
||||
default = true;
|
||||
type = types.bool;
|
||||
description = ''
|
||||
Whenever smartd should monitor all devices connected to the
|
||||
machine at the time it's being started (the default).
|
||||
|
||||
Set to false to monitor the devices listed in
|
||||
<option>services.smartd.devices</option> only.
|
||||
'';
|
||||
};
|
||||
|
||||
extraOptions = mkOption {
|
||||
default = [];
|
||||
type = types.listOf types.str;
|
||||
example = ["-A /var/log/smartd/" "--interval=3600"];
|
||||
description = ''
|
||||
Extra command-line options passed to the <literal>smartd</literal>
|
||||
daemon on startup.
|
||||
|
||||
(See <literal>man 8 smartd</literal>.)
|
||||
'';
|
||||
};
|
||||
|
||||
notifications = {
|
||||
|
||||
mail = {
|
||||
enable = mkOption {
|
||||
default = config.services.mail.sendmailSetuidWrapper != null;
|
||||
defaultText = literalExpression "config.services.mail.sendmailSetuidWrapper != null";
|
||||
type = types.bool;
|
||||
description = "Whenever to send e-mail notifications.";
|
||||
};
|
||||
|
||||
sender = mkOption {
|
||||
default = "root";
|
||||
example = "example@domain.tld";
|
||||
type = types.str;
|
||||
description = ''
|
||||
Sender of the notification messages.
|
||||
Acts as the value of <literal>email</literal> in the emails' <literal>From: ... </literal> field.
|
||||
'';
|
||||
};
|
||||
|
||||
recipient = mkOption {
|
||||
default = "root";
|
||||
type = types.str;
|
||||
description = "Recipient of the notification messages.";
|
||||
};
|
||||
|
||||
mailer = mkOption {
|
||||
default = "/run/wrappers/bin/sendmail";
|
||||
type = types.path;
|
||||
description = ''
|
||||
Sendmail-compatible binary to be used to send the messages.
|
||||
|
||||
You should probably enable
|
||||
<option>services.postfix</option> or some other MTA for
|
||||
this to work.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
wall = {
|
||||
enable = mkOption {
|
||||
default = true;
|
||||
type = types.bool;
|
||||
description = "Whenever to send wall notifications to all users.";
|
||||
};
|
||||
};
|
||||
|
||||
x11 = {
|
||||
enable = mkOption {
|
||||
default = config.services.xserver.enable;
|
||||
defaultText = literalExpression "config.services.xserver.enable";
|
||||
type = types.bool;
|
||||
description = "Whenever to send X11 xmessage notifications.";
|
||||
};
|
||||
|
||||
display = mkOption {
|
||||
default = ":${toString config.services.xserver.display}";
|
||||
defaultText = literalExpression ''":''${toString config.services.xserver.display}"'';
|
||||
type = types.str;
|
||||
description = "DISPLAY to send X11 notifications to.";
|
||||
};
|
||||
};
|
||||
|
||||
test = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = "Whenever to send a test notification on startup.";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
defaults = {
|
||||
monitored = mkOption {
|
||||
default = "-a";
|
||||
type = types.separatedString " ";
|
||||
example = "-a -o on -s (S/../.././02|L/../../7/04)";
|
||||
description = ''
|
||||
Common default options for explicitly monitored (listed in
|
||||
<option>services.smartd.devices</option>) devices.
|
||||
|
||||
The default value turns on monitoring of all the things (see
|
||||
<literal>man 5 smartd.conf</literal>).
|
||||
|
||||
The example also turns on SMART Automatic Offline Testing on
|
||||
startup, and schedules short self-tests daily, and long
|
||||
self-tests weekly.
|
||||
'';
|
||||
};
|
||||
|
||||
autodetected = mkOption {
|
||||
default = cfg.defaults.monitored;
|
||||
defaultText = literalExpression "config.${opt.defaults.monitored}";
|
||||
type = types.separatedString " ";
|
||||
description = ''
|
||||
Like <option>services.smartd.defaults.monitored</option>, but for the
|
||||
autodetected devices.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
devices = mkOption {
|
||||
default = [];
|
||||
example = [ { device = "/dev/sda"; } { device = "/dev/sdb"; options = "-d sat"; } ];
|
||||
type = with types; listOf (submodule smartdDeviceOpts);
|
||||
description = "List of devices to monitor.";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
assertions = [ {
|
||||
assertion = cfg.autodetect || cfg.devices != [];
|
||||
message = "smartd can't run with both disabled autodetect and an empty list of devices to monitor.";
|
||||
} ];
|
||||
|
||||
systemd.services.smartd = {
|
||||
description = "S.M.A.R.T. Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd ${lib.concatStringsSep " " cfg.extraOptions} --no-fork --configfile=${smartdConf}";
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
149
nixos/modules/services/monitoring/statsd.nix
Normal file
149
nixos/modules/services/monitoring/statsd.nix
Normal file
|
|
@ -0,0 +1,149 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
|
||||
cfg = config.services.statsd;
|
||||
|
||||
isBuiltinBackend = name:
|
||||
builtins.elem name [ "graphite" "console" "repeater" ];
|
||||
|
||||
backendsToPackages = let
|
||||
mkMap = list: name:
|
||||
if isBuiltinBackend name then list
|
||||
else list ++ [ pkgs.nodePackages.${name} ];
|
||||
in foldl mkMap [];
|
||||
|
||||
configFile = pkgs.writeText "statsd.conf" ''
|
||||
{
|
||||
address: "${cfg.listenAddress}",
|
||||
port: "${toString cfg.port}",
|
||||
mgmt_address: "${cfg.mgmt_address}",
|
||||
mgmt_port: "${toString cfg.mgmt_port}",
|
||||
backends: [${
|
||||
concatMapStringsSep "," (name:
|
||||
if (isBuiltinBackend name)
|
||||
then ''"./backends/${name}"''
|
||||
else ''"${name}"''
|
||||
) cfg.backends}],
|
||||
${optionalString (cfg.graphiteHost!=null) ''graphiteHost: "${cfg.graphiteHost}",''}
|
||||
${optionalString (cfg.graphitePort!=null) ''graphitePort: "${toString cfg.graphitePort}",''}
|
||||
console: {
|
||||
prettyprint: false
|
||||
},
|
||||
log: {
|
||||
backend: "stdout"
|
||||
},
|
||||
automaticConfigReload: false${optionalString (cfg.extraConfig != null) ","}
|
||||
${cfg.extraConfig}
|
||||
}
|
||||
'';
|
||||
|
||||
deps = pkgs.buildEnv {
|
||||
name = "statsd-runtime-deps";
|
||||
pathsToLink = [ "/lib" ];
|
||||
ignoreCollisions = true;
|
||||
|
||||
paths = backendsToPackages cfg.backends;
|
||||
};
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
|
||||
###### interface
|
||||
|
||||
options.services.statsd = {
|
||||
|
||||
enable = mkEnableOption "statsd";
|
||||
|
||||
listenAddress = mkOption {
|
||||
description = "Address that statsd listens on over UDP";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
description = "Port that stats listens for messages on over UDP";
|
||||
default = 8125;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
mgmt_address = mkOption {
|
||||
description = "Address to run management TCP interface on";
|
||||
default = "127.0.0.1";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
mgmt_port = mkOption {
|
||||
description = "Port to run the management TCP interface on";
|
||||
default = 8126;
|
||||
type = types.int;
|
||||
};
|
||||
|
||||
backends = mkOption {
|
||||
description = "List of backends statsd will use for data persistence";
|
||||
default = [];
|
||||
example = [
|
||||
"graphite"
|
||||
"console"
|
||||
"repeater"
|
||||
"statsd-librato-backend"
|
||||
"stackdriver-statsd-backend"
|
||||
"statsd-influxdb-backend"
|
||||
];
|
||||
type = types.listOf types.str;
|
||||
};
|
||||
|
||||
graphiteHost = mkOption {
|
||||
description = "Hostname or IP of Graphite server";
|
||||
default = null;
|
||||
type = types.nullOr types.str;
|
||||
};
|
||||
|
||||
graphitePort = mkOption {
|
||||
description = "Port of Graphite server (i.e. carbon-cache).";
|
||||
default = null;
|
||||
type = types.nullOr types.int;
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
description = "Extra configuration options for statsd";
|
||||
default = "";
|
||||
type = types.nullOr types.str;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
assertions = map (backend: {
|
||||
assertion = !isBuiltinBackend backend -> hasAttrByPath [ backend ] pkgs.nodePackages;
|
||||
message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!";
|
||||
}) cfg.backends;
|
||||
|
||||
users.users.statsd = {
|
||||
uid = config.ids.uids.statsd;
|
||||
description = "Statsd daemon user";
|
||||
};
|
||||
|
||||
systemd.services.statsd = {
|
||||
description = "Statsd Server";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
environment = {
|
||||
NODE_PATH = "${deps}/lib/node_modules";
|
||||
};
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.statsd}/bin/statsd ${configFile}";
|
||||
User = "statsd";
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.statsd ];
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
76
nixos/modules/services/monitoring/sysstat.nix
Normal file
76
nixos/modules/services/monitoring/sysstat.nix
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
with lib;
|
||||
let
|
||||
cfg = config.services.sysstat;
|
||||
in {
|
||||
options = {
|
||||
services.sysstat = {
|
||||
enable = mkEnableOption "sar system activity collection";
|
||||
|
||||
collect-frequency = mkOption {
|
||||
type = types.str;
|
||||
default = "*:00/10";
|
||||
description = ''
|
||||
OnCalendar specification for sysstat-collect
|
||||
'';
|
||||
};
|
||||
|
||||
collect-args = mkOption {
|
||||
type = types.str;
|
||||
default = "1 1";
|
||||
description = ''
|
||||
Arguments to pass sa1 when collecting statistics
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.sysstat = {
|
||||
description = "Resets System Activity Logs";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
User = "root";
|
||||
RemainAfterExit = true;
|
||||
Type = "oneshot";
|
||||
ExecStart = "${pkgs.sysstat}/lib/sa/sa1 --boot";
|
||||
LogsDirectory = "sa";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.sysstat-collect = {
|
||||
description = "system activity accounting tool";
|
||||
unitConfig.Documentation = "man:sa1(8)";
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
ExecStart = "${pkgs.sysstat}/lib/sa/sa1 ${cfg.collect-args}";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.timers.sysstat-collect = {
|
||||
description = "Run system activity accounting tool on a regular basis";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig.OnCalendar = cfg.collect-frequency;
|
||||
};
|
||||
|
||||
systemd.services.sysstat-summary = {
|
||||
description = "Generate a daily summary of process accounting";
|
||||
unitConfig.Documentation = "man:sa2(8)";
|
||||
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
ExecStart = "${pkgs.sysstat}/lib/sa/sa2 -A";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.timers.sysstat-summary = {
|
||||
description = "Generate summary of yesterday's process accounting";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig.OnCalendar = "00:07:00";
|
||||
};
|
||||
};
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue