nixos/services.hadoop.hdfs: remove with lib;
Stunkymonkey committed Dec 8, 2024
1 parent decec5e commit e7e4c15
Showing 1 changed file with 32 additions and 33 deletions.
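
The change is mechanical: the file-level `with lib;` is dropped, and every identifier it used to supply (`mkEnableOption`, `mkOption`, `types`, `mkIf`, `mkMerge`, `optionalAttrs`, `toLower`, `escapeShellArgs`, `versionAtLeast`, `concatMapStringsSep`) is qualified explicitly with `lib.`. A minimal sketch of the pattern, where the option name `foo` is a placeholder and not part of this module:

    # Before: `with lib;` pulls every lib attribute into scope implicitly.
    { lib, ... }:
    with lib;
    {
      options.foo = mkOption {
        type = types.bool;
        default = false;
      };
    }

    # After: each identifier names its origin explicitly.
    { lib, ... }:
    {
      options.foo = lib.mkOption {
        type = lib.types.bool;
        default = false;
      };
    }

Explicit qualification makes the provenance of each identifier obvious and avoids the scoping surprises of `with` (attributes brought in by `with` never shadow lexical bindings, so the same name can silently refer to different things). Note the commit keeps the much narrower `with lib.types;` inside individual option type expressions, where the scope is a single line.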
nixos/modules/services/cluster/hadoop/hdfs.nix
@@ -1,5 +1,4 @@
 { config, lib, pkgs, ... }:
-with lib;
 let
   cfg = config.services.hadoop;
 
@@ -8,9 +7,9 @@ let
 
   # Generator for HDFS service options
   hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
-    enable = mkEnableOption serviceName;
-    restartIfChanged = mkOption {
-      type = types.bool;
+    enable = lib.mkEnableOption serviceName;
+    restartIfChanged = lib.mkOption {
+      type = lib.types.bool;
       description = ''
         Automatically restart the service on config change.
         This can be set to false to defer restarts on clusters running critical applications.
@@ -19,32 +18,32 @@
       '';
       default = false;
     };
-    extraFlags = mkOption{
-      type = with types; listOf str;
+    extraFlags = lib.mkOption{
+      type = with lib.types; listOf str;
       default = [];
       description = "Extra command line flags to pass to ${serviceName}";
       example = [
         "-Dcom.sun.management.jmxremote"
         "-Dcom.sun.management.jmxremote.port=8010"
       ];
     };
-    extraEnv = mkOption{
-      type = with types; attrsOf str;
+    extraEnv = lib.mkOption{
+      type = with lib.types; attrsOf str;
       default = {};
       description = "Extra environment variables for ${serviceName}";
     };
-  } // (optionalAttrs firewallOption {
-    openFirewall = mkOption {
-      type = types.bool;
+  } // (lib.optionalAttrs firewallOption {
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
       default = false;
       description = "Open firewall ports for ${serviceName}.";
     };
-  }) // (optionalAttrs (extraOpts != null) extraOpts);
+  }) // (lib.optionalAttrs (extraOpts != null) extraOpts);
 
   # Generator for HDFS service configs
   hadoopServiceConfig =
     { name
-    , serviceOptions ? cfg.hdfs."${toLower name}"
+    , serviceOptions ? cfg.hdfs."${lib.toLower name}"
     , description ? "Hadoop HDFS ${name}"
     , User ? "hdfs"
     , allowedTCPPorts ? [ ]
@@ -53,23 +52,23 @@
     , extraConfig ? { }
     }: (
 
-      mkIf serviceOptions.enable ( mkMerge [{
-        systemd.services."hdfs-${toLower name}" = {
+      lib.mkIf serviceOptions.enable ( lib.mkMerge [{
+        systemd.services."hdfs-${lib.toLower name}" = {
           inherit description preStart;
           environment = environment // serviceOptions.extraEnv;
           wantedBy = [ "multi-user.target" ];
           inherit (serviceOptions) restartIfChanged;
           serviceConfig = {
             inherit User;
-            SyslogIdentifier = "hdfs-${toLower name}";
-            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
+            SyslogIdentifier = "hdfs-${lib.toLower name}";
+            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${lib.toLower name} ${lib.escapeShellArgs serviceOptions.extraFlags}";
             Restart = "always";
           };
         };
 
         services.hadoop.gatewayRole.enable = true;
 
-        networking.firewall.allowedTCPPorts = mkIf
+        networking.firewall.allowedTCPPorts = lib.mkIf
           ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
           allowedTCPPorts;
       } extraConfig])
@@ -80,8 +79,8 @@ in
   options.services.hadoop.hdfs = {
 
     namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
-      formatOnInit = mkOption {
-        type = types.bool;
+      formatOnInit = lib.mkOption {
+        type = lib.types.bool;
         default = false;
         description = ''
           Format HDFS namenode on first start. This is useful for quickly spinning up
@@ -94,18 +93,18 @@ in
     };
 
     datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
-      dataDirs = mkOption {
+      dataDirs = lib.mkOption {
         default = null;
         description = "Tier and path definitions for datanode storage.";
-        type = with types; nullOr (listOf (submodule {
+        type = with lib.types; nullOr (listOf (submodule {
           options = {
-            type = mkOption {
+            type = lib.mkOption {
               type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
               description = ''
                 Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
               '';
             };
-            path = mkOption {
+            path = lib.mkOption {
               type = path;
               example = [ "/var/lib/hadoop/hdfs/dn" ];
               description = "Determines where on the local filesystem a data node should store its blocks.";
@@ -123,16 +122,16 @@ in
     };
 
     httpfs = hadoopServiceOption { serviceName = "HDFS JournalNode"; } // {
-      tempPath = mkOption {
-        type = types.path;
+      tempPath = lib.mkOption {
+        type = lib.types.path;
         default = "/tmp/hadoop/httpfs";
         description = "HTTPFS_TEMP path used by HTTPFS";
       };
     };
 
   };
 
-  config = mkMerge [
+  config = lib.mkMerge [
     (hadoopServiceConfig {
       name = "NameNode";
       allowedTCPPorts = [
@@ -141,15 +140,15 @@ in
         8022 # namenode.servicerpc-address
         8019 # dfs.ha.zkfc.port
       ];
-      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
+      preStart = (lib.mkIf cfg.hdfs.namenode.formatOnInit
        "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
       );
     })
 
     (hadoopServiceConfig {
       name = "DataNode";
       # port numbers for datanode changed between hadoop 2 and 3
-      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
+      allowedTCPPorts = if lib.versionAtLeast cfg.package.version "3" then [
         9864 # datanode.http.address
         9866 # datanode.address
         9867 # datanode.ipc.address
@@ -158,8 +157,8 @@
         50010 # datanode.address
         50020 # datanode.ipc.address
       ];
-      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = mkIf (cfg.hdfs.datanode.dataDirs!= null)
-        (concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) cfg.hdfs.datanode.dataDirs);
+      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = lib.mkIf (cfg.hdfs.datanode.dataDirs!= null)
+        (lib.concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) cfg.hdfs.datanode.dataDirs);
     })
 
     (hadoopServiceConfig {
@@ -185,14 +184,14 @@ in
       ];
     })
 
-    (mkIf cfg.gatewayRole.enable {
+    (lib.mkIf cfg.gatewayRole.enable {
       users.users.hdfs = {
         description = "Hadoop HDFS user";
         group = "hadoop";
         uid = config.ids.uids.hdfs;
       };
     })
-    (mkIf cfg.hdfs.httpfs.enable {
+    (lib.mkIf cfg.hdfs.httpfs.enable {
       users.users.httpfs = {
         description = "Hadoop HTTPFS user";
         group = "hadoop";
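
For context, a minimal sketch of how the options defined above are consumed from a NixOS configuration; the single-node layout, the data path, and the DISK-only tier are illustrative and not taken from this commit:

    {
      services.hadoop = {
        hdfs.namenode = {
          enable = true;
          formatOnInit = true;  # format the namenode on first start
          openFirewall = true;  # open the namenode TCP ports listed in the module
        };
        hdfs.datanode = {
          enable = true;
          openFirewall = true;
          # One [DISK] tier; the generator above renders this into
          # dfs.datanode.data.dir as "[DISK]file:///var/lib/hadoop/hdfs/dn".
          dataDirs = [
            { type = "DISK"; path = "/var/lib/hadoop/hdfs/dn"; }
          ];
        };
      };
    }

Since the commit only replaces `with lib;` with explicit `lib.` prefixes, configurations like this behave identically before and after it.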