Compare commits

9 Commits

32a2b87313
Fix osu cross-compilation
2025-10-31 12:01:53 +01:00
c1edc91e1b
Set mpich default compilers from targetPackages 2025-10-31 12:01:52 +01:00
242fbdc268
Enable meta.cross for mpich related packages 2025-10-31 12:01:52 +01:00
b0d90a8ee9
Disable meta.cross for gpi-2 and tagaspi 2025-10-31 12:01:52 +01:00
16300fa966
Fix nativeBuildInputs for tagaspi 2025-10-31 12:01:52 +01:00
46da697690
Fix nativeBuildInputs for gpi-2 2025-10-31 12:01:52 +01:00
10b2dca7b6
Fix mpich cross compilation (disable fortran) 2025-10-31 12:01:51 +01:00
fc69ef3217 Enable pam_slurm_adopt in all compute nodes
Prevents access to owl1 and owl2 too if the user doesn't have any jobs
running there.

Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-31 11:41:50 +01:00
1d025f7a38 Don't suspend owl compute nodes
Currently the owl nodes are located on top of the rack and turning them
off causes a high temperature increase at that region, which accumulates
heat from the whole rack. To maximize airflow we will leave them on at
all times. This also makes allocations immediate at the extra cost of
around 200 W.

In the future, if we include more nodes in SLURM we can configure those
to turn off if needed.

Fixes: #156
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-31 11:41:44 +01:00
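
Note (sketch, not part of this compare): if power saving is reintroduced later for additional nodes, the owl nodes could instead be kept on through SuspendExcNodes rather than removing the suspend logic entirely. The fragment below assumes suspendProgram and resumeProgram are defined as in the scripts removed further down; the owl[1-2] hostlist is an assumption based on the node names mentioned above.

    # Sketch: power saving for future nodes, with fox and the owl nodes excluded.
    # Assumes suspendProgram / resumeProgram are restored as before.
    services.slurm.extraConfig = ''
      # Power off unused nodes until they are requested
      SuspendProgram=${suspendProgram}
      ResumeProgram=${resumeProgram}
      SuspendTimeout=60
      ResumeTimeout=300
      # Turn the nodes off after 1 hour of inactivity
      SuspendTime=3600
      # Keep fox and the owl nodes powered at all times
      SuspendExcNodes=fox,owl[1-2]
    '';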
3 changed files with 18 additions and 53 deletions

View File

@@ -93,20 +93,4 @@
     wantedBy = [ "multi-user.target" ];
     serviceConfig.ExecStart = script;
   };
-
-  # Only allow SSH connections from users who have a SLURM allocation
-  # See: https://slurm.schedmd.com/pam_slurm_adopt.html
-  security.pam.services.sshd.rules.account.slurm = {
-    control = "required";
-    enable = true;
-    modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
-    args = [ "log_level=debug5" ];
-    order = 999999; # Make it last one
-  };
-
-  # Disable systemd session (pam_systemd.so) as it will conflict with the
-  # pam_slurm_adopt.so module. What happens is that the shell is first adopted
-  # into the slurmstepd task and then into the systemd session, which is not
-  # what we want, otherwise it will linger even if all jobs are gone.
-  security.pam.services.sshd.startSession = lib.mkForce false;
 }

View File

@@ -1,4 +1,4 @@
-{ lib, ... }:
+{ lib, pkgs, ... }:
 {
   imports = [
@@ -21,4 +21,20 @@
   };
 
   services.slurm.client.enable = true;
+
+  # Only allow SSH connections from users who have a SLURM allocation
+  # See: https://slurm.schedmd.com/pam_slurm_adopt.html
+  security.pam.services.sshd.rules.account.slurm = {
+    control = "required";
+    enable = true;
+    modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
+    args = [ "log_level=debug5" ];
+    order = 999999; # Make it last one
+  };
+
+  # Disable systemd session (pam_systemd.so) as it will conflict with the
+  # pam_slurm_adopt.so module. What happens is that the shell is first adopted
+  # into the slurmstepd task and then into the systemd session, which is not
+  # what we want, otherwise it will linger even if all jobs are gone.
+  security.pam.services.sshd.startSession = lib.mkForce false;
 }
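
Note (sketch, not part of this compare): with the account rule above, any SSH login without a SLURM allocation is rejected, including administrators. If an escape hatch is ever needed, a hypothetical extra rule could whitelist specific accounts ahead of the slurm one; the rule name admins, the use of pam_listfile, the file path, and the order value below are all assumptions, not taken from this repository.

    # Sketch: allow users listed in /etc/ssh/allowed_users (hypothetical path)
    # to log in without a job, evaluated just before the slurm rule.
    security.pam.services.sshd.rules.account.admins = {
      control = "sufficient";
      enable = true;
      modulePath = "${pkgs.pam}/lib/security/pam_listfile.so";
      args = [ "item=user" "sense=allow" "file=/etc/ssh/allowed_users" "onerr=fail" ];
      order = 999998; # Evaluate right before the slurm rule (order 999999)
    };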

View File

@@ -1,31 +1,6 @@
 { config, pkgs, ... }:
-let
-  suspendProgram = pkgs.writeShellScript "suspend.sh" ''
-    exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
-    set -x
-    export "PATH=/run/current-system/sw/bin:$PATH"
-    echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
-    hosts=$(scontrol show hostnames $1)
-    for host in $hosts; do
-      echo Shutting down host: $host
-      ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power off
-    done
-  '';
-  resumeProgram = pkgs.writeShellScript "resume.sh" ''
-    exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
-    set -x
-    export "PATH=/run/current-system/sw/bin:$PATH"
-    echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
-    hosts=$(scontrol show hostnames $1)
-    for host in $hosts; do
-      echo Starting host: $host
-      ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power on
-    done
-  '';
-in {
+{
   services.slurm = {
     controlMachine = "apex";
     clusterName = "jungle";
@@ -59,16 +34,6 @@ in {
     # the resources. Use the task/cgroup plugin to enable process containment.
     TaskPlugin=task/affinity,task/cgroup
 
-    # Power off unused nodes until they are requested
-    SuspendProgram=${suspendProgram}
-    SuspendTimeout=60
-    ResumeProgram=${resumeProgram}
-    ResumeTimeout=300
-    SuspendExcNodes=fox
-
-    # Turn the nodes off after 1 hour of inactivity
-    SuspendTime=3600
-
     # Reduce port range so we can allow only this range in the firewall
     SrunPortRange=60000-61000
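
Note (sketch, not part of this compare): the comment above about the firewall refers to rules kept elsewhere in the configuration. Assuming these nodes use the standard NixOS firewall, the rule matching the reduced srun port range would look like:

    # Open the srun port range referenced by SrunPortRange above.
    networking.firewall.allowedTCPPortRanges = [
      { from = 60000; to = 61000; }
    ];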