Compare commits

...

5 Commits

Author SHA1 Message Date
541c16cf44 Enable pam_slurm_adopt in all compute nodes
This also prevents access to owl1 and owl2 if the user doesn't have any jobs
running there.
2025-10-31 11:27:44 +01:00
018d94bd77 Don't suspend owl compute nodes
Currently the owl nodes are located at the top of the rack, and turning them
off causes a high temperature increase in that region, which accumulates
heat from the whole rack. To maximize airflow we will leave them on at
all times. This also makes allocations immediate, at the extra cost of
around 200 W.

In the future, if we include more nodes in SLURM, we can configure those
to turn off if needed.

Fixes: #156
2025-10-31 11:27:44 +01:00
7989779c8f Filter out packages by platform from crossSet
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
Tested-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:21:13 +01:00
7d721084a7 Add meta to cudainfo
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:20:01 +01:00
796d34a549 Set amd-uprof platforms to x86_64-linux only
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:19:57 +01:00
6 changed files with 33 additions and 60 deletions

View File

@@ -93,20 +93,4 @@
wantedBy = [ "multi-user.target" ];
serviceConfig.ExecStart = script;
};
# Only allow SSH connections from users who have a SLURM allocation
# See: https://slurm.schedmd.com/pam_slurm_adopt.html
security.pam.services.sshd.rules.account.slurm = {
control = "required";
enable = true;
modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
args = [ "log_level=debug5" ];
order = 999999; # Make it the last one
};
# Disable the systemd session (pam_systemd.so) as it conflicts with the
# pam_slurm_adopt.so module: the shell is first adopted into the slurmstepd
# task and then into the systemd session, which is not what we want, as the
# session would linger even after all jobs are gone.
security.pam.services.sshd.startSession = lib.mkForce false;
}

View File

@@ -1,4 +1,4 @@
{ lib, ... }:
{ lib, pkgs, ... }:
{
imports = [
@@ -21,4 +21,20 @@
};
services.slurm.client.enable = true;
# Only allow SSH connections from users who have a SLURM allocation
# See: https://slurm.schedmd.com/pam_slurm_adopt.html
security.pam.services.sshd.rules.account.slurm = {
control = "required";
enable = true;
modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
args = [ "log_level=debug5" ];
order = 999999; # Make it the last one
};
# Disable the systemd session (pam_systemd.so) as it conflicts with the
# pam_slurm_adopt.so module: the shell is first adopted into the slurmstepd
# task and then into the systemd session, which is not what we want, as the
# session would linger even after all jobs are gone.
security.pam.services.sshd.startSession = lib.mkForce false;
}
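
For later reference, a minimal sketch (not part of this change) of how the rule added above could be tightened once the setup is validated: drop the debug log level and spell out the deny action for users without jobs. The pam_slurm_adopt arguments below come from the upstream documentation linked in the comment; the override itself is only an assumed follow-up.

# Hypothetical follow-up inside the same module: quieter logging once
# pam_slurm_adopt is known to work, with the (already default) deny action
# for users without jobs made explicit.
security.pam.services.sshd.rules.account.slurm.args = lib.mkForce [
  "action_no_jobs=deny"
  "log_level=info"
];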

View File

@@ -1,31 +1,6 @@
{ config, pkgs, ... }:
let
suspendProgram = pkgs.writeShellScript "suspend.sh" ''
exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
set -x
export "PATH=/run/current-system/sw/bin:$PATH"
echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
hosts=$(scontrol show hostnames $1)
for host in $hosts; do
echo Shutting down host: $host
ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power off
done
'';
resumeProgram = pkgs.writeShellScript "resume.sh" ''
exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
set -x
export "PATH=/run/current-system/sw/bin:$PATH"
echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
hosts=$(scontrol show hostnames $1)
for host in $hosts; do
echo Starting host: $host
ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power on
done
'';
in {
{
services.slurm = {
controlMachine = "apex";
clusterName = "jungle";
@@ -59,16 +34,6 @@ in {
# the resources. Use the task/cgroup plugin to enable process containment.
TaskPlugin=task/affinity,task/cgroup
# Power off unused nodes until they are requested
SuspendProgram=${suspendProgram}
SuspendTimeout=60
ResumeProgram=${resumeProgram}
ResumeTimeout=300
SuspendExcNodes=fox
# Turn the nodes off after 1 hour of inactivity
SuspendTime=3600
# Reduce port range so we can allow only this range in the firewall
SrunPortRange=60000-61000
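
Should power saving be reintroduced for additional nodes, as the commit message of 018d94bd77 anticipates, a rough sketch of how the removed settings could return with the always-on nodes kept exempt; the script paths and the node list are placeholders, not part of this change.

# Hypothetical future snippet for services.slurm.extraConfig, assuming new
# suspendable nodes are added; fox and the owl nodes stay exempt.
services.slurm.extraConfig = ''
  SuspendProgram=/path/to/suspend.sh   # placeholder path
  ResumeProgram=/path/to/resume.sh     # placeholder path
  SuspendTimeout=60
  ResumeTimeout=300
  # Turn the nodes off after 1 hour of inactivity
  SuspendTime=3600
  SuspendExcNodes=fox,owl[1-2]
'';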

View File

@@ -101,14 +101,16 @@ let
pkgsTopLevel = filterAttrs (_: isDerivation) bscPkgs;
# Native build in that platform doesn't imply cross build works
canCrossCompile = platform: pkg:
canCrossCompile = platform: default: pkg:
(isDerivation pkg) &&
# Must be defined explicitly
(pkg.meta.cross or false) &&
(meta.availableOn platform pkg);
# If meta.cross is undefined, use default
(pkg.meta.cross or default) &&
(meta.availableOn final.pkgsCross.${platform}.stdenv.hostPlatform pkg);
# For now only RISC-V
crossSet = { riscv64 = final.pkgsCross.riscv64.bsc.pkgsTopLevel; };
crossSet = genAttrs [ "riscv64" ] (platform:
filterAttrs (_: canCrossCompile platform true)
final.pkgsCross.${platform}.bsc.pkgsTopLevel);
buildList = name: paths:
final.runCommandLocal name { } ''
@@ -128,7 +130,7 @@ let
# For now only RISC-V
crossList = buildList "ci-cross"
(filter
(canCrossCompile final.pkgsCross.riscv64.stdenv.hostPlatform)
(canCrossCompile "riscv64" false) # opt-in (pkgs with: meta.cross = true)
(builtins.attrValues crossSet.riscv64));
in bscPkgs // {
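
To make the opt-in behaviour concrete, a hypothetical package skeleton (name made up, src and build phases omitted) that would land both in crossSet (default true) and in the ci-cross list (default false, so an explicit meta.cross = true is required), provided riscv64-linux is listed in meta.platforms so availableOn accepts it:

# Hypothetical package illustrating the opt-in: meta.cross = true makes
# canCrossCompile succeed even with default = false, and the riscv64-linux
# platform keeps availableOn happy.
stdenv.mkDerivation {
  pname = "example-tool"; # made-up name
  version = "1.0";
  meta = {
    platforms = [ "x86_64-linux" "riscv64-linux" ];
    cross = true; # opt into the ci-cross build list
  };
}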

View File

@@ -90,7 +90,7 @@ in
meta = {
description = "Performance analysis tool-suite for x86 based applications";
homepage = "https://www.amd.com/es/developer/uprof.html";
platforms = lib.platforms.linux;
platforms = [ "x86_64-linux" ];
license = lib.licenses.unfree;
maintainers = with lib.maintainers.bsc; [ rarias varcila ];
};
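
The narrower platforms list is what keeps amd-uprof out of the riscv64 cross set after the filtering change above. Roughly, with the attribute path and evaluation context assumed:

# Rough illustration, assuming evaluation inside the overlay where `lib` and
# `final` are in scope and the package lives under the bsc attribute set:
lib.meta.availableOn final.pkgsCross.riscv64.stdenv.hostPlatform final.bsc.amd-uprof
# => false, since "riscv64-linux" does not match [ "x86_64-linux" ]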

View File

@@ -1,5 +1,6 @@
{
stdenv
, lib
, cudatoolkit
, cudaPackages
, autoAddDriverRunpath
@@ -40,4 +41,9 @@ stdenv.mkDerivation (finalAttrs: {
'';
installPhase = "touch $out";
};
meta = {
platforms = [ "x86_64-linux" ];
maintainers = with lib.maintainers.bsc; [ rarias ];
};
})