Compare commits

5 Commits

fc69ef3217 Enable pam_slurm_adopt in all compute nodes
This also prevents access to owl1 and owl2 if the user doesn't have any jobs
running there.

Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-31 11:41:50 +01:00
1d025f7a38 Don't suspend owl compute nodes
The owl nodes sit at the top of the rack, where heat from the whole rack
accumulates, and turning them off stops their airflow and raises the
temperature in that region considerably. To maximize airflow we will leave
them on at all times. This also makes allocations immediate, at an extra
cost of around 200 W.

In the future, if we include more nodes in SLURM we can configure those
to turn off if needed.

Fixes: rarias/jungle#156
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-31 11:41:44 +01:00
7989779c8f Filter out packages by platform from crossSet
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
Tested-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:21:13 +01:00
7d721084a7 Add meta to cudainfo
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:20:01 +01:00
796d34a549 Set amd-uprof platforms to x86_64-linux only
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:19:57 +01:00
8 changed files with 36 additions and 93 deletions

View File

@@ -93,20 +93,4 @@
     wantedBy = [ "multi-user.target" ];
     serviceConfig.ExecStart = script;
   };
-  # Only allow SSH connections from users who have a SLURM allocation
-  # See: https://slurm.schedmd.com/pam_slurm_adopt.html
-  security.pam.services.sshd.rules.account.slurm = {
-    control = "required";
-    enable = true;
-    modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
-    args = [ "log_level=debug5" ];
-    order = 999999; # Make it the last one
-  };
-  # Disable the systemd session (pam_systemd.so), as it conflicts with the
-  # pam_slurm_adopt.so module: the shell is first adopted into the slurmstepd
-  # task and then into the systemd session, so it would linger even after all
-  # jobs are gone.
-  security.pam.services.sshd.startSession = lib.mkForce false;
 }

View File

@@ -1,4 +1,4 @@
-{ lib, ... }:
+{ lib, pkgs, ... }:
 {
   imports = [
@@ -21,4 +21,20 @@
   };
   services.slurm.client.enable = true;
+  # Only allow SSH connections from users who have a SLURM allocation
+  # See: https://slurm.schedmd.com/pam_slurm_adopt.html
+  security.pam.services.sshd.rules.account.slurm = {
+    control = "required";
+    enable = true;
+    modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
+    args = [ "log_level=debug5" ];
+    order = 999999; # Make it the last one
+  };
+  # Disable the systemd session (pam_systemd.so), as it conflicts with the
+  # pam_slurm_adopt.so module: the shell is first adopted into the slurmstepd
+  # task and then into the systemd session, so it would linger even after all
+  # jobs are gone.
+  security.pam.services.sshd.startSession = lib.mkForce false;
 }
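
The `log_level=debug5` argument is handy while validating the setup but very verbose. A minimal sketch (not part of this change), assuming the option names documented for pam_slurm_adopt, of a quieter production variant of the rule added above:

# Sketch only: quieter variant of the rule above, once debugging is done.
# action_no_jobs=deny is the documented default, spelled out for clarity.
security.pam.services.sshd.rules.account.slurm = {
  control = "required";
  enable = true;
  modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
  args = [ "log_level=info" "action_no_jobs=deny" ];
  order = 999999; # Make it the last one
};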

View File

@@ -1,31 +1,6 @@
 { config, pkgs, ... }:
-let
-  suspendProgram = pkgs.writeShellScript "suspend.sh" ''
-    exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
-    set -x
-    export "PATH=/run/current-system/sw/bin:$PATH"
-    echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
-    hosts=$(scontrol show hostnames $1)
-    for host in $hosts; do
-      echo Shutting down host: $host
-      ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power off
-    done
-  '';
-  resumeProgram = pkgs.writeShellScript "resume.sh" ''
-    exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
-    set -x
-    export "PATH=/run/current-system/sw/bin:$PATH"
-    echo "$(date) Resume invoked $0 $*" >> /var/log/power_save.log
-    hosts=$(scontrol show hostnames $1)
-    for host in $hosts; do
-      echo Starting host: $host
-      ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power on
-    done
-  '';
-in {
+{
   services.slurm = {
     controlMachine = "apex";
     clusterName = "jungle";
@@ -59,16 +34,6 @@ in {
     # the resources. Use the task/cgroup plugin to enable process containment.
     TaskPlugin=task/affinity,task/cgroup
-    # Power off unused nodes until they are requested
-    SuspendProgram=${suspendProgram}
-    SuspendTimeout=60
-    ResumeProgram=${resumeProgram}
-    ResumeTimeout=300
-    SuspendExcNodes=fox
-    # Turn the nodes off after 1 hour of inactivity
-    SuspendTime=3600
     # Reduce port range so we can allow only this range in the firewall
     SrunPortRange=60000-61000
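
The commit message leaves the door open to re-enabling power saving for future nodes. A sketch under that assumption, keeping the suspend machinery but excluding the always-on nodes via `SuspendExcNodes` (Slurm hostlist syntax) instead of deleting the Suspend*/Resume* settings:

# Sketch only, not part of this change: hypothetical future variant that
# powers off idle nodes but never touches fox or the owl nodes. It assumes
# the suspendProgram/resumeProgram scripts removed above are restored.
services.slurm.extraConfig = ''
  SuspendProgram=${suspendProgram}
  SuspendTimeout=60
  ResumeProgram=${resumeProgram}
  ResumeTimeout=300
  # Keep the always-on nodes out of power saving
  SuspendExcNodes=fox,owl[1-2]
  # Turn the remaining nodes off after 1 hour of inactivity
  SuspendTime=3600
'';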

View File

@@ -101,14 +101,16 @@ let
   pkgsTopLevel = filterAttrs (_: isDerivation) bscPkgs;
   # Native build in that platform doesn't imply cross build works
-  canCrossCompile = platform: pkg:
+  canCrossCompile = platform: default: pkg:
     (isDerivation pkg) &&
-    # Must be defined explicitly
-    (pkg.meta.cross or false) &&
-    (meta.availableOn platform pkg);
+    # If meta.cross is undefined, use default
+    (pkg.meta.cross or default) &&
+    (meta.availableOn final.pkgsCross.${platform}.stdenv.hostPlatform pkg);
   # For now only RISC-V
-  crossSet = { riscv64 = final.pkgsCross.riscv64.bsc.pkgsTopLevel; };
+  crossSet = genAttrs [ "riscv64" ] (platform:
+    filterAttrs (_: canCrossCompile platform true)
+      final.pkgsCross.${platform}.bsc.pkgsTopLevel);
   buildList = name: paths:
     final.runCommandLocal name { } ''
@@ -128,7 +130,7 @@ let
   # For now only RISC-V
   crossList = buildList "ci-cross"
     (filter
-      (canCrossCompile final.pkgsCross.riscv64.stdenv.hostPlatform)
+      (canCrossCompile "riscv64" false) # opt-in (pkgs with: meta.cross = true)
      (builtins.attrValues crossSet.riscv64));
 in bscPkgs // {
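
After this change, the CI cross list is opt-in: `crossSet` keeps everything available on the platform (default `true`), while `crossList` only builds packages that set `meta.cross` explicitly (default `false`). A sketch of how a package would opt in; the package itself is hypothetical:

# Hypothetical package that opts in to the ci-cross build list.
stdenv.mkDerivation {
  pname = "example";
  version = "1.0";
  # src = ...;
  meta = {
    platforms = [ "x86_64-linux" "riscv64-linux" ];
    cross = true; # picked up by (canCrossCompile "riscv64" false) above
  };
}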

View File

@@ -90,7 +90,7 @@ in
   meta = {
     description = "Performance analysis tool-suite for x86 based applications";
     homepage = "https://www.amd.com/es/developer/uprof.html";
-    platforms = lib.platforms.linux;
+    platforms = [ "x86_64-linux" ];
     license = lib.licenses.unfree;
     maintainers = with lib.maintainers.bsc; [ rarias varcila ];
   };
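
This narrowing matters beyond documentation: `canCrossCompile` calls `meta.availableOn` with the cross host platform, so a package pinned to x86_64-linux now drops out of the riscv64 `crossSet` automatically. A quick `nix repl` style illustration, assuming only nixpkgs' `lib`; the fake package stands in for amd-uprof:

let
  lib = (import <nixpkgs> { }).lib;
  fakeUprof = { meta.platforms = [ "x86_64-linux" ]; }; # stand-in package
in {
  onX86     = lib.meta.availableOn (lib.systems.elaborate "x86_64-linux") fakeUprof;  # true
  onRiscv64 = lib.meta.availableOn (lib.systems.elaborate "riscv64-linux") fakeUprof; # false
}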

View File

@@ -1,5 +1,6 @@
 {
   stdenv
+, lib
 , cudatoolkit
 , cudaPackages
 , autoAddDriverRunpath
@@ -40,4 +41,9 @@ stdenv.mkDerivation (finalAttrs: {
     '';
     installPhase = "touch $out";
   };
+  meta = {
+    platforms = [ "x86_64-linux" ];
+    maintainers = with lib.maintainers.bsc; [ rarias ];
+  };
 })

View File

@@ -9,6 +9,7 @@
 , automake
 , libtool
 , mpi
+, rsync
 , gfortran
 }:
@@ -49,18 +50,7 @@ stdenv.mkDerivation rec {
     "CXXFLAGS=-fPIC"
   ];
-  nativeBuildInputs = [
-    autoconf
-    automake
-    gfortran
-    libtool
-  ];
-  buildInputs = [
-    slurm
-    mpiAll
-    rdma-core-all
-  ];
+  buildInputs = [ slurm mpiAll rdma-core-all autoconf automake libtool rsync gfortran ];
   hardeningDisable = [ "all" ];
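
One note for the ongoing cross-compilation work (a sketch, not part of this change): nixpkgs conventionally keeps tools that run at build time in `nativeBuildInputs`, so that cross builds take them from `buildPackages`; folding them into `buildInputs` works natively but can break cross builds.

# Conventional cross-friendly split (sketch):
nativeBuildInputs = [ autoconf automake libtool gfortran rsync ]; # run on the build machine
buildInputs = [ slurm mpiAll rdma-core-all ];                     # linked into the result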

View File

@@ -6,12 +6,6 @@
 , pmix
 , gfortran
 , symlinkJoin
-# Disabled when cross-compiling
-# To fix cross compilation, we should fill the values in:
-# https://github.com/pmodels/mpich/blob/main/maint/fcrosscompile/cross_values.txt.in
-# For each arch
-, enableFortran ? stdenv.hostPlatform == stdenv.buildPlatform
-, perl
 }:
 let
@@ -21,13 +15,10 @@ let
     paths = [ pmix.dev pmix.out ];
   };
 in mpich.overrideAttrs (old: {
   buildInputs = old.buildInputs ++ [
     libfabric
     pmixAll
   ];
-  nativeBuildInputs = old.nativeBuildInputs ++ [
-    perl
-  ];
   configureFlags = [
     "--enable-shared"
     "--enable-sharedlib"
@@ -40,18 +31,7 @@ in mpich.overrideAttrs (old: {
   ] ++ lib.optionals (lib.versionAtLeast gfortran.version "10") [
     "FFLAGS=-fallow-argument-mismatch" # https://github.com/pmodels/mpich/issues/4300
     "FCFLAGS=-fallow-argument-mismatch"
-  ] ++ lib.optionals (!enableFortran) [
-    "--disable-fortran"
   ];
-  preFixup = ''
-    # Ensure the default compilers are the ones mpich was built with
-    sed -i 's:CC="gcc":CC=${stdenv.cc}/bin/gcc:' $out/bin/mpicc
-    sed -i 's:CXX="g++":CXX=${stdenv.cc}/bin/g++:' $out/bin/mpicxx
-  '' + lib.optionalString enableFortran ''
-    sed -i 's:FC="gfortran":FC=${gfortran}/bin/gfortran:' $out/bin/mpifort
-  '';
   hardeningDisable = [ "all" ];
   meta = old.meta // {