Compare commits


5 Commits

SHA1 Message Date
00e7bafb6b Enable meta.cross for mpich related packages 2025-10-30 17:40:41 +01:00
d9f726b453 Disable meta.cross for gpi-2 and tagaspi 2025-10-30 17:40:41 +01:00
b323615370 Fix nativeBuildInputs for tagaspi 2025-10-30 17:40:41 +01:00
4edbeddef7 Fix nativeBuildInputs for gpi-2 2025-10-30 17:40:41 +01:00
3028dff971 Fix mpich cross compilation (disable fortran) 2025-10-30 17:40:40 +01:00
13 changed files with 297 additions and 160 deletions

View File

@@ -93,4 +93,20 @@
wantedBy = [ "multi-user.target" ];
serviceConfig.ExecStart = script;
};
# Only allow SSH connections from users who have a SLURM allocation
# See: https://slurm.schedmd.com/pam_slurm_adopt.html
security.pam.services.sshd.rules.account.slurm = {
control = "required";
enable = true;
modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
args = [ "log_level=debug5" ];
order = 999999; # Make it the last one
};
# Disable the systemd session (pam_systemd.so), as it conflicts with the
# pam_slurm_adopt.so module: the shell would first be adopted into the
# slurmstepd task and then moved into the systemd session, so it would
# linger even after all jobs are gone.
security.pam.services.sshd.startSession = lib.mkForce false;
}
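
For reference, with the NixOS PAM rules options this should render into /etc/pam.d/sshd roughly as the following account line (store path abbreviated), placed last because of the high order value:

    account required /nix/store/…-slurm-…/lib/security/pam_slurm_adopt.so log_level=debug5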

View File

@@ -1,4 +1,4 @@
{ lib, pkgs, ... }:
{ lib, ... }:
{
imports = [
@@ -21,20 +21,4 @@
};
services.slurm.client.enable = true;
# Only allow SSH connections from users who have a SLURM allocation
# See: https://slurm.schedmd.com/pam_slurm_adopt.html
security.pam.services.sshd.rules.account.slurm = {
control = "required";
enable = true;
modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
args = [ "log_level=debug5" ];
order = 999999; # Make it last one
};
# Disable systemd session (pam_systemd.so) as it will conflict with the
# pam_slurm_adopt.so module. What happens is that the shell is first adopted
# into the slurmstepd task and then into the systemd session, which is not
# what we want, otherwise it will linger even if all jobs are gone.
security.pam.services.sshd.startSession = lib.mkForce false;
}

View File

@@ -1,6 +1,31 @@
{ config, pkgs, ... }:
{
let
suspendProgram = pkgs.writeShellScript "suspend.sh" ''
exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
set -x
export "PATH=/run/current-system/sw/bin:$PATH"
echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
hosts=$(scontrol show hostnames $1)
for host in $hosts; do
echo Shutting down host: $host
ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power off
done
'';
resumeProgram = pkgs.writeShellScript "resume.sh" ''
exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
set -x
export "PATH=/run/current-system/sw/bin:$PATH"
echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
hosts=$(scontrol show hostnames $1)
for host in $hosts; do
echo Starting host: $host
ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power on
done
'';
in {
services.slurm = {
controlMachine = "apex";
clusterName = "jungle";
@@ -34,6 +59,16 @@
# the resources. Use the task/cgroup plugin to enable process containment.
TaskPlugin=task/affinity,task/cgroup
# Power off unused nodes until they are requested
SuspendProgram=${suspendProgram}
SuspendTimeout=60
ResumeProgram=${resumeProgram}
ResumeTimeout=300
SuspendExcNodes=fox
# Turn the nodes off after 1 hour of inactivity
SuspendTime=3600
# Reduce port range so we can allow only this range in the firewall
SrunPortRange=60000-61000
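
The SrunPortRange comment implies a matching firewall rule on the nodes. That rule is not part of this diff, but a minimal sketch using the standard NixOS firewall options would look like:

    # Hypothetical firewall counterpart (not in this diff): only the srun
    # port range needs to be reachable between controller and compute nodes.
    networking.firewall.allowedTCPPortRanges = [
      { from = 60000; to = 61000; }
    ];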

View File

@@ -1,8 +1,9 @@
final: /* Future last stage */
prev: /* Previous stage */
with final.lib;
let
lib = prev.lib;
callPackage = final.callPackage;
bscPkgs = {
@@ -35,7 +36,6 @@ let
mpich = callPackage ./pkgs/mpich/default.nix { mpich = prev.mpich; };
nanos6 = callPackage ./pkgs/nanos6/default.nix { };
nanos6Debug = final.nanos6.override { enableDebug = true; };
nix = callPackage ./pkgs/nix/default.nix { nix = prev.nix; };
nixtools = callPackage ./pkgs/nixtools/default.nix { };
nixgen = callPackage ./pkgs/nixgen/default.nix { };
# Broken because of pkgsStatic.libcap
@@ -97,23 +97,18 @@ let
};
};
# Load our custom lib functions with import, callPackage fails.
lib' = import ./pkgs/lib.nix { lib = prev.lib; };
# For now, only build toplevel packages in CI/Hydra
pkgsTopLevel = lib.filterAttrs (_: lib.isDerivation) bscPkgs;
pkgsTopLevel = filterAttrs (_: isDerivation) bscPkgs;
# Native build on that platform doesn't imply the cross build works
canCrossCompile = platform: default: pkg:
(lib.isDerivation pkg) &&
# If meta.cross is undefined, use default
(pkg.meta.cross or default) &&
(lib.meta.availableOn final.pkgsCross.${platform}.stdenv.hostPlatform pkg);
canCrossCompile = platform: pkg:
(isDerivation pkg) &&
# Must be defined explicitly
(pkg.meta.cross or false) &&
(meta.availableOn platform pkg);
# For now only RISC-V
crossSet = lib.genAttrs [ "riscv64" ] (platform:
lib.filterAttrs (_: canCrossCompile platform true)
final.pkgsCross.${platform}.bsc.pkgsTopLevel);
crossSet = { riscv64 = final.pkgsCross.riscv64.bsc.pkgsTopLevel; };
buildList = name: paths:
final.runCommandLocal name { } ''
@@ -128,17 +123,21 @@ let
'';
pkgsList = buildList "ci-pkgs" (builtins.attrValues pkgsTopLevel);
testsList = buildList "ci-tests" (lib.collect lib.isDerivation tests);
testsList = buildList "ci-tests" (collect isDerivation tests);
allList = buildList' "ci-all" [ pkgsList testsList ];
# For now only RISC-V
crossList = buildList "ci-cross"
(lib.filter
(canCrossCompile "riscv64" false) # opt-in (pkgs with: meta.cross = true)
(filter
(canCrossCompile final.pkgsCross.riscv64.stdenv.hostPlatform)
(builtins.attrValues crossSet.riscv64));
in bscPkgs // {
lib = lib';
lib = prev.lib // {
maintainers = prev.lib.maintainers // {
bsc = import ./pkgs/maintainers.nix;
};
};
# Prevent accidental usage of bsc-ci attribute
bsc-ci = throw "the bsc-ci attribute is deprecated, use bsc.ci";
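
With this refactor, cross compilation is opt-in: a package only enters the RISC-V cross set when it explicitly sets meta.cross = true and is available on the target platform. A minimal sketch of what that looks like in a package expression (everything except meta.cross is a placeholder):

    # Hypothetical package opting in to the ci-cross build:
    stdenv.mkDerivation {
      pname = "example";
      version = "1.0";
      src = ./.;
      meta = {
        platforms = lib.platforms.linux; # must also be available on riscv64
        cross = true;                    # picked up by canCrossCompile
      };
    }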

View File

@@ -90,7 +90,7 @@ in
meta = {
description = "Performance analysis tool-suite for x86 based applications";
homepage = "https://www.amd.com/es/developer/uprof.html";
platforms = [ "x86_64-linux" ];
platforms = lib.platforms.linux;
license = lib.licenses.unfree;
maintainers = with lib.maintainers.bsc; [ rarias varcila ];
};

View File

@@ -14,19 +14,16 @@
, openblas
, ovni
, gitBranch ? "master"
, gitURL ? "ssh://git@bscpm04.bsc.es/rarias/bench6.git"
, gitCommit ? "bf29a53113737c3aa74d2fe3d55f59868faea7b4"
, gitUrls ? [
"ssh://git@bscpm04.bsc.es/rarias/bench6.git"
"https://github.com/rodarima/bench6.git"
]
}:
stdenv.mkDerivation rec {
pname = "bench6";
version = "${src.shortRev}";
src = lib.fetchGitMirror {
urls = gitUrls;
src = builtins.fetchGit {
url = gitURL;
ref = gitBranch;
rev = gitCommit;
};

View File

@@ -1,6 +1,5 @@
{
stdenv
, lib
, cudatoolkit
, cudaPackages
, autoAddDriverRunpath
@@ -41,9 +40,4 @@ stdenv.mkDerivation (finalAttrs: {
'';
installPhase = "touch $out";
};
meta = {
platforms = [ "x86_64-linux" ];
maintainers = with lib.maintainers.bsc; [ rarias ];
};
})

View File

@@ -43,7 +43,7 @@ stdenv.mkDerivation rec {
configureFlags = [
"--with-infiniband=${rdma-core-all}"
"--with-mpi=yes" # fixes mpi detection when cross-compiling
"--with-mpi=yes"
"--with-slurm"
"CFLAGS=-fPIC"
"CXXFLAGS=-fPIC"
@@ -70,6 +70,6 @@ stdenv.mkDerivation rec {
maintainers = with lib.maintainers.bsc; [ rarias ];
platforms = lib.platforms.linux;
license = lib.licenses.gpl3Plus;
cross = false; # infiniband detection does not work
cross = false;
};
}
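
Since meta.cross now defaults to false, the explicit flag mainly documents why gpi-2 stays out of the cross set. If the InfiniBand detection gets fixed later, the flag could be flipped back from an overlay without patching the package; a sketch:

    # Hypothetical override re-enabling the cross build for gpi-2:
    gpi-2.overrideAttrs (old: {
      meta = old.meta // { cross = true; };
    })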

View File

@@ -1,30 +0,0 @@
{ lib }:
let
# If not supported, fall back to tryEval, which will fail in the first case.
safeCatchAll = if (builtins ? catchAll)
then builtins.catchAll
else e: (builtins.tryEval e) // { msg = ""; };
in lib.extend (_: lib: {
# Same as fetchGit but accepts a list of mirror urls
fetchGitMirror = { urls, ... } @ args:
let
cleanArgs = lib.removeAttrs args [ "urls" ];
fetchUrl = url: builtins.fetchGit (cleanArgs // { inherit url; });
safeFetch = url: safeCatchAll (fetchUrl url);
complain = url:
let
r = safeFetch url;
in
if (r.success) then r
else lib.warn "cannot fetch ${url}, trying next mirror:${builtins.replaceStrings ["\n" ] ["\n> "] ("\n"+r.msg)}" r;
fetchList = lib.map (url: complain url) urls;
bad = throw "cannot fetch from any mirror";
good = lib.findFirst (e: e.success) bad fetchList;
in good.value;
maintainers = lib.maintainers // {
bsc = import ./maintainers.nix;
};
})

View File

@@ -12,7 +12,6 @@
# For each arch
, enableFortran ? stdenv.hostPlatform == stdenv.buildPlatform
, perl
, targetPackages
}:
let
@@ -46,10 +45,10 @@ in mpich.overrideAttrs (old: {
];
preFixup = ''
sed -i 's:^CC=.*:CC=${targetPackages.stdenv.cc}/bin/${targetPackages.stdenv.cc.targetPrefix}cc:' $out/bin/mpicc
sed -i 's:^CXX=.*:CXX=${targetPackages.stdenv.cc}/bin/${targetPackages.stdenv.cc.targetPrefix}c++:' $out/bin/mpicxx
sed -i "s:^CC=.*:CC=''${CC:-gcc}:" $out/bin/mpicc
sed -i "s:^CXX=.*:CXX=''${CXX:-g++}:" $out/bin/mpicxx
'' + lib.optionalString enableFortran ''
sed -i 's:^FC=.*:FC=${targetPackages.gfortran or gfortran}/bin/${targetPackages.gfortran.targetPrefix or gfortran.targetPrefix}gfortran:' $out/bin/mpifort
sed -i "s:^FC=.*:FC=''${FC:-gfortran}:" $out/bin/mpifort
'';
hardeningDisable = [ "all" ];
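
With the hard-coded compiler paths gone, the installed mpicc/mpicxx wrappers fall back to whatever CC/CXX the environment provides (or gcc/g++), and the Fortran wrapper is only set up for native builds. A consumer that still needs mpifort could flip the flag back; a sketch, assuming the wrapper above is reachable as mpich and mpichWithFortran is a made-up name:

    # Hypothetical override forcing the Fortran wrappers even when cross-compiling:
    mpichWithFortran = mpich.override { enableFortran = true; };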

View File

@@ -1,64 +0,0 @@
From 3aa73c21e3afc91522a6121b0d591af6925b4ba6 Mon Sep 17 00:00:00 2001
From: Rodrigo Arias Mallo <rodarima@gmail.com>
Date: Mon, 13 Oct 2025 16:05:30 +0200
Subject: [PATCH] Add builtins.catchAll to catch all types of errors
Allows fetching multiple Git repositories with builtin.fetchGit and
catching any errors thrown by the builtin, in opposition to the builtin
tryEval.
---
src/libexpr/primops.cc | 38 ++++++++++++++++++++++++++++++++++++++
1 file changed, 38 insertions(+)
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 36a67a39d..3b26f9f43 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -849,6 +849,44 @@ static RegisterPrimOp primop_tryEval({
.fun = prim_tryEval,
});
+/* Like tryEval but catch all errors. Success => {success=true; value=something;},
+ * else => {success=false; value=false;} */
+static void prim_catchAll(EvalState & state, const PosIdx pos, Value * * args, Value & v)
+{
+ auto attrs = state.buildBindings(3);
+ try {
+ state.forceValue(*args[0], pos);
+ attrs.insert(state.sValue, args[0]);
+ attrs.alloc("success").mkBool(true);
+ attrs.alloc("msg").mkNull();
+ } catch (Error & e) {
+ attrs.alloc(state.sValue).mkBool(false);
+ attrs.alloc("success").mkBool(false);
+ attrs.alloc("msg").mkString(e.msg());
+ }
+ v.mkAttrs(attrs);
+}
+
+static RegisterPrimOp primop_catchAll({
+ .name = "__catchAll",
+ .args = {"e"},
+ .doc = R"(
+ Try to shallowly evaluate *e*. Return a set containing the
+ attributes `success` (`true` if *e* evaluated successfully,
+ `false` if an error was thrown) and `value`, equalling *e* if
+ successful and `false` otherwise. In contrast with `tryEval`,
+ `catchAll` will prevent all errors from being thrown, including
+ for those created by `abort` and type errors generated by
+ builtins. Also note that this doesn't evaluate *e* deeply, so
+ `let e = { x = throw ""; }; in (builtins.catchAll e).success`
+ will be `true`. Using `builtins.deepSeq` one can get the expected
+ result: `let e = { x = throw ""; }; in
+ (builtins.catchAll (builtins.deepSeq e e)).success` will be
+ `false`.
+ )",
+ .fun = prim_catchAll,
+});
+
/* Return an environment variable. Use with care. */
static void prim_getEnv(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
--
2.51.0

View File

@@ -1,7 +1,219 @@
{ nix }:
{ lib, fetchurl, fetchFromGitHub, callPackage
, storeDir ? "/nix/store"
, stateDir ? "/nix/var"
, confDir ? "/etc"
, boehmgc
, stdenv, llvmPackages_6
}:
nix.overrideAttrs (old: {
patches = (old.patches or []) ++ [
./add-catchAll.patch
];
})
let
common =
{ lib, stdenv, fetchpatch, perl, curl, bzip2, sqlite, openssl ? null, xz
, bash, coreutils, gzip, gnutar
, pkg-config, boehmgc, perlPackages, libsodium, brotli, boost, editline, nlohmann_json
, autoreconfHook, autoconf-archive, bison, flex, libxml2, libxslt, docbook5, docbook_xsl_ns
, jq, libarchive, rustc, cargo
# Used by tests
, gmock
, busybox-sandbox-shell
, storeDir
, stateDir
, confDir
, withLibseccomp ? lib.any (lib.meta.platformMatch stdenv.hostPlatform) libseccomp.meta.platforms, libseccomp
, withAWS ? stdenv.isLinux || stdenv.isDarwin, aws-sdk-cpp
, name, suffix ? "", src, crates ? null
}:
let
sh = busybox-sandbox-shell;
nix = stdenv.mkDerivation rec {
inherit name src;
version = lib.getVersion name;
is24 = lib.versionAtLeast version "2.4pre";
isExactly23 = lib.versionAtLeast version "2.3" && lib.versionOlder version "2.4";
VERSION_SUFFIX = suffix;
outputs = [ "out" "dev" "man" "doc" ];
nativeBuildInputs =
[ pkg-config ]
++ lib.optionals is24 [ autoreconfHook autoconf-archive bison flex libxml2 libxslt
docbook5 docbook_xsl_ns jq gmock ];
buildInputs =
[ curl openssl sqlite xz bzip2 nlohmann_json
brotli boost editline
]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
++ lib.optionals is24 [ libarchive rustc cargo ]
++ lib.optional withLibseccomp libseccomp
++ lib.optional withAWS
((aws-sdk-cpp.override {
apis = ["s3" "transfer"];
customMemoryManagement = false;
}).overrideDerivation (args: {
patches = args.patches or [] ++ [(fetchpatch {
url = "https://github.com/edolstra/aws-sdk-cpp/commit/7d58e303159b2fb343af9a1ec4512238efa147c7.patch";
sha256 = "103phn6kyvs1yc7fibyin3lgxz699qakhw671kl207484im55id1";
})];
}));
propagatedBuildInputs = [ boehmgc ];
# Seems to be required when using std::atomic with 64-bit types
NIX_LDFLAGS = lib.optionalString (stdenv.hostPlatform.system == "armv5tel-linux" || stdenv.hostPlatform.system == "armv6l-linux") "-latomic";
preConfigure =
# Copy libboost_context so we don't get all of Boost in our closure.
# https://github.com/NixOS/nixpkgs/issues/45462
''
mkdir -p $out/lib
cp -pd ${boost}/lib/{libboost_context*,libboost_thread*,libboost_system*} $out/lib
rm -f $out/lib/*.a
${lib.optionalString stdenv.isLinux ''
chmod u+w $out/lib/*.so.*
patchelf --set-rpath $out/lib:${stdenv.cc.cc.lib}/lib $out/lib/libboost_thread.so.*
''}
'' +
# Unpack the Rust crates.
lib.optionalString is24 ''
tar xvf ${crates} -C nix-rust/
mv nix-rust/nix-vendored-crates* nix-rust/vendor
'' +
# For Nix-2.3, patch around an issue where the Nix configure step pulls in the
# build system's bash and other utilities when cross-compiling
lib.optionalString (stdenv.buildPlatform != stdenv.hostPlatform && isExactly23) ''
mkdir tmp/
substitute corepkgs/config.nix.in tmp/config.nix.in \
--subst-var-by bash ${bash}/bin/bash \
--subst-var-by coreutils ${coreutils}/bin \
--subst-var-by bzip2 ${bzip2}/bin/bzip2 \
--subst-var-by gzip ${gzip}/bin/gzip \
--subst-var-by xz ${xz}/bin/xz \
--subst-var-by tar ${gnutar}/bin/tar \
--subst-var-by tr ${coreutils}/bin/tr
mv tmp/config.nix.in corepkgs/config.nix.in
'';
configureFlags =
[ "--with-store-dir=${storeDir}"
"--localstatedir=${stateDir}"
"--sysconfdir=${confDir}"
"--disable-init-state"
"--enable-gc"
]
++ lib.optionals stdenv.isLinux [
"--with-sandbox-shell=${sh}/bin/busybox"
]
++ lib.optional (
stdenv.hostPlatform != stdenv.buildPlatform && stdenv.hostPlatform ? nix && stdenv.hostPlatform.nix ? system
) ''--with-system=${stdenv.hostPlatform.nix.system}''
# RISC-V support in progress https://github.com/seccomp/libseccomp/pull/50
++ lib.optional (!withLibseccomp) "--disable-seccomp-sandboxing";
makeFlags = [ "profiledir=$(out)/etc/profile.d" ];
installFlags = [ "sysconfdir=$(out)/etc" ];
doInstallCheck = false;
# socket path becomes too long otherwise
#preInstallCheck = lib.optional stdenv.isDarwin ''
# export TMPDIR=$NIX_BUILD_TOP
#'';
separateDebugInfo = stdenv.isLinux;
enableParallelBuilding = true;
meta = {
description = "Powerful package manager that makes package management reliable and reproducible";
longDescription = ''
Nix is a powerful package manager for Linux and other Unix systems that
makes package management reliable and reproducible. It provides atomic
upgrades and rollbacks, side-by-side installation of multiple versions of
a package, multi-user package management and easy setup of build
environments.
'';
homepage = "https://nixos.org/";
license = lib.licenses.lgpl2Plus;
maintainers = [ lib.maintainers.eelco ];
platforms = lib.platforms.unix;
outputsToInstall = [ "out" "man" ];
};
passthru = {
perl-bindings = stdenv.mkDerivation {
pname = "nix-perl";
inherit version;
inherit src;
postUnpack = "sourceRoot=$sourceRoot/perl";
# This is not cross-compile safe, don't have time to fix right now
# but noting for future travellers.
nativeBuildInputs =
[ perl pkg-config curl nix libsodium boost autoreconfHook autoconf-archive ];
configureFlags =
[ "--with-dbi=${perlPackages.DBI}/${perl.libPrefix}"
"--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}"
];
preConfigure = "export NIX_STATE_DIR=$TMPDIR";
preBuild = "unset NIX_INDENT_MAKE";
};
};
};
in nix;
in rec {
nix = nixUnstable;
nixUnstable = lib.lowPrio (callPackage common rec {
name = "nix-2.4${suffix}";
suffix = "pre7534_b92f58f6";
#src = /home/Computational/rarias/nix/nix-rodarima;
src = fetchFromGitHub {
owner = "rodarima";
repo = "nix";
rev = "3a642187c33ed46d952d3a50a83b2576b704fab7";
sha256 = "0s8is2czpkcj1x1kcjqgbnsbbl03w3fwjjiclsd44zh1ij3wb90s";
};
crates = fetchurl {
url = "https://hydra.nixos.org/build/118797694/download/1/nix-vendored-crates-2.4pre7534_b92f58f6.tar.xz";
sha256 = "a4c2612bbd81732bbb899bc0c230e07b16f6b6150ffbb19c4907dedbbc2bf9fc";
};
inherit storeDir stateDir confDir boehmgc;
});
nixFlakes = lib.lowPrio (callPackage common rec {
name = "nix-2.4${suffix}";
suffix = "pre20200521_00b562c";
src = fetchFromGitHub {
owner = "NixOS";
repo = "nix";
rev = "00b562c87ec4c3bbe514f5dc1f4d1c41f66f66bf";
sha256 = "0s8is2czpkcj1x1kcjqgbnsbbl03w3fwjjiclsd44zh1ij3wb90s";
};
crates = fetchurl {
url = "https://hydra.nixos.org/build/118093786/download/1/nix-vendored-crates-2.4pre20200501_941f952.tar.xz";
sha256 = "060f4n5srdbb8vsj0m14aqch7im79a4h5g3nrs41p1xc602vhcdl";
};
inherit storeDir stateDir confDir boehmgc;
});
}

View File

@@ -32,11 +32,6 @@ stdenv.mkDerivation rec {
"CXX=mpicxx"
];
env = {
MPICH_CC="${stdenv.cc}/bin/${stdenv.cc.targetPrefix}cc";
MPICH_CXX="${stdenv.cc}/bin/${stdenv.cc.targetPrefix}c++";
};
postInstall = ''
mkdir -p $out/bin
for f in $(find $out -executable -type f); do