Compare commits

...

49 Commits

Author SHA1 Message Date
16a8f84727 Refactor intel wrapper logic into single function 2025-12-10 17:36:06 +01:00
92d12c5f69 Properly find include path in patch intel wrapper 2025-12-10 17:26:19 +01:00
01d7133a5d Use nixSupport instead of extraBuildCommands 2025-12-10 17:09:04 +01:00
8f0f327683 Use packages.json for intel packages instead of awk script 2025-12-10 17:09:04 +01:00
97ffc95f8d Add passthru in ompss2 wrapper for icpx compat 2025-12-10 17:09:04 +01:00
3b4202f3f5 Remove wrapper flags when clang called from intel 2025-12-10 17:09:04 +01:00
cdc4e7f8fd Add oneMath 2025-12-10 17:09:03 +01:00
9436e3c604 Fix parsing of new apt package list for oneapi 2023
The new apt package list does not have Package: as the first field of every entry
2025-12-10 17:09:03 +01:00
941a13357c Add TASYCL 2.1.0 2025-12-10 17:09:03 +01:00
d87c4ee985 Add test for icpx with ompss-2 as host compiler 2025-12-10 17:09:03 +01:00
f94e0b05c1 Add SYCL test compilation 2025-12-10 17:09:03 +01:00
b45eed9aef Add intelPackages_202{4,5} and make 2025 the default 2025-12-10 17:09:03 +01:00
ee9af71da0 Remove conflicting definitions in amd-uprof-driver
See: https://lkml.org/lkml/2025/4/9/1709
2025-12-10 14:34:53 +01:00
1d3bda33a0 Mark mcxx as broken and remove from package list 2025-12-03 10:15:17 +01:00
87bf095dae Fix moved package: linuxPackages.perf is now perf 2025-12-03 10:15:17 +01:00
2264e15102 Fix replaced nixseparatedebuginfod
nixseparatedebuginfod has been replaced by nixseparatedebuginfod2
2025-12-03 10:15:17 +01:00
209f8a582e Use standard gcc for intel packages
This reverts 26f52aa27d
2025-12-03 10:15:16 +01:00
1457d85f4c Fix renamed option watchdog.runtimeTime
The option 'systemd.watchdog.runtimeTime' has been renamed to
'systemd.settings.Manager.RuntimeWatchdogSec'.
2025-12-02 17:53:14 +01:00
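For reference, a minimal sketch of the rename as it appears in the watchdog module further down this diff:
    # before (NixOS 25.05)
    systemd.watchdog.runtimeTime = "30s";
    # after (NixOS 25.11, freeform systemd [Manager] settings)
    systemd.settings.Manager.RuntimeWatchdogSec = 30;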
ad812ea32d Replace wrapGAppsHook with wrapGAppsHook3 2025-12-02 17:53:13 +01:00
5bc928c407 Fix changed cudaPackages.cuda_cudart output
See: https://github.com/NixOS/nixpkgs/pull/437723
2025-12-02 17:53:13 +01:00
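The corresponding change in the cudainfo diff below swaps an attribute access for an explicit output selector; a brief sketch:
    # before: the static library was exposed as a plain attribute
    buildInputs = [ cudaPackages.cuda_cudart.static ];
    # after: it is a separate output, selected explicitly
    buildInputs = [ (lib.getOutput "static" cudaPackages.cuda_cudart) ];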
eb9358abab Set pyproject=true in buildPythonApplication
The buildPythonPackage and buildPythonApplication functions now
require an explicit format attribute. Previously the default format
used setuptools and called setup.py from the source tree, which is
deprecated. The modern alternative is to configure pyproject = true
with build-system = [ setuptools ].
2025-12-02 17:53:13 +01:00
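A minimal sketch of the new convention, with a hypothetical package name; only the pyproject and build-system attributes are the point here:
    python3Packages.buildPythonApplication {
      pname = "example-tool";  # hypothetical
      version = "0.1.0";
      src = ./.;
      # Build through pyproject.toml instead of calling setup.py directly
      pyproject = true;
      build-system = [ python3Packages.setuptools ];
    }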
d2025d35d9 Fix renamed llvm bintools
Moved from llvmPackages_latest.tools.bintools to
llvmPackages_latest.bintools
2025-12-02 17:53:13 +01:00
6e089344da Upgrade nixpkgs to 25.11 2025-12-02 17:53:13 +01:00
a173af654f Fix osu cross-compilation
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 16:23:46 +01:00
2fff7e4a7b Set mpich default compilers from targetPackages
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 16:23:46 +01:00
a761b73336 Enable meta.cross for mpich related packages
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 16:23:46 +01:00
86eb796771 Disable meta.cross for gpi-2 and tagaspi
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 16:23:46 +01:00
08633435cf Fix nativeBuildInputs for tagaspi
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 16:23:46 +01:00
39d64456a4 Fix nativeBuildInputs for gpi-2
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 16:23:46 +01:00
410040a4a0 Fix mpich cross compilation (disable fortran)
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 16:23:46 +01:00
fc69ef3217 Enable pam_slurm_adopt in all compute nodes
This also prevents access to owl1 and owl2 if the user doesn't have any
jobs running there.

Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-31 11:41:50 +01:00
1d025f7a38 Don't suspend owl compute nodes
Currently the owl nodes sit at the top of the rack, and turning them
off causes a high temperature increase in that region, since it
accumulates heat from the whole rack. To maximize airflow we will leave
them on at all times. This also makes allocations immediate, at an
extra cost of around 200 W.

In the future, if we include more nodes in SLURM we can configure those
to turn off if needed.

Fixes: rarias/jungle#156
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-31 11:41:44 +01:00
7989779c8f Filter out packages by platform from crossSet
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
Tested-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:21:13 +01:00
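Cross builds are opt-in through a custom meta.cross attribute that canCrossCompile checks in the overlay (see the overlay diff below); a sketch of how a package opts in or out:
    meta = {
      platforms = lib.platforms.linux;
      cross = true;    # include in the riscv64 cross CI list
      # cross = false; # or opt out, e.g. when a configure check breaks under cross
    };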
7d721084a7 Add meta to cudainfo
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:20:01 +01:00
796d34a549 Set amd-uprof platforms to x86_64-linux only
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-31 11:19:57 +01:00
5ff1b1343b Add nixgen to all machines
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-29 16:28:05 +01:00
c5cc13fad8 Add nixgen package
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-29 16:27:56 +01:00
2e09314a7e Update OmpSs-2 LLVM to 2025.11
Reviewed-by: Aleix Boné <abonerib@bsc.es>
Tested-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-29 16:22:57 +01:00
217d9c1fc0 Update NODES to 1.4
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-29 16:21:46 +01:00
f47ab7757e Update nOS-V to 4.0.0
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-29 16:21:43 +01:00
4b265c071e Update ovni to 1.13.0
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-29 16:21:32 +01:00
019826d09e Add OmpSs-2 release timers and services
Send a reminder email to the STAR group to mark the release cycle dates.

Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-28 12:38:37 +01:00
a294daf7e3 Use specific mail-robot group to send mail
Allows any user to send mail from the robot account as long as they are
added to the mail-robot group.

Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-28 12:38:17 +01:00
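The diff below adds the gitea user to that group; a sketch of granting the same access to another account (the user name is hypothetical):
    users.users.another-service.extraGroups = [ "mail-robot" ];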
a7018250ca Add missing slurm package to overlay
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-28 11:44:40 +01:00
e3d1785285 Run a shell in the allocated node with salloc
By default, salloc will open a new shell in the *current* node instead
of in the allocated node. This often causes users to leave the extra
shell running once the allocation ends. Repeating this process several
times causes chains of shells.

By running the shell in the remote node, once the allocation ends the
shell finishes as well.

Fixes: rarias/jungle#174
See: https://slurm.schedmd.com/faq.html#prompt
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-28 11:44:14 +01:00
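Per the linked FAQ this behaviour comes from the use_interactive_step launch parameter, which this repo appears to set through services.slurm.extraConfig (visible in the slurm diff below); a minimal sketch:
    services.slurm.extraConfig = ''
      # Run the salloc shell as an interactive step on the allocated node
      LaunchParameters=use_interactive_step
    '';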
ab86243a07 Add missing which in nodes checkPhase
When enabling checks, the build log is polluted with errors.

Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
Tested-by: Aleix Boné <abonerib@bsc.es>
2025-10-23 15:59:21 +02:00
14f2393d30 Update website
Add the apex page and replace bscpkgs references with jungle after the merge.

See: rarias/jungle-website#1
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-22 15:48:13 +02:00
f115d611e7 Add aaguirre user
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-22 15:28:29 +02:00
4261d327c6 Include agenix module and package directly
Avoids adding an extra flake input only to fetch a single module and
package.

Reviewed-by: Aleix Boné <abonerib@bsc.es>
Tested-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-10-14 09:37:47 +02:00
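The module and package are now taken directly from the tree instead of through a flake input; a sketch of the consumer side as it appears in the host configuration diff below:
    { pkgs, ... }:
    {
      imports = [ ../../module/agenix.nix ];
      environment.systemPackages = [ pkgs.agenix ];
    }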
65 changed files with 2066 additions and 43112 deletions

90
flake.lock generated
View File

@@ -1,107 +1,25 @@
{
"nodes": {
"agenix": {
"inputs": {
"darwin": "darwin",
"home-manager": "home-manager",
"nixpkgs": [
"nixpkgs"
],
"systems": "systems"
},
"locked": {
"lastModified": 1750173260,
"narHash": "sha256-9P1FziAwl5+3edkfFcr5HeGtQUtrSdk/MksX39GieoA=",
"owner": "ryantm",
"repo": "agenix",
"rev": "531beac616433bac6f9e2a19feb8e99a22a66baf",
"type": "github"
},
"original": {
"owner": "ryantm",
"repo": "agenix",
"type": "github"
}
},
"darwin": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1744478979,
"narHash": "sha256-dyN+teG9G82G+m+PX/aSAagkC+vUv0SgUw3XkPhQodQ=",
"owner": "lnl7",
"repo": "nix-darwin",
"rev": "43975d782b418ebf4969e9ccba82466728c2851b",
"type": "github"
},
"original": {
"owner": "lnl7",
"ref": "master",
"repo": "nix-darwin",
"type": "github"
}
},
"home-manager": {
"inputs": {
"nixpkgs": [
"agenix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1745494811,
"narHash": "sha256-YZCh2o9Ua1n9uCvrvi5pRxtuVNml8X2a03qIFfRKpFs=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "abfad3d2958c9e6300a883bd443512c55dfeb1be",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "home-manager",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1752436162,
"narHash": "sha256-Kt1UIPi7kZqkSc5HVj6UY5YLHHEzPBkgpNUByuyxtlw=",
"lastModified": 1764522689,
"narHash": "sha256-SqUuBFjhl/kpDiVaKLQBoD8TLD+/cTUzzgVFoaHrkqY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "dfcd5b901dbab46c9c6e80b265648481aafb01f8",
"rev": "8bb5646e0bed5dbd3ab08c7a7cc15b75ab4e1d0f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05",
"ref": "nixos-25.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"agenix": "agenix",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

View File

@@ -1,15 +1,13 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
agenix.url = "github:ryantm/agenix";
agenix.inputs.nixpkgs.follows = "nixpkgs";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
};
outputs = { self, nixpkgs, agenix, ... }:
outputs = { self, nixpkgs, ... }:
let
mkConf = name: nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
specialArgs = { inherit nixpkgs agenix; theFlake = self; };
specialArgs = { inherit nixpkgs; theFlake = self; };
modules = [ "${self.outPath}/m/${name}/configuration.nix" ];
};
# For now we only support x86

View File

@@ -1,9 +1,8 @@
{ agenix, ... }:
{ pkgs, ... }:
{
imports = [ agenix.nixosModules.default ];
imports = [ ../../module/agenix.nix ];
environment.systemPackages = [
agenix.packages.x86_64-linux.default
];
# Add agenix to system packages
environment.systemPackages = [ pkgs.agenix ];
}

View File

@@ -1,12 +1,12 @@
{ pkgs, config, ... }:
{ pkgs, ... }:
{
environment.systemPackages = with pkgs; [
vim wget git htop tmux pciutils tcpdump ripgrep nix-index nixos-option
nix-diff ipmitool freeipmi ethtool lm_sensors cmake gnumake file tree
ncdu config.boot.kernelPackages.perf ldns pv
# From bsckgs overlay
osumb
ncdu perf ldns pv
# From jungle overlay
osumb nixgen
];
programs.direnv.enable = true;

View File

@@ -180,6 +180,19 @@
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFmMqKqPg4uocNOr3O41kLbZMOMJn3m2ZdN1JvTR96z3 bsccns@arnau-bsc"
];
};
aaguirre = {
uid = 9655;
isNormalUser = true;
home = "/home/Computational/aaguirre";
description = "Alejandro Aguirre";
group = "Computational";
hosts = [ "apex" "hut" ];
hashedPassword = "$6$TXRXQT6jjBvxkxU6$E.sh5KspAm1qeG5Ct7OPHpo8REmbGDwjFGvqeGgTVz3GASGOAnPL7UMZsMAsAKBoahOw.v8LNno6XGrTEPzZH1";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOlRX7ZCnqtUJYCxKgWmgSrFCYuA2LHY96rVwqxXPl86 aaguirre@BSC-8488184117"
];
};
};
groups = {

View File

@@ -5,5 +5,5 @@
boot.kernelModules = [ "ipmi_watchdog" ];
# Enable systemd watchdog with 30 s interval
systemd.watchdog.runtimeTime = "30s";
systemd.settings.Manager.RuntimeWatchdogSec = 30;
}

View File

@@ -93,20 +93,4 @@
wantedBy = [ "multi-user.target" ];
serviceConfig.ExecStart = script;
};
# Only allow SSH connections from users who have a SLURM allocation
# See: https://slurm.schedmd.com/pam_slurm_adopt.html
security.pam.services.sshd.rules.account.slurm = {
control = "required";
enable = true;
modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
args = [ "log_level=debug5" ];
order = 999999; # Make it last one
};
# Disable systemd session (pam_systemd.so) as it will conflict with the
# pam_slurm_adopt.so module. What happens is that the shell is first adopted
# into the slurmstepd task and then into the systemd session, which is not
# what we want, otherwise it will linger even if all jobs are gone.
security.pam.services.sshd.startSession = lib.mkForce false;
}

View File

@@ -17,6 +17,7 @@
./postgresql.nix
./nginx.nix
./p.nix
./ompss2-timer.nix
#./pxe.nix
];

View File

@@ -29,6 +29,9 @@
};
};
# Allow gitea user to send mail
users.users.gitea.extraGroups = [ "mail-robot" ];
services.gitea-actions-runner.instances = {
runrun = {
enable = true;

View File

@@ -1,8 +1,11 @@
{ config, lib, ... }:
{
# Robot user that can see the password to send mail from jungle-robot
users.groups.mail-robot = {};
age.secrets.jungleRobotPassword = {
file = ../../secrets/jungle-robot-password.age;
group = "gitea";
group = "mail-robot";
mode = "440";
};

View File

@@ -4,8 +4,8 @@ let
name = "jungle-web";
src = pkgs.fetchgit {
url = "https://jungle.bsc.es/git/rarias/jungle-website.git";
rev = "739bf0175a7f05380fe7ad7023ff1d60db1710e1";
hash = "sha256-ea5DzhYTzZ9TmqD+x95rdNdLbxPnBluqlYH2NmBYmc4=";
rev = "52abaf4d71652a9ef77a0b098db14ca33bffff4c";
hash = "sha256-/ul9GazbOrOkmlvSgDz/+2W+V+ir5725Y7mVLc3rb0M=";
};
buildInputs = [ pkgs.hugo ];
buildPhase = ''

85
m/hut/ompss2-timer.nix Normal file
View File

@@ -0,0 +1,85 @@
{ config, pkgs, ... }:
{
systemd.timers = {
"ompss2-closing" = {
wantedBy = [ "timers.target" ];
timerConfig = {
Unit = "ompss2-closing.service";
OnCalendar = [ "*-03-15 07:00:00" "*-09-15 07:00:00"];
};
};
"ompss2-freeze" = {
wantedBy = [ "timers.target" ];
timerConfig = {
Unit = "ompss2-freeze.service";
OnCalendar = [ "*-04-15 07:00:00" "*-10-15 07:00:00" ];
};
};
"ompss2-release" = {
wantedBy = [ "timers.target" ];
timerConfig = {
Unit = "ompss2-release.service";
OnCalendar = [ "*-05-15 07:00:00" "*-11-15 07:00:00" ];
};
};
};
systemd.services =
let
closing = pkgs.writeText "closing.txt"
''
Subject: OmpSs-2 release enters closing period
Hi,
You have one month to merge the remaining features for the next OmpSs-2
release. Please, identify what needs to be merged and discuss it in the next
OmpSs-2 meeting.
Thanks!,
Jungle robot
'';
freeze = pkgs.writeText "freeze.txt"
''
Subject: OmpSs-2 release enters freeze period
Hi,
The period to introduce new features or breaking changes is over, only bug
fixes are allowed now. During this time, please prepare the release notes
to be included in the next OmpSs-2 release.
Thanks!,
Jungle robot
'';
release = pkgs.writeText "release.txt"
''
Subject: OmpSs-2 release now
Hi,
The period to introduce bug fixes is now over. Please, proceed to do the
OmpSs-2 release.
Thanks!,
Jungle robot
'';
mkServ = name: mail: {
"ompss2-${name}" = {
script = ''
set -eu
set -o pipefail
cat ${mail} | ${config.security.wrapperDir}/sendmail star@bsc.es
'';
serviceConfig = {
Type = "oneshot";
DynamicUser = true;
Group = "mail-robot";
};
};
};
in
(mkServ "closing" closing) //
(mkServ "freeze" freeze) //
(mkServ "release" release);
}

357
m/module/agenix.nix Normal file
View File

@@ -0,0 +1,357 @@
{
config,
options,
lib,
pkgs,
...
}:
with lib;
let
cfg = config.age;
isDarwin = lib.attrsets.hasAttrByPath [ "environment" "darwinConfig" ] options;
ageBin = config.age.ageBin;
users = config.users.users;
sysusersEnabled =
if isDarwin then
false
else
options.systemd ? sysusers && (config.systemd.sysusers.enable || config.services.userborn.enable);
mountCommand =
if isDarwin then
''
if ! diskutil info "${cfg.secretsMountPoint}" &> /dev/null; then
num_sectors=1048576
dev=$(hdiutil attach -nomount ram://"$num_sectors" | sed 's/[[:space:]]*$//')
newfs_hfs -v agenix "$dev"
mount -t hfs -o nobrowse,nodev,nosuid,-m=0751 "$dev" "${cfg.secretsMountPoint}"
fi
''
else
''
grep -q "${cfg.secretsMountPoint} ramfs" /proc/mounts ||
mount -t ramfs none "${cfg.secretsMountPoint}" -o nodev,nosuid,mode=0751
'';
newGeneration = ''
_agenix_generation="$(basename "$(readlink ${cfg.secretsDir})" || echo 0)"
(( ++_agenix_generation ))
echo "[agenix] creating new generation in ${cfg.secretsMountPoint}/$_agenix_generation"
mkdir -p "${cfg.secretsMountPoint}"
chmod 0751 "${cfg.secretsMountPoint}"
${mountCommand}
mkdir -p "${cfg.secretsMountPoint}/$_agenix_generation"
chmod 0751 "${cfg.secretsMountPoint}/$_agenix_generation"
'';
chownGroup = if isDarwin then "admin" else "keys";
# chown the secrets mountpoint and the current generation to the keys group
# instead of leaving it root:root.
chownMountPoint = ''
chown :${chownGroup} "${cfg.secretsMountPoint}" "${cfg.secretsMountPoint}/$_agenix_generation"
'';
setTruePath = secretType: ''
${
if secretType.symlink then
''
_truePath="${cfg.secretsMountPoint}/$_agenix_generation/${secretType.name}"
''
else
''
_truePath="${secretType.path}"
''
}
'';
installSecret = secretType: ''
${setTruePath secretType}
echo "decrypting '${secretType.file}' to '$_truePath'..."
TMP_FILE="$_truePath.tmp"
IDENTITIES=()
for identity in ${toString cfg.identityPaths}; do
test -r "$identity" || continue
test -s "$identity" || continue
IDENTITIES+=(-i)
IDENTITIES+=("$identity")
done
test "''${#IDENTITIES[@]}" -eq 0 && echo "[agenix] WARNING: no readable identities found!"
mkdir -p "$(dirname "$_truePath")"
[ "${secretType.path}" != "${cfg.secretsDir}/${secretType.name}" ] && mkdir -p "$(dirname "${secretType.path}")"
(
umask u=r,g=,o=
test -f "${secretType.file}" || echo '[agenix] WARNING: encrypted file ${secretType.file} does not exist!'
test -d "$(dirname "$TMP_FILE")" || echo "[agenix] WARNING: $(dirname "$TMP_FILE") does not exist!"
LANG=${
config.i18n.defaultLocale or "C"
} ${ageBin} --decrypt "''${IDENTITIES[@]}" -o "$TMP_FILE" "${secretType.file}"
)
chmod ${secretType.mode} "$TMP_FILE"
mv -f "$TMP_FILE" "$_truePath"
${optionalString secretType.symlink ''
[ "${secretType.path}" != "${cfg.secretsDir}/${secretType.name}" ] && ln -sfT "${cfg.secretsDir}/${secretType.name}" "${secretType.path}"
''}
'';
testIdentities = map (path: ''
test -f ${path} || echo '[agenix] WARNING: config.age.identityPaths entry ${path} not present!'
'') cfg.identityPaths;
cleanupAndLink = ''
_agenix_generation="$(basename "$(readlink ${cfg.secretsDir})" || echo 0)"
(( ++_agenix_generation ))
echo "[agenix] symlinking new secrets to ${cfg.secretsDir} (generation $_agenix_generation)..."
ln -sfT "${cfg.secretsMountPoint}/$_agenix_generation" ${cfg.secretsDir}
(( _agenix_generation > 1 )) && {
echo "[agenix] removing old secrets (generation $(( _agenix_generation - 1 )))..."
rm -rf "${cfg.secretsMountPoint}/$(( _agenix_generation - 1 ))"
}
'';
installSecrets = builtins.concatStringsSep "\n" (
[ "echo '[agenix] decrypting secrets...'" ]
++ testIdentities
++ (map installSecret (builtins.attrValues cfg.secrets))
++ [ cleanupAndLink ]
);
chownSecret = secretType: ''
${setTruePath secretType}
chown ${secretType.owner}:${secretType.group} "$_truePath"
'';
chownSecrets = builtins.concatStringsSep "\n" (
[ "echo '[agenix] chowning...'" ]
++ [ chownMountPoint ]
++ (map chownSecret (builtins.attrValues cfg.secrets))
);
secretType = types.submodule (
{ config, ... }:
{
options = {
name = mkOption {
type = types.str;
default = config._module.args.name;
defaultText = literalExpression "config._module.args.name";
description = ''
Name of the file used in {option}`age.secretsDir`
'';
};
file = mkOption {
type = types.path;
description = ''
Age file the secret is loaded from.
'';
};
path = mkOption {
type = types.str;
default = "${cfg.secretsDir}/${config.name}";
defaultText = literalExpression ''
"''${cfg.secretsDir}/''${config.name}"
'';
description = ''
Path where the decrypted secret is installed.
'';
};
mode = mkOption {
type = types.str;
default = "0400";
description = ''
Permissions mode of the decrypted secret in a format understood by chmod.
'';
};
owner = mkOption {
type = types.str;
default = "0";
description = ''
User of the decrypted secret.
'';
};
group = mkOption {
type = types.str;
default = users.${config.owner}.group or "0";
defaultText = literalExpression ''
users.''${config.owner}.group or "0"
'';
description = ''
Group of the decrypted secret.
'';
};
symlink = mkEnableOption "symlinking secrets to their destination" // {
default = true;
};
};
}
);
in
{
imports = [
(mkRenamedOptionModule [ "age" "sshKeyPaths" ] [ "age" "identityPaths" ])
];
options.age = {
ageBin = mkOption {
type = types.str;
default = "${pkgs.age}/bin/age";
defaultText = literalExpression ''
"''${pkgs.age}/bin/age"
'';
description = ''
The age executable to use.
'';
};
secrets = mkOption {
type = types.attrsOf secretType;
default = { };
description = ''
Attrset of secrets.
'';
};
secretsDir = mkOption {
type = types.path;
default = "/run/agenix";
description = ''
Folder where secrets are symlinked to
'';
};
secretsMountPoint = mkOption {
type =
types.addCheck types.str (
s:
(builtins.match "[ \t\n]*" s) == null # non-empty
&& (builtins.match ".+/" s) == null
) # without trailing slash
// {
description = "${types.str.description} (with check: non-empty without trailing slash)";
};
default = "/run/agenix.d";
description = ''
Where secrets are created before they are symlinked to {option}`age.secretsDir`
'';
};
identityPaths = mkOption {
type = types.listOf types.path;
default =
if isDarwin then
[
"/etc/ssh/ssh_host_ed25519_key"
"/etc/ssh/ssh_host_rsa_key"
]
else if (config.services.openssh.enable or false) then
map (e: e.path) (
lib.filter (e: e.type == "rsa" || e.type == "ed25519") config.services.openssh.hostKeys
)
else
[ ];
defaultText = literalExpression ''
if isDarwin
then [
"/etc/ssh/ssh_host_ed25519_key"
"/etc/ssh/ssh_host_rsa_key"
]
else if (config.services.openssh.enable or false)
then map (e: e.path) (lib.filter (e: e.type == "rsa" || e.type == "ed25519") config.services.openssh.hostKeys)
else [];
'';
description = ''
Path to SSH keys to be used as identities in age decryption.
'';
};
};
config = mkIf (cfg.secrets != { }) (mkMerge [
{
assertions = [
{
assertion = cfg.identityPaths != [ ];
message = "age.identityPaths must be set, for example by enabling openssh.";
}
];
}
(optionalAttrs (!isDarwin) {
# When using sysusers we can no longer be started as an activation script
# because those are started in initrd while sysusers is started later.
systemd.services.agenix-install-secrets = mkIf sysusersEnabled {
wantedBy = [ "sysinit.target" ];
after = [ "systemd-sysusers.service" ];
unitConfig.DefaultDependencies = "no";
path = [ pkgs.mount ];
serviceConfig = {
Type = "oneshot";
ExecStart = pkgs.writeShellScript "agenix-install" (concatLines [
newGeneration
installSecrets
chownSecrets
]);
RemainAfterExit = true;
};
};
# Create a new directory full of secrets for symlinking (this helps
# ensure removed secrets are actually removed, or at least become
# invalid symlinks).
system.activationScripts = mkIf (!sysusersEnabled) {
agenixNewGeneration = {
text = newGeneration;
deps = [
"specialfs"
];
};
agenixInstall = {
text = installSecrets;
deps = [
"agenixNewGeneration"
"specialfs"
];
};
# So user passwords can be encrypted.
users.deps = [ "agenixInstall" ];
# Change ownership and group after users and groups are made.
agenixChown = {
text = chownSecrets;
deps = [
"users"
"groups"
];
};
# So other activation scripts can depend on agenix being done.
agenix = {
text = "";
deps = [ "agenixChown" ];
};
};
})
(optionalAttrs isDarwin {
launchd.daemons.activate-agenix = {
script = ''
set -e
set -o pipefail
export PATH="${pkgs.gnugrep}/bin:${pkgs.coreutils}/bin:@out@/sw/bin:/usr/bin:/bin:/usr/sbin:/sbin"
${newGeneration}
${installSecrets}
${chownSecrets}
exit 0
'';
serviceConfig = {
RunAtLoad = true;
KeepAlive.SuccessfulExit = false;
};
};
})
]);
}

View File

@@ -1,3 +1,10 @@
{
services.nixseparatedebuginfod.enable = true;
services.nixseparatedebuginfod2 = {
enable = true;
substituters = [
"local:"
"https://cache.nixos.org"
"http://hut/cache"
];
};
}

View File

@@ -1,4 +1,4 @@
{ lib, ... }:
{ lib, pkgs, ... }:
{
imports = [
@@ -21,4 +21,20 @@
};
services.slurm.client.enable = true;
# Only allow SSH connections from users who have a SLURM allocation
# See: https://slurm.schedmd.com/pam_slurm_adopt.html
security.pam.services.sshd.rules.account.slurm = {
control = "required";
enable = true;
modulePath = "${pkgs.slurm}/lib/security/pam_slurm_adopt.so";
args = [ "log_level=debug5" ];
order = 999999; # Make it last one
};
# Disable systemd session (pam_systemd.so) as it will conflict with the
# pam_slurm_adopt.so module. What happens is that the shell is first adopted
# into the slurmstepd task and then into the systemd session, which is not
# what we want, otherwise it will linger even if all jobs are gone.
security.pam.services.sshd.startSession = lib.mkForce false;
}

View File

@@ -1,31 +1,6 @@
{ config, pkgs, ... }:
let
suspendProgram = pkgs.writeShellScript "suspend.sh" ''
exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
set -x
export "PATH=/run/current-system/sw/bin:$PATH"
echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
hosts=$(scontrol show hostnames $1)
for host in $hosts; do
echo Shutting down host: $host
ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power off
done
'';
resumeProgram = pkgs.writeShellScript "resume.sh" ''
exec 1>>/var/log/power_save.log 2>>/var/log/power_save.log
set -x
export "PATH=/run/current-system/sw/bin:$PATH"
echo "$(date) Suspend invoked $0 $*" >> /var/log/power_save.log
hosts=$(scontrol show hostnames $1)
for host in $hosts; do
echo Starting host: $host
ipmitool -I lanplus -H ''${host}-ipmi -P "" -U "" chassis power on
done
'';
in {
{
services.slurm = {
controlMachine = "apex";
clusterName = "jungle";
@@ -59,16 +34,6 @@ in {
# the resources. Use the task/cgroup plugin to enable process containment.
TaskPlugin=task/affinity,task/cgroup
# Power off unused nodes until they are requested
SuspendProgram=${suspendProgram}
SuspendTimeout=60
ResumeProgram=${resumeProgram}
ResumeTimeout=300
SuspendExcNodes=fox
# Turn the nodes off after 1 hour of inactivity
SuspendTime=3600
# Reduce port range so we can allow only this range in the firewall
SrunPortRange=60000-61000
@@ -86,9 +51,7 @@ in {
# when a task runs (srun) so we can ssh early.
PrologFlags=Alloc,Contain,X11
# LaunchParameters=ulimit_pam_adopt will set RLIMIT_RSS in processes
# adopted by the external step, similar to tasks running in regular steps
# LaunchParameters=ulimit_pam_adopt
LaunchParameters=use_interactive_step
SlurmdDebug=debug5
#DebugFlags=Protocol,Cgroup
'';

View File

@@ -4,8 +4,8 @@ let
name = "jungle-web";
src = pkgs.fetchgit {
url = "https://jungle.bsc.es/git/rarias/jungle-website.git";
rev = "739bf0175a7f05380fe7ad7023ff1d60db1710e1";
hash = "sha256-ea5DzhYTzZ9TmqD+x95rdNdLbxPnBluqlYH2NmBYmc4=";
rev = "52abaf4d71652a9ef77a0b098db14ca33bffff4c";
hash = "sha256-/ul9GazbOrOkmlvSgDz/+2W+V+ir5725Y7mVLc3rb0M=";
};
buildInputs = [ pkgs.hugo ];
buildPhase = ''

View File

@@ -7,6 +7,7 @@ let
callPackage = final.callPackage;
bscPkgs = {
agenix = prev.callPackage ./pkgs/agenix/default.nix { };
amd-uprof = prev.callPackage ./pkgs/amd-uprof/default.nix { };
bench6 = callPackage ./pkgs/bench6/default.nix { };
bigotes = callPackage ./pkgs/bigotes/default.nix { };
@@ -18,7 +19,11 @@ let
cudainfo = prev.callPackage ./pkgs/cudainfo/default.nix { };
#extrae = callPackage ./pkgs/extrae/default.nix { }; # Broken and outdated
gpi-2 = callPackage ./pkgs/gpi-2/default.nix { };
intel-apt = callPackage ./pkgs/intel-oneapi/packages.nix { };
intelPackages_2023 = callPackage ./pkgs/intel-oneapi/2023.nix { };
intelPackages_2024 = final.intel-apt.hpckit_2024;
intelPackages_2025 = final.intel-apt.hpckit_2025;
intelPackages = final.intelPackages_2025;
jemallocNanos6 = callPackage ./pkgs/nanos6/jemalloc.nix { };
# FIXME: Extend this to all linuxPackages variants. Open problem, see:
# https://discourse.nixos.org/t/whats-the-right-way-to-make-a-custom-kernel-module-available/4636
@@ -29,18 +34,21 @@ let
amd-uprof-driver = _prev.callPackage ./pkgs/amd-uprof/driver.nix { };
});
lmbench = callPackage ./pkgs/lmbench/default.nix { };
mcxx = callPackage ./pkgs/mcxx/default.nix { };
# Broken and unmaintained
# mcxx = callPackage ./pkgs/mcxx/default.nix { };
meteocat-exporter = prev.callPackage ./pkgs/meteocat-exporter/default.nix { };
mpi = final.mpich; # Set MPICH as default
mpich = callPackage ./pkgs/mpich/default.nix { mpich = prev.mpich; };
nanos6 = callPackage ./pkgs/nanos6/default.nix { };
nanos6Debug = final.nanos6.override { enableDebug = true; };
nixtools = callPackage ./pkgs/nixtools/default.nix { };
nixgen = callPackage ./pkgs/nixgen/default.nix { };
# Broken because of pkgsStatic.libcap
# See: https://github.com/NixOS/nixpkgs/pull/268791
#nix-wrap = callPackage ./pkgs/nix-wrap/default.nix { };
nodes = callPackage ./pkgs/nodes/default.nix { };
nosv = callPackage ./pkgs/nosv/default.nix { };
oneMath = callPackage ./pkgs/onemath/default.nix { };
openmp = callPackage ./pkgs/llvm-ompss2/openmp.nix { monorepoSrc = final.clangOmpss2Unwrapped.src; version = final.clangOmpss2Unwrapped.version; };
openmpv = final.openmp.override { enableNosv = true; enableOvni = true; };
osumb = callPackage ./pkgs/osu/default.nix { };
@@ -50,6 +58,7 @@ let
prometheus-slurm-exporter = prev.callPackage ./pkgs/slurm-exporter/default.nix { };
#pscom = callPackage ./pkgs/parastation/pscom.nix { }; # Unmaintained
#psmpi = callPackage ./pkgs/parastation/psmpi.nix { }; # Unmaintained
slurm = import ./pkgs/slurm/default.nix { slurm = prev.slurm; };
sonar = callPackage ./pkgs/sonar/default.nix { };
stdenvClangOmpss2 = final.stdenv.override { cc = final.clangOmpss2; allowedRequisites = null; };
stdenvClangOmpss2Nanos6 = final.stdenv.override { cc = final.clangOmpss2Nanos6; allowedRequisites = null; };
@@ -57,6 +66,8 @@ let
stdenvClangOmpss2NodesOmpv = final.stdenv.override { cc = final.clangOmpss2NodesOmpv; allowedRequisites = null; };
tagaspi = callPackage ./pkgs/tagaspi/default.nix { };
tampi = callPackage ./pkgs/tampi/default.nix { };
tasycl = callPackage ./pkgs/tasycl/default.nix { };
tasycl-acpp = callPackage ./pkgs/tasycl/default.nix { useIntel = false; };
upc-qaire-exporter = prev.callPackage ./pkgs/upc-qaire-exporter/default.nix { };
wxparaver = callPackage ./pkgs/paraver/default.nix { };
};
@@ -66,6 +77,8 @@ let
#sigsegv = callPackage ./test/reproducers/sigsegv.nix { };
hello-c = callPackage ./test/compilers/hello-c.nix { };
hello-cpp = callPackage ./test/compilers/hello-cpp.nix { };
hello-sycl = callPackage ./test/compilers/hello-sycl.nix { };
hello-syclompss = callPackage ./test/compilers/icpx-ompss2.nix { };
lto = callPackage ./test/compilers/lto.nix { };
asan = callPackage ./test/compilers/asan.nix { };
intel2023-icx-c = hello-c.override { stdenv = final.intelPackages_2023.stdenv; };
@@ -75,6 +88,13 @@ let
intel2023-ifort = callPackage ./test/compilers/hello-f.nix {
stdenv = final.intelPackages_2023.stdenv-ifort;
};
intel2024-icx-c = hello-c.override { stdenv = final.intelPackages_2024.stdenv; };
intel2025-icx-c = hello-c.override { stdenv = final.intelPackages_2025.stdenv; };
intel2024-icx-cpp = hello-cpp.override { stdenv = final.intelPackages_2024.stdenv; };
intel2025-icx-cpp = hello-cpp.override { stdenv = final.intelPackages_2025.stdenv; };
# intel2023-sycl = hello-sycl.override { intelPackages = final.intelPackages_2023; }; # broken
intel2024-sycl = hello-sycl.override { intelPackages = final.intelPackages_2024; };
intel2025-sycl = hello-sycl.override { intelPackages = final.intelPackages_2025; };
clangOmpss2-lto = lto.override { stdenv = final.stdenvClangOmpss2Nanos6; };
clangOmpss2-asan = asan.override { stdenv = final.stdenvClangOmpss2Nanos6; };
clangOmpss2-task = callPackage ./test/compilers/ompss2.nix {
@@ -98,14 +118,16 @@ let
pkgsTopLevel = filterAttrs (_: isDerivation) bscPkgs;
# Native build in that platform doesn't imply cross build works
canCrossCompile = platform: pkg:
canCrossCompile = platform: default: pkg:
(isDerivation pkg) &&
# Must be defined explicitly
(pkg.meta.cross or false) &&
(meta.availableOn platform pkg);
# If meta.cross is undefined, use default
(pkg.meta.cross or default) &&
(meta.availableOn final.pkgsCross.${platform}.stdenv.hostPlatform pkg);
# For now only RISC-V
crossSet = { riscv64 = final.pkgsCross.riscv64.bsc.pkgsTopLevel; };
crossSet = genAttrs [ "riscv64" ] (platform:
filterAttrs (_: canCrossCompile platform true)
final.pkgsCross.${platform}.bsc.pkgsTopLevel);
buildList = name: paths:
final.runCommandLocal name { } ''
@@ -125,7 +147,7 @@ let
# For now only RISC-V
crossList = buildList "ci-cross"
(filter
(canCrossCompile final.pkgsCross.riscv64.stdenv.hostPlatform)
(canCrossCompile "riscv64" false) # opt-in (pkgs with: meta.cross = true)
(builtins.attrValues crossSet.riscv64));
in bscPkgs // {

212
pkgs/agenix/agenix.sh Normal file
View File

@@ -0,0 +1,212 @@
#!/usr/bin/env bash
set -Eeuo pipefail
PACKAGE="agenix"
function show_help () {
echo "$PACKAGE - edit and rekey age secret files"
echo " "
echo "$PACKAGE -e FILE [-i PRIVATE_KEY]"
echo "$PACKAGE -r [-i PRIVATE_KEY]"
echo ' '
echo 'options:'
echo '-h, --help show help'
# shellcheck disable=SC2016
echo '-e, --edit FILE edits FILE using $EDITOR'
echo '-r, --rekey re-encrypts all secrets with specified recipients'
echo '-d, --decrypt FILE decrypts FILE to STDOUT'
echo '-i, --identity identity to use when decrypting'
echo '-v, --verbose verbose output'
echo ' '
echo 'FILE an age-encrypted file'
echo ' '
echo 'PRIVATE_KEY a path to a private SSH key used to decrypt file'
echo ' '
echo 'EDITOR environment variable of editor to use when editing FILE'
echo ' '
echo 'If STDIN is not interactive, EDITOR will be set to "cp /dev/stdin"'
echo ' '
echo 'RULES environment variable with path to Nix file specifying recipient public keys.'
echo "Defaults to './secrets.nix'"
echo ' '
echo "agenix version: @version@"
echo "age binary path: @ageBin@"
echo "age version: $(@ageBin@ --version)"
}
function warn() {
printf '%s\n' "$*" >&2
}
function err() {
warn "$*"
exit 1
}
test $# -eq 0 && (show_help && exit 1)
REKEY=0
DECRYPT_ONLY=0
DEFAULT_DECRYPT=(--decrypt)
while test $# -gt 0; do
case "$1" in
-h|--help)
show_help
exit 0
;;
-e|--edit)
shift
if test $# -gt 0; then
export FILE=$1
else
echo "no FILE specified"
exit 1
fi
shift
;;
-i|--identity)
shift
if test $# -gt 0; then
DEFAULT_DECRYPT+=(--identity "$1")
else
echo "no PRIVATE_KEY specified"
exit 1
fi
shift
;;
-r|--rekey)
shift
REKEY=1
;;
-d|--decrypt)
shift
DECRYPT_ONLY=1
if test $# -gt 0; then
export FILE=$1
else
echo "no FILE specified"
exit 1
fi
shift
;;
-v|--verbose)
shift
set -x
;;
*)
show_help
exit 1
;;
esac
done
RULES=${RULES:-./secrets.nix}
function cleanup {
if [ -n "${CLEARTEXT_DIR+x}" ]
then
rm -rf -- "$CLEARTEXT_DIR"
fi
if [ -n "${REENCRYPTED_DIR+x}" ]
then
rm -rf -- "$REENCRYPTED_DIR"
fi
}
trap "cleanup" 0 2 3 15
function keys {
(@nixInstantiate@ --json --eval --strict -E "(let rules = import $RULES; in rules.\"$1\".publicKeys)" | @jqBin@ -r .[]) || exit 1
}
function armor {
(@nixInstantiate@ --json --eval --strict -E "(let rules = import $RULES; in (builtins.hasAttr \"armor\" rules.\"$1\" && rules.\"$1\".armor))") || exit 1
}
function decrypt {
FILE=$1
KEYS=$2
if [ -z "$KEYS" ]
then
err "There is no rule for $FILE in $RULES."
fi
if [ -f "$FILE" ]
then
DECRYPT=("${DEFAULT_DECRYPT[@]}")
if [[ "${DECRYPT[*]}" != *"--identity"* ]]; then
if [ -f "$HOME/.ssh/id_rsa" ]; then
DECRYPT+=(--identity "$HOME/.ssh/id_rsa")
fi
if [ -f "$HOME/.ssh/id_ed25519" ]; then
DECRYPT+=(--identity "$HOME/.ssh/id_ed25519")
fi
fi
if [[ "${DECRYPT[*]}" != *"--identity"* ]]; then
err "No identity found to decrypt $FILE. Try adding an SSH key at $HOME/.ssh/id_rsa or $HOME/.ssh/id_ed25519 or using the --identity flag to specify a file."
fi
@ageBin@ "${DECRYPT[@]}" -- "$FILE" || exit 1
fi
}
function edit {
FILE=$1
KEYS=$(keys "$FILE") || exit 1
ARMOR=$(armor "$FILE") || exit 1
CLEARTEXT_DIR=$(@mktempBin@ -d)
CLEARTEXT_FILE="$CLEARTEXT_DIR/$(basename -- "$FILE")"
DEFAULT_DECRYPT+=(-o "$CLEARTEXT_FILE")
decrypt "$FILE" "$KEYS" || exit 1
[ ! -f "$CLEARTEXT_FILE" ] || cp -- "$CLEARTEXT_FILE" "$CLEARTEXT_FILE.before"
[ -t 0 ] || EDITOR='cp -- /dev/stdin'
$EDITOR "$CLEARTEXT_FILE"
if [ ! -f "$CLEARTEXT_FILE" ]
then
warn "$FILE wasn't created."
return
fi
[ -f "$FILE" ] && [ "$EDITOR" != ":" ] && @diffBin@ -q -- "$CLEARTEXT_FILE.before" "$CLEARTEXT_FILE" && warn "$FILE wasn't changed, skipping re-encryption." && return
ENCRYPT=()
if [[ "$ARMOR" == "true" ]]; then
ENCRYPT+=(--armor)
fi
while IFS= read -r key
do
if [ -n "$key" ]; then
ENCRYPT+=(--recipient "$key")
fi
done <<< "$KEYS"
REENCRYPTED_DIR=$(@mktempBin@ -d)
REENCRYPTED_FILE="$REENCRYPTED_DIR/$(basename -- "$FILE")"
ENCRYPT+=(-o "$REENCRYPTED_FILE")
@ageBin@ "${ENCRYPT[@]}" <"$CLEARTEXT_FILE" || exit 1
mkdir -p -- "$(dirname -- "$FILE")"
mv -f -- "$REENCRYPTED_FILE" "$FILE"
}
function rekey {
FILES=$( (@nixInstantiate@ --json --eval -E "(let rules = import $RULES; in builtins.attrNames rules)" | @jqBin@ -r .[]) || exit 1)
for FILE in $FILES
do
warn "rekeying $FILE..."
EDITOR=: edit "$FILE"
cleanup
done
}
[ $REKEY -eq 1 ] && rekey && exit 0
[ $DECRYPT_ONLY -eq 1 ] && DEFAULT_DECRYPT+=("-o" "-") && decrypt "${FILE}" "$(keys "$FILE")" && exit 0
edit "$FILE" && cleanup && exit 0

66
pkgs/agenix/default.nix Normal file
View File

@@ -0,0 +1,66 @@
{
lib,
stdenv,
age,
jq,
nix,
mktemp,
diffutils,
replaceVars,
ageBin ? "${age}/bin/age",
shellcheck,
}:
let
bin = "${placeholder "out"}/bin/agenix";
in
stdenv.mkDerivation rec {
pname = "agenix";
version = "0.15.0";
src = replaceVars ./agenix.sh {
inherit ageBin version;
jqBin = "${jq}/bin/jq";
nixInstantiate = "${nix}/bin/nix-instantiate";
mktempBin = "${mktemp}/bin/mktemp";
diffBin = "${diffutils}/bin/diff";
};
dontUnpack = true;
doInstallCheck = true;
installCheckInputs = [ shellcheck ];
postInstallCheck = ''
shellcheck ${bin}
${bin} -h | grep ${version}
test_tmp=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
export HOME="$test_tmp/home"
export NIX_STORE_DIR="$test_tmp/nix/store"
export NIX_STATE_DIR="$test_tmp/nix/var"
mkdir -p "$HOME" "$NIX_STORE_DIR" "$NIX_STATE_DIR"
function cleanup {
rm -rf "$test_tmp"
}
trap "cleanup" 0 2 3 15
mkdir -p $HOME/.ssh
cp -r "${./example}" $HOME/secrets
chmod -R u+rw $HOME/secrets
(
umask u=rw,g=r,o=r
cp ${./example_keys/user1.pub} $HOME/.ssh/id_ed25519.pub
chown $UID $HOME/.ssh/id_ed25519.pub
)
(
umask u=rw,g=,o=
cp ${./example_keys/user1} $HOME/.ssh/id_ed25519
chown $UID $HOME/.ssh/id_ed25519
)
cd $HOME/secrets
test $(${bin} -d secret1.age) = "hello"
'';
installPhase = ''
install -D $src ${bin}
'';
meta.description = "age-encrypted secrets for NixOS";
}

View File

@@ -0,0 +1,7 @@
age-encryption.org/v1
-> ssh-ed25519 V3XmEA zirqdzZZ1E+sedBn7fbEHq4ntLEkokZ4GctarBBOHXY
Rvs5YHaAUeCZyNwPedubPcHClWYIuXXWA5zadXPWY6w
-> ssh-ed25519 KLPP8w BVp4rDkOYSQyn8oVeHFeinSqW+pdVtxBF9+5VM1yORY
bMwppAi8Nhz0328taU4AzUkTVyWtSLvFZG6c5W/Fs78
--- xCbqLhXAcOziO2wmbjTiSQfZvt5Rlsc4SCvF+iEzpQA
(binary ciphertext not shown)

View File

@@ -0,0 +1,7 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHNzaC1lZDI1NTE5IFYzWG1FQSBpZkZW
aFpLNnJxc0VUMHRmZ2dZS0pjMGVENnR3OHd5K0RiT1RjRUhibFZBCnN5UG5vUjA3
SXpsNGtiVUw4T0tIVFo5Wkk5QS9NQlBndzVvektiQ0ozc0kKLS0tIGxyY1Q4dEZ1
VGZEanJyTFNta2JNRmpZb2FnK2JyS1hSVml1UGdMNWZKQXMKYla+wTXcRedyZoEb
LVWaSx49WoUTU0KBPJg9RArxaeC23GoCDzR/aM/1DvYU
-----END AGE ENCRYPTED FILE-----

View File

@@ -0,0 +1,9 @@
age-encryption.org/v1
-> ssh-ed25519 KLPP8w s1DYZRlZuSsyhmZCF1lFB+E9vB8bZ/+ZhBRlx8nprwE
nmYVCsVBrX2CFXXPU+D+bbkkIe/foofp+xoUrg9DHZw
-> ssh-ed25519 V3XmEA Pwv3oCwcY0DX8rY48UNfsj9RumWsn4dbgorYHCwObgI
FKxRYkL3JHtJxUwymWDF0rAtJ33BivDI6IfPsfumM90
-> V'v(/u$-grease em/Vgf 2qDuk
7I3iiQLPGi1COML9u/JeYkr7EqbSLoU
--- 57WJRigUGtmcObrssS3s4PvmR8wgh1AOC/ijJn1s3xI
(binary ciphertext not shown)

Binary file not shown.

View File

@@ -0,0 +1,5 @@
age-encryption.org/v1
-> ssh-ed25519 V3XmEA OB4+1FbPhQ3r6iGksM7peWX5it8NClpXIq/o5nnP7GA
FmHVUj+A5i5+bDFgySQskmlvynnosJiWUTJmBRiNA9I
--- tP+3mFVtd7ogVu1Lkboh55zoi5a77Ht08Uc/QuIviv4
(binary ciphertext not shown)

View File

@@ -0,0 +1,23 @@
let
user1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL0idNvgGiucWgup/mP78zyC23uFjYq0evcWdjGQUaBH";
system1 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPJDyIr/FSz1cJdcoW69R+NrWzwGK/+3gJpqD1t8L2zE";
in
{
"secret1.age".publicKeys = [
user1
system1
];
"secret2.age".publicKeys = [ user1 ];
"passwordfile-user1.age".publicKeys = [
user1
system1
];
"-leading-hyphen-filename.age".publicKeys = [
user1
system1
];
"armored-secret.age" = {
publicKeys = [ user1 ];
armor = true;
};
}

View File

@@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACDyQ8iK/xUs9XCXXKFuvUfja1s8Biv/t4Caag9bfC9sxAAAAJA3yvCWN8rw
lgAAAAtzc2gtZWQyNTUxOQAAACDyQ8iK/xUs9XCXXKFuvUfja1s8Biv/t4Caag9bfC9sxA
AAAEA+J2V6AG1NriAIvnNKRauIEh1JE9HSdhvKJ68a5Fm0w/JDyIr/FSz1cJdcoW69R+Nr
WzwGK/+3gJpqD1t8L2zEAAAADHJ5YW50bUBob21lMQE=
-----END OPENSSH PRIVATE KEY-----

View File

@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPJDyIr/FSz1cJdcoW69R+NrWzwGK/+3gJpqD1t8L2zE

View File

@@ -0,0 +1,7 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACC9InTb4BornFoLqf5j+/M8gtt7hY2KtHr3FnYxkFGgRwAAAJC2JJ8htiSf
IQAAAAtzc2gtZWQyNTUxOQAAACC9InTb4BornFoLqf5j+/M8gtt7hY2KtHr3FnYxkFGgRw
AAAEDxt5gC/s53IxiKAjfZJVCCcFIsdeERdIgbYhLO719+Kb0idNvgGiucWgup/mP78zyC
23uFjYq0evcWdjGQUaBHAAAADHJ5YW50bUBob21lMQE=
-----END OPENSSH PRIVATE KEY-----

View File

@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL0idNvgGiucWgup/mP78zyC23uFjYq0evcWdjGQUaBH

23
pkgs/agenix/update.sh Executable file
View File

@@ -0,0 +1,23 @@
#!/bin/sh
set -e
# All operations are done relative to root
GITROOT=$(git rev-parse --show-toplevel)
cd "$GITROOT"
REVISION=${1:-main}
TMPCLONE=$(mktemp -d)
trap "rm -rf ${TMPCLONE}" EXIT
git clone https://github.com/ryantm/agenix.git --revision="$REVISION" "$TMPCLONE" --depth=1
cp "${TMPCLONE}/pkgs/agenix.sh" pkgs/agenix/agenix.sh
cp "${TMPCLONE}/pkgs/agenix.nix" pkgs/agenix/default.nix
sed -i 's#../example#./example#' pkgs/agenix/default.nix
cp "${TMPCLONE}/example/"* pkgs/agenix/example/
cp "${TMPCLONE}/example_keys/"* pkgs/agenix/example_keys/
cp "${TMPCLONE}/modules/age.nix" m/module/agenix.nix

View File

@@ -90,7 +90,7 @@ in
meta = {
description = "Performance analysis tool-suite for x86 based applications";
homepage = "https://www.amd.com/es/developer/uprof.html";
platforms = lib.platforms.linux;
platforms = [ "x86_64-linux" ];
license = lib.licenses.unfree;
maintainers = with lib.maintainers.bsc; [ rarias varcila ];
};

View File

@@ -19,7 +19,7 @@ in stdenv.mkDerivation {
'';
hardeningDisable = [ "pic" "format" ];
nativeBuildInputs = kernel.moduleBuildDependencies;
patches = [ ./makefile.patch ./hrtimer.patch ];
patches = [ ./makefile.patch ./hrtimer.patch ./remove-wr-rdmsrq.patch ];
makeFlags = [
"KERNEL_VERSION=${kernel.modDirVersion}"
"KERNEL_DIR=${kernel.dev}/lib/modules/${kernel.modDirVersion}/build"

View File

@@ -0,0 +1,20 @@
diff --git a/inc/PwrProfAsm.h b/inc/PwrProfAsm.h
index d77770a..c93a0e9 100644
--- a/inc/PwrProfAsm.h
+++ b/inc/PwrProfAsm.h
@@ -347,6 +347,7 @@
#endif
+/*
#define rdmsrq(msr,val1,val2,val3,val4) ({ \
__asm__ __volatile__( \
"rdmsr\n" \
@@ -362,6 +363,7 @@
:"c"(msr), "a"(val1), "d"(val2), "S"(val3), "D"(val4) \
); \
})
+*/
#define rdmsrpw(msr,val1,val2,val3,val4) ({ \
__asm__ __volatile__( \

View File

@@ -1,5 +1,6 @@
{
stdenv
, lib
, cudatoolkit
, cudaPackages
, autoAddDriverRunpath
@@ -11,7 +12,7 @@ stdenv.mkDerivation (finalAttrs: {
src = ./.;
buildInputs = [
cudatoolkit # Required for nvcc
cudaPackages.cuda_cudart.static # Required for -lcudart_static
(lib.getOutput "static" cudaPackages.cuda_cudart) # Required for -lcudart_static
autoAddDriverRunpath
];
installPhase = ''
@@ -40,4 +41,9 @@ stdenv.mkDerivation (finalAttrs: {
'';
installPhase = "touch $out";
};
meta = {
platforms = [ "x86_64-linux" ];
maintainers = with lib.maintainers.bsc; [ rarias ];
};
})

View File

@@ -9,7 +9,6 @@
, automake
, libtool
, mpi
, rsync
, gfortran
}:
@@ -44,13 +43,24 @@ stdenv.mkDerivation rec {
configureFlags = [
"--with-infiniband=${rdma-core-all}"
"--with-mpi=${mpiAll}"
"--with-mpi=yes" # fixes mpi detection when cross-compiling
"--with-slurm"
"CFLAGS=-fPIC"
"CXXFLAGS=-fPIC"
];
buildInputs = [ slurm mpiAll rdma-core-all autoconf automake libtool rsync gfortran ];
nativeBuildInputs = [
autoconf
automake
gfortran
libtool
];
buildInputs = [
slurm
mpiAll
rdma-core-all
];
hardeningDisable = [ "all" ];
@@ -60,5 +70,6 @@ stdenv.mkDerivation rec {
maintainers = with lib.maintainers.bsc; [ rarias ];
platforms = lib.platforms.linux;
license = lib.licenses.gpl3Plus;
cross = false; # infiniband detection does not work
};
}

View File

@@ -10,7 +10,7 @@
, zlib
, autoPatchelfHook
, libfabric
, gcc13
, gcc
, wrapCCWith
}:
@@ -33,8 +33,6 @@ let
maintainers = with lib.maintainers.bsc; [ abonerib ];
};
gcc = gcc13;
v = {
hpckit = "2023.1.0";
compiler = "2023.1.0";
@@ -42,45 +40,19 @@ let
mpi = "2021.9.0";
};
aptPackageIndex = stdenv.mkDerivation {
name = "intel-oneapi-packages";
srcs = [
# Run update.sh to update the package lists
./amd64-packages ./all-packages
];
phases = [ "installPhase" ];
installPhase = ''
awk -F': ' '\
BEGIN { print "[ {" } \
NR>1 && /^Package: / { print "} {"; } \
/: / { printf "%s = \"%s\";\n", $1, $2 } \
END { print "} ]" }' $srcs > $out
'';
};
aptPackages = import aptPackageIndex;
apthost = "https://apt.repos.intel.com/oneapi/";
getSum = pkgList: name:
findMatch = name:
let
matches = lib.filter (x: name == x.Package) pkgList;
#n = lib.length matches;
#match = builtins.trace (name + " -- ${builtins.toString n}") (lib.elemAt matches 0);
match = lib.elemAt matches 0;
in
match.SHA256;
getUrl = pkgList: name:
let
matches = lib.filter (x: name == x.Package) pkgList;
#match = assert lib.length matches == 1; lib.elemAt matches 0;
aptPackages = builtins.fromJSON (builtins.readFile ./packages.json);
matches = lib.filter (x: name == x.pname) aptPackages;
n = lib.length matches;
match =
#builtins.trace (name + " -- n=${builtins.toString n}")
(lib.elemAt matches 0);
match = builtins.traceVerbose (name + " -- ${builtins.toString n}") (builtins.head matches);
apthost = "https://apt.repos.intel.com/oneapi/";
in
apthost + match.Filename;
{
url = apthost + match.filename;
sha256 = match.sha256;
};
uncompressDebs = debs: name: stdenv.mkDerivation {
name = name;
@@ -100,10 +72,7 @@ let
joinDebs = name: names:
let
urls = builtins.map (x: getUrl aptPackages x) names;
sums = builtins.map (x: getSum aptPackages x) names;
getsrc = url: sha256: builtins.fetchurl { inherit url sha256; };
debs = lib.zipListsWith getsrc urls sums;
debs = builtins.map (x: builtins.fetchurl (findMatch x)) names;
in
uncompressDebs debs "${name}-source";
@@ -472,7 +441,7 @@ let
'';
};
ifort-wrapper = wrapIntel rec {
ifort-wrapper = wrapIntel {
cc = intel-compiler-fortran;
mygcc = gcc;
extraBuild = ''

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,297 @@
{
lib,
stdenv,
callPackage,
dpkg,
fetchurl,
sqlite,
elfutils,
}:
let
inherit (builtins)
attrNames
attrValues
concatMap
elem
filter
fromJSON
getAttr
groupBy
head
isNull
listToAttrs
map
mapAttrs
readFile
replaceStrings
splitVersion
;
inherit (lib)
converge
findFirst
groupBy'
hasPrefix
optional
pipe
take
toInt
toList
versionAtLeast
versionOlder
;
aptData = fromJSON (readFile ./packages.json);
# Compare versions in debian control file syntax
# See: https://www.debian.org/doc/debian-policy/ch-relationships.html#syntax-of-relationship-fields
#
# NOTE: this is not a proper version comparison
#
# A proper version solver, should aggregate dependencies with the same name
# and compute the constraint (e.g. a (>= 2) a (<< 5) -> 2 <= a << 5)
#
# But in the intel repo, there are no such "duplicated" dependencies to specify
# upper limits, which leads to issues when intel-hpckit-2021 depends on things
# like intel-basekit >= 2021.1.0-2403 and we end up installing the newest
# basekit instead of the one from 2021.
#
# To mitigate this, >= is set to take the latest version with matching major
# and minor (only revision and patch are allowed to change)
compareVersions =
got: kind: want:
let
g0 = take 2 (splitVersion got);
w0 = take 2 (splitVersion want);
in
if isNull want then
true
else if kind == "=" then
got == want
else if kind == "<<" then
versionOlder got want
else if kind == "<=" then
versionAtLeast want got
else if kind == ">>" then
versionOlder want got
else if kind == ">=" then
(g0 == w0) && versionAtLeast got want # always match major version
else
throw "unknown operation: ${kind}";
findMatching =
{
pname,
kind,
version,
}:
findFirst (x: pname == x.pname && compareVersions x.version kind version) null aptData;
isIntel = pkg: (hasPrefix "intel-" pkg.pname);
expandDeps =
pkg: (map findMatching (filter isIntel pkg.dependencies)) ++ (optional (pkg.size != 0) pkg);
# get the oldest by major version. If they have the same major version, take
# the newest. This prevents most issues with resolutions
# versionOlder b a -> true if b is older than a (b `older` a)
getNewerInMajor =
a: b:
let
va = a.version;
vb = b.version;
va0 = head (splitVersion va);
vb0 = head (splitVersion vb);
in
if isNull a then
b
else if va0 != vb0 then
if va0 > vb0 then b else a
else if versionOlder vb va then
a
else
b;
removeDups = l: attrValues (groupBy' getNewerInMajor null (getAttr "provides") l);
_resolveDeps = converge (l: removeDups (concatMap expandDeps l));
resolveDeps =
pkg:
let
deps = _resolveDeps (toList pkg);
namedDeps = (map (x: "${x.pname}-${x.version}") deps);
in
builtins.traceVerbose (builtins.deepSeq namedDeps namedDeps) deps;
blacklist = [
"intel-basekit-env"
"intel-basekit-getting-started"
"intel-hpckit-env"
"intel-hpckit-getting-started"
"intel-oneapi-advisor"
"intel-oneapi-common-licensing"
"intel-oneapi-common-oneapi-vars"
"intel-oneapi-common-vars"
"intel-oneapi-compiler-cpp-eclipse-cfg"
"intel-oneapi-compiler-dpcpp-eclipse-cfg"
"intel-oneapi-condaindex"
"intel-oneapi-dev-utilities-eclipse-cfg"
"intel-oneapi-dpcpp-ct-eclipse-cfg"
"intel-oneapi-eclipse-ide"
"intel-oneapi-hpc-toolkit-getting-started"
"intel-oneapi-icc-eclipse-plugin-cpp"
"intel-oneapi-vtune"
"intel-oneapi-vtune-eclipse-plugin-vtune"
];
isInBlacklist = pkg: elem pkg.provides blacklist;
removeBlacklist = filter (e: !(isInBlacklist e));
dpkgExtractAll =
pname: version:
{ srcs, deps }:
stdenv.mkDerivation {
inherit pname version srcs;
nativeBuildInputs = [ dpkg ];
phases = [ "installPhase" ];
passthru = { inherit deps; };
installPhase = ''
mkdir -p $out
for src in $srcs; do
echo "Unpacking $src"
dpkg -x $src $out
done
'';
};
apthost = "https://apt.repos.intel.com/oneapi/";
fetchDeb =
p:
fetchurl {
url = apthost + p.filename;
inherit (p) sha256;
};
buildIntel =
pkg:
pipe pkg [
resolveDeps
removeBlacklist
(l: {
srcs = map fetchDeb l;
deps = l;
})
(dpkgExtractAll "${pkg.provides}-extracted" pkg.version)
];
findHpcKit =
year:
findMatching {
pname = "intel-hpckit";
kind = "<<";
version = toString (year + 1);
};
years = map toInt (attrNames components);
patchIntel = callPackage ./patch_intel.nix { };
# Version information for each hpckit. This is used to normalize the paths
# so that files are in $out/{bin,lib,include...} instead of all over the place
# in $out/opt/intel/oneapi/*/*/{...}.
#
# The most important is the compiler component, which is used to build the
# stdenv for the hpckit.
#
# NOTE: this has to be manually specified, so we can avoid IFD. To add a
# new version, add a new field with an empty attrset, (e.g. "2026" = {}; ),
# build hpckit_2026.unpatched and use the values from
# result/opt/intel/oneapi/* to populate the attrset.
#
# WARN: if there is more than one version in the folders of the unpatched
# components, our dependency resolution hacks have probably failed and the
# package set may be broken.
components = {
"2025" = {
ishmem = "1.4";
pti = "0.13";
tcm = "1.4";
umf = "0.11";
ccl = "2021.16";
compiler = "2025.2";
dal = "2025.8";
debugger = "2025.2";
dev-utilities = "2025.2";
dnnl = "2025.2";
dpcpp-ct = "2025.2";
dpl = "2022.9";
ipp = "2022.2";
ippcp = "2025.2";
mkl = "2025.2";
mpi = "2021.16";
tbb = "2022.2";
};
"2024" = {
tcm = "1.1";
ccl = "2021.13";
compiler = "2024.2";
dal = "2024.6";
debugger = "2024.2";
dev-utilities = "2024.2";
diagnostics = "2024.2";
dnnl = "2024.2";
dpcpp-ct = "2024.2";
dpl = "2022.6";
ipp = "2021.12";
ippcp = "2021.12";
mkl = "2024.2";
mpi = "2021.13";
tbb = "2021.13";
extraPackages = [
sqlite
elfutils
];
};
};
replaceDots = replaceStrings [ "." ] [ "_" ];
in
lib.recurseIntoAttrs (
listToAttrs (
map (
year:
let
year_str = toString year;
in
{
name = "hpckit_${year_str}";
value = patchIntel {
unpatched = buildIntel (findHpcKit year);
components = components.${year_str};
};
}
) years
)
)
// {
apt = pipe aptData [
(groupBy (p: replaceDots p.provides))
(mapAttrs (
_: l:
listToAttrs (
map (pkg: {
name = replaceDots ("v" + pkg.version);
value = pkg;
}) l
)
))
];
inherit resolveDeps patchIntel buildIntel;
}

View File

@@ -0,0 +1,189 @@
{
stdenv,
stdenvNoCC,
lib,
symlinkJoin,
autoPatchelfHook,
wrapCCWith,
overrideCC,
gcc,
hwloc,
libelf,
libffi_3_3,
libpsm2,
libuuid,
libxml2,
numactl,
ocl-icd,
openssl,
python3,
rdma-core,
ucx,
zlib,
}:
lib.makeOverridable (
{
unpatched,
components ? { },
extraPackages ? components.extraPackages or [ ],
}:
let
inherit (builtins)
attrValues
filter
mapAttrs
removeAttrs
;
__components = removeAttrs components [ "extraPackages" ];
_components = __components;
# _components = lib.traceSeqN 2 {
# inherit unpatched __components;
# deps = builtins.map (x: "${x.pname}-${x.version}") unpatched.deps;
# } __components;
wrapIntel =
cc:
let
targetConfig = stdenv.targetPlatform.config;
in
(wrapCCWith {
inherit cc;
nixSupport = {
cc-ldflags = [
"-L${gcc.cc}/lib/gcc/${targetConfig}/${gcc.version}"
"-L${gcc.cc.lib}/lib"
"-L${cc}/lib"
];
cc-cflags = [
"--gcc-toolchain=${gcc.cc}"
"-isystem \"${cc.original}/lib/clang/*/include\""
"-isystem ${cc}/include"
"-isystem ${cc}/include/intel64"
"-isystem ${gcc.cc}/lib/gcc/${targetConfig}/${gcc.version}/include"
];
libcxx-cxxflags = [
# "--gcc-toolchain=${gcc.cc}"
"-isystem ${gcc.cc}/include/c++/${gcc.version}"
"-isystem ${gcc.cc}/include/c++/${gcc.version}/${targetConfig}"
];
};
extraBuildCommands = ''
# FIXME: We should find a better way to modify the PATH instead of using
# this ugly hack. See https://jungle.bsc.es/git/rarias/bscpkgs/issues/9
echo 'path_backup="${gcc.cc}/bin:$path_backup"' >>$out/nix-support/cc-wrapper-hook
# Disable hardening by default
echo "" > $out/nix-support/add-hardening.sh
wrap icx $wrapper $ccPath/icx
wrap icpx $wrapper $ccPath/icpx
wrap ifx $wrapper $ccPath/ifx
ln -s $out/bin/icpx $out/bin/c++
ln -s $out/bin/icx $out/bin/cc
sed -i 's/.*isCxx=0/isCxx=1/' $out/bin/icpx
# Use this to detect when a compiler subprocess is called
# from icpx (--fsycl-host-compiler)
echo 'export NIX_CC_WRAPPER_INTEL=1' >>$out/nix-support/cc-wrapper-hook
# oneMath looks for sycl libraries in bin/../lib
ln -s ${cc}/lib $out/lib
ln -s ${cc}/include $out/include
'';
}).overrideAttrs
(old: {
installPhase = old.installPhase + ''
export named_cc="icx"
export named_cxx="icpx"
export named_fc="ifx"
'';
});
in
stdenvNoCC.mkDerivation (finalAttrs: {
pname = lib.removeSuffix "-extracted" unpatched.pname;
inherit (unpatched) version;
src = unpatched;
phases = [
"installPhase"
"fixupPhase"
];
buildInputs = [
libffi_3_3
libelf
libxml2
hwloc
numactl
libuuid
libpsm2
zlib
ocl-icd
rdma-core
ucx
openssl
python3
stdenv.cc.cc.lib
]
++ extraPackages;
autoPatchelfIgnoreMissingDeps = [
"libhwloc.so.5"
"libcuda.so.1"
"libze_loader.so.1"
];
# There are broken symlinks that go outside packages, ignore them
dontCheckForBrokenSymlinks = true;
nativeBuildInputs = [ autoPatchelfHook ];
installPhase = ''
cp -r $src/opt/intel/oneapi/ $out
'';
passthru =
let
pkgs = mapAttrs (
folder: version:
let
original = "${finalAttrs.finalPackage}/${folder}/${version}";
in
(symlinkJoin {
pname = "intel-${folder}";
inherit version;
paths = [ original ];
}).overrideAttrs
{ passthru = { inherit original; }; }
) _components;
in
pkgs
// {
inherit unpatched;
pkgs = lib.recurseIntoAttrs pkgs;
components = _components;
# This contains all packages properly symlinked into toplevel directories
# in $out.
#
# NOTE: there are clashes with packages that have symlinks outside their
# scope (libtcm and env/vars.sh)
all = symlinkJoin {
pname = finalAttrs.finalPackage.pname + "-symlinked";
inherit (finalAttrs.finalPackage) version;
paths = filter lib.isDerivation (attrValues finalAttrs.finalPackage.pkgs);
};
stdenv = overrideCC stdenv finalAttrs.finalPackage.cc;
cc = wrapIntel finalAttrs.finalPackage.pkgs.compiler;
};
})
)
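A minimal smoke test for the wrapper, sketched below; the flake attribute path is an assumption (adjust it to wherever the wrapped Intel compiler ends up exposed), and the binary names follow the wrap and symlink commands in extraBuildCommands above.
nix build .#intelPackages.cc -o result-icx   # assumed attribute path
./result-icx/bin/icx --version    # wrapped C compiler (also symlinked as bin/cc)
./result-icx/bin/icpx --version   # wrapped C++ compiler (also symlinked as bin/c++)
./result-icx/bin/ifx --version    # wrapped Fortran compiler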

pkgs/intel-oneapi/process.jq Executable file
View File

@@ -0,0 +1,29 @@
#!/usr/bin/env -S jq -f
def extract_fields: {
pname : .Package,
version : .Version,
provides : .Package | sub("[0-9.-]*$"; ""),
filename : .Filename,
size : ."Installed-Size" | tonumber,
sha256 : .SHA256,
dependencies : .Depends,
} ;
# parses dependencies into a list of [{.pname, .kind, .version}]
# some dependencies do not have a version specified, in which case, kind = version = null
#
# example dependencies:
# intel-oneapi-common-vars (>= 2023.0.0-25325), intel-oneapi-common-licensing-2023.0.0
def split_dependencies : map(try(.dependencies |= split(",\\s?"; "")) // .dependencies |= []) ;
def match_version : capture("(?<pname>[a-zA-Z0-9_\\-.]*) *(\\((?<kind>[<>=]*) *(?<version>.*)\\))?"; "") ;
def parse_dependencies : map_values(.dependencies.[] |= match_version) ;
def sort_version_decreasing : sort_by(.version | split("[-.]"; "") | map(tonumber)) | reverse ;
map(extract_fields) | split_dependencies | parse_dependencies | sort_version_decreasing
# [.[] | select(.pname == "intel-hpckit") | .version]
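For reference, a minimal run of process.jq with one hand-written record (all field values made up; assumes a recent jq on PATH and the script being executable) shows the resulting shape: provides drops the trailing version from the package name and every entry in dependencies becomes {pname, kind, version}.
printf '%s' '[{"Package":"intel-hpckit-2025.0","Version":"2025.0.0-25",
"Filename":"pool/main/x.deb","Installed-Size":"10","SHA256":"0000",
"Depends":"intel-oneapi-common-vars (>= 2025.0.0-25), intel-oneapi-mkl-2025.0"}]' | ./process.jq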

pkgs/intel-oneapi/toJson.awk Executable file
View File

@@ -0,0 +1,29 @@
#!/usr/bin/env -S awk -f
BEGIN {
FS=": "
prev_empty=1
t=" "
print "[ {"
}
!NF { # empty line: update the separator so the next non-empty line closes the previous dict and opens a new one
prev_empty=1
t="},\n{ "
next # skip line (we won't match anything else)
}
{
printf t "\"%s\" : \"%s\"\n", $1, $2
if (prev_empty) {
# this was the first line after a group of empty lines; the following
# lines must be prefixed with a comma
prev_empty=0
t=", "
}
}
END { print "} ]" }
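A quick sanity check of the awk conversion with two hand-written stanzas (real apt Packages entries carry many more fields); the result should be JSON that jq accepts:
printf 'Package: intel-hpckit-2025.0\nVersion: 2025.0.0-25\n\nPackage: intel-oneapi-mkl\nVersion: 2025.1.0-7\n' | ./toJson.awk | jq length
# prints 2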

View File

@@ -1,4 +1,11 @@
#!/bin/sh
curl https://apt.repos.intel.com/oneapi/dists/all/main/binary-amd64/Packages -o amd64-packages
curl https://apt.repos.intel.com/oneapi/dists/all/main/binary-all/Packages -o all-packages
out_64=$(mktemp intel-api.64.XXXXXX)
out_all=$(mktemp intel-api.all.XXXXXX)
trap 'rm -f "$out_64" "$out_all"' EXIT INT HUP
curl https://apt.repos.intel.com/oneapi/dists/all/main/binary-amd64/Packages -o "$out_64"
curl https://apt.repos.intel.com/oneapi/dists/all/main/binary-all/Packages -o "$out_all"
# NOTE: we use `jq -r tostring` to minify the json (3.2Mb -> 2.3Mb)
cat "$out_64" "$out_all" | ./toJson.awk | ./process.jq | jq -r tostring >packages.json

View File

@@ -16,19 +16,19 @@
, useGit ? false
, gitUrl ? "ssh://git@bscpm04.bsc.es/llvm-ompss/llvm-mono.git"
, gitBranch ? "master"
, gitCommit ? "880e2341c56bad1dc14e8c369fb3356bec19018e"
, gitCommit ? "872ba63f86edaefc9787984ef3fae9f2f94e0124" # github-release-2025.11
}:
let
stdenv = llvmPackages_latest.stdenv;
release = rec {
version = "2025.06";
version = "2025.11";
src = fetchFromGitHub {
owner = "bsc-pm";
repo = "llvm";
rev = "refs/tags/github-release-${version}";
hash = "sha256-ww9PpRmtz/M9IyLiZ8rAehx2UW4VpQt+svf4XfKBzKo=";
hash = "sha256-UgwMTUkM9Z87dDH205swZFBeFhrcbLAxginViG40pBM=";
};
};

View File

@@ -3,6 +3,7 @@
, lib
, gcc
, clangOmpss2Unwrapped
, writeShellScript
, openmp ? null
, wrapCCWith
, llvmPackages_latest
@@ -27,20 +28,17 @@ let
# We need to replace the lld linker from bintools with our linker just built,
# otherwise we run into incompatibility issues when mixing compiler and linker
# versions.
bintools-unwrapped = llvmPackages_latest.tools.bintools-unwrapped.override {
bintools-unwrapped = llvmPackages_latest.bintools-unwrapped.override {
lld = clangOmpss2Unwrapped;
};
bintools = llvmPackages_latest.tools.bintools.override {
bintools = llvmPackages_latest.bintools.override {
bintools = bintools-unwrapped;
};
targetConfig = stdenv.targetPlatform.config;
inherit gcc;
cc = clangOmpss2Unwrapped;
gccVersion = with versions; let v = gcc.version; in concatStringsSep "." [(major v) (minor v) (patch v)];
in wrapCCWith {
inherit cc bintools;
# extraPackages adds packages to depsTargetTargetPropagated
extraPackages = optional (openmp != null) openmp;
extraBuildCommands = ''
echo "-target ${targetConfig}" >> $out/nix-support/cc-cflags
echo "-B${gcc.cc}/lib/gcc/${targetConfig}/${gccVersion}" >> $out/nix-support/cc-cflags
@@ -57,14 +55,50 @@ in wrapCCWith {
echo "--gcc-toolchain=${gcc}" >> $out/nix-support/cc-cflags
wrap clang++ $wrapper $ccPath/clang++
'' + optionalString (openmp != null) ''
echo "export OPENMP_RUNTIME=${ompname}" >> $out/nix-support/cc-wrapper-hook
'' + optionalString (ompss2rt != null) ''
echo "export OMPSS2_RUNTIME=${rtname}" >> $out/nix-support/cc-wrapper-hook
echo "export ${homevar}=${ompss2rt}" >> $out/nix-support/cc-wrapper-hook
'' + optionalString (ompss2rt != null && ompss2rt.pname == "nodes") ''
echo "export NOSV_HOME=${ompss2rt.nosv}" >> $out/nix-support/cc-wrapper-hook
'';
}
envExports = lib.optionalString (openmp != null) ''
echo "export OPENMP_RUNTIME=${ompname}" >> $out/nix-support/cc-wrapper-hook
'' + optionalString (ompss2rt != null) ''
echo "export OMPSS2_RUNTIME=${rtname}" >> $out/nix-support/cc-wrapper-hook
echo "export ${homevar}=${ompss2rt}" >> $out/nix-support/cc-wrapper-hook
'' + optionalString (ompss2rt != null && ompss2rt.pname == "nodes") ''
echo "export NOSV_HOME=${ompss2rt.nosv}" >> $out/nix-support/cc-wrapper-hook
'';
extraPackages = optional (openmp != null) openmp;
wrappedCC = wrapCCWith {
# extraPackages adds packages to depsTargetTargetPropagated
inherit cc bintools extraPackages;
extraBuildCommands = extraBuildCommands + envExports;
};
resetIntelCCFlags = let tconf = builtins.replaceStrings ["-"] ["_"] targetConfig;
in writeShellScript "remove-intel.sh" ''
if [ "''${NIX_CC_WRAPPER_INTEL:-0}" = 1 ]; then
unset NIX_CFLAGS_COMPILE_${tconf}
unset NIX_CC_WRAPPER_FLAGS_SET_${tconf}
if (( "''${NIX_DEBUG:-0}" >= 1 )); then
echo "ompss2: cleaned NIX_CFLAGS_COMPILE_${tconf} (invokation from intel compiler detected)"
fi
fi
'';
intelExtraBuildCommands = ''
sed -i 's|# Flirting.*|source ${resetIntelCCFlags}\n\n&|' $out/bin/clang
sed -i 's|# Flirting.*|source ${resetIntelCCFlags}\n\n&|' $out/bin/clang++
'';
wrappedCCIntel = wrapCCWith {
inherit cc bintools extraPackages;
# extraPackages adds packages to depsTargetTargetPropagated
extraBuildCommands = intelExtraBuildCommands + envExports;
};
in wrappedCC.overrideAttrs (oldAttrs: {
passthru = oldAttrs.passthru // {
forIcpx = wrappedCCIntel;
};
})
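A rough way to observe the Intel-caller detection, assuming the forIcpx variant has been built to ./result: with NIX_CC_WRAPPER_INTEL set (as the Intel wrapper does) and NIX_DEBUG at 1 or higher, the sourced reset script should report that it cleaned the inherited flags.
echo 'int main() { return 0; }' > t.cpp
NIX_CC_WRAPPER_INTEL=1 NIX_DEBUG=1 ./result/bin/clang++ -c t.cpp -o t.o | grep ompss2
# expected: ompss2: cleaned NIX_CFLAGS_COMPILE_<target> (invocation from intel compiler detected)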

View File

@@ -65,6 +65,7 @@ stdenv.mkDerivation rec {
];
meta = {
broken = true;
homepage = "https://github.com/bsc-pm/mcxx";
description = "C/C++/Fortran source-to-source compilation infrastructure aimed at fast prototyping";
maintainers = with lib.maintainers.bsc; [ rpenacob ];

View File

@@ -1,9 +1,11 @@
{ python3Packages, lib }:
python3Packages.buildPythonApplication rec {
python3Packages.buildPythonApplication {
pname = "meteocat-exporter";
version = "1.0";
pyproject = true;
src = ./.;
doCheck = false;

View File

@@ -6,6 +6,13 @@
, pmix
, gfortran
, symlinkJoin
# Fortran support is disabled when cross-compiling.
# To fix cross-compilation, we would need to fill in the values from
# https://github.com/pmodels/mpich/blob/main/maint/fcrosscompile/cross_values.txt.in
# for each target architecture.
, enableFortran ? stdenv.hostPlatform == stdenv.buildPlatform
, perl
, targetPackages
}:
let
@@ -15,10 +22,13 @@ let
paths = [ pmix.dev pmix.out ];
};
in mpich.overrideAttrs (old: {
buildInput = old.buildInputs ++ [
buildInputs = old.buildInputs ++ [
libfabric
pmixAll
];
nativeBuildInputs = old.nativeBuildInputs ++ [
perl
];
configureFlags = [
"--enable-shared"
"--enable-sharedlib"
@@ -31,10 +41,21 @@ in mpich.overrideAttrs (old: {
] ++ lib.optionals (lib.versionAtLeast gfortran.version "10") [
"FFLAGS=-fallow-argument-mismatch" # https://github.com/pmodels/mpich/issues/4300
"FCFLAGS=-fallow-argument-mismatch"
] ++ lib.optionals (!enableFortran) [
"--disable-fortran"
];
preFixup = ''
sed -i 's:^CC=.*:CC=${targetPackages.stdenv.cc}/bin/${targetPackages.stdenv.cc.targetPrefix}cc:' $out/bin/mpicc
sed -i 's:^CXX=.*:CXX=${targetPackages.stdenv.cc}/bin/${targetPackages.stdenv.cc.targetPrefix}c++:' $out/bin/mpicxx
'' + lib.optionalString enableFortran ''
sed -i 's:^FC=.*:FC=${targetPackages.gfortran or gfortran}/bin/${targetPackages.gfortran.targetPrefix or gfortran.targetPrefix}gfortran:' $out/bin/mpifort
'';
hardeningDisable = [ "all" ];
meta = old.meta // {
maintainers = old.meta.maintainers ++ (with lib.maintainers.bsc; [ rarias ]);
cross = true;
};
})
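Since the wrappers are rewritten in preFixup, the compiler they end up calling can be checked with mpich's -show flag; the flake attribute name below is an assumption.
nix build .#mpich -o result-mpich
./result-mpich/bin/mpicc -show    # should print the cc from targetPackages.stdenv
./result-mpich/bin/mpicxx -show   # likewise for c++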

pkgs/nixgen/default.nix Normal file
View File

@@ -0,0 +1,22 @@
{
stdenv
, lib
}:
stdenv.mkDerivation {
pname = "nixgen";
version = "0.0.1";
src = ./nixgen;
dontUnpack = true;
phases = [ "installPhase" ];
installPhase = ''
mkdir -p $out/bin
cp -a $src $out/bin/nixgen
'';
meta = {
description = "Quickly generate flake.nix from command line";
maintainers = with lib.maintainers.bsc; [ rarias ];
platforms = lib.platforms.linux;
license = lib.licenses.gpl3Plus;
};
}

pkgs/nixgen/nixgen Executable file
View File

@@ -0,0 +1,97 @@
#!/usr/bin/env bash
#
# Copyright (c) 2025, Barcelona Supercomputing Center (BSC)
# SPDX-License-Identifier: GPL-3.0+
# Author: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
function usage() {
echo "USAGE: nixgen [-f] [package [...]] [-b package [...]]" >&2
echo " Generates a flake.nix file with the given packages." >&2
echo " After flake.nix is created, use 'nix develop' to enter the shell." >&2
echo "OPTIONS" >&2
echo " -f Overwrite existing flake.nix (default: no)." >&2
echo " packages... Add these packages to the shell." >&2
echo " -b packages... Add the dependencies needed to build these packages." >&2
echo "EXAMPLE" >&2
echo " $ nixgen ovni bigotes -b nosv tampi" >&2
echo " Adds the packages ovni and bigotes as well as all required dependencies" >&2
echo " to build nosv and tampi." >&2
echo "AUTHOR" >&2
echo " Rodrigo Arias Mallo <rodrigo.arias@bsc.es>" >&2
exit 1
}
mode=package
packages=
inputsFrom=
force=
if [[ $# -eq 0 ]]; then
usage
fi
while [[ $# -gt 0 ]]; do
case $1 in -b)
mode=build
shift
;;
-f)
force=1
shift
;;
-h)
usage
;;
-*|--*)
echo "error: unknown option $1" >&2
exit 1
;;
*)
if [ "$mode" == "package" ]; then
packages+="${packages:+ }$1"
else
inputsFrom+="${inputsFrom:+ }$1"
fi
shift
;;
esac
done
if [ ! "$force" -a -e flake.nix ]; then
echo "error: flake.nix exists, force overwrite with -f" >&2
exit 1
fi
cat > flake.nix <<EOF
{
inputs.jungle.url = "git+https://jungle.bsc.es/git/rarias/jungle";
outputs = { self, jungle }:
let
nixpkgs = jungle.inputs.nixpkgs;
customOverlay = (final: prev: {
# Example overlay, for now empty
});
pkgs = import nixpkgs {
system = "x86_64-linux";
overlays = [
# Apply jungle overlay to get our BSC custom packages
jungle.outputs.bscOverlay
# And on top, apply our local changes to customize for this cluster
customOverlay
];
};
in {
devShells.x86_64-linux.default = pkgs.mkShell {
pname = "devshell";
# Include these packages in the shell
packages = with pkgs; [
$packages
];
# The dependencies needed to build these packages will also be included
inputsFrom = with pkgs; [
$inputsFrom
];
};
};
}
EOF

View File

@@ -3,7 +3,6 @@
, lib
, fetchFromGitHub
, pkg-config
, perl
, numactl
, hwloc
, boost
@@ -11,22 +10,23 @@
, ovni
, nosv
, clangOmpss2
, which
, useGit ? false
, gitUrl ? "ssh://git@gitlab-internal.bsc.es/nos-v/nodes.git"
, gitBranch ? "master"
, gitCommit ? "6002ec9ae6eb876d962cc34366952a3b26599ba6"
, gitCommit ? "511489e71504a44381e0930562e7ac80ac69a848" # version-1.4
}:
with lib;
let
release = rec {
version = "1.3";
version = "1.4";
src = fetchFromGitHub {
owner = "bsc-pm";
repo = "nodes";
rev = "version-${version}";
hash = "sha256-cFb9pxcjtkMmH0CsGgUO9LTdXDNh7MCqicgGWawLrsU=";
hash = "sha256-+lR/R0l3fGZO3XG7whMorFW2y2YZ0ZFnLeOHyQYrAsQ=";
};
};
@@ -59,6 +59,7 @@ in
doCheck = false;
nativeCheckInputs = [
clangOmpss2
which
];
# The "bindnow" flags are incompatible with ifunc resolution mechanism. We

View File

@@ -13,19 +13,19 @@
, useGit ? false
, gitUrl ? "git@gitlab-internal.bsc.es:nos-v/nos-v.git"
, gitBranch ? "master"
, gitCommit ? "9f47063873c3aa9d6a47482a82c5000a8c813dd8"
, gitCommit ? "1108e4786b58e0feb9a16fa093010b763eb2f8e8" # version 4.0.0
}:
with lib;
let
release = rec {
version = "3.2.0";
version = "4.0.0";
src = fetchFromGitHub {
owner = "bsc-pm";
repo = "nos-v";
rev = "${version}";
hash = "sha256-yaz92426EM8trdkBJlISmAoG9KJCDTvoAW/HKrasvOw=";
hash = "sha256-llaq73bd/YxLVKNlMebnUHKa4z3sdcsuDUoVwUxNuw8=";
};
};

pkgs/onemath/default.nix Normal file
View File

@@ -0,0 +1,91 @@
{
lib,
fetchFromGitHub,
cmake,
withCFlags,
intelPackages,
mklSupport ? true,
config,
cudaSupport ? config.cudaSupport,
cudaPackages ? { },
rocmSupport ? config.rocmSupport,
hipTargets ? null, # only one target at a time supported
rocmPackages ? { },
}:
let
# rocmSupport alone is not enough; we also need a specific HIP target
enableHip = rocmSupport && hipTargets != null;
stdenv = withCFlags (lib.optionals cudaSupport [
"--cuda-path=${cudaPackages.cudatoolkit}"
]) intelPackages.stdenv;
in
# at least one backend has to be enabled
assert mklSupport || cudaSupport || enableHip;
stdenv.mkDerivation rec {
pname = "oneMath";
version = "0.8";
src = fetchFromGitHub {
owner = "uxlfoundation";
repo = "oneMath";
rev = "v${version}";
sha256 = "sha256-xK8lKI3oqKlx3xtvdScpMq+HXAuoYCP0BZdkEqnJP5o=";
};
cmakeFlags = [
(lib.cmakeBool "ENABLE_MKLCPU_BACKEND" mklSupport)
(lib.cmakeBool "ENABLE_MKLGPU_BACKEND" mklSupport)
(lib.cmakeBool "ENABLE_CUBLAS_BACKEND" cudaSupport)
(lib.cmakeBool "ENABLE_CUFFT_BACKEND" cudaSupport)
(lib.cmakeBool "ENABLE_CURAND_BACKEND" cudaSupport)
(lib.cmakeBool "ENABLE_CUSOLVER_BACKEND" cudaSupport)
(lib.cmakeBool "ENABLE_CUSPARSE_BACKEND" cudaSupport)
(lib.cmakeBool "ENABLE_ROCBLAS_BACKEND" enableHip)
(lib.cmakeBool "ENABLE_ROCFFT_BACKEND" enableHip)
(lib.cmakeBool "ENABLE_ROCSOLVER_BACKEND" enableHip)
(lib.cmakeBool "ENABLE_ROCRAND_BACKEND" enableHip)
(lib.cmakeBool "ENABLE_ROCSPARSE_BACKEND" enableHip)
(lib.cmakeBool "BUILD_FUNCTIONAL_TESTS" false)
(lib.cmakeBool "BUILD_EXAMPLES" false)
]
++ lib.optionals enableHip [
(lib.cmakeFeature "HIP_TARGETS" hipTargets)
];
nativeBuildInputs = [ cmake ];
buildInputs =
lib.optionals (mklSupport) [
intelPackages.mkl
intelPackages.tbb
]
++ lib.optionals (enableHip) [
rocmPackages.rocmPath
rocmPackages.rocblas
rocmPackages.rocfft
rocmPackages.rocsolver
rocmPackages.rocrand
rocmPackages.rocsparse
]
++ lib.optionals (cudaSupport) [
(lib.getDev cudaPackages.cuda_cudart)
cudaPackages.cudatoolkit
cudaPackages.libcublas
cudaPackages.libcurand
cudaPackages.libcufft
cudaPackages.libcusparse
cudaPackages.libcusolver
];
}
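To double-check which backends a given evaluation enables, the generated CMake switches can be inspected without building; the onemath attribute name is an assumption.
nix eval .#onemath.cmakeFlags --json | jq -r '.[]' | grep -i backend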

View File

@@ -32,6 +32,11 @@ stdenv.mkDerivation rec {
"CXX=mpicxx"
];
env = {
MPICH_CC="${stdenv.cc}/bin/${stdenv.cc.targetPrefix}cc";
MPICH_CXX="${stdenv.cc}/bin/${stdenv.cc.targetPrefix}c++";
};
postInstall = ''
mkdir -p $out/bin
for f in $(find $out -executable -type f); do
@@ -44,5 +49,6 @@ stdenv.mkDerivation rec {
homepage = "http://mvapich.cse.ohio-state.edu/benchmarks/";
maintainers = [ ];
platforms = lib.platforms.all;
cross = true;
};
}

View File

@@ -7,7 +7,7 @@
, useGit ? false
, gitBranch ? "master"
, gitUrl ? "ssh://git@bscpm04.bsc.es/rarias/ovni.git"
, gitCommit ? "e4f62382076f0cf0b1d08175cf57cc0bc51abc61"
, gitCommit ? "06432668f346c8bdc1006fabc23e94ccb81b0d8b" # version 1.13.0
, enableDebug ? false
# Only enable MPI if the build is native (fails on cross-compilation)
, useMpi ? (stdenv.buildPlatform.canExecute stdenv.hostPlatform)
@@ -15,13 +15,13 @@
let
release = rec {
version = "1.12.0";
version = "1.13.0";
src = fetchFromGitHub {
owner = "bsc-pm";
repo = "ovni";
rev = "${version}";
hash = "sha256-H04JvsVKrdqr3ON7JhU0g17jjlg/jzQ7eTfx9vUNd3E=";
} // { shortRev = "a73afcf"; };
hash = "sha256-0l2ryIyWNiZqeYdVlnj/WnQGS3xFCY4ICG8JedX424w=";
} // { shortRev = "0643266"; };
};
git = rec {

View File

@@ -12,7 +12,7 @@
, paraverKernel
, openssl
, glibcLocales
, wrapGAppsHook
, wrapGAppsHook3
}:
let
@@ -64,7 +64,7 @@ stdenv.mkDerivation rec {
autoconf
automake
autoreconfHook
wrapGAppsHook
wrapGAppsHook3
];
buildInputs = [

View File

@@ -35,5 +35,6 @@ stdenv.mkDerivation rec {
maintainers = with lib.maintainers.bsc; [ rarias ];
platforms = lib.platforms.linux;
license = lib.licenses.mit;
cross = true;
};
}

View File

@@ -5,23 +5,14 @@
, automake
, autoconf
, libtool
, mpi
, autoreconfHook
, gpi-2
, boost
, numactl
, rdma-core
, gfortran
, symlinkJoin
}:
let
mpiAll = symlinkJoin {
name = "mpi-all";
paths = [ mpi.all ];
};
in
stdenv.mkDerivation rec {
pname = "tagaspi";
enableParallelBuilding = true;
@@ -35,16 +26,18 @@ stdenv.mkDerivation rec {
hash = "sha256-RGG/Re2uM293HduZfGzKUWioDtwnSYYdfeG9pVrX9EM=";
};
buildInputs = [
nativeBuildInputs = [
autoreconfHook
automake
autoconf
libtool
gfortran
];
buildInputs = [
boost
numactl
rdma-core
gfortran
mpiAll
];
dontDisableStatic = true;
@@ -63,5 +56,6 @@ stdenv.mkDerivation rec {
maintainers = with lib.maintainers.bsc; [ rarias ];
platforms = lib.platforms.linux;
license = lib.licenses.gpl3Plus;
cross = false; # gpi-2 cannot be cross-compiled
};
}

View File

@@ -68,5 +68,6 @@ in stdenv.mkDerivation {
maintainers = with lib.maintainers.bsc; [ rarias ];
platforms = lib.platforms.linux;
license = lib.licenses.gpl3Plus;
cross = true;
};
}

pkgs/tasycl/default.nix Normal file
View File

@@ -0,0 +1,87 @@
{
lib,
stdenv,
autoconf,
automake,
autoreconfHook,
boost,
fetchFromGitHub,
gnumake,
libtool,
withCFlags,
useIntel ? true,
adaptivecpp ? null,
intelPackages ? null,
useGit ? false,
gitUrl ? "git@gitlab-internal.bsc.es:task-awareness/tasycl/tasycl.git",
gitBranch ? "main",
gitCommit ? "78f98dcf60a66e0eaa3b4ebcf55be076bec64825",
}:
assert !useIntel -> adaptivecpp != null;
assert useIntel -> intelPackages != null;
let
variant = if useIntel then "intel" else "acpp";
syclStdenv = withCFlags [ "-O3" ] (if useIntel then intelPackages.stdenv else stdenv);
release = rec {
version = "2.1.0";
src = fetchFromGitHub {
owner = "bsc-pm";
repo = "tasycl";
rev = version;
hash = "sha256-0kXnb0lHeQNHR27GTLbJ8xbiICLU8k2+FqEnnFSrzzo=";
};
};
git = rec {
version = src.shortRev;
src = builtins.fetchGit {
url = gitUrl;
ref = gitBranch;
rev = gitCommit;
};
};
source = if (useGit) then git else release;
in
syclStdenv.mkDerivation {
pname = "tasycl-${variant}";
inherit (source) src version;
enableParallelBuilding = true;
separateDebugInfo = true;
nativeBuildInputs = [
autoreconfHook
automake
autoconf
libtool
gnumake
];
buildInputs = [
boost
];
configureFlags = lib.optionals (!useIntel) [
"CXX=${lib.getExe adaptivecpp}"
];
# add symlinks so we can explicitly link with tasycl-intel / tasycl-acpp
postInstall = ''
pushd $out/lib
for i in libtasycl* ; do
ln -s "$i" "''\${i/tasycl/tasycl-${variant}}"
done
popd
'';
hardeningDisable = [ "all" ];
}
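The variant-suffixed symlinks let a build pick one tasycl flavour explicitly at link time; a hypothetical compile line for the Intel variant (assuming the library directory is already on the linker search path, e.g. inside a nix shell with this package) would be:
icpx -fsycl main.cpp -ltasycl-intel -o main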

View File

@@ -1,9 +1,11 @@
{ python3Packages, lib }:
python3Packages.buildPythonApplication rec {
python3Packages.buildPythonApplication {
pname = "upc-qaire-exporter";
version = "1.0";
pyproject = true;
src = ./.;
doCheck = false;

View File

@@ -0,0 +1,62 @@
{
intelPackages,
writeText,
strace,
}:
let
stdenv = intelPackages.stdenv;
hello_sycl = writeText "hello.cpp" ''
#include <sycl/sycl.hpp>
class hello_world;
int main(int argc, char** argv) try {
auto device_selector = sycl::default_selector_v;
sycl::queue queue(device_selector);
std::cout << "Running on: "
<< queue.get_device().get_info<sycl::info::device::name>()
<< std::endl;
queue.submit([&] (sycl::handler& cgh) {
auto os = sycl::stream{128, 128, cgh};
cgh.single_task<hello_world>([=]() {
os << "Hello World! (on device)\n";
});
});
return 0;
} catch (sycl::exception &e) {
std::cout << "SYCL exception: " << e.what() << std::endl;
return 0; // we expect to fail since no devices should be available
}
'';
in
stdenv.mkDerivation {
version = "0.0.1";
name = "hello-sycl";
buildInputs = [
stdenv
strace
];
src = hello_sycl;
dontUnpack = true;
dontConfigure = true;
NIX_DEBUG = 0;
buildPhase = ''
cp $src hello.cpp
set -x
echo CXX=$CXX
command -v $CXX
$CXX -fsycl hello.cpp -o hello
./hello
set +x
'';
installPhase = ''
touch $out
'';
}

View File

@@ -0,0 +1,81 @@
{
writeText,
intelPackages,
nodes,
nosv,
clangOmpss2Nodes,
strace,
}:
let
hello_cpp = writeText "hello.cpp" ''
#include <cstdio>
#include <sycl/sycl.hpp>
int main(int argc, char** argv) try {
sycl::queue queue;
std::cout << "Running on: "
<< queue.get_device().get_info<sycl::info::device::name>()
<< std::endl;
#pragma oss task
queue.submit([&] (sycl::handler& cgh) {
auto os = sycl::stream{128, 128, cgh};
cgh.single_task<class hello_world>([=]() {
os << "Hello World! (on device)\n";
});
}).wait();
return 0;
} catch (sycl::exception &e) {
std::cout << "SYCL exception: " << e.what() << std::endl;
return 0; // we expect to fail since no devices should be available
}
'';
in
intelPackages.stdenv.mkDerivation {
version = "0.0.1";
name = "hello-syclompss";
src = hello_cpp;
nativeBuildInputs = [
strace
nodes
nosv
];
dontUnpack = true;
dontConfigure = true;
# NODES requires access to /sys/devices to request NUMA information
requiredSystemFeatures = [ "sys-devices" ];
env.NODES_HOME = nodes;
NIX_DEBUG = 1;
buildPhase = ''
cp $src hello.cpp
set -x
echo CXX=$CXX
echo NODES_HOME=$NODES_HOME
command -v $CXX
icpx -Wno-deprecated-declarations -fsycl \
-fsycl-host-compiler=${clangOmpss2Nodes.forIcpx}/bin/clang++ \
-fsycl-host-compiler-options='-Wno-deprecated-declarations -fompss-2=libnodes' \
-lnodes -lnosv \
$NODES_HOME/lib/nodes-main-wrapper.o \
hello.cpp -o hello
./hello
set +x
'';
installPhase = ''
touch $out
'';
}