8 Commits

Author SHA1 Message Date
5cfd7f0858 Fetch website from its own git repository
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-02 15:45:21 +02:00
9c5e22d62d Add script to trim the repository
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-02 15:44:56 +02:00
00fe0f46a1 Add acinca user
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-10-01 12:27:43 +02:00
79940876c3 Restart slurmd on failure
A failure to reach the control node can cause slurmd to fail, leaving the unit
in the failed state until it is manually restarted. Instead, try to restart the
service every 30 seconds, forever:

    owl1% systemctl show slurmd | grep -E 'Restart=|RestartUSec='
    Restart=on-failure
    RestartUSec=30s
    owl1% pgrep slurmd
    5903
    owl1% sudo kill -SEGV 5903
    owl1% pgrep slurmd
    6137

Fixes: #177
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-09-30 17:20:39 +02:00
163d19bd05 Lower connect timeout when using hut substituter
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-09-29 18:44:48 +02:00
360f67cfab Use hut substituter in all nodes
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-09-29 18:44:38 +02:00
a402bc880c Remove machine access for user csiringo
Reviewed-by: Rodrigo Arias Mallo <rodrigo.arias@bsc.es>
2025-09-29 18:23:24 +02:00
c441178910 Add web post update for 2025
Reviewed-by: Aleix Boné <abonerib@bsc.es>
2025-09-29 18:02:21 +02:00
16 changed files with 136 additions and 23 deletions

doc/trim.sh Executable file
View File

@@ -0,0 +1,46 @@
#!/bin/sh
# Trims the jungle repository by moving the website to its own repository and
# removing it from jungle. It also removes big PDF files and kernel
# configurations so the jungle repository stays small.
set -e
if [ -e oldjungle -o -e newjungle -o -e website ]; then
    echo "remove oldjungle/, newjungle/ and website/ first"
    exit 1
fi
# Clone the old jungle repo
git clone gitea@tent:rarias/jungle.git oldjungle
# First split the website into a new repository
mkdir website && git -C website init -b master
git-filter-repo \
    --path web \
    --subdirectory-filter web \
    --source oldjungle \
    --target website
# Then remove the website, pdf files and big kernel configs
mkdir newjungle && git -C newjungle init -b master
git-filter-repo \
    --invert-paths \
    --path web \
    --path-glob 'doc*.pdf' \
    --path-glob '**/kernel/configs/lockdep' \
    --path-glob '**/kernel/configs/defconfig' \
    --source oldjungle \
    --target newjungle
set -x
du -sh oldjungle newjungle website
# 57M oldjungle
# 2,3M newjungle
# 6,4M website
du -sh --exclude=.git oldjungle newjungle website
# 30M oldjungle
# 700K newjungle
# 3,5M website
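
A natural follow-up, not performed by the script, is to push the freshly split
history to its new remote. A minimal sketch, assuming an empty jungle-website
repository has already been created on the Gitea instance and is reachable over
the same SSH alias used above (remote name and branch are illustrative):

    git -C website remote add origin gitea@tent:rarias/jungle-website.git
    git -C website push -u origin master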

View File

@@ -5,6 +5,7 @@
../common/xeon.nix
../common/ssf/hosts.nix
../module/ceph.nix
../module/hut-substituter.nix
../module/slurm-server.nix
./nfs.nix
./wireguard.nix
@@ -65,10 +66,4 @@
iptables -I nixos-fw 2 -p tcp -s 84.88.52.176 -j nixos-fw-refuse
'';
};
# Use tent for cache
nix.settings = {
extra-substituters = [ "https://jungle.bsc.es/cache" ];
extra-trusted-public-keys = [ "jungle.bsc.es:pEc7MlAT0HEwLQYPtpkPLwRsGf80ZI26aj29zMw/HH0=" ];
};
}

View File

@@ -3,6 +3,7 @@
{
imports = [
../common/ssf.nix
../module/hut-substituter.nix
../module/monitoring.nix
];

View File

@@ -156,18 +156,30 @@
};
csiringo = {
# Arbitrary UID but large so it doesn't collide with other users on ssfhead.
uid = 9653;
isNormalUser = true;
home = "/home/Computational/csiringo";
description = "Cesare Siringo";
group = "Computational";
hosts = [ "apex" "weasel" ];
hosts = [ ];
hashedPassword = "$6$0IsZlju8jFukLlAw$VKm0FUXbS.mVmPm3rcJeizTNU4IM5Nmmy21BvzFL.cQwvlGwFI1YWRQm6gsbd4nbg47mPDvYkr/ar0SlgF6GO1";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHA65zvvG50iuFEMf+guRwZB65jlGXfGLF4HO+THFaed csiringo@bsc.es"
];
};
acinca = {
uid = 9654;
isNormalUser = true;
home = "/home/Computational/acinca";
description = "Arnau Cinca";
group = "Computational";
hosts = [ "apex" "hut" "fox" "owl1" "owl2" ];
hashedPassword = "$6$S6PUeRpdzYlidxzI$szyvWejQ4hEN76yBYhp1diVO5ew1FFg.cz4lKiXt2Idy4XdpifwrFTCIzLTs5dvYlR62m7ekA5MrhcVxR5F/q/";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFmMqKqPg4uocNOr3O41kLbZMOMJn3m2ZdN1JvTR96z3 bsccns@arnau-bsc"
];
};
};
groups = {

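The hashedPassword values above are SHA-512 crypt hashes. A minimal sketch of
how such a hash can be generated for a new account, assuming the mkpasswd tool
from nixpkgs:

    # Prompts for the password and prints a $6$... hash for hashedPassword.
    nix run nixpkgs#mkpasswd -- -m sha-512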
View File

@@ -9,6 +9,7 @@
./cpufreq.nix
./fs.nix
./users.nix
../module/hut-substituter.nix
../module/debuginfod.nix
];

View File

@@ -8,6 +8,7 @@
../module/emulation.nix
../module/nvidia.nix
../module/slurm-client.nix
../module/hut-substituter.nix
./wireguard.nix
];
@@ -62,12 +63,6 @@
interfaces.enp1s0f0np0.useDHCP = true;
};
# Use hut for cache
nix.settings = {
extra-substituters = [ "https://jungle.bsc.es/cache" ];
extra-trusted-public-keys = [ "jungle.bsc.es:pEc7MlAT0HEwLQYPtpkPLwRsGf80ZI26aj29zMw/HH0=" ];
};
# Recommended for new graphics cards
hardware.nvidia.open = true;

View File

@@ -2,10 +2,13 @@
let
website = pkgs.stdenv.mkDerivation {
name = "jungle-web";
src = theFlake;
src = pkgs.fetchgit {
url = "https://jungle.bsc.es/git/rarias/jungle-website.git";
rev = "739bf0175a7f05380fe7ad7023ff1d60db1710e1";
hash = "sha256-ea5DzhYTzZ9TmqD+x95rdNdLbxPnBluqlYH2NmBYmc4=";
};
buildInputs = [ pkgs.hugo ];
buildPhase = ''
cd web
rm -rf public/
hugo
'';

View File

@@ -4,6 +4,7 @@
imports = [
../common/ssf.nix
../module/monitoring.nix
../module/hut-substituter.nix
];
boot.loader.grub.device = "/dev/disk/by-id/wwn-0x55cd2e414d53563a";

View File

@@ -6,5 +6,8 @@
{
extra-substituters = [ "http://hut/cache" ];
extra-trusted-public-keys = [ "jungle.bsc.es:pEc7MlAT0HEwLQYPtpkPLwRsGf80ZI26aj29zMw/HH0=" ];
# Set a low timeout in case hut is down
connect-timeout = 3; # seconds
};
}
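
Once a node picks up this module, the merged settings can be verified locally; a
quick check, assuming a recent Nix (older versions use `nix show-config` instead
of `nix config show`):

    # Expect connect-timeout = 3 and http://hut/cache among the substituters.
    nix config show | grep -E 'connect-timeout|substituters'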

View File

@@ -12,6 +12,12 @@
# https://github.com/NixOS/nixpkgs/commit/ae93ed0f0d4e7be0a286d1fca86446318c0c6ffb
# https://bugs.schedmd.com/show_bug.cgi?id=2095#c24
KillMode = lib.mkForce "control-group";
# If slurmd fails to contact the control server it will fail, causing the
# node to remain out of service until manually restarted. Always try to
# restart it.
Restart = "always";
RestartSec = "30s";
};
services.slurm.client.enable = true;

View File

@@ -9,6 +9,7 @@
../module/nvidia.nix
../eudy/kernel/perf.nix
./wireguard.nix
../module/hut-substituter.nix
];
# Don't install Grub on the disk yet
@@ -51,11 +52,6 @@
options = [ "nfsvers=3" "rsize=1024" "wsize=1024" "cto" "nofail" ];
};
nix.settings = {
extra-substituters = [ "https://jungle.bsc.es/cache" ];
extra-trusted-public-keys = [ "jungle.bsc.es:pEc7MlAT0HEwLQYPtpkPLwRsGf80ZI26aj29zMw/HH0=" ];
};
# Enable performance governor
powerManagement.cpuFreqGovernor = "performance";

View File

@@ -15,6 +15,7 @@
../hut/msmtp.nix
../module/p.nix
../module/vpn-dac.nix
../module/hut-substituter.nix
];
# Select this using the ID to avoid mismatches

View File

@@ -2,10 +2,13 @@
let
website = pkgs.stdenv.mkDerivation {
name = "jungle-web";
src = theFlake;
src = pkgs.fetchgit {
url = "https://jungle.bsc.es/git/rarias/jungle-website.git";
rev = "739bf0175a7f05380fe7ad7023ff1d60db1710e1";
hash = "sha256-ea5DzhYTzZ9TmqD+x95rdNdLbxPnBluqlYH2NmBYmc4=";
};
buildInputs = [ pkgs.hugo ];
buildPhase = ''
cd web
rm -rf public/
hugo
'';

View File

@@ -3,6 +3,7 @@
{
imports = [
../common/ssf.nix
../module/hut-substituter.nix
];
# Select this using the ID to avoid mismatches

View File

@@ -0,0 +1,49 @@
---
title: "Update 2025-09-26"
author: "Rodrigo Arias Mallo"
date: 2025-09-26
---

This is a summary of notable changes introduced in the last two years. We
continue to maintain all machines updated to the latest NixOS release (currently
NixOS 25.05).

### New compute node: fox

We have a new [fox machine](/fox), with two AMD Genoa 9684X CPUs and two NVIDIA
RTX4000 GPUs. Over the last few months we have run some tests and most of the
components seem to work well. We have configured CUDA to use the NVIDIA GPUs, as
well as AMD uProf to trace performance and energy counters from the CPUs.

### Upgraded login node: apex

We have upgraded the operating system on the login node to NixOS, which now runs
Linux 6.15.6. During the upgrade, we detected a problem with the storage disks.
The `/` and `/home` partitions sit on a
[RAID 5](https://en.wikipedia.org/wiki/Standard_RAID_levels#RAID_5),
transparently handled by a hardware RAID controller which starts its own
firmware before handing control to the BIOS to continue the boot sequence. A
problem during the startup of this firmware prevented the node from even
reaching the BIOS screen.

After a long debugging session, we found that the flash memory holding the
controller firmware was the likely culprit, since
[memory cells](https://en.wikipedia.org/wiki/Flash_memory#Principles_of_operation)
may lose charge over time and end up corrupting their content. We flashed the
latest firmware so the memory cells are charged again with the new bits, and
that fixed the problem. Hopefully we will be able to use it for some more years.

The SLURM server has been moved to apex, which allows users to also submit jobs
to fox.

### Migrated machines to BSC building

The server room had a temperature issue that had been affecting our machines
since the end of February 2025. As the summer approached, the temperature
exceeded the safe limits for our hardware, so we had to shut down the cluster.

![Room temperature](temp.png)

Since then, we have moved the cluster to BSC premises, where it now rests at a
stable temperature, so hopefully we won't have more unscheduled downtime.

Binary file not shown (image, 97 KiB).