Compare commits

..

10 Commits

SHA1 Message Date
7e6efdc136 weasel: use tent cache 2025-09-26 12:35:21 +02:00
17a88aa52e Add nixfmt-rfc-style to common packages 2025-09-26 12:35:21 +02:00
b88fd61e1a Add packages to user abonerib 2025-09-26 12:35:20 +02:00
e9845889bd Add nix-output-monitor to default packages 2025-09-26 12:35:20 +02:00
a3b7a2db07 Set fish shell for user abonerib 2025-09-26 12:35:20 +02:00
d2b64630d0 weasel: create user folders in /var/lib/podman-users 2025-09-26 12:35:20 +02:00
    /home is an NFS mount, which does not support the extra filesystem
    arguments needed to run podman. We need a local home.
a176d4f0d5 weasel: add podman 2025-09-26 12:35:20 +02:00
30bb59a354 Mega Merge 2025-09-26 12:35:20 +02:00
c993962708 Use hut substituter in all nodes 2025-09-26 12:34:37 +02:00
2e74af2cd8 Enable nosv system feature 2025-09-26 12:32:46 +02:00
15 changed files with 71 additions and 129 deletions

View File

@@ -1,46 +0,0 @@
#!/bin/sh
# Trims the jungle repository by moving the website to its own repository and
# removing it from jungle. It also removes big PDF files and kernel
# configurations so the jungle repository stays small.
set -e
if [ -e oldjungle -o -e newjungle -o -e website ]; then
echo "remove oldjungle/, newjungle/ and website/ first"
exit 1
fi
# Clone the old jungle repo
git clone gitea@tent:rarias/jungle.git oldjungle
# First split the website into a new repository
mkdir website && git -C website init -b master
git-filter-repo \
--path web \
--subdirectory-filter web \
--source oldjungle \
--target website
# Then remove the website, pdf files and big kernel configs
mkdir newjungle && git -C newjungle init -b master
git-filter-repo \
--invert-paths \
--path web \
--path-glob 'doc*.pdf' \
--path-glob '**/kernel/configs/lockdep' \
--path-glob '**/kernel/configs/defconfig' \
--source oldjungle \
--target newjungle
set -x
du -sh oldjungle newjungle website
# 57M oldjungle
# 2,3M newjungle
# 6,4M website
du -sh --exclude=.git oldjungle newjungle website
# 30M oldjungle
# 700K newjungle
# 3,5M website
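
The script stops after printing the sizes; publishing the split repositories is
left out. A hedged follow-up sketch — the remote URLs are assumptions modeled on
the ones used elsewhere in this compare, not part of the original script:

# Not in the original script: push the split repositories to new remotes.
git -C website remote add origin gitea@tent:rarias/jungle-website.git
git -C website push -u origin master
git -C newjungle remote add origin gitea@tent:rarias/jungle.git
git -C newjungle push --force -u origin master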

View File

@@ -11,11 +11,13 @@
./base/hw.nix
./base/net.nix
./base/nix.nix
./base/nosv.nix
./base/ntp.nix
./base/rev.nix
./base/ssh.nix
./base/users.nix
./base/watchdog.nix
./base/zsh.nix
./base/fish.nix
];
}

View File

@@ -5,6 +5,8 @@
vim wget git htop tmux pciutils tcpdump ripgrep nix-index nixos-option
nix-diff ipmitool freeipmi ethtool lm_sensors cmake gnumake file tree
ncdu config.boot.kernelPackages.perf ldns pv
nix-output-monitor
nixfmt-rfc-style
# From bscpkgs overlay
osumb
];

m/common/base/fish.nix Normal file
View File

@@ -0,0 +1,4 @@
{ ... }:
{
programs.fish.enable = true;
}
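
This module pairs with the `shell = pkgs.fish` user setting added later in this
compare: NixOS warns when a user's shell is set to fish while
`programs.fish.enable` is off, since the shell would then miss the environment
hooks Nix sets up. The two halves, side by side:

programs.fish.enable = true;            # m/common/base/fish.nix (this file)
users.users.abonerib.shell = pkgs.fish; # from the users hunk below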

m/common/base/nosv.nix Normal file
View File

@@ -0,0 +1,9 @@
{ ... }:
{
nix.settings.system-features = [ "nosv" ];
programs.nix-required-mounts.enable = true;
programs.nix-required-mounts.allowedPatterns.nosv.paths = [
"/sys/devices/system/cpu"
"/sys/devices/system/node"
];
}
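
A derivation opts into this gate through `requiredSystemFeatures`; a minimal
sketch with a hypothetical package (only the attribute is the point):

# Hypothetical derivation: Nix schedules it only on machines that
# advertise "nosv" in system-features, and nix-required-mounts exposes
# the /sys CPU and NUMA node paths listed above inside its sandbox.
pkgs.stdenv.mkDerivation {
  name = "nosv-example";
  src = ./.;
  requiredSystemFeatures = [ "nosv" ];
}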

View File

@@ -87,6 +87,12 @@
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIIFiqXqt88VuUfyANkZyLJNiuroIITaGlOOTMhVDKjf abonerib@bsc"
];
shell = pkgs.fish;
packages = with pkgs; [
starship
jujutsu
neovim
];
};
vlopez = {
@@ -156,30 +162,18 @@
};
csiringo = {
# Arbitrary UID but large so it doesn't collide with other users on ssfhead.
uid = 9653;
isNormalUser = true;
home = "/home/Computational/csiringo";
description = "Cesare Siringo";
group = "Computational";
hosts = [ ];
hosts = [ "apex" "weasel" ];
hashedPassword = "$6$0IsZlju8jFukLlAw$VKm0FUXbS.mVmPm3rcJeizTNU4IM5Nmmy21BvzFL.cQwvlGwFI1YWRQm6gsbd4nbg47mPDvYkr/ar0SlgF6GO1";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHA65zvvG50iuFEMf+guRwZB65jlGXfGLF4HO+THFaed csiringo@bsc.es"
];
};
acinca = {
uid = 9654;
isNormalUser = true;
home = "/home/Computational/acinca";
description = "Arnau Cinca";
group = "Computational";
hosts = [ "apex" "hut" "fox" "owl1" "owl2" ];
hashedPassword = "$6$S6PUeRpdzYlidxzI$szyvWejQ4hEN76yBYhp1diVO5ew1FFg.cz4lKiXt2Idy4XdpifwrFTCIzLTs5dvYlR62m7ekA5MrhcVxR5F/q/";
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFmMqKqPg4uocNOr3O41kLbZMOMJn3m2ZdN1JvTR96z3 bsccns@arnau-bsc"
];
};
};
groups = {

View File

@@ -2,13 +2,10 @@
let
website = pkgs.stdenv.mkDerivation {
name = "jungle-web";
src = pkgs.fetchgit {
url = "https://jungle.bsc.es/git/rarias/jungle-website.git";
rev = "739bf0175a7f05380fe7ad7023ff1d60db1710e1";
hash = "sha256-ea5DzhYTzZ9TmqD+x95rdNdLbxPnBluqlYH2NmBYmc4=";
};
src = theFlake;
buildInputs = [ pkgs.hugo ];
buildPhase = ''
cd web
rm -rf public/
hugo
'';
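
Both website hunks in this compare (this one and the identical one further
down) switch `src` from a pinned `fetchgit` checkout to `theFlake`, so the
website always builds from the current repository tree. How `theFlake` reaches
the module is not shown here; a plausible wiring, as an assumption, passes the
flake itself through `specialArgs` (the host name is illustrative):

# Assumed wiring (not shown in this compare): the flake passes itself to
# its NixOS modules, so `theFlake` resolves to the repository tree.
nixosConfigurations.somehost = nixpkgs.lib.nixosSystem {
  specialArgs = { theFlake = self; };
  modules = [ ./m/somehost/configuration.nix ];
};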

View File

@@ -4,7 +4,6 @@
imports = [
../common/ssf.nix
../module/monitoring.nix
../module/hut-substituter.nix
];
boot.loader.grub.device = "/dev/disk/by-id/wwn-0x55cd2e414d53563a";

View File

@@ -6,8 +6,5 @@
{
extra-substituters = [ "http://hut/cache" ];
extra-trusted-public-keys = [ "jungle.bsc.es:pEc7MlAT0HEwLQYPtpkPLwRsGf80ZI26aj29zMw/HH0=" ];
# Set a low timeout in case hut is down
connect-timeout = 3; # seconds
};
}
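
For reference, these settings land in the generated /etc/nix/nix.conf roughly
as the following lines (a sketch of the expected rendering, not copied from a
real machine):

extra-substituters = http://hut/cache
extra-trusted-public-keys = jungle.bsc.es:pEc7MlAT0HEwLQYPtpkPLwRsGf80ZI26aj29zMw/HH0=
connect-timeout = 3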

View File

@@ -12,12 +12,6 @@
# https://github.com/NixOS/nixpkgs/commit/ae93ed0f0d4e7be0a286d1fca86446318c0c6ffb
# https://bugs.schedmd.com/show_bug.cgi?id=2095#c24
KillMode = lib.mkForce "control-group";
# If slurmd fails to contact the control server it exits, leaving the
# node out of service until it is manually restarted. Always try to
# restart it.
Restart = "always";
RestartSec = "30s";
};
services.slurm.client.enable = true;

View File

@@ -2,13 +2,10 @@
let
website = pkgs.stdenv.mkDerivation {
name = "jungle-web";
src = pkgs.fetchgit {
url = "https://jungle.bsc.es/git/rarias/jungle-website.git";
rev = "739bf0175a7f05380fe7ad7023ff1d60db1710e1";
hash = "sha256-ea5DzhYTzZ9TmqD+x95rdNdLbxPnBluqlYH2NmBYmc4=";
};
src = theFlake;
buildInputs = [ pkgs.hugo ];
buildPhase = ''
cd web
rm -rf public/
hugo
'';

View File

@@ -4,6 +4,7 @@
imports = [
../common/ssf.nix
../module/hut-substituter.nix
./virtualization.nix
];
# Select this using the ID to avoid mismatches
@@ -30,4 +31,5 @@
prefixLength = 24;
} ];
};
}

View File

@@ -0,0 +1,40 @@
{
lib,
pkgs,
config,
...
}:
{
# Enable common container config files in /etc/containers
virtualisation.containers.enable = true;
virtualisation = {
podman = {
enable = true;
# Required for containers under podman-compose to be able to talk to each other.
defaultNetwork.settings.dns_enabled = true;
};
};
# We cannot use /home since NFS does not support the file attributes needed by podman
systemd.tmpfiles.settings = {
"podman-users" = lib.mapAttrs' (
name: value:
lib.nameValuePair ("/var/lib/podman-users/" + name) {
d = {
group = value.group;
mode = value.homeMode;
user = name;
};
}
) (lib.filterAttrs (_: x: x.isNormalUser) config.users.users);
};
# Other useful development tools
environment.systemPackages = with pkgs; [
dive # look into docker image layers
podman-tui # status of containers in the terminal
podman-compose # start group of containers for dev
];
}
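
The tmpfiles rule above only creates the per-user directories under
/var/lib/podman-users; pointing rootless podman at them is a separate step. One
illustrative way (paths and driver are assumptions, not part of this commit) is
a per-user storage.conf:

# ~/.config/containers/storage.conf — illustrative, not part of this commit
[storage]
driver = "overlay"
graphroot = "/var/lib/podman-users/<user>/storage"
runroot = "/run/user/<uid>/containers"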

View File

@@ -1,49 +0,0 @@
---
title: "Update 2025-09-26"
author: "Rodrigo Arias Mallo"
date: 2025-09-26
---

This is a summary of notable changes introduced in the last two years. We
continue to keep all machines updated to the latest NixOS release (currently
NixOS 25.05).

### New compute node: fox

We have a new [fox machine](/fox), with two AMD Genoa 9684X CPUs and two NVIDIA
RTX4000 GPUs. Over the last months we have been running tests, and most of the
components seem to work well. We have configured CUDA to use the NVIDIA GPUs,
as well as AMD uProf to trace performance and energy counters from the CPUs.

### Upgraded login node: apex

We have upgraded the operating system on the login node to NixOS, which now
runs Linux 6.15.6. During the upgrade, we detected a problem with the storage
disks. The `/` and `/home` partitions sit on a
[RAID 5](https://en.wikipedia.org/wiki/Standard_RAID_levels#RAID_5),
transparently handled by a hardware RAID controller which starts its own
firmware before passing control to the BIOS to continue the boot sequence. A
problem during the startup of that firmware prevented the node from even
reaching the BIOS screen.

After a long debugging session, we determined that the flash memory storing
the firmware of the hardware controller was the likely culprit, since
[memory cells](https://en.wikipedia.org/wiki/Flash_memory#Principles_of_operation)
may lose charge over time and can end up corrupting the content. We flashed
the latest firmware so the memory cells were charged again with the new bits,
and that fixed the problem. Hopefully we will be able to use it for some more
years.

The SLURM server has been moved to apex, which allows users to also submit
jobs to fox.

### Migrated machines to BSC building

The server room had a temperature issue that had been affecting our machines
since the end of February 2025. As the summer approached, the temperature
exceeded the safe limits for our hardware, so we had to shut down the cluster.

![Room temperature](temp.png)

Since then, we have moved the cluster to BSC premises, where it now rests at a
stable temperature, so hopefully we won't have more unscheduled downtime.

Binary file not shown. (Before: image, 97 KiB)