forked from rarias/bscpkgs

Simplify paths

2020-09-21 14:34:08 +02:00
parent dba1cc22bc
commit 126f05e92c
26 changed files with 392 additions and 95 deletions

View File

@@ -1,28 +0,0 @@
{
stdenv
, bash
}:
{
program
, env ? ""
, argv # bash array as string, example: argv=''(-f "file with spaces" -t 10)''
}:
stdenv.mkDerivation {
name = "argv";
preferLocalBuild = true;
phases = [ "installPhase" ];
installPhase = ''
cat > $out <<EOF
#!${bash}/bin/bash
# Requires /nix to use bash
${env}
argv=${argv}
exec ${program} "\''${argv[@]}"
EOF
chmod +x $out
'';
}
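
For reference, a minimal sketch of how this wrapper is invoked elsewhere in the repo (the nbody derivation and option values are illustrative; the wrapper is reached as runWrappers.argv, aliased to w.argv in the experiments):

w.argv {
  program = "${nbody}/bin/nbody";   # assumes an nbody derivation in scope
  env = ''
    export I_MPI_THREAD_SPLIT=1
  '';
  # Passed verbatim into the generated script as a bash array
  argv = ''( -t 20 -p 131072 )'';
}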

View File

@@ -1,25 +0,0 @@
{
stdenv
}:
{
program
, loops ? 30
}:
stdenv.mkDerivation {
name = "control";
preferLocalBuild = true;
phases = [ "installPhase" ];
dontPatchShebangs = true;
installPhase = ''
cat > $out <<EOF
#!/bin/sh
#set -e
for n in {1..${toString loops}}; do
${program}
done
EOF
chmod +x $out
'';
}

View File

@@ -1,64 +0,0 @@
{
pkgs
, callPackage
, callPackages
}:
let
garlic = {
# Load some helper functions to generate app variants
inherit (import ./gen.nix) genApps genApp genConfigs;
mpptest = callPackage ./mpptest { };
ppong = callPackage ./ppong {
mpi = pkgs.mpi;
};
nbody = callPackage ./nbody {
cc = pkgs.icc;
mpi = pkgs.impi;
tampi = pkgs.tampi;
gitBranch = "garlic/seq";
};
runWrappers = {
sbatch = callPackage ./sbatch.nix { };
srun = callPackage ./srun.nix { };
launch = callPackage ./launcher.nix { };
control = callPackage ./control.nix { };
nixsetup = callPackage ./nix-setup.nix { };
argv = callPackage ./argv.nix { };
statspy = callPackage ./statspy.nix { };
extrae = callPackage ./extrae.nix { };
stagen = callPackage ./stagen.nix { };
};
# Perf is tied to a specific Linux kernel version
linuxPackages = pkgs.linuxPackages_4_4;
perfWrapper = callPackage ./perf.nix {
perf = pkgs.linuxPackages.perf;
};
exp = {
noise = callPackage ./exp/noise.nix { };
nbody = {
bs = callPackage ./exp/nbody/bs.nix {
pkgs = pkgs // garlic;
};
mpi = callPackage ./exp/nbody/mpi.nix { };
};
osu = rec {
latency-internode = callPackage ./exp/osu/latency.nix { };
latency-intranode = callPackage ./exp/osu/latency.nix {
interNode = false;
};
latency = latency-internode;
};
};
};
in
garlic

View File

@@ -1,151 +0,0 @@
{
stdenv
, nixpkgs
, pkgs
, genApp
, genConfigs
, runWrappers
}:
with stdenv.lib;
let
# Set variable configuration for the experiment
varConfig = {
cc = [ pkgs.bsc.icc ];
blocksize = [ 1024 ];
};
# Common configuration
common = {
# Compile time nbody config
gitBranch = "garlic/mpi+send";
mpi = pkgs.bsc.impi;
# nbody runtime options
particles = 1024*128;
timesteps = 20;
# Resources
ntasksPerNode = "48";
nodes = "1";
# Stage configuration
enableSbatch = true;
enableControl = true;
enableExtrae = false;
enablePerf = false;
# MN4 path
nixPrefix = "/gpfs/projects/bsc15/nix";
};
# Compute the cartesian product of all configurations
configs = map (conf: conf // common) (genConfigs varConfig);
stageProgram = stage:
if stage ? programPath
then "${stage}${stage.programPath}" else "${stage}";
w = runWrappers;
sbatch = {stage, conf, ...}: with conf; w.sbatch {
program = stageProgram stage;
exclusive = true;
time = "02:00:00";
qos = "debug";
jobName = "nbody-bs";
inherit nixPrefix nodes ntasksPerNode;
};
control = {stage, conf, ...}: with conf; w.control {
program = stageProgram stage;
};
srun = {stage, conf, ...}: with conf; w.srun {
program = stageProgram stage;
srunOptions = "--cpu-bind=verbose,rank";
inherit nixPrefix;
};
statspy = {stage, conf, ...}: with conf; w.statspy {
program = stageProgram stage;
};
perf = {stage, conf, ...}: with conf; w.perf {
program = stageProgram stage;
perfArgs = "sched record -a";
};
nixsetup = {stage, conf, ...}: with conf; w.nixsetup {
program = stageProgram stage;
};
extrae = {stage, conf, ...}: w.extrae {
program = stageProgram stage;
traceLib = "mpi"; # mpi -> libtracempi.so
configFile = ./extrae.xml;
};
argv = {stage, conf, ...}: w.argv {
program = stageProgram stage;
env = ''
set -e
export I_MPI_THREAD_SPLIT=1
'';
argv = ''( -t ${toString conf.timesteps}
-p ${toString conf.particles} )'';
};
bscOverlay = import ../../../../overlay.nix;
genPkgs = newOverlay: nixpkgs {
overlays = [
bscOverlay
newOverlay
];
};
# We may be able to use overlays by invoking the fix function directly, but we
# have to get the definition of the bsc packages and the garlic ones as
# overlays.
nbodyFn = {stage, conf, ...}: with conf;
let
# We set the mpi implementation to the one specified in the conf, so all
# packages in bsc will use that one.
customPkgs = genPkgs (self: super: {
bsc = super.bsc // { mpi = conf.mpi; };
});
in
customPkgs.bsc.garlic.nbody.override {
inherit cc blocksize mpi gitBranch;
};
stages = with common; []
# Use sbatch to request resources first
++ optional enableSbatch sbatch
# Repeats the next stages N times
++ optionals enableControl [ nixsetup control ]
# Executes srun to launch the program on the requested nodes, and
# immediately afterwards enters the nix environment again, as slurmstepd launches
# the next stages from outside the namespace.
++ [ srun nixsetup ]
# Instrumentation with extrae
++ optional enableExtrae extrae
# Optionally profile the next stages with perf
++ optional enablePerf perf
# Execute the nbody app with the argv and env vars
++ [ argv nbodyFn ];
# List of actual programs to be executed
jobs = map (conf: w.stagen { inherit conf stages; }) configs;
in
# We simply run each program one after another
w.launch jobs
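
With the default flags above (enableSbatch and enableControl enabled, extrae and perf disabled), the stage list reduces to the chain below. This sketch is not part of the file; it only spells out the order in which stagen composes the wrappers, outermost first:

# stages == [ sbatch nixsetup control srun nixsetup argv nbodyFn ]
# resulting wrapping order:
#   sbatch -> nixsetup -> control -> srun -> nixsetup -> argv -> nbody binary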

View File

@@ -1,211 +0,0 @@
<?xml version='1.0'?>
<!-- Here comes the Extrae configuration.
As a general rule, "enabled" means that the feature is enabled :) If
it's not enabled, then the value can be set to some default.
-->
<!-- Must we activate the tracing? Which is the tracing mode? (detail/bursts) Where is it located? Which kind of trace? Version of the XML parser?-->
<trace enabled="yes"
home="/nix/store/j80mlqa12d1baifg30jsx2smv90akzvc-extrae"
initial-mode="detail"
type="paraver"
>
<!-- Configuration of some MPI dependent values -->
<mpi enabled="yes">
<!-- Gather counters in the MPI routines? -->
<counters enabled="yes" />
</mpi>
<!-- Emit information of the callstack -->
<callers enabled="yes">
<!-- At MPI calls, select depth level -->
<mpi enabled="yes">1-3</mpi>
<!-- At sampling points, select depth level -->
<sampling enabled="yes">1-5</sampling>
<!-- At dynamic memory system calls -->
<dynamic-memory enabled="no">1-3</dynamic-memory>
<!-- At I/O system calls -->
<input-output enabled="no">1-3</input-output>
<!-- At other system calls -->
<syscall enabled="no">1-3</syscall>
</callers>
<!-- Configuration of some OpenMP dependent values -->
<openmp enabled="no" ompt="no">
<!-- If the library instruments OpenMP, shall we gather info about locks?
Obtaining such information can make the final trace quite large.
-->
<locks enabled="no" />
<!-- Gather info about taskloops? -->
<taskloop enabled="no" />
<!-- Gather counters in the OpenMP routines? -->
<counters enabled="yes" />
</openmp>
<!-- Configuration of some pthread dependent values -->
<pthread enabled="no">
<!-- If the library instruments pthread, shall we gather info about locks,
mutexes and condition variables?
Obtaining such information can make the final trace quite large.
-->
<locks enabled="no" />
<!-- Gather counters in the pthread routines? -->
<counters enabled="yes" />
</pthread>
<!-- Configuration of User Functions -->
<user-functions enabled="no" list="/home/bsc41/bsc41273/user-functions.dat" exclude-automatic-functions="no">
<!-- Gather counters on the UF routines? -->
<counters enabled="yes" />
</user-functions>
<!-- Configure which software/hardware counters must be collected -->
<counters enabled="yes">
<!-- Configure the CPU hardware counters. You can define here as many sets
as you want. You can also define if MPI/OpenMP calls must report such
counters.
The starting-set-distribution property defines which set each task starts with.
Possible values are:
- cyclic : The sets are distributed in a cyclic fashion among all
tasks. So Task 0 takes set 1, Task 1 takes set 2,...
- block : The sets are distributed in block fashion among all tasks.
Task [0..i-1] takes set 1, Task [i..2*i-1] takes set 2, ...
- Number : All the tasks will start with the given set
(from 1..N).
-->
<cpu enabled="yes" starting-set-distribution="1">
<!-- In this example, we configure two sets of counters. The first will
be changed into the second after 5 calls to some collective
operation on MPI_COMM_WORLD. Once the second is activated, it will
turn to the first after 5 seconds (approx., depending on the MPI calls
granularity)
If you want any set to keep counting forever, just don't set
changeat-globalops or changeat-time.
Each set has its own properties.
domain -> in which domain must PAPI obtain the information (see
PAPI info)
changeat-globalops=num -> choose the next set after num
MPI_COMM_WORLD operations
changeat-time=numTime -> choose the next set after num Time
(for example 5s, 15m (for ms), 10M (for minutes),..)
-->
<set enabled="yes" domain="all">
PAPI_TOT_INS,PAPI_TOT_CYC
</set>
</cpu>
<!-- Do we want to gather information about the network counters?
Nowadays we can gather information about MX/GM cards.
-->
<network enabled="no" />
<!-- Obtain resource usage information -->
<resource-usage enabled="no" />
<!-- Obtain malloc statistics -->
<memory-usage enabled="no" />
</counters>
<!-- Define the characteristics of the tracing storage. If not defined,
or set, the tracing will send the traces to the current directory
with a default output name.
-->
<storage enabled="no">
<!-- The intermediate files will take the name of the application -->
<trace-prefix enabled="yes">TRACE</trace-prefix>
<!-- Stop the tracing when the intermediate file reaches this amount of MBs -->
<size enabled="no">5</size>
<!-- Where must we store the MPIT files while the app runs? -->
<temporal-directory enabled="yes">/scratch</temporal-directory>
<!-- Where must we store the MPIT files once the app ends? -->
<final-directory enabled="yes">/gpfs/scratch/bsc41/bsc41273</final-directory>
</storage>
<!-- Buffer configuration -->
<buffer enabled="yes">
<!-- How many events can we handle before any flush -->
<size enabled="yes">5000000</size>
<!-- Use the event buffer in a circular manner? You can use this option to
trace the last set of events. Needs MPI global routines operating on
MPI_COMM_WORLD communicator to be merged
-->
<circular enabled="no" />
</buffer>
<!-- Control tracing -->
<trace-control enabled="no">
<!-- We can start the application with a "latent tracing" and wake it up
once a control file is created. Use the property 'frequency' to
choose at which frequency this check must be done. If not supplied,
it will be checked every 100 global operations on MPI_COMM_WORLD.
-->
<file enabled="no" frequency="5M">/gpfs/scratch/bsc41/bsc41273/control</file>
<!--
-->
<global-ops enabled="no"></global-ops>
</trace-control>
<others enabled="yes">
<!-- Want to force a minimum amount of time of tracing? Here we force 10
minutes -->
<minimum-time enabled="no">10M</minimum-time>
<!-- Capture the following signals to finish cleanly -->
<finalize-on-signal enabled="yes"
SIGUSR1="no" SIGUSR2="no" SIGINT="yes"
SIGQUIT="yes" SIGTERM="yes" SIGXCPU="yes"
SIGFPE="yes" SIGSEGV="yes" SIGABRT="yes"
/>
<!-- Use instrumentation points to flush the sampling buffer -->
<flush-sampling-buffer-at-instrumentation-point enabled="yes" />
</others>
<!-- Bursts library enabled? This requires a special library! -->
<bursts enabled="no">
<!-- Specify the threshold. This is mandatory! In this example, the
threshold is limited to 500 microseconds
-->
<threshold enabled="yes">500u</threshold>
<!-- Report MPI statistics? -->
<mpi-statistics enabled="yes" />
</bursts>
<!-- Enable sampling capabilities using system clock.
Type may refer to: default, real, prof and virtual.
Period stands for the sampling period (50ms here)
plus a variability of 10ms, which means periods from
45 to 55ms.
-->
<sampling enabled="no" type="default" period="50m" variability="10m" />
<!-- Enable dynamic memory instrumentation (experimental) -->
<dynamic-memory enabled="no" />
<!-- Enable I/O (read, write) instrumentation (experimental) -->
<input-output enabled="no" internals="no"/>
<!-- Enable system calls instrumentation (experimental) -->
<syscall enabled="no" />
<!-- Merge the intermediate tracefiles into the final tracefile?
The result is named according to the binary name.
options:
synchronization = { default, task, node, no } (default is node)
max-memory = Number (in Mbytes) max memory used in merge step
joint-states = { yes, no } generate joint states?
keep-mpits = { yes, no } keep mpit files after merge?
-->
<merge enabled="yes"
synchronization="default"
tree-fan-out="16"
max-memory="512"
joint-states="yes"
keep-mpits="yes"
sort-addresses="yes"
overwrite="yes"
/>
</trace>

View File

@@ -1,88 +0,0 @@
{
bsc
, nbody
, genApp
, genConfigs
# Wrappers
, launchWrapper
, sbatchWrapper
, srunWrapper
, argvWrapper
, controlWrapper
, nixsetupWrapper
}:
let
# Set the configuration for the experiment
config = {
cc = [ bsc.icc ];
blocksize = [ 2048 ];
mpi = [ bsc.impi bsc.openmpi bsc.mpich ];
};
extraConfig = {
particles = 32*1024;
timesteps = 10;
ntasksPerNode = 2;
nodes = 1;
time = "00:10:00";
qos = "debug";
#mpi = bsc.impi;
#mpi = bsc.openmpi;
gitBranch = "garlic/mpi+send";
gitURL = "ssh://git@bscpm02.bsc.es/garlic/apps/nbody.git";
};
# Compute the cartesian product of all configurations
configs = map (conf: conf // extraConfig) (genConfigs config);
sbatch = conf: app: sbatchWrapper {
app = app;
nixPrefix = "/gpfs/projects/bsc15/nix";
exclusive = false;
ntasksPerNode = "${toString conf.ntasksPerNode}";
nodes = "${toString conf.nodes}";
time = conf.time;
qos = conf.qos;
chdirPrefix = "/home/bsc15/bsc15557/bsc-nixpkgs/out";
};
srun = app: srunWrapper {
app = app;
nixPrefix = "/gpfs/projects/bsc15/nix";
};
argv = conf: app:
with conf;
argvWrapper {
app = app;
argv = ''(-t ${toString timesteps} -p ${toString particles})'';
env = ''
export I_MPI_THREAD_SPLIT=1
'';
};
nbodyFn = conf:
with conf;
nbody.override { inherit cc mpi blocksize gitBranch gitURL; };
pipeline = conf:
sbatch conf (
srun (
nixsetupWrapper (
argv conf (
nbodyFn conf
)
)
)
)
;
# Ideally it should look like this:
#pipeline = sbatch nixsetup control argv nbodyFn;
jobs = map pipeline configs;
in
launchWrapper jobs

View File

@@ -1,132 +0,0 @@
{
bsc
, stdenv
, nbody
, genApp
, genConfigs
, runWrappers
}:
with stdenv.lib;
let
# Set variable configuration for the experiment
varConfig = {
cc = [ bsc.icc ];
blocksize = [ 1024 ];
};
# Common configuration
common = {
# Compile time nbody config
gitBranch = "garlic/mpi+send";
mpi = bsc.impi;
# nbody runtime options
particles = 1024*128;
timesteps = 20;
loops = 1000;
# Resources
ntasksPerNode = "48";
nodes = "1";
# Stage configuration
enableSbatch = true;
enableControl = true;
enableExtrae = false;
enablePerf = false;
# MN4 path
nixPrefix = "/gpfs/projects/bsc15/nix";
};
# Compute the cartesian product of all configurations
configs = map (conf: conf // common) (genConfigs varConfig);
stageProgram = stage:
if stage ? programPath
then "${stage}${stage.programPath}" else "${stage}";
w = runWrappers;
sbatch = {stage, conf, ...}: with conf; w.sbatch {
program = stageProgram stage;
exclusive = true;
time = "02:00:00";
qos = "debug";
jobName = "nbody-bs";
inherit nixPrefix nodes ntasksPerNode;
};
control = {stage, conf, ...}: with conf; w.control {
program = stageProgram stage;
inherit loops;
};
srun = {stage, conf, ...}: with conf; w.srun {
program = stageProgram stage;
srunOptions = "--cpu-bind=verbose,rank";
inherit nixPrefix;
};
statspy = {stage, conf, ...}: with conf; w.statspy {
program = stageProgram stage;
};
perf = {stage, conf, ...}: with conf; w.perf {
program = stageProgram stage;
perfArgs = "sched record -a";
};
nixsetup = {stage, conf, ...}: with conf; w.nixsetup {
program = stageProgram stage;
};
extrae = {stage, conf, ...}: w.extrae {
program = stageProgram stage;
traceLib = "mpi"; # mpi -> libtracempi.so
configFile = ./extrae.xml;
};
argv = {stage, conf, ...}: w.argv {
program = stageProgram stage;
env = ''
set -e
export I_MPI_THREAD_SPLIT=1
'';
argv = ''( -t ${toString conf.timesteps}
-p ${toString conf.particles} )'';
};
nbodyFn = {stage, conf, ...}: with conf; nbody.override {
inherit cc blocksize mpi gitBranch;
};
stages = with common; []
# Use sbatch to request resources first
++ optional enableSbatch sbatch
# Repeats the next stages N times
++ optionals enableControl [ nixsetup control ]
# Executes srun to launch the program on the requested nodes, and
# immediately afterwards enters the nix environment again, as slurmstepd launches
# the next stages from outside the namespace.
++ [ srun nixsetup ]
# Instrumentation with extrae
++ optional enableExtrae extrae
# Optionally profile the next stages with perf
++ optional enablePerf perf
# Execute the nbody app with the argv and env vars
++ [ argv nbodyFn ];
# List of actual programs to be executed
jobs = map (conf: w.stagen { inherit conf stages; }) configs;
in
# We simply run each program one after another
w.launch jobs

View File

@@ -1,86 +0,0 @@
{
bsc
, genApp
, genConfigs
# Wrappers
, launchWrapper
, sbatchWrapper
, srunWrapper
, argvWrapper
, controlWrapper
, nixsetupWrapper
# Should we test the network (true) or the shared memory (false)?
, interNode ? true
# Enable multiple threads?
, multiThread ? false
}:
let
# Set the configuration for the experiment
config = {
mpi = [ bsc.impi bsc.openmpi bsc.mpich ];
};
extraConfig = {
nodes = if interNode then 2 else 1;
ntasksPerNode = if interNode then 1 else 2;
time = "00:10:00";
qos = "debug";
};
# Compute the cartesian product of all configurations
configs = map (conf: conf // extraConfig) (genConfigs config);
sbatch = conf: app: sbatchWrapper {
app = app;
nixPrefix = "/gpfs/projects/bsc15/nix";
exclusive = true;
ntasksPerNode = "${toString conf.ntasksPerNode}";
nodes = "${toString conf.nodes}";
time = conf.time;
qos = conf.qos;
chdirPrefix = "/home/bsc15/bsc15557/bsc-nixpkgs/out";
};
srun = app: srunWrapper {
app = app;
nixPrefix = "/gpfs/projects/bsc15/nix";
};
argv = app:
argvWrapper {
app = app;
program = "bin/osu_latency";
argv = "()";
env = ''
export I_MPI_THREAD_SPLIT=1
'';
};
osumbFn = conf:
with conf;
bsc.osumb.override { inherit mpi; };
pipeline = conf:
sbatch conf (
nixsetupWrapper (
controlWrapper (
srun (
nixsetupWrapper (
argv (
osumbFn conf))))));
#pipeline = conf: sbatch conf (srun (nixsetupWrapper (argv (osumbFn conf))));
#pipeline = conf: sbatch conf (srun (nixsetupWrapper (argv bsc.osumb)));
# Ideally it should look like this:
#pipeline = sbatch nixsetup control argv nbodyFn;
jobs = map pipeline configs;
in
launchWrapper jobs

View File

@@ -1,37 +0,0 @@
{
stdenv
, bash
, extrae
#, writeShellScriptBin
}:
{
program
, configFile
, traceLib
}:
#writeShellScriptBin "extraeWrapper" ''
# export EXTRAE_HOME=${extrae}
# export LD_PRELOAD=${extrae}/lib/lib${traceLib}trace.so:$LD_PRELOAD
# export EXTRAE_CONFIG_FILE=${configFile}
# exec ${program}
#''
stdenv.mkDerivation {
name = "extrae";
preferLocalBuild = true;
phases = [ "installPhase" ];
installPhase = ''
cat > $out <<EOF
#!${bash}/bin/bash
# Requires /nix to use bash
export EXTRAE_HOME=${extrae}
export LD_PRELOAD=${extrae}/lib/lib${traceLib}trace.so:$LD_PRELOAD
export EXTRAE_CONFIG_FILE=${configFile}
exec ${program}
EOF
chmod +x $out
'';
}

View File

@@ -1,34 +0,0 @@
let
lib = import <nixpkgs/lib>;
gen = rec {
# genAttrSets "a" ["hello" "world"]
# [ { a = "hello"; } { a = "world"; } ]
genAttrSets = (name: arr: (map (x: {${name}=x; })) arr);
# addAttrSets "a" [1 2] {e=4;}
# [ { a = 1; e = 4; } { a = 2; e = 4; } ]
addAttrSets = (name: arr: set: (map (x: set // {${name}=x; })) arr);
# attrToList {a=1;}
# [ { name = "a"; value = 1; } ]
attrToList = (set: map (name: {name=name; value=set.${name};} ) (builtins.attrNames set));
# mergeConfig [{e=1;}] {name="a"; value=[1 2];}
# [ { a = 1; e = 1; } { a = 2; e = 1; } ]
mergeConfig = (arr: new: lib.flatten ( map (x: addAttrSets new.name new.value x) arr));
# genConfigs {a=[1 2]; b=[3 4];}
# [ { a = 1; b = 3; } { a = 1; b = 4; } { a = 2; b = 3; } { a = 2; b = 4; } ]
genConfigs = (config: lib.foldl mergeConfig [{}] (attrToList config));
# Generate multiple app versions by override with each config
genApp = (app: configs: map (conf: app.override conf // {conf=conf;}) configs);
# Generate app versions from an array of apps
genApps = (apps: configs:
lib.flatten (map (app: genApp app configs) apps));
};
in
gen
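
A minimal sketch of how these helpers are combined in the experiment files (the attribute values are illustrative; in the real experiments the lists hold packages rather than strings):

let
  gen = import ./gen.nix;
  varConfig = { blocksize = [ 1024 2048 ]; mpi = [ "impi" "openmpi" ]; };
  common = { timesteps = 10; };
in
  # Cartesian product of varConfig, each result extended with the common attrs:
  # four attrsets such as { blocksize = 1024; mpi = "impi"; timesteps = 10; }
  map (conf: conf // common) (gen.genConfigs varConfig)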

View File

@@ -1,42 +0,0 @@
{
stdenv
}:
apps: # Each app must be unique
stdenv.mkDerivation {
name = "launcher";
preferLocalBuild = true;
buildInputs = [] ++ apps;
apps = apps;
phases = [ "installPhase" ];
dontPatchShebangs = true;
installPhase = ''
mkdir -p $out/apps
for j in $apps; do
target=$out/apps/$(basename $j)
if [ -e $target ]; then
echo "Duplicated app: $j"
echo
echo "Provided apps: "
printf "%s\n" $apps
echo
exit 1
fi
ln -s $j $target
done
mkdir -p $out/bin
cat > $out/bin/run <<EOF
#!/bin/sh
for j in $out/apps/*; do
\$j/bin/run
done
EOF
chmod +x $out/bin/run
'';
}

View File

@@ -1,16 +0,0 @@
{
stdenv
, mpi
, fetchurl
}:
stdenv.mkDerivation {
name = "mpptest";
src = fetchurl {
url = "http://ftp.mcs.anl.gov/pub/mpi/tools/perftest.tar.gz";
sha256 = "11i22lq3pch3pvmhnbsgxzd8ap4yvpvlhy2f7k8x3krdwjhl0jvl";
};
buildInputs = [ mpi ];
}

View File

@@ -1,29 +0,0 @@
{
stdenv
, particles
, timestamps
, program
, config
}:
stdenv.mkDerivation {
inherit program;
passthru = {
inherit config;
};
name = "${program.name}-argv";
preferLocalBuild = true;
phases = [ "installPhase" ];
dontPatchShebangs = true;
installPhase = ''
mkdir -p $out/bin
cat > $out/bin/run <<EOF
#!/bin/sh
exec ${program}/bin/run -p ${toString config.particles} -t ${toString config.timesteps}
EOF
chmod +x $out/bin/run
'';
}

View File

@@ -1,46 +0,0 @@
{
stdenv
, cc
, tampi ? null
, mpi ? null
, cflags ? null
, gitBranch
, gitURL ? "ssh://git@bscpm02.bsc.es/garlic/apps/nbody.git"
, blocksize ? 2048
}:
with stdenv.lib;
stdenv.mkDerivation rec {
name = "nbody";
src = /home/Computational/rarias/bscpkgs/manual/nbody;
#src = builtins.fetchGit {
# url = "${gitURL}";
# ref = "${gitBranch}";
#};
programPath = "/bin/nbody";
buildInputs = [
cc
]
++ optional (mpi != null) [ mpi ];
preBuild = (if cflags != null then ''
makeFlagsArray+=(CFLAGS="${cflags}")
'' else "");
makeFlags = [
"CC=${cc.cc.CC}"
"BS=${toString blocksize}"
];
dontPatchShebangs = true;
installPhase = ''
echo ${tampi}
mkdir -p $out/bin
cp nbody* $out/bin/${name}
'';
}

View File

@@ -1,28 +0,0 @@
{
stdenv
}:
{
program
}:
stdenv.mkDerivation {
name = "nixsetup";
preferLocalBuild = true;
phases = [ "installPhase" ];
dontPatchShebangs = true;
installPhase = ''
cat > $out <<EOF
#!/bin/sh
# We need to enter the nix namespace first, in order to have /nix
# available, so we use this hack:
if [ ! -e /nix ]; then
exec nix-setup \$0
fi
exec ${program}
EOF
chmod +x $out
'';
}

View File

@@ -1,24 +0,0 @@
{
stdenv
, bash
, perf
}:
{
program
, perfArgs ? "record -a"
}:
stdenv.mkDerivation {
name = "perfWrapper";
preferLocalBuild = true;
phases = [ "installPhase" ];
installPhase = ''
cat > $out <<EOF
#!${bash}/bin/bash
exec ${perf}/bin/perf ${perfArgs} ${program}
EOF
chmod +x $out
'';
}

View File

@@ -1,34 +0,0 @@
{
stdenv
, mpi
, fetchurl
}:
stdenv.mkDerivation {
name = "ppong";
src = fetchurl {
url = "http://www.csl.mtu.edu/cs4331/common/PPong.c";
sha256 = "0d1w72gq9627448cb7ykknhgp2wszwd117dlbalbrpf7d0la8yc0";
};
unpackCmd = ''
mkdir src
cp $src src/ppong.c
'';
dontConfigure = true;
buildPhase = ''
echo mpicc -include stdlib.h ppong.c -o ppong
mpicc -include stdlib.h ppong.c -o ppong
'';
installPhase = ''
mkdir -p $out/bin
cp ppong $out/bin/ppong
ln -s $out/bin/ppong $out/bin/run
'';
buildInputs = [ mpi ];
}

View File

@@ -1,84 +0,0 @@
{
stdenv
, numactl
}:
{
program
, jobName
, chdirPrefix ? "."
, nixPrefix ? ""
, binary ? "/bin/run"
, ntasks ? null
, ntasksPerNode ? null
, nodes ? null
, exclusive ? true # By default we run in exclusive mode
, qos ? null
, reservation ? null
, time ? null
, output ? "job_%j.out"
, error ? "job_%j.err"
, contiguous ? null
, extra ? null
, acctgFreq ? null
}:
with stdenv.lib;
let
sbatchOpt = name: value: optionalString (value!=null)
"#SBATCH --${name}=${value}\n";
sbatchEnable = name: value: optionalString (value!=null)
"#SBATCH --${name}\n";
in
stdenv.mkDerivation rec {
name = "sbatch";
preferLocalBuild = true;
phases = [ "installPhase" ];
#SBATCH --tasks-per-node=48
#SBATCH --ntasks-per-socket=24
#SBATCH --cpus-per-task=1
dontBuild = true;
dontPatchShebangs = true;
programPath = "/${name}";
installPhase = ''
mkdir -p $out
cat > $out/job <<EOF
#!/bin/sh
#SBATCH --job-name="${jobName}"
''
+ sbatchOpt "ntasks" ntasks
+ sbatchOpt "ntasks-per-node" ntasksPerNode
+ sbatchOpt "nodes" nodes
+ sbatchOpt "chdir" "${chdirPrefix}/$(basename $out)"
+ sbatchOpt "output" output
+ sbatchOpt "error" error
+ sbatchEnable "exclusive" exclusive
+ sbatchOpt "time" time
+ sbatchOpt "qos" qos
+ sbatchOpt "reservation" reservation
+ sbatchOpt "acctg-freq" acctgFreq
+ optionalString (extra!=null) extra
+
''
exec ${nixPrefix}${program}
EOF
cat > $out/${name} <<EOF
#!/bin/sh
if [ -e "${chdirPrefix}/$(basename $out)" ]; then
>&2 echo "Execution aborted: '${chdirPrefix}/$(basename $out)' already exists"
exit 1
fi
mkdir -p "${chdirPrefix}/$(basename $out)"
echo sbatch ${nixPrefix}$out/job
sbatch ${nixPrefix}$out/job
EOF
chmod +x $out/${name}
'';
}
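
As a sketch of what the two helpers above expand to (the values are illustrative):

# sbatchOpt "qos" "debug"        ->  "#SBATCH --qos=debug\n"
# sbatchOpt "reservation" null   ->  ""    (the option is omitted)
# sbatchEnable "exclusive" true  ->  "#SBATCH --exclusive\n"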

View File

@@ -1,22 +0,0 @@
{
stdenv
}:
{
program
, nixPrefix ? ""
, srunOptions ? ""
}:
stdenv.mkDerivation rec {
name = "srun";
preferLocalBuild = true;
phases = [ "installPhase" ];
dontPatchShebangs = true;
installPhase = ''
cat > $out <<EOF
#!/bin/sh
exec srun --mpi=pmi2 ${srunOptions} ${nixPrefix}${program}
EOF
chmod +x $out
'';
}

View File

@@ -1,55 +0,0 @@
{
stdenv
, bash
, extrae
, writeShellScriptBin
, jq
}:
{
stages
, conf
, experimentName ? "run"
}:
with stdenv.lib;
let
dStages = foldr (stageFn: {conf, prevStage, stages}: {
conf = conf;
prevStage = stageFn {stage=prevStage; conf=conf;};
stages = [ (stageFn {stage=prevStage; conf=conf;}) ] ++ stages;
})
{conf=conf; stages=[]; prevStage=null;} stages;
stageProgram = stage:
if stage ? programPath
then "${stage}${stage.programPath}" else "${stage}";
linkStages = imap1 (i: s: {
name = "${toString i}-${baseNameOf s.name}";
path = stageProgram s;
}) dStages.stages;
createLinks = builtins.concatStringsSep "\n"
(map (x: "ln -s ${x.path} $out/bin/${x.name}") linkStages);
firstStageLink = (x: x.name) (elemAt linkStages 0);
in
stdenv.mkDerivation {
name = "stagen";
preferLocalBuild = true;
phases = [ "installPhase" ];
buildInputs = [ jq ];
installPhase = ''
mkdir -p $out/bin
${createLinks}
ln -s ${firstStageLink} $out/bin/${experimentName}
cat > $out/config.raw << EOF
${builtins.toJSON conf}
EOF
jq . $out/config.raw > $out/config.json
rm $out/config.raw
'';
}
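
To make the fold above concrete, a sketch with hypothetical stage names: foldr starts from the last element, so each stage function wraps everything that follows it in the list, and the first element becomes the entry point of the experiment:

# stages = [ sbatch srun app ];
# app'    = app    { stage = null;  conf = conf; };   # innermost
# srun'   = srun   { stage = app';  conf = conf; };   # wraps app'
# sbatch' = sbatch { stage = srun'; conf = conf; };   # outermost
# dStages.stages == [ sbatch' srun' app' ]
# symlinks: 1-sbatch (exposed as $out/bin/<experimentName>), 2-srun, 3-app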

View File

@@ -1,29 +0,0 @@
{
stdenv
, bash
}:
{
program
, outputDir ? "."
}:
stdenv.mkDerivation rec {
name = "statspy";
preferLocalBuild = true;
phases = [ "installPhase" ];
programPath = "/bin/${name}";
installPhase = ''
mkdir -p $out/bin
cat > $out/bin/${name} <<EOF
#!${bash}/bin/bash
mkdir -p ${outputDir}
cat /proc/[0-9]*/stat | sort -n > ${outputDir}/statspy.\$(date +%s.%3N).begin
${program}
cat /proc/[0-9]*/stat | sort -n > ${outputDir}/statspy.\$(date +%s.%3N).end
EOF
chmod +x $out/bin/${name}
'';
}