hpcg: update figures and remove old ones

Rodrigo Arias 2021-04-19 16:05:10 +02:00
parent 866d4561d3
commit 3e197da8a3
10 changed files with 287 additions and 567 deletions

@@ -0,0 +1,62 @@
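# Plots for the HPCG granularity experiment: read the JSON results, normalize
# the time of each run against the median of its unit, and plot the normalized
# and the raw time as a function of the number of blocks per CPU.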
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
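# Read the input dataset from argv[1], or use the file named "input" in the
# current directory by default. The script can be run directly with:
#   Rscript <path-to-this-script> <input-dataset>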
args = commandArgs(trailingOnly=TRUE)
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
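# Load the newline-delimited JSON results, flatten the nested config.* fields,
# keep only the columns we need, and compute the per-unit median time and the
# normalized time (time / median - 1).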
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
jsonlite::flatten() %>%
select(config.nblocks,
config.ncomms,
config.hw.cpusPerSocket,
config.blocksPerCpu,
unit,
time) %>%
rename(nblocks=config.nblocks,
ncomms=config.ncomms,
blocksPerCpu=config.blocksPerCpu) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(unit = as.factor(unit)) %>%
group_by(unit) %>%
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
ungroup()
dpi=300
h=5
w=5
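# Boxplot of the normalized time for each blocks-per-CPU value, with dashed red
# lines marking the 1% band above and below the median.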
p = ggplot(df, aes(x=blocksPerCpu, y=normalized.time)) +
geom_boxplot() +
geom_hline(yintercept=c(-0.01, 0.01), linetype="dashed", color="red") +
theme_bw() +
labs(x="Blocks per CPU", y="Normalized time", title="HPCG granularity: normalized time",
subtitle=input_file) +
theme(plot.subtitle=element_text(size=8))
ggsave("normalized.time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("normalized.time.pdf", plot=p, width=w, height=h, dpi=dpi)
p = ggplot(df, aes(x=blocksPerCpu, y=time)) +
geom_point(shape=21, size=3) +
theme_bw() +
labs(x="Blocks per CPU", y="Time (s)", title="HPCG granularity: time",
subtitle=input_file) +
theme(plot.subtitle=element_text(size=8))
ggsave("time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.pdf", plot=p, width=w, height=h, dpi=dpi)

@@ -1,112 +0,0 @@
# This R program takes as argument the dataset that contains the results of the
# execution of the HPCG experiment and produces some plots. All the
# knowledge needed to understand how this script works is covered by this nice R book:
#
# Winston Chang, R Graphics Cookbook: Practical Recipes for Visualizing Data,
# O'Reilly Media (2020), 2nd edition.
#
# It can be read online for free here: https://r-graphics.org/
#
# Please search this book before copying some random (and probably outdated)
# reply on Stack Overflow.
# We load some R packages to import the required functions. We mainly use the
# tidyverse packages, which are very good for plotting data.
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
# Here we simply load the arguments to find the input dataset. If nothing is
# specified we use the file named `input` in the current directory.
# We can run this script directly using:
# Rscript <path-to-this-script> <input-dataset>
# Load the arguments (argv)
args = commandArgs(trailingOnly=TRUE)
# Set the input dataset if given in argv[1], or use "input" as default
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
# Then we flatten it, as it may contain dictionaries inside the columns
jsonlite::flatten() %>%
# Now the dataframe contains all the configuration of the units inside the
# columns named `config.*`, for example `config.nblocks`. We first select only
# the columns that we need:
select(config.nblocks, config.ncommblocks, config.hw.cpusPerSocket, unit, time) %>%
# And then we rename those columns to something shorter:
rename(nblocks=config.nblocks,
ncommblocks=config.ncommblocks,
cpusPerSocket=config.hw.cpusPerSocket) %>%
mutate(blocksPerCpu = nblocks / cpusPerSocket) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(unit = as.factor(unit)) %>%
group_by(unit) %>%
# And compute some metrics which are applied to each group. For example we
# compute the median time within the runs of a unit:
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
# Then, we remove the grouping. This step is very important, otherwise the
# plotting functions get confused:
ungroup()
dpi=300
h=5
w=5
p = ggplot(df, aes(x=blocksPerCpu, y=normalized.time)) +
# The boxplots are useful to identify outliers and problems with the
# distribution of time
geom_boxplot() +
# We add a line to mark the 1% limit above and below the median
geom_hline(yintercept=c(-0.01, 0.01), linetype="dashed", color="red") +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Blocks per CPU", y="Normalized time", title="HPCG granularity: normalized time",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("normalized.time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("normalized.time.pdf", plot=p, width=w, height=h, dpi=dpi)
# We plot the time of each run as we vary the block size
p = ggplot(df, aes(x=blocksPerCpu, y=time)) +
# We add points (scatter plot) using circles (shape=21) a bit larger
# than the default (size=3)
geom_point(shape=21, size=3) +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Blocks Per CPU", y="Time (s)", title="HPCG granularity: time",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.pdf", plot=p, width=w, height=h, dpi=dpi)

@@ -1,112 +0,0 @@
# This R program takes as argument the dataset that contains the results of the
# execution of the HPCG experiment and produces some plots. All the
# knowledge needed to understand how this script works is covered by this nice R book:
#
# Winston Chang, R Graphics Cookbook: Practical Recipes for Visualizing Data,
# O'Reilly Media (2020), 2nd edition.
#
# It can be read online for free here: https://r-graphics.org/
#
# Please search this book before copying some random (and probably outdated)
# reply on Stack Overflow.
# We load some R packages to import the required functions. We mainly use the
# tidyverse packages, which are very good for plotting data.
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
# Here we simply load the arguments to find the input dataset. If nothing is
# specified we use the file named `input` in the current directory.
# We can run this script directly using:
# Rscript <path-to-this-script> <input-dataset>
# Load the arguments (argv)
args = commandArgs(trailingOnly=TRUE)
# Set the input dataset if given in argv[1], or use "input" as default
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
# Then we flatten it, as it may contain dictionaries inside the columns
jsonlite::flatten() %>%
# Now the dataframe contains all the configuration of the units inside the
# columns named `config.*`, for example `config.nblocks`. We first select only
# the columns that we need:
select(config.nblocks, config.ncommblocks, config.hw.cpusPerSocket, unit, time) %>%
# And then we rename those columns to something shorter:
rename(nblocks=config.nblocks,
ncommblocks=config.ncommblocks,
cpusPerSocket=config.hw.cpusPerSocket) %>%
mutate(blocksPerCpu = nblocks / cpusPerSocket) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(unit = as.factor(unit)) %>%
group_by(unit) %>%
# And compute some metrics which are applied to each group. For example we
# compute the median time within the runs of a unit:
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
# Then, we remove the grouping. This step is very important, otherwise the
# plotting functions get confused:
ungroup()
dpi=300
h=5
w=5
p = ggplot(df, aes(x=blocksPerCpu, y=normalized.time)) +
# The boxplots are useful to identify outliers and problems with the
# distribution of time
geom_boxplot() +
# We add a line to mark the 1% limit above and below the median
geom_hline(yintercept=c(-0.01, 0.01), linetype="dashed", color="red") +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Blocks per CPU", y="Normalized time", title="HPCG granularity: normalized time",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("normalized.time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("normalized.time.pdf", plot=p, width=w, height=h, dpi=dpi)
# We plot the time of each run as we vary the block size
p = ggplot(df, aes(x=blocksPerCpu, y=time)) +
# We add points (scatter plot) using circles (shape=21) a bit larger
# than the default (size=3)
geom_point(shape=21, size=3) +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Blocks Per CPU", y="Time (s)", title="HPCG granularity: time",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.pdf", plot=p, width=w, height=h, dpi=dpi)

@@ -1,116 +0,0 @@
# This R program takes as argument the dataset that contains the results of the
# execution of the HPCG experiment and produces some plots. All the
# knowledge needed to understand how this script works is covered by this nice R book:
#
# Winston Chang, R Graphics Cookbook: Practical Recipes for Visualizing Data,
# O'Reilly Media (2020), 2nd edition.
#
# It can be read online for free here: https://r-graphics.org/
#
# Please search this book before copying some random (and probably outdated)
# reply on Stack Overflow.
# We load some R packages to import the required functions. We mainly use the
# tidyverse packages, which are very good for plotting data.
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
# Here we simply load the arguments to find the input dataset. If nothing is
# specified we use the file named `input` in the current directory.
# We can run this script directly using:
# Rscript <path-to-this-script> <input-dataset>
# Load the arguments (argv)
args = commandArgs(trailingOnly=TRUE)
# Set the input dataset if given in argv[1], or use "input" as default
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
# Then we flatten it, as it may contain dictionaries inside the columns
jsonlite::flatten() %>%
# Now the dataframe contains all the configuration of the units inside the
# columns named `config.*`, for example `config.nblocks`. We first select only
# the columns that we need:
select(config.nblocks, config.ncommblocks, config.hw.cpusPerSocket, config.nodes, unit, time) %>%
# And then we rename those columns to something shorter:
rename(nblocks=config.nblocks,
ncommblocks=config.ncommblocks,
cpusPerSocket=config.hw.cpusPerSocket,
nodes=config.nodes) %>%
mutate(blocksPerCpu = nblocks / cpusPerSocket) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(nodes = as.factor(nodes)) %>%
mutate(unit = as.factor(unit)) %>%
group_by(unit) %>%
# And compute some metrics which are applied to each group. For example we
# compute the median time within the runs of a unit:
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
# Then, we remove the grouping. This step is very important, otherwise the
# plotting functions get confused:
ungroup()
dpi=300
h=5
w=5
p = ggplot(df, aes(x=nodes, y=normalized.time, color=blocksPerCpu)) +
# The boxplots are useful to identify outliers and problems with the
# distribution of time
geom_boxplot() +
# We add a line to mark the 1% limit above and below the median
geom_hline(yintercept=c(-0.01, 0.01), linetype="dashed", color="red") +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Nodes", y="Normalized time", title="HPCG weak scalability: normalized time",
color="Blocks per CPU",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("normalized.time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("normalized.time.pdf", plot=p, width=w, height=h, dpi=dpi)
# We plot the time of each run as we vary the block size
p = ggplot(df, aes(x=nodes, y=time, color=blocksPerCpu)) +
# We add points (scatter plot) using circles (shape=21) a bit larger
# than the default (size=3)
geom_point(shape=21, size=3) +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Nodes", y="Time (s)", title="HPCG weak scalability: time",
color="Blocks per CPU",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.pdf", plot=p, width=w, height=h, dpi=dpi)

@@ -1,109 +0,0 @@
# This R program takes as argument the dataset that contains the results of the
# execution of the HPCG experiment and produces some plots. All the
# knowledge needed to understand how this script works is covered by this nice R book:
#
# Winston Chang, R Graphics Cookbook: Practical Recipes for Visualizing Data,
# O'Reilly Media (2020), 2nd edition.
#
# It can be read online for free here: https://r-graphics.org/
#
# Please search this book before copying some random (and probably outdated)
# reply on Stack Overflow.
# We load some R packages to import the required functions. We mainly use the
# tidyverse packages, which are very good for plotting data.
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
# Here we simply load the arguments to find the input dataset. If nothing is
# specified we use the file named `input` in the current directory.
# We can run this script directly using:
# Rscript <path-to-this-script> <input-dataset>
# Load the arguments (argv)
args = commandArgs(trailingOnly=TRUE)
# Set the input dataset if given in argv[1], or use "input" as default
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
# Then we flatten it, as it may contain dictionaries inside the columns
jsonlite::flatten() %>%
# Now the dataframe contains all the configuration of the units inside the
# columns named `config.*`, for example `config.nblocks`. We first select only
# the columns that we need:
select(config.nblocks,
config.ncommblocks,
config.hw.cpusPerSocket,
config.nodes,
config.nprocs.x,
config.nprocs.y,
config.nprocs.z,
unit,
time
) %>%
# And then we rename those columns to something shorter:
rename(nblocks=config.nblocks,
ncommblocks=config.ncommblocks,
cpusPerSocket=config.hw.cpusPerSocket,
nodes=config.nodes,
npx=config.nprocs.x,
npy=config.nprocs.y,
npz=config.nprocs.z
) %>%
mutate(axisColor=as.factor(ifelse(npx != 1, "X", ifelse(npy != 1, "Y", "Z")))) %>%
mutate(blocksPerCpu = nblocks / cpusPerSocket) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(nodes = as.factor(nodes)) %>%
mutate(unit = as.factor(unit)) %>%
mutate(timePerNprocs = time * npz) %>%
group_by(unit) %>%
# And compute some metrics which are applied to each group. For example we
# compute the median time within the runs of a unit:
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
# Then, we remove the grouping. This step is very important, otherwise the
# plotting functions get confused:
ungroup()
dpi=300
h=5
w=5
# We plot the time of each run as we vary the block size
p = ggplot(df, aes(x=nodes, y=timePerNprocs, color=blocksPerCpu)) +
# We add points (scatter plot) using circles (shape=21) a bit larger
# than the default (size=3)
geom_point(shape=21, size=3) +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Nodes", y="Time * Num Procs", title="HPCG strong scalability: Z axis",
color="Blocks Per CPU",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.pdf", plot=p, width=w, height=h, dpi=dpi)

@@ -1,110 +0,0 @@
# This R program takes as argument the dataset that contains the results of the
# execution of the HPCG experiment and produces some plots. All the
# knowledge needed to understand how this script works is covered by this nice R book:
#
# Winston Chang, R Graphics Cookbook: Practical Recipes for Visualizing Data,
# O'Reilly Media (2020), 2nd edition.
#
# It can be read online for free here: https://r-graphics.org/
#
# Please search this book before copying some random (and probably outdated)
# reply on Stack Overflow.
# We load some R packages to import the required functions. We mainly use the
# tidyverse packages, which are very good for plotting data.
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
# Here we simply load the arguments to find the input dataset. If nothing is
# specified we use the file named `input` in the current directory.
# We can run this script directly using:
# Rscript <path-to-this-script> <input-dataset>
# Load the arguments (argv)
args = commandArgs(trailingOnly=TRUE)
# Set the input dataset if given in argv[1], or use "input" as default
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
# Then we flatten it, as it may contain dictionaries inside the columns
jsonlite::flatten() %>%
# Now the dataframe contains all the configuration of the units inside the
# columns named `config.*`, for example `config.nblocks`. We first select only
# the columns that we need:
select(config.nblocks,
config.ncommblocks,
config.hw.cpusPerSocket,
config.nodes,
config.nprocs.x,
config.nprocs.y,
config.nprocs.z,
unit,
time
) %>%
# And then we rename those columns to something shorter:
rename(nblocks=config.nblocks,
ncommblocks=config.ncommblocks,
cpusPerSocket=config.hw.cpusPerSocket,
nodes=config.nodes,
npx=config.nprocs.x,
npy=config.nprocs.y,
npz=config.nprocs.z
) %>%
mutate(axisColor=as.factor(ifelse(npx != 1, "X", ifelse(npy != 1, "Y", "Z")))) %>%
mutate(blocksPerCpu = nblocks / cpusPerSocket) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(nodes = as.factor(nodes)) %>%
mutate(unit = as.factor(unit)) %>%
group_by(unit) %>%
# And compute some metrics which are applied to each group. For example we
# compute the median time within the runs of a unit:
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
# Then, we remove the grouping. This step is very important, otherwise the
# plotting functions get confused:
ungroup()
dpi=300
h=5
w=5
w=3*w
# We plot the time of each run as we vary the block size
p = ggplot(df, aes(x=blocksPerCpu, y=time, color=axisColor)) +
# We add points (scatter plot) using circles (shape=21) a bit larger
# than the default (size=3)
geom_point(shape=21, size=3) +
facet_wrap(~ nodes, labeller="label_both") +
# The bw theme is recommended for publications
theme_bw() +
# Here we add the title and the labels of the axes
labs(x="Blocks Per CPU", y="Time (s)", title="HPCG weak scalability: time",
color="Axis",
subtitle=input_file) +
# And set the subtitle font size a bit smaller, so it fits nicely
theme(plot.subtitle=element_text(size=8))
# Then, we save the plot both in png and pdf
ggsave("time.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.pdf", plot=p, width=w, height=h, dpi=dpi)

garlic/fig/hpcg/size.R (new file)

@@ -0,0 +1,70 @@
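# Plots for the HPCG size experiment: read the JSON results and plot
# time * nodes divided by the size per CPU in Z, as that size varies.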
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
args = commandArgs(trailingOnly=TRUE)
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
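# Load and flatten the JSON results, keep the relevant configuration columns,
# and derive time * nodes as well as its value divided by the size per CPU in Z.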
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
jsonlite::flatten() %>%
select(config.nblocks,
config.hw.cpusPerSocket,
config.nodes,
config.nprocs.x,
config.nprocs.y,
config.nprocs.z,
config.blocksPerCpu,
config.sizePerCpu.z,
unit,
time
) %>%
rename(nblocks=config.nblocks,
cpusPerSocket=config.hw.cpusPerSocket,
nodes=config.nodes,
blocksPerCpu=config.blocksPerCpu,
sizePerCpu.z=config.sizePerCpu.z,
npx=config.nprocs.x,
npy=config.nprocs.y,
npz=config.nprocs.z
) %>%
mutate(time.nodes = time * nodes) %>%
mutate(time.nodes.elem = time.nodes / sizePerCpu.z) %>%
mutate(axisColor=as.factor(ifelse(npx != 1, "X", ifelse(npy != 1, "Y", "Z")))) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(sizePerCpu.z = as.factor(sizePerCpu.z)) %>%
mutate(nodes = as.factor(nodes)) %>%
mutate(unit = as.factor(unit)) %>%
group_by(unit) %>%
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
ungroup()
dpi=300
h=5
w=5
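# Scatter plot of time * nodes / sizePerCpu.z for each size per CPU in Z.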
p = ggplot(df, aes(x=sizePerCpu.z, y=time.nodes.elem)) +
geom_point(shape=21, size=3) +
theme_bw() +
labs(x="Size per CPU in Z", y="Time * nodes / spcz (s)",
title="HPCG size: time * nodes / spcz",
subtitle=input_file) +
theme(plot.subtitle=element_text(size=8),
legend.position="bottom")
ggsave("time.nodes.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.nodes.pdf", plot=p, width=w, height=h, dpi=dpi)

garlic/fig/hpcg/ss.R (new file)

@@ -0,0 +1,81 @@
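# Plots for the HPCG strong scalability experiment in the Z dimension: read the
# JSON results and plot time * nodes and time / sizePerCpu.z per node count.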
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
args = commandArgs(trailingOnly=TRUE)
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
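# Load and flatten the JSON results, keep the relevant columns, and derive
# time / sizePerCpu.z and time * nodes, plus the per-unit normalized time.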
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
jsonlite::flatten() %>%
select(config.nblocks,
config.hw.cpusPerSocket,
config.nodes,
config.nprocs.x,
config.nprocs.y,
config.nprocs.z,
config.blocksPerCpu,
config.sizePerCpu.z,
unit,
time
) %>%
rename(nblocks=config.nblocks,
cpusPerSocket=config.hw.cpusPerSocket,
nodes=config.nodes,
blocksPerCpu=config.blocksPerCpu,
sizePerCpu.z=config.sizePerCpu.z,
npx=config.nprocs.x,
npy=config.nprocs.y,
npz=config.nprocs.z
) %>%
mutate(time.sizeZ = time / sizePerCpu.z) %>%
mutate(time.nodes = time * nodes) %>%
mutate(axisColor=as.factor(ifelse(npx != 1, "X", ifelse(npy != 1, "Y", "Z")))) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(nodes = as.factor(nodes)) %>%
mutate(unit = as.factor(unit)) %>%
mutate(sizePerCpu.z = as.factor(sizePerCpu.z)) %>%
group_by(unit) %>%
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
ungroup()
dpi=300
h=7
w=7
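# Boxplot of time * nodes per node count; under ideal strong scaling this
# product stays roughly constant as the number of nodes grows.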
p = ggplot(df, aes(x=nodes, y=time.nodes)) +
geom_boxplot() +
theme_bw() +
labs(x="Nodes", y="Time * nodes (s)",
title="HPCG strong scalability in Z",
subtitle=input_file) +
theme(plot.subtitle=element_text(size=8),
legend.position="bottom")
ggsave("time.nodes.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.nodes.pdf", plot=p, width=w, height=h, dpi=dpi)
p = ggplot(df, aes(x=nodes, y=time.sizeZ, fill=sizePerCpu.z)) +
geom_boxplot() +
theme_bw() +
labs(x="Nodes", y="Time / npcz (s)", title="HPCG strong scalability in Z",
color="Size per CPU in Z",
subtitle=input_file) +
theme(plot.subtitle=element_text(size=8),
legend.position="bottom")
ggsave("time.size.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.size.pdf", plot=p, width=w, height=h, dpi=dpi)

garlic/fig/hpcg/ws.R (new file)

@@ -0,0 +1,70 @@
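# Plots for the HPCG weak scaling experiment in the Z dimension: read the JSON
# results and plot the run time per node count, filled by the size per CPU in Z.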
library(ggplot2)
library(dplyr, warn.conflicts = FALSE)
library(scales)
library(jsonlite)
library(viridis, warn.conflicts = FALSE)
args = commandArgs(trailingOnly=TRUE)
if (length(args)>0) { input_file = args[1] } else { input_file = "input" }
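# Load and flatten the JSON results, keep the relevant columns, and compute the
# per-unit median and normalized time.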
df = jsonlite::stream_in(file(input_file), verbose=FALSE) %>%
jsonlite::flatten() %>%
select(config.nblocks,
config.hw.cpusPerSocket,
config.nodes,
config.nprocs.x,
config.nprocs.y,
config.nprocs.z,
config.blocksPerCpu,
config.sizePerCpu.z,
unit,
time
) %>%
rename(nblocks=config.nblocks,
cpusPerSocket=config.hw.cpusPerSocket,
nodes=config.nodes,
blocksPerCpu=config.blocksPerCpu,
sizePerCpu.z=config.sizePerCpu.z,
npx=config.nprocs.x,
npy=config.nprocs.y,
npz=config.nprocs.z
) %>%
mutate(axisColor=as.factor(ifelse(npx != 1, "X", ifelse(npy != 1, "Y", "Z")))) %>%
mutate(time.sizeZ = time / sizePerCpu.z) %>%
mutate(nblocks = as.factor(nblocks)) %>%
mutate(blocksPerCpu = as.factor(blocksPerCpu)) %>%
mutate(nodes = as.factor(nodes)) %>%
mutate(unit = as.factor(unit)) %>%
mutate(sizePerCpu.z = as.factor(sizePerCpu.z)) %>%
mutate(timePerNprocs = time * npz) %>%
group_by(unit) %>%
mutate(median.time = median(time)) %>%
mutate(normalized.time = time / median.time - 1) %>%
mutate(log.median.time = log(median.time)) %>%
ungroup()
dpi=300
h=7
w=7
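# Boxplot of the run time per node count; under ideal weak scaling the time
# stays roughly constant as the number of nodes grows.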
p = ggplot(df, aes(x=nodes, y=time, fill=sizePerCpu.z)) +
geom_boxplot() +
theme_bw() +
labs(x="Nodes", y="Time (s)", title="HPCG weak scaling in Z",
color="Size per CPU in Z",
subtitle=input_file) +
theme(plot.subtitle=element_text(size=8),
legend.position="bottom")
ggsave("time.nodes.png", plot=p, width=w, height=h, dpi=dpi)
ggsave("time.nodes.pdf", plot=p, width=w, height=h, dpi=dpi)

@@ -38,14 +38,10 @@ in
  };
  hpcg = with exp.hpcg; {
-    # /nix/store/8dr191vch1nw7vfz8nj36d5nhwnbdnf3-plot
-    ossGranularity = stdPlot ./hpcg/oss.granularity.R [ ossGranularity ];
-    # /nix/store/a3x76fbnfbacn2xhz3q65fklfp0qbb6p-plot
-    ossWeakscalingPerAxisPerBlock = stdPlot ./hpcg/oss.slices.weakscaling.R [ ossSlicesWeakscaling ];
-    # /nix/store/096rl6344pbz5wrzgxgqn651pysfkkjc-plot
-    ossStrongscalingPerBlock = stdPlot ./hpcg/oss.slices.strongscaling.R [ ossSlicesStrongscaling ];
+    ss = stdPlot ./hpcg/ss.R [ ss ];
+    ws = stdPlot ./hpcg/ws.R [ ws ];
+    size = stdPlot ./hpcg/size.R [ size ];
+    granularity = stdPlot ./hpcg/granularity.R [ granularity ];
  };
  saiph = with exp.saiph; {