Memory units #293

Merged
merged 13 commits on Jul 22, 2024
2 changes: 1 addition & 1 deletion abipy/abilab.py
@@ -544,7 +544,7 @@ def install_config_files(workdir: Optional[str] = None, force_reinstall: Optiona
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
mem_per_node: 4 GB
"""

# Write configuration files.
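The mem_per_node strings in templates like the one above are parsed with pymatgen's Memory unit, which recent releases spell with an uppercase B ("GB" rather than "Gb"). As a rough illustration of how such a string can be converted tolerantly, a minimal sketch mirroring the any2bytes helper added to flows.py further down (the name mem_to_bytes is only for this sketch):

from pymatgen.core.units import Memory, UnitError

def mem_to_bytes(s: str) -> int:
    """Convert a string such as '4 GB' into an integer number of bytes."""
    try:
        # Recent pymatgen spells byte units 'B', 'kB', 'MB', 'GB', ...
        return int(Memory.from_str(s.upper()).to("B"))
    except (KeyError, UnitError):
        # Older releases only accept the lowercase-b spellings ('b', 'Mb', 'Gb', ...).
        return int(Memory.from_str(s.replace("B", "b")).to("b"))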
12 changes: 6 additions & 6 deletions abipy/core/mixins.py
@@ -575,12 +575,12 @@ def dump(self, filepath: str) -> str:


_ABBREVS = [
(1 << 50, 'Pb'),
(1 << 40, 'Tb'),
(1 << 30, 'Gb'),
(1 << 20, 'Mb'),
(1 << 10, 'kb'),
(1, 'b'),
(1 << 50, 'PB'),
(1 << 40, 'TB'),
(1 << 30, 'GB'),
(1 << 20, 'MB'),
(1 << 10, 'kB'),
(1, 'B'),
]


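For reference, a table like _ABBREVS is typically scanned from the largest factor down to format a byte count for printing. A minimal sketch of such a helper (the name pretty_size is illustrative, not part of this diff):

def pretty_size(nbytes: int) -> str:
    """Return a human-readable string for a size in bytes, e.g. pretty_size(2048) -> '2.0 kB'."""
    for factor, suffix in _ABBREVS:
        if nbytes >= factor:
            return "%.1f %s" % (nbytes / factor, suffix)
    return "%d B" % nbytes  # only reached for nbytes == 0 (or negative input)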
2 changes: 1 addition & 1 deletion abipy/data/managers/dragon1_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
num_nodes: 26
sockets_per_node: 2
cores_per_socket: 8
mem_per_node: 112Gb
mem_per_node: 112GB

job: &job
mpi_runner: mpirun
2 changes: 1 addition & 1 deletion abipy/data/managers/gmac_manager.yml
@@ -16,7 +16,7 @@ qadapters:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
mem_per_node: 4 GB
# Optional
#condition: {"$eq": {omp_threads: 2}}

2 changes: 1 addition & 1 deletion abipy/data/managers/hercules_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
num_nodes: 65
sockets_per_node: 2
cores_per_socket: 8
mem_per_node: 54Gb
mem_per_node: 54GB

job: &job
mpi_runner: mpirun
6 changes: 3 additions & 3 deletions abipy/data/managers/hmem_manager.yml
@@ -4,19 +4,19 @@ high: &high
num_nodes: 2
sockets_per_node: 4
cores_per_socket: 12
mem_per_node: 512Gb
mem_per_node: 512GB

middle: &middle
num_nodes: 7
sockets_per_node: 4
cores_per_socket: 12
mem_per_node: 256Gb
mem_per_node: 256GB

low: &low
num_nodes: 7
sockets_per_node: 4
cores_per_socket: 12
mem_per_node: 128Gb
mem_per_node: 128GB

job: &job
mpi_runner: mpirun
4 changes: 2 additions & 2 deletions abipy/data/managers/juqueen_manager.yml
@@ -2,7 +2,7 @@ batch: &batch
num_nodes: 128
sockets_per_node: 1
cores_per_socket: 16
mem_per_node: 128Gb
mem_per_node: 128GB

job: &job
mpi_runner: runjob
@@ -48,7 +48,7 @@ qadapters:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 1
mem_per_node: 12Gb
mem_per_node: 12GB
job:
#mpi_runner: runjob
shell_env:
4 changes: 2 additions & 2 deletions abipy/data/managers/jureca_manager.yml
@@ -5,13 +5,13 @@ devel: &devel
num_nodes: 8
sockets_per_node: 2
cores_per_socket: 12
mem_per_node: 128Gb
mem_per_node: 128GB

batch: &batch
num_nodes: 128
sockets_per_node: 2
cores_per_socket: 12
mem_per_node: 128Gb
mem_per_node: 128GB

job: &job
# mpirun is not available on jureca.
2 changes: 1 addition & 1 deletion abipy/data/managers/lemaitre2_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
num_nodes: 112
sockets_per_node: 2
cores_per_socket: 6
mem_per_node: 48Gb
mem_per_node: 48GB

job: &job
mpi_runner: mpirun
2 changes: 1 addition & 1 deletion abipy/data/managers/lemaitre3_manager.yml
@@ -5,7 +5,7 @@ hardware: &hardware
num_nodes: 80
sockets_per_node: 2
cores_per_socket: 12
mem_per_node: 95Gb
mem_per_node: 95GB

job: &job
mpi_runner: mpirun
2 changes: 1 addition & 1 deletion abipy/data/managers/lumi_manager.yml
@@ -5,7 +5,7 @@ hardware: &hardware
num_nodes: 1376
sockets_per_node: 2
cores_per_socket: 64
mem_per_node: 256Gb
mem_per_node: 256GB

job: &job
mpi_runner: srun
6 changes: 3 additions & 3 deletions abipy/data/managers/manneback_manager.yml
@@ -3,19 +3,19 @@ Def: &Def
num_nodes: 672
sockets_per_node: 2
cores_per_socket: 4
mem_per_node: 24 Gb
mem_per_node: 24 GB

ObanAMD: &ObanAMD
num_nodes: 6
sockets_per_node: 4
cores_per_socket: 8
mem_per_node: 128 Gb
mem_per_node: 128 GB

ObanIntel: &ObanIntel
num_nodes: 3
sockets_per_node: 4
cores_per_socket: 8
mem_per_node: 256 Gb
mem_per_node: 256 GB

# Environment, modules, and parameters used to launch jobs.
job: &job
2 changes: 1 addition & 1 deletion abipy/data/managers/nic4_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
num_nodes: 120
sockets_per_node: 2
cores_per_socket: 8
mem_per_node: 64Gb
mem_per_node: 64GB

job: &job
mpi_runner: "mpirun"
2 changes: 1 addition & 1 deletion abipy/data/managers/shell_manager.yml
@@ -15,4 +15,4 @@ qadapters:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
mem_per_node: 4 GB
2 changes: 1 addition & 1 deletion abipy/data/managers/shell_nompi_manager.yml
@@ -15,4 +15,4 @@ qadapters:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
mem_per_node: 4 GB
2 changes: 1 addition & 1 deletion abipy/data/managers/travis_manager.yml
@@ -16,4 +16,4 @@ qadapters:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 2
mem_per_node: 4 Gb
mem_per_node: 4 GB
2 changes: 1 addition & 1 deletion abipy/data/managers/ubu_manager.yml
@@ -17,4 +17,4 @@ qadapters:
num_nodes: 1
sockets_per_node: 1
cores_per_socket: 24
mem_per_node: 4 Gb
mem_per_node: 4 GB
2 changes: 1 addition & 1 deletion abipy/data/managers/vega_manager.yml
@@ -3,7 +3,7 @@ hardware: &hardware
num_nodes: 44
sockets_per_node: 4
cores_per_socket: 16
mem_per_node: 256Gb
mem_per_node: 256GB

job: &job
mpi_runner: mpirun
2 changes: 1 addition & 1 deletion abipy/data/managers/viper_manager.yml
@@ -2,7 +2,7 @@ hardware: &hardware
num_nodes: 1
sockets_per_node: 2
cores_per_socket: 4
mem_per_node: 32Gb
mem_per_node: 32GB

job: &job
mpi_runner: ~/bin/mpirun.openmpi
4 changes: 2 additions & 2 deletions abipy/data/managers/zenobe_manager.yml
@@ -3,13 +3,13 @@ westmere: &westmere
num_nodes: 274
sockets_per_node: 2
cores_per_socket: 6
mem_per_node: 24 Gb
mem_per_node: 24 GB

ivybridge: &ivybridge
num_nodes: 342
sockets_per_node: 2
cores_per_socket: 12
mem_per_node: 64 Gb
mem_per_node: 64 GB

# Environment, modules, and parameters used to launch jobs.
job: &job
2 changes: 1 addition & 1 deletion abipy/electrons/arpes.py
@@ -36,7 +36,7 @@ def model_from_ebands(cls, ebands, tmesh=(0, 300, 600), poorman_polaron=False):
#aw: [nwr, ntemp, max_nbcalc, nkcalc, nsppol] array
#aw_meshes: [max_nbcalc, nkcalc, nsppol] array with energy mesh in eV
from abipy.tools.numtools import lorentzian
try :
try:
from scipy.integrate import cumulative_trapezoid as cumtrapz
except ImportError:
from scipy.integrate import cumtrapz
4 changes: 3 additions & 1 deletion abipy/electrons/lobster.py
@@ -771,7 +771,9 @@ def plot(self, ax=None, **kwargs) -> Figure:
"""Barplot with average values."""
ax, fig, plt = get_ax_fig_plt(ax=ax)
import seaborn as sns
sns.barplot(x="average", y="pair", hue="spin", data=self.dataframe, ax=ax)
df = self.dataframe.copy()
df["pair"] = df["type0"] + "-" + df["type1"]
sns.barplot(x="average", y="pair", hue="spin", data=df, ax=ax)
return fig

def yield_figs(self, **kwargs): # pragma: no cover
97 changes: 95 additions & 2 deletions abipy/flowtk/flows.py
@@ -30,6 +30,7 @@
from monty.termcolor import cprint, colored, cprint_map, get_terminal_size
from monty.inspect import find_top_pyfile
from monty.json import MSONable
from pymatgen.core.units import Memory, UnitError
from abipy.tools.iotools import AtomicFile
from abipy.tools.serialization import pmg_pickle_load, pmg_pickle_dump, pmg_serialize
from abipy.tools.typing import Figure, TYPE_CHECKING
@@ -1290,8 +1291,12 @@ def show_status(self, return_df=False, **kwargs):
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (report.num_warnings, report.num_comments)))

para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
try:
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("GB"))))
except (KeyError, UnitError):
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))

task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
@@ -2478,6 +2483,94 @@ def make_light_tarfile(self, name=None):
name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name
return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])

def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
"""
Create a tarball file.

Args:
name: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + ".tar.gz" if name is None.
max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize.
Can be specified in bytes e.g. `max_filesize=1024` or as a string with a unit e.g. `max_filesize="1 MB"`.
No check is done if max_filesize is None.
exclude_exts: List of file extensions to be excluded from the tar file.
exclude_dirs: List of directory basenames to be excluded.
verbose (int): Verbosity level.
kwargs: keyword arguments passed to the :class:`TarFile` constructor.

Returns: The name of the tarfile.
"""
def any2bytes(s):
"""Convert string or number to memory in bytes."""
if is_string(s):
try:
# Latest pymatgen API (as of July 2024).
mem = int(Memory.from_str(s.upper()).to("B"))
except (KeyError, UnitError, AttributeError): # For backward compatibility with older pymatgen versions
try:
mem = int(Memory.from_str(s.replace("B", "b")).to("b"))
except AttributeError: # For even older pymatgen versions
mem = int(Memory.from_string(s.replace("B", "b")).to("b"))
return mem
else:
return int(s)

if max_filesize is not None:
max_filesize = any2bytes(max_filesize)

if exclude_exts:
# Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc"
# Moreover, this trick allows one to treat WFK.nc and WFK files on the same footing.
exts = []
for e in list_strings(exclude_exts):
exts.append(e)
if e.endswith(".nc"):
exts.append(e.replace(".nc", ""))
else:
exts.append(e + ".nc")
exclude_exts = exts

def filter(tarinfo):
"""
Function that takes a TarInfo object argument and returns the changed TarInfo object.
If it instead returns None the TarInfo object will be excluded from the archive.
"""
# Skip links.
if tarinfo.issym() or tarinfo.islnk():
if verbose: print("Excluding link: %s" % tarinfo.name)
return None

# Check size in bytes
if max_filesize is not None and tarinfo.size > max_filesize:
if verbose: print("Excluding %s due to max_filesize" % tarinfo.name)
return None

# Filter filenames.
if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts):
if verbose: print("Excluding %s due to extension" % tarinfo.name)
return None

# Exclude directories (match against directory basenames).
if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)):
if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name)
return None

return tarinfo

back = os.getcwd()
os.chdir(os.path.join(self.workdir, ".."))

import tarfile
name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name
with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:
tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, filter=filter)

# Add the script used to generate the flow.
if self.pyfile is not None and os.path.exists(self.pyfile):
tar.add(self.pyfile)

os.chdir(back)
return name

def explain(self, what="all", nids=None, verbose=0) -> str:
"""
Return string with the docstrings of the works/tasks in the Flow grouped by class.
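For illustration, a possible call of the new make_tarfile method on an existing flow (the size limit, extension list, and directory name below are placeholders taken from the docstring and from make_light_tarfile):

# Assuming `flow` is an existing abipy Flow instance:
tarball = flow.make_tarfile(max_filesize="1 MB",        # skip files larger than 1 MB
                            exclude_exts=["GSR"],        # also matches "GSR.nc" thanks to the extension trick
                            exclude_dirs=["tmpdata"],
                            verbose=1)
print(tarball)  # name of the .tar.gz created in the parent directory of flow.workdir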