
Added memory limiting variable to HIPPYNN settings
bnebgen-LANL committed Jul 23, 2024
1 parent c729366 commit d6af7c2
Showing 2 changed files with 3 additions and 3 deletions.
1 change: 1 addition & 0 deletions hippynn/_settings_setup.py
@@ -73,6 +73,7 @@ def kernel_handler(kernel_string):
     "USE_CUSTOM_KERNELS": ("auto", kernel_handler),
     "WARN_LOW_DISTANCES": (True, strtobool),
     "TIMEPLOT_AUTOSCALING": (True, strtobool),
+    "PYTORCH_GPU_MEM_FRAC": (1.0, float),
 }
 
 settings = SimpleNamespace(**{k: default for k, (default, handler) in default_settings.items()})
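The new PYTORCH_GPU_MEM_FRAC setting defaults to 1.0 and is parsed with float. A minimal usage sketch follows, assuming hippynn resolves settings from HIPPYNN_-prefixed environment variables when the package is imported (the HIPPYNN_PYTORCH_GPU_MEM_FRAC name matches the environment variable the removed code read directly); the 0.5 value is illustrative only:

import os

# Hypothetical sketch: cap this process at half of the GPU's memory.
# The variable must be set before hippynn is first imported, since the
# settings module reads its configuration when it is loaded.
os.environ["HIPPYNN_PYTORCH_GPU_MEM_FRAC"] = "0.5"

import hippynn

# The value is parsed with float() and stored on the settings namespace.
print(hippynn.settings.PYTORCH_GPU_MEM_FRAC)  # expected: 0.5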
5 changes: 2 additions & 3 deletions hippynn/interfaces/lammps_interface/mliap_interface.py
@@ -3,7 +3,6 @@
 """
 import pickle
 import warnings
-from os import environ
 
 import numpy as np
 import torch
@@ -36,8 +35,8 @@ def __init__(self, energy_node, element_types, ndescriptors=1, model_device=torc
         :param model_device: the device to send torch data to (cpu or cuda)
         """
         super().__init__()
-        if environ("HIPPYNN_PYTORCH_GPU_MEM_FRAC") is not None:
-            torch.cuda.set_per_process_memory_fraction(float(environ("HIPPYNN_PYTORCH_GPU_MEM_FRAC")))
+        if hippynn.settings.PYTORCH_GPU_MEM_FRAC < 1.0:
+            torch.cuda.set_per_process_memory_fraction(hippynn.settings.PYTORCH_GPU_MEM_FRAC)
         self.element_types = element_types
         self.ndescriptors = ndescriptors
         self.model_device = model_device
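With this change the LAMMPS MLIAP interface consults hippynn.settings instead of reading the environment directly, and only applies the cap when the fraction is below its default of 1.0. For context, torch.cuda.set_per_process_memory_fraction limits the CUDA caching allocator to a fraction of the device's total memory, so allocations past the cap fail with an out-of-memory error rather than consuming the whole card alongside LAMMPS. A standalone sketch of that behavior, assuming a CUDA device is available (the 0.25 fraction and tensor size are illustrative only):

import torch

if torch.cuda.is_available():
    # Limit this process to 25% of GPU 0's total memory.
    torch.cuda.set_per_process_memory_fraction(0.25, device=0)

    # Allocations within the cap behave as usual.
    small = torch.zeros(1024, 1024, device="cuda:0")

    # An allocation that would push the caching allocator past the cap
    # raises a CUDA out-of-memory error instead of exhausting the device.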
