Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: support preset bias of atomic model output #4116

Merged
merged 8 commits into from
Sep 11, 2024
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions deepmd/pt/model/atomic_model/base_atomic_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
Union,
)

import numpy as np
import torch

from deepmd.dpmodel.atomic_model import (
Expand Down Expand Up @@ -66,7 +67,7 @@ class BaseAtomicModel(torch.nn.Module, BaseAtomicModel_):
of the atomic model. Implemented by removing the pairs from the nlist.
rcond : float, optional
The condition number for the regression of atomic energy.
preset_out_bias : Dict[str, List[Optional[torch.Tensor]]], optional
preset_out_bias : Dict[str, List[Optional[np.array]]], optional
Specifying atomic energy contribution in vacuum. Given by key:value pairs.
The value is a list specifying the bias. The elements can be None or np.array of output shape.
For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.]
Expand All @@ -80,7 +81,7 @@ def __init__(
atom_exclude_types: List[int] = [],
pair_exclude_types: List[Tuple[int, int]] = [],
rcond: Optional[float] = None,
preset_out_bias: Optional[Dict[str, torch.Tensor]] = None,
preset_out_bias: Optional[Dict[str, np.array]] = None,
wanghan-iapcm marked this conversation as resolved.
Show resolved Hide resolved
):
torch.nn.Module.__init__(self)
BaseAtomicModel_.__init__(self)
Expand Down
18 changes: 18 additions & 0 deletions deepmd/pt/model/model/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,19 @@
)


def _convert_preset_out_bias_to_array(preset_out_bias, type_map):
if preset_out_bias is not None:
njzjz marked this conversation as resolved.
Show resolved Hide resolved
for kk in preset_out_bias:
if len(preset_out_bias[kk]) != len(type_map):
raise ValueError(

Check warning on line 158 in deepmd/pt/model/model/__init__.py

View check run for this annotation

Codecov / codecov/patch

deepmd/pt/model/model/__init__.py#L158

Added line #L158 was not covered by tests
"length of the preset_out_bias should be the same as the type_map"
)
for jj in range(len(preset_out_bias[kk])):
if preset_out_bias[kk][jj] is not None:
preset_out_bias[kk][jj] = np.array(preset_out_bias[kk][jj])
return preset_out_bias


def get_standard_model(model_params):
model_params_old = model_params
model_params = copy.deepcopy(model_params)
Expand All @@ -176,6 +189,10 @@
fitting = BaseFitting(**fitting_net)
atom_exclude_types = model_params.get("atom_exclude_types", [])
pair_exclude_types = model_params.get("pair_exclude_types", [])
preset_out_bias = model_params.get("preset_out_bias")
preset_out_bias = _convert_preset_out_bias_to_array(
preset_out_bias, model_params["type_map"]
)

if fitting_net["type"] == "dipole":
modelcls = DipoleModel
Expand All @@ -196,6 +213,7 @@
type_map=model_params["type_map"],
atom_exclude_types=atom_exclude_types,
pair_exclude_types=pair_exclude_types,
preset_out_bias=preset_out_bias,
wanghan-iapcm marked this conversation as resolved.
Show resolved Hide resolved
)
model.model_def_script = json.dumps(model_params_old)
return model
Expand Down
6 changes: 3 additions & 3 deletions deepmd/pt/utils/stat.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ def compute_output_stats(
keys: Union[str, List[str]] = ["energy"],
stat_file_path: Optional[DPPath] = None,
rcond: Optional[float] = None,
preset_bias: Optional[Dict[str, List[Optional[torch.Tensor]]]] = None,
preset_bias: Optional[Dict[str, List[Optional[np.array]]]] = None,
wanghan-iapcm marked this conversation as resolved.
Show resolved Hide resolved
model_forward: Optional[Callable[..., torch.Tensor]] = None,
atomic_output: Optional[FittingOutputDef] = None,
):
Expand All @@ -264,7 +264,7 @@ def compute_output_stats(
The path to the stat file.
rcond : float, optional
The condition number for the regression of atomic energy.
preset_bias : Dict[str, List[Optional[torch.Tensor]]], optional
preset_bias : Dict[str, List[Optional[np.array]]], optional
Specifying atomic energy contribution in vacuum. Given by key:value pairs.
The value is a list specifying the bias. The elements can be None or np.array of output shape.
For example: [None, [2.]] means type 0 is not set, type 1 is set to [2.]
Expand Down Expand Up @@ -405,7 +405,7 @@ def compute_output_stats_global(
ntypes: int,
keys: List[str],
rcond: Optional[float] = None,
preset_bias: Optional[Dict[str, List[Optional[torch.Tensor]]]] = None,
preset_bias: Optional[Dict[str, List[Optional[np.array]]]] = None,
wanghan-iapcm marked this conversation as resolved.
Show resolved Hide resolved
model_pred: Optional[Dict[str, np.ndarray]] = None,
atomic_output: Optional[FittingOutputDef] = None,
):
Expand Down
9 changes: 9 additions & 0 deletions deepmd/utils/argcheck.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import warnings
from typing import (
Callable,
Dict,
List,
Optional,
Union,
Expand Down Expand Up @@ -1771,6 +1772,7 @@ def model_args(exclude_hybrid=False):
doc_spin = "The settings for systems with spin."
doc_atom_exclude_types = "Exclude the atomic contribution of the listed atom types"
doc_pair_exclude_types = "The atom pairs of the listed types are not treated to be neighbors, i.e. they do not see each other."
doc_preset_out_bias = "The preset bias of the atomic output. It is provided as a dict. Taking the energy model that has three atom types for example, the preset_out_bias may be given as `{ 'energy': [null, 0., 1.] }`. In this case the bias of type 1 and 2 are set to 0. and 1., respectively. The set_davg_zero should be set to true."
doc_finetune_head = (
"The chosen fitting net to fine-tune on, when doing multi-task fine-tuning. "
"If not set or set to 'RANDOM', the fitting net will be randomly initialized."
Expand Down Expand Up @@ -1833,6 +1835,13 @@ def model_args(exclude_hybrid=False):
default=[],
doc=doc_only_pt_supported + doc_atom_exclude_types,
),
Argument(
"preset_out_bias",
Dict[str, Optional[float]],
optional=True,
default=None,
doc=doc_only_pt_supported + doc_preset_out_bias,
),
Argument(
"srtab_add_bias",
bool,
Expand Down
82 changes: 82 additions & 0 deletions source/tests/pt/model/test_get_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
import copy
import unittest

import numpy as np
import torch

from deepmd.pt.model.model import (
get_model,
)
from deepmd.pt.utils import (
env,
)

# Default floating-point dtype used by tests in this module.
dtype = torch.float64

# Minimal se_e2_a model configuration with three atom types, used as the
# baseline input to get_model() in the tests below.
model_se_e2_a = {
    "type_map": ["O", "H", "B"],
    "descriptor": {
        "type": "se_e2_a",
        "sel": [46, 92, 4],
        "rcut_smth": 0.50,
        "rcut": 4.00,
        "neuron": [25, 50, 100],
        "resnet_dt": False,
        "axis_neuron": 16,
        "seed": 1,
    },
    "fitting_net": {
        "neuron": [24, 24, 24],
        "resnet_dt": True,
        "seed": 1,
    },
    "data_stat_nbatch": 20,
    "atom_exclude_types": [1],
    "pair_exclude_types": [[1, 2]],
    # One bias entry per type in type_map: type "O" is not preset,
    # "H" is preset to 1.0 and "B" to 3.0.
    "preset_out_bias": {
        "energy": [
            None,
            [1.0],
            [3.0],
        ]
    },
}


class TestGetModel(unittest.TestCase):
    """Check that get_model forwards model-level options to the atomic model."""

    def test_model_attr(self):
        """Attributes set in the config appear on the built atomic model."""
        model_params = copy.deepcopy(model_se_e2_a)
        self.model = get_model(model_params).to(env.DEVICE)
        atomic_model = self.model.atomic_model
        self.assertEqual(atomic_model.type_map, ["O", "H", "B"])
        # NOTE(review): assertEqual on a dict containing numpy arrays relies on
        # the arrays being size-1 so their element-wise ``==`` result is
        # truthy; larger preset arrays would make this comparison ambiguous.
        self.assertEqual(
            atomic_model.preset_out_bias,
            {
                "energy": [
                    None,
                    np.array([1.0]),
                    np.array([3.0]),
                ]
            },
        )
        self.assertEqual(atomic_model.atom_exclude_types, [1])
        self.assertEqual(atomic_model.pair_exclude_types, [[1, 2]])

    def test_notset_model_attr(self):
        """Omitted options fall back to their documented defaults."""
        model_params = copy.deepcopy(model_se_e2_a)
        model_params.pop("atom_exclude_types")
        model_params.pop("pair_exclude_types")
        model_params.pop("preset_out_bias")
        self.model = get_model(model_params).to(env.DEVICE)
        atomic_model = self.model.atomic_model
        self.assertEqual(atomic_model.type_map, ["O", "H", "B"])
        self.assertEqual(atomic_model.preset_out_bias, None)
        self.assertEqual(atomic_model.atom_exclude_types, [])
        self.assertEqual(atomic_model.pair_exclude_types, [])

    def test_preset_wrong_len(self):
        """A preset bias list shorter than type_map raises ValueError."""
        model_params = copy.deepcopy(model_se_e2_a)
        model_params["preset_out_bias"] = {"energy": [None]}
        with self.assertRaises(ValueError):
            self.model = get_model(model_params).to(env.DEVICE)