13 changes: 13 additions & 0 deletions deepmd/dpmodel/descriptor/dpa1.py
@@ -319,6 +319,7 @@ def __init__(
trainable_ln=trainable_ln,
ln_eps=ln_eps,
seed=child_seed(seed, 0),
trainable=trainable,
)
self.use_econf_tebd = use_econf_tebd
self.use_tebd_bias = use_tebd_bias
@@ -333,6 +334,7 @@ def __init__(
use_tebd_bias=use_tebd_bias,
type_map=type_map,
seed=child_seed(seed, 1),
trainable=trainable,
)
self.tebd_dim = tebd_dim
self.concat_output_tebd = concat_output_tebd
@@ -691,6 +693,7 @@ def __init__(
ln_eps: Optional[float] = 1e-5,
smooth: bool = True,
seed: Optional[Union[int, list[int]]] = None,
trainable: bool = True,
) -> None:
self.rcut = rcut
self.rcut_smth = rcut_smth
@@ -741,6 +744,7 @@ def __init__(
self.resnet_dt,
self.precision,
seed=child_seed(seed, 0),
trainable=trainable,
)
self.embeddings = embeddings
if self.tebd_input_mode in ["strip"]:
@@ -756,6 +760,7 @@ def __init__(
self.resnet_dt,
self.precision,
seed=child_seed(seed, 1),
trainable=trainable,
)
self.embeddings_strip = embeddings_strip
else:
@@ -774,6 +779,7 @@ def __init__(
smooth=self.smooth,
precision=self.precision,
seed=child_seed(seed, 2),
trainable=trainable,
)

wanted_shape = (self.ntypes, self.nnei, 4)
@@ -1186,6 +1192,7 @@ def __init__(
smooth: bool = True,
precision: str = DEFAULT_PRECISION,
seed: Optional[Union[int, list[int]]] = None,
trainable: bool = True,
) -> None:
"""Construct a neighbor-wise attention net."""
super().__init__()
@@ -1219,6 +1226,7 @@ def __init__(
smooth=smooth,
precision=precision,
seed=child_seed(seed, ii),
trainable=trainable,
)
for ii in range(layer_num)
]
@@ -1314,6 +1322,7 @@ def __init__(
smooth: bool = True,
precision: str = DEFAULT_PRECISION,
seed: Optional[Union[int, list[int]]] = None,
trainable: bool = True,
) -> None:
"""Construct a neighbor-wise attention layer."""
super().__init__()
@@ -1340,6 +1349,7 @@ def __init__(
smooth=smooth,
precision=precision,
seed=child_seed(seed, 0),
trainable=trainable,
)
self.attn_layer_norm = LayerNorm(
self.embed_dim,
@@ -1420,6 +1430,7 @@ def __init__(
smooth: bool = True,
precision: str = DEFAULT_PRECISION,
seed: Optional[Union[int, list[int]]] = None,
trainable: bool = True,
) -> None:
"""Construct a multi-head neighbor-wise attention net."""
super().__init__()
@@ -1449,6 +1460,7 @@ def __init__(
use_timestep=False,
precision=precision,
seed=child_seed(seed, 0),
trainable=trainable,
)
self.out_proj = NativeLayer(
hidden_dim,
@@ -1457,6 +1469,7 @@ def __init__(
use_timestep=False,
precision=precision,
seed=child_seed(seed, 1),
trainable=trainable,
)

def call(self, query, nei_mask, input_r=None, sw=None, attnw_shift=20.0):
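Every hunk in dpa1.py follows the same pattern: a block that already receives a trainable flag now forwards it to each sub-layer it constructs, so freezing the block freezes all of its parameters. A minimal sketch of that pattern with hypothetical stand-in classes (ToyLayer and ToyAttentionBlock are illustrations, not the deepmd implementation):

# Sketch of the forwarding pattern used throughout this PR.
class ToyLayer:
    def __init__(self, num_in: int, num_out: int, trainable: bool = True) -> None:
        # a real layer would record this flag so that serialization and the
        # training loop can skip frozen parameters
        self.trainable = trainable

class ToyAttentionBlock:
    def __init__(self, dim: int, trainable: bool = True) -> None:
        # the parent's flag is passed to every sub-layer it builds, mirroring
        # the `trainable=trainable` lines added in this diff
        self.in_proj = ToyLayer(dim, 3 * dim, trainable=trainable)
        self.out_proj = ToyLayer(dim, dim, trainable=trainable)

block = ToyAttentionBlock(dim=64, trainable=False)
assert not block.in_proj.trainable and not block.out_proj.trainable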
6 changes: 6 additions & 0 deletions deepmd/dpmodel/descriptor/dpa2.py
@@ -474,6 +474,7 @@ def init_subclass_params(sub_data, sub_class):
smooth=smooth,
type_one_side=self.repinit_args.type_one_side,
seed=child_seed(seed, 0),
trainable=trainable,
)
self.use_three_body = self.repinit_args.use_three_body
if self.use_three_body:
@@ -493,6 +494,7 @@ def init_subclass_params(sub_data, sub_class):
resnet_dt=self.repinit_args.resnet_dt,
smooth=smooth,
seed=child_seed(seed, 5),
trainable=trainable,
)
else:
self.repinit_three_body = None
@@ -533,6 +535,7 @@ def init_subclass_params(sub_data, sub_class):
g1_out_mlp=self.repformer_args.g1_out_mlp,
ln_eps=self.repformer_args.ln_eps,
seed=child_seed(seed, 1),
trainable=trainable,
)
self.rcsl_list = [
(self.repformers.get_rcut(), self.repformers.get_nsel()),
@@ -562,6 +565,7 @@ def init_subclass_params(sub_data, sub_class):
use_tebd_bias=use_tebd_bias,
type_map=type_map,
seed=child_seed(seed, 2),
trainable=trainable,
)
self.concat_output_tebd = concat_output_tebd
self.precision = precision
@@ -585,6 +589,7 @@ def init_subclass_params(sub_data, sub_class):
bias=False,
precision=precision,
seed=child_seed(seed, 3),
trainable=trainable,
)
self.tebd_transform = None
if self.add_tebd_to_repinit_out:
@@ -594,6 +599,7 @@ def init_subclass_params(sub_data, sub_class):
bias=False,
precision=precision,
seed=child_seed(seed, 4),
trainable=trainable,
)
assert self.repinit.rcut > self.repformers.rcut
assert self.repinit.sel[0] > self.repformers.sel[0]
2 changes: 2 additions & 0 deletions deepmd/dpmodel/descriptor/dpa3.py
@@ -357,6 +357,7 @@ def init_subclass_params(sub_data, sub_class):
env_protection=env_protection,
precision=precision,
seed=child_seed(seed, 1),
trainable=trainable,
)

self.use_econf_tebd = use_econf_tebd
@@ -374,6 +375,7 @@ def init_subclass_params(sub_data, sub_class):
use_tebd_bias=use_tebd_bias,
type_map=type_map,
seed=child_seed(seed, 2),
trainable=trainable,
)
self.concat_output_tebd = concat_output_tebd
self.precision = precision
33 changes: 31 additions & 2 deletions deepmd/dpmodel/descriptor/repflows.py
@@ -167,6 +167,8 @@ class DescrptBlockRepflows(NativeOP, DescriptorBlock):
For example, when using paddings, there may be zero distances of neighbors, which may make division by zero error during environment matrix calculations without protection.
seed : int, optional
Random seed for parameter initialization.
trainable : bool, default: True
Whether the parameters in the block are trainable.
"""

def __init__(
@@ -205,6 +207,7 @@ def __init__(
sel_reduce_factor: float = 10.0,
use_loc_mapping: bool = True,
seed: Optional[Union[int, list[int]]] = None,
trainable: bool = True,
) -> None:
super().__init__()
self.e_rcut = float(e_rcut)
@@ -269,10 +272,19 @@ def __init__(
self.seed = seed

self.edge_embd = NativeLayer(
1, self.e_dim, precision=precision, seed=child_seed(seed, 0)
1,
self.e_dim,
precision=precision,
seed=child_seed(seed, 0),
trainable=trainable,
)
self.angle_embd = NativeLayer(
1, self.a_dim, precision=precision, bias=False, seed=child_seed(seed, 1)
1,
self.a_dim,
precision=precision,
bias=False,
seed=child_seed(seed, 1),
trainable=trainable,
)
layers = []
for ii in range(nlayers):
@@ -304,6 +316,7 @@ def __init__(
sel_reduce_factor=self.sel_reduce_factor,
smooth_edge_update=self.smooth_edge_update,
seed=child_seed(child_seed(seed, 1), ii),
trainable=trainable,
)
)
self.layers = layers
@@ -860,6 +873,7 @@ def __init__(
update_residual_init: str = "const",
precision: str = "float64",
seed: Optional[Union[int, list[int]]] = None,
trainable: bool = True,
) -> None:
super().__init__()
self.epsilon = 1e-4 # protection of 1./nnei
@@ -922,6 +936,7 @@ def __init__(
n_dim,
precision=precision,
seed=child_seed(seed, 0),
trainable=trainable,
)
if self.update_style == "res_residual":
self.n_residual.append(
@@ -931,6 +946,7 @@ def __init__(
self.update_residual_init,
precision=precision,
seed=child_seed(seed, 1),
trainable=trainable,
)
)

@@ -941,6 +957,7 @@ def __init__(
n_dim,
precision=precision,
seed=child_seed(seed, 2),
trainable=trainable,
)
if self.update_style == "res_residual":
self.n_residual.append(
@@ -950,6 +967,7 @@ def __init__(
self.update_residual_init,
precision=precision,
seed=child_seed(seed, 3),
trainable=trainable,
)
)

@@ -959,6 +977,7 @@ def __init__(
self.n_multi_edge_message * n_dim,
precision=precision,
seed=child_seed(seed, 4),
trainable=trainable,
)
if self.update_style == "res_residual":
for head_index in range(self.n_multi_edge_message):
@@ -969,6 +988,7 @@ def __init__(
self.update_residual_init,
precision=precision,
seed=child_seed(child_seed(seed, 5), head_index),
trainable=trainable,
)
)

@@ -978,6 +998,7 @@ def __init__(
e_dim,
precision=precision,
seed=child_seed(seed, 6),
trainable=trainable,
)
if self.update_style == "res_residual":
self.e_residual.append(
@@ -987,6 +1008,7 @@ def __init__(
self.update_residual_init,
precision=precision,
seed=child_seed(seed, 7),
trainable=trainable,
)
)

@@ -1015,13 +1037,15 @@ def __init__(
precision=precision,
bias=False,
seed=child_seed(seed, 8),
trainable=trainable,
)
self.a_compress_e_linear = NativeLayer(
self.e_dim,
self.e_a_compress_dim,
precision=precision,
bias=False,
seed=child_seed(seed, 9),
trainable=trainable,
)
else:
self.a_compress_n_linear = None
@@ -1033,12 +1057,14 @@ def __init__(
self.e_dim,
precision=precision,
seed=child_seed(seed, 10),
trainable=trainable,
)
self.edge_angle_linear2 = NativeLayer(
self.e_dim,
self.e_dim,
precision=precision,
seed=child_seed(seed, 11),
trainable=trainable,
)
if self.update_style == "res_residual":
self.e_residual.append(
Expand All @@ -1048,6 +1074,7 @@ def __init__(
self.update_residual_init,
precision=precision,
seed=child_seed(seed, 12),
trainable=trainable,
)
)

@@ -1057,6 +1084,7 @@ def __init__(
self.a_dim,
precision=precision,
seed=child_seed(seed, 13),
trainable=trainable,
)
if self.update_style == "res_residual":
self.a_residual.append(
Expand All @@ -1066,6 +1094,7 @@ def __init__(
self.update_residual_init,
precision=precision,
seed=child_seed(seed, 14),
trainable=trainable,
)
)
else:
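Taken together, the diff plumbs the descriptors' existing trainable argument down into every sub-block they build: embedding networks, type embeddings, attention layers, and repflow layers, instead of stopping at the top-level descriptor. A usage sketch, assuming the dpmodel DescrptDPA1 constructor keeps its current arguments (the cutoff, selection, and type values below are placeholders, not taken from this diff):

from deepmd.dpmodel.descriptor.dpa1 import DescrptDPA1

# With this change, trainable=False should now reach the embedding nets,
# the type embedding, and every attention layer of the descriptor, which
# is what one wants when freezing a pre-trained descriptor.
frozen_descriptor = DescrptDPA1(
    rcut=6.0,        # placeholder cutoff radius
    rcut_smth=0.5,   # placeholder smoothing cutoff
    sel=[120],       # placeholder neighbor selection
    ntypes=2,        # placeholder number of atom types
    trainable=False,
)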