Merged

Changes from 93 commits (100 commits in total)

Commits
e23dc5f
add dpa3 alpha
iProzd Dec 17, 2024
527cb85
rename and add uts
iProzd Dec 18, 2024
4fdc64f
add skip stat
iProzd Dec 18, 2024
d57b97b
rename
iProzd Dec 18, 2024
63040af
Update dpa3.py
iProzd Dec 18, 2024
ffb70c3
Update descriptor.py
iProzd Dec 18, 2024
9cb24d8
Update descriptor.py
iProzd Dec 18, 2024
fe6a92e
Update dpa3.py
iProzd Dec 18, 2024
389287a
add mae
iProzd Dec 11, 2024
20a60c6
add mae
iProzd Dec 11, 2024
1309e26
add compress
iProzd Dec 23, 2024
57acd99
add compress
iProzd Dec 23, 2024
4e65d8b
feat(pt): add dpa3 alpha descriptor (#4476)
iProzd Dec 24, 2024
76f28e9
Merge branch 'dpa3-alpha' into 1218_dpa3_alpha_nostat
iProzd Dec 24, 2024
8ac8180
(dpa3 alpha) add skip stat (#4501)
iProzd Dec 24, 2024
dc1b1a3
Refactor property (#37)
iProzd Dec 24, 2024
b675aad
add multig1 mess
iProzd Dec 24, 2024
55f7ef6
support CINN compiler for DPA2 example
HydrogenSulfate Dec 27, 2024
7ca2a9e
refine CINN flag
HydrogenSulfate Dec 27, 2024
b3e3c1f
Merge branch 'devel' into add_paddle_cinn_dpa2
HydrogenSulfate Dec 29, 2024
77c34e7
import annotations for compatibility with python<=3.9
HydrogenSulfate Dec 29, 2024
3a611ec
Merge branch 'devel' into add_paddle_cinn_dpa2
HydrogenSulfate Dec 29, 2024
779d748
Merge branch 'add_paddle_cinn_dpa2' of https://github.com/HydrogenSul…
HydrogenSulfate Dec 29, 2024
4874218
Perf: replace unnecessary torch.split with indexing
iProzd Jan 2, 2025
0598483
Perf: replace unnecessary torch.split with indexing (#4524)
iProzd Jan 2, 2025
6f4324b
add a_compress_e_rate
iProzd Dec 27, 2024
5fa7cee
fix typo
iProzd Jan 3, 2025
fe5c318
add a_compress_use_split
iProzd Dec 31, 2024
fec6462
fix typo
iProzd Jan 3, 2025
9596f94
feat(dpa3): add flexible compression for angle (#4526)
iProzd Jan 3, 2025
9d9dc8f
add n_multi_edge_message
iProzd Dec 24, 2024
ed335d1
feat(dpa3) add n_multi_edge_message (#4528)
iProzd Jan 4, 2025
0d2008f
Merge branch 'dpa3-alpha' into 1224_dpa3_alpha_mulg1mess
iProzd Jan 4, 2025
acfe81f
feat(dpa3) add property from devel branch (#4529)
iProzd Jan 4, 2025
930e581
add property from devel branch (#38)
iProzd Jan 4, 2025
2964c01
add huber loss
iProzd Jul 23, 2024
3feb0f6
Merge branch 'dpa3-alpha' into 0103_prob_scp_e1a
iProzd Jan 6, 2025
5702a47
Update auto_batch_size.py
iProzd Jan 7, 2025
7eaba53
Merge branch 'devel' into dpa3_alpha_paddle
HydrogenSulfate Jan 8, 2025
d6efb80
adapt dpa3 to paddle for compiler experiment
HydrogenSulfate Jan 8, 2025
6f85f12
Merge branch 'add_paddle_cinn_dpa2' into dpa3_alpha_paddle
HydrogenSulfate Jan 8, 2025
e2049fd
update fake json
HydrogenSulfate Jan 8, 2025
c1de85e
fix assert
HydrogenSulfate Jan 8, 2025
4f8b6a5
feat(dpa3): add huber loss (#4549)
iProzd Jan 13, 2025
492dd68
add CustomSilu
iProzd Jan 25, 2025
56f5ebe
add custom dsilu
iProzd Jan 30, 2025
277ae77
add optim_angle
iProzd Jan 31, 2025
387ce95
more maintainable code for optimized angle update (#39)
wanghan-iapcm Jan 31, 2025
554779f
Update repflow_layer.py
iProzd Jan 31, 2025
fa81155
add optim_edge
iProzd Jan 31, 2025
a3e71df
rename to optim_update
iProzd Jan 31, 2025
b292a91
cherry-pick #4478
iProzd Jan 31, 2025
b1b83d4
feat(pt): customized op for combination of silu and tanh
njzjz Feb 3, 2025
28b7e80
fix a typo
njzjz Feb 3, 2025
b711d52
add thsilu
iProzd Feb 4, 2025
0dae7e3
Update repflow_layer.py
iProzd Feb 4, 2025
ca6e97c
customized dsilu OP (#40)
njzjz Feb 6, 2025
b2a1f3a
fix numerical error for thsilu & cdsilu
iProzd Feb 6, 2025
6f0b708
fix numerical error for cdsilu (#42)
njzjz Feb 7, 2025
12aa429
Update utils.py
iProzd Feb 10, 2025
14c5459
Update utils.py
iProzd Feb 7, 2025
cde1877
fix jit
iProzd Feb 8, 2025
c61eb1d
fix grad backward (#43)
njzjz Feb 10, 2025
284d492
add dpa3 example
iProzd Feb 17, 2025
ed508e5
Update input_torch.json
iProzd Feb 17, 2025
356583f
update dpa3 with custom op and optimized update (#4600)
iProzd Feb 17, 2025
7517e22
Update input_torch.json
iProzd Feb 19, 2025
9e18b4d
feat(dpa3): update dpa3 example (#4606)
iProzd Feb 19, 2025
ab43e31
Merge branch 'dpa3-alpha' into dpa3_alpha_paddle
HydrogenSulfate Mar 5, 2025
fa71684
update code
HydrogenSulfate Mar 5, 2025
c115d48
update debug code
HydrogenSulfate Mar 6, 2025
488222a
refine code
HydrogenSulfate Mar 19, 2025
e732555
support dpa3 with paddle backend(eager mode)
HydrogenSulfate Apr 10, 2025
c9aab46
Merge branch 'devel' into dpa3_alpha_paddle
HydrogenSulfate Apr 10, 2025
fb3decc
restore files
HydrogenSulfate Apr 10, 2025
88d2a1e
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Apr 10, 2025
34ff815
tidy code
HydrogenSulfate Apr 10, 2025
193e8d6
Merge branch 'dpa3_alpha_paddle' of https://github.com/HydrogenSulfat…
HydrogenSulfate Apr 10, 2025
4628e60
tidy code
HydrogenSulfate Apr 10, 2025
a5e5402
remove redundant codes
HydrogenSulfate Apr 10, 2025
ef25a7f
fix pt->pd
HydrogenSulfate Apr 10, 2025
2d40ee0
fix typos and add unitests
HydrogenSulfate Apr 10, 2025
531fc16
add CINN UT and refine codes
HydrogenSulfate Apr 10, 2025
04768ce
workaround with SiLUT
HydrogenSulfate Apr 10, 2025
127caf3
upload missing files
HydrogenSulfate Apr 10, 2025
d723d7a
remove redundant code
HydrogenSulfate Apr 10, 2025
fdfa652
tidy UT
HydrogenSulfate Apr 10, 2025
d026fa4
update tabulate code
HydrogenSulfate Apr 10, 2025
068e42c
fix repeat -> tile
HydrogenSulfate Apr 10, 2025
b29ca55
update compression code
HydrogenSulfate Apr 10, 2025
15507e6
Merge branch 'devel' into dpa3_alpha_paddle
HydrogenSulfate Apr 10, 2025
06a6180
fix code
HydrogenSulfate Apr 10, 2025
4a4129d
remove fp64 for compatibility
HydrogenSulfate Apr 10, 2025
bf98888
fix
HydrogenSulfate Apr 10, 2025
e441eb9
fix
HydrogenSulfate Apr 11, 2025
7298021
remove test_compressed_descriptor_se_a.py
HydrogenSulfate Apr 11, 2025
8c02f99
code update
HydrogenSulfate Apr 15, 2025
60ebbf1
fix ut
HydrogenSulfate Apr 15, 2025
06e8b53
remove compress code temporarily
HydrogenSulfate Apr 15, 2025
1c4c38d
Merge branch 'devel' into dpa3_alpha_paddle
HydrogenSulfate Apr 15, 2025
52 changes: 26 additions & 26 deletions .pre-commit-config.yaml
@@ -65,13 +65,13 @@ repos:
- id: clang-format
exclude: ^(source/3rdparty|source/lib/src/gpu/cudart/.+\.inc|.+\.ipynb$|.+\.json$)
# markdown, yaml, CSS, javascript
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v4.0.0-alpha.8
hooks:
- id: prettier
types_or: [markdown, yaml, css]
# workflow files cannot be modified by pre-commit.ci
exclude: ^(source/3rdparty|\.github/workflows|\.clang-format)
# - repo: https://github.com/pre-commit/mirrors-prettier
# rev: v4.0.0-alpha.8
# hooks:
# - id: prettier
# types_or: [markdown, yaml, css]
# # workflow files cannot be modified by pre-commit.ci
# exclude: ^(source/3rdparty|\.github/workflows|\.clang-format)
# Shell
- repo: https://github.com/scop/pre-commit-shfmt
rev: v3.11.0-1
@@ -83,25 +83,25 @@ repos:
hooks:
- id: cmake-format
#- id: cmake-lint
- repo: https://github.com/njzjz/mirrors-bibtex-tidy
rev: v1.13.0
hooks:
- id: bibtex-tidy
args:
- --curly
- --numeric
- --align=13
- --blank-lines
# disable sort: the order of keys and fields has explicit meanings
#- --sort=key
- --duplicates=key,doi,citation,abstract
- --merge=combine
#- --sort-fields
#- --strip-comments
- --trailing-commas
- --encode-urls
- --remove-empty-fields
- --wrap=80
# - repo: https://github.com/njzjz/mirrors-bibtex-tidy
# rev: v1.13.0
# hooks:
# - id: bibtex-tidy
# args:
# - --curly
# - --numeric
# - --align=13
# - --blank-lines
# # disable sort: the order of keys and fields has explicit meanings
# #- --sort=key
# - --duplicates=key,doi,citation,abstract
# - --merge=combine
# #- --sort-fields
# #- --strip-comments
# - --trailing-commas
# - --encode-urls
# - --remove-empty-fields
# - --wrap=80
# license header
- repo: https://github.com/Lucas-C/pre-commit-hooks
rev: v1.5.5
2 changes: 2 additions & 0 deletions deepmd/pd/loss/__init__.py
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: LGPL-3.0-or-later
from .ener import (
EnergyHessianStdLoss,
EnergyStdLoss,
)
from .loss import (
TaskLoss,
)

__all__ = [
"EnergyHessianStdLoss",
"EnergyStdLoss",
"TaskLoss",
]
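With the export above in place, the hessian-aware loss can be imported next to the existing ones. A minimal import sketch (paths taken from this diff):

# Import sketch for the new export; the class is defined in deepmd/pd/loss/ener.py below.
from deepmd.pd.loss import EnergyHessianStdLoss, EnergyStdLoss, TaskLoss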
101 changes: 86 additions & 15 deletions deepmd/pd/loss/ener.py
@@ -56,7 +56,7 @@ def __init__(
use_huber=False,
huber_delta=0.01,
**kwargs,
):
) -> None:
r"""Construct a layer to compute loss on energy, force and virial.

Parameters
@@ -287,9 +287,9 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
rmse_f.detach(), find_force
)
else:
l1_force_loss = F.l1_loss(force_label, force_pred, reduction="none")
l1_force_loss = F.l1_loss(force_label, force_pred, reduction="mean")
more_loss["mae_f"] = self.display_if_exist(
l1_force_loss.mean().detach(), find_force
l1_force_loss.detach(), find_force
)
l1_force_loss = l1_force_loss.sum(-1).mean(-1).sum()
loss += (pref_f * l1_force_loss).to(GLOBAL_PD_FLOAT_PRECISION)
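For reference, the switch from reduction="none" to reduction="mean" means F.l1_loss already returns a 0-d tensor averaged over all force components, so mae_f can be logged without a further .mean(). A minimal check (assumes only that Paddle is installed):

import paddle
import paddle.nn.functional as F

force_pred = paddle.full([2, 3], 1.0)
force_label = paddle.zeros([2, 3])
# reduction="mean" averages |label - pred| over all 6 elements -> exactly 1.0
assert float(F.l1_loss(force_label, force_pred, reduction="mean")) == 1.0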
@@ -324,20 +324,19 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
drdq_reshape = drdq.reshape(
[-1, natoms * 3, self.numb_generalized_coord]
)
gen_force_label = paddle.einsum(
"bij,bi->bj", drdq_reshape, force_label_reshape_nframes
)
# gen_force_label = (
# drdq_reshape * force_label_reshape_nframes.unsqueeze(-1)
# ).sum([-2])

# gen_force_label = paddle.einsum(
# "bij,bi->bj", drdq_reshape, force_label_reshape_nframes
# )
gen_force_label = (
drdq_reshape * force_label_reshape_nframes.unsqueeze(-1)
).sum([-2])

# gen_force = paddle.einsum(
# "bij,bi->bj", drdq_reshape, force_reshape_nframes
# )
gen_force = (drdq_reshape * force_reshape_nframes.unsqueeze(-1)).sum(
[-2]
gen_force = paddle.einsum(
"bij,bi->bj", drdq_reshape, force_reshape_nframes
)
# gen_force = (drdq_reshape * force_reshape_nframes.unsqueeze(-1)).sum(
# [-2]
# )

diff_gen_force = gen_force_label - gen_force
l2_gen_force_loss = paddle.square(diff_gen_force).mean()
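The two formulations swapped in this hunk are algebraically identical: paddle.einsum("bij,bi->bj", a, x) contracts the shared i axis exactly as broadcasting x against a and summing over axis -2 does. A minimal equivalence check (assumes a Paddle version with einsum support, i.e. >= 2.3):

import paddle

drdq = paddle.rand([4, 6, 5])  # (nframes, natoms * 3, numb_generalized_coord)
force = paddle.rand([4, 6])    # (nframes, natoms * 3)
via_einsum = paddle.einsum("bij,bi->bj", drdq, force)
via_sum = (drdq * force.unsqueeze(-1)).sum([-2])
assert paddle.allclose(via_einsum, via_sum)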
@@ -534,3 +533,75 @@ def deserialize(cls, data: dict) -> "TaskLoss":
check_version_compatibility(data.pop("@version"), 2, 1)
data.pop("@class")
return cls(**data)


class EnergyHessianStdLoss(EnergyStdLoss):
def __init__(
self,
start_pref_h=0.0,
limit_pref_h=0.0,
**kwargs,
):
r"""Enable the layer to compute loss on hessian.

Parameters
----------
start_pref_h : float
The prefactor of hessian loss at the start of the training.
limit_pref_h : float
The prefactor of hessian loss at the end of the training.
**kwargs
Other keyword arguments.
"""
super().__init__(**kwargs)
self.has_h = (start_pref_h != 0.0 and limit_pref_h != 0.0) or self.inference

self.start_pref_h = start_pref_h
self.limit_pref_h = limit_pref_h

def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
model_pred, loss, more_loss = super().forward(
input_dict, model, label, natoms, learning_rate, mae=mae
)
coef = learning_rate / self.starter_learning_rate
pref_h = self.limit_pref_h + (self.start_pref_h - self.limit_pref_h) * coef

if self.has_h and "hessian" in model_pred and "hessian" in label:
find_hessian = label.get("find_hessian", 0.0)
pref_h = pref_h * find_hessian
diff_h = label["hessian"].reshape(
[-1],
) - model_pred["hessian"].reshape(
[-1],
)
l2_hessian_loss = paddle.mean(paddle.square(diff_h))
if not self.inference:
more_loss["l2_hessian_loss"] = self.display_if_exist(
l2_hessian_loss.detach(), find_hessian
)
loss += pref_h * l2_hessian_loss
rmse_h = l2_hessian_loss.sqrt()
more_loss["rmse_h"] = self.display_if_exist(rmse_h.detach(), find_hessian)
if mae:
mae_h = paddle.mean(paddle.abs(diff_h))
more_loss["mae_h"] = self.display_if_exist(mae_h.detach(), find_hessian)

if not self.inference:
more_loss["rmse"] = paddle.sqrt(loss.detach())
return model_pred, loss, more_loss

@property
def label_requirement(self) -> list[DataRequirementItem]:
"""Add hessian label requirement needed for this loss calculation."""
label_requirement = super().label_requirement
if self.has_h:
label_requirement.append(
DataRequirementItem(
"hessian",
ndof=1, # 9=3*3 --> 3N*3N=ndof*natoms*natoms
atomic=True,
must=False,
high_prec=False,
)
)
return label_requirement
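The hessian prefactor computed in forward() follows the same schedule as the other loss terms: it is interpolated linearly in the ratio of the current to the starting learning rate, decaying from start_pref_h to limit_pref_h. A small numeric sketch (the values 1.0 and 0.1 are illustrative; both prefactors default to 0.0 above):

def hessian_pref(lr, start_lr, start_pref_h=1.0, limit_pref_h=0.1):
    # Mirrors pref_h = limit + (start - limit) * lr / start_lr from forward().
    coef = lr / start_lr
    return limit_pref_h + (start_pref_h - limit_pref_h) * coef

assert hessian_pref(1e-3, 1e-3) == 1.0                # start of training
assert abs(hessian_pref(1e-5, 1e-3) - 0.109) < 1e-12  # after lr decay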
4 changes: 4 additions & 0 deletions deepmd/pd/model/descriptor/__init__.py
@@ -12,6 +12,9 @@
from .dpa2 import (
DescrptDPA2,
)
from .dpa3 import (
DescrptDPA3,
)
from .env_mat import (
prod_env_mat,
)
@@ -39,6 +42,7 @@
"DescrptBlockSeTTebd",
"DescrptDPA1",
"DescrptDPA2",
"DescrptDPA3",
"DescrptSeA",
"DescrptSeAttenV2",
"DescrptSeTTebd",
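Downstream code can pick the new descriptor up from the package namespace; a one-line sketch (path as registered above):

from deepmd.pd.model.descriptor import DescrptDPA3  # exported via __all__ above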
24 changes: 20 additions & 4 deletions deepmd/pd/model/descriptor/descriptor.py
@@ -6,12 +6,16 @@
)
from typing import (
Callable,
NoReturn,
Optional,
Union,
)

import paddle

from deepmd.pd.model.network.network import (
TypeEmbedNet,
)
from deepmd.pd.utils import (
env,
)
@@ -99,7 +103,7 @@ def compute_input_stats(
self,
merged: Union[Callable[[], list[dict]], list[dict]],
path: Optional[DPPath] = None,
):
) -> NoReturn:
"""
Compute the input statistics (e.g. mean and stddev) for the descriptors from packed data.

@@ -122,7 +126,7 @@ def get_stats(self) -> dict[str, StatItem]:
"""Get the statistics of the descriptor."""
raise NotImplementedError

def share_params(self, base_class, shared_level, resume=False):
def share_params(self, base_class, shared_level, resume=False) -> None:
"""
Share the parameters of self to the base_class with shared_level during multitask training.
If not start from checkpoint (resume is False),
@@ -134,7 +138,10 @@ def share_params(self, base_class, shared_level, resume=False) -> None:
if shared_level == 0:
# link buffers
if hasattr(self, "mean"):
if not resume:
if not resume and (
not getattr(self, "set_stddev_constant", False)
or not getattr(self, "set_davg_zero", False)
):
# in case of change params during resume
base_env = EnvMatStatSe(base_class)
base_env.stats = base_class.stats
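The extra guard added here skips re-merging environment statistics when both of them are held constant anyway. A boolean sketch of that condition (flag names as in the diff; the getattr defaults of False are kept):

def needs_stat_merge(resume, set_stddev_constant=False, set_davg_zero=False):
    # Merge stats only on a fresh start, and only if at least one of the
    # two statistics is not fixed; mirrors the condition above.
    return (not resume) and (not set_stddev_constant or not set_davg_zero)

assert needs_stat_merge(resume=False)      # fresh start: merge stats
assert not needs_stat_merge(resume=True)   # resuming: keep checkpoint stats
assert not needs_stat_merge(False, set_stddev_constant=True, set_davg_zero=True)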
@@ -172,6 +179,7 @@ def forward(
extended_atype: paddle.Tensor,
extended_atype_embd: Optional[paddle.Tensor] = None,
mapping: Optional[paddle.Tensor] = None,
type_embedding: Optional[paddle.Tensor] = None,
):
"""Calculate DescriptorBlock."""
pass
@@ -185,7 +193,15 @@ def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor block needs sorted nlist when using `forward_lower`."""


def extend_descrpt_stat(des, type_map, des_with_stat=None):
def make_default_type_embedding(
ntypes,
):
aux = {}
aux["tebd_dim"] = 8
return TypeEmbedNet(ntypes, aux["tebd_dim"]), aux
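A usage sketch for the helper above (ntypes=4 is illustrative; tebd_dim is hard-coded to 8):

net, aux = make_default_type_embedding(ntypes=4)
assert aux == {"tebd_dim": 8}  # net is a TypeEmbedNet(4, 8)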


def extend_descrpt_stat(des, type_map, des_with_stat=None) -> None:
r"""
Extend the statistics of a descriptor block with types from newly provided `type_map`.

60 changes: 59 additions & 1 deletion deepmd/pd/model/descriptor/dpa1.py
@@ -25,9 +25,15 @@
PRECISION_DICT,
RESERVED_PRECISION_DICT,
)
from deepmd.pd.utils.tabulate import (
DPTabulate,
)
from deepmd.pd.utils.update_sel import (
UpdateSel,
)
from deepmd.pd.utils.utils import (
ActivationFn,
)
from deepmd.utils.data_system import (
DeepmdDataSystem,
)
@@ -584,7 +590,59 @@ def enable_compression(
The overflow check frequency
"""
# do some checks before the model compression process
raise NotImplementedError("Model compression is not supported in paddle yet.")
if self.compress:
raise ValueError("Compression is already enabled.")
assert not self.se_atten.resnet_dt, (
"Model compression error: descriptor resnet_dt must be false!"
)
for tt in self.se_atten.exclude_types:
if (tt[0] not in range(self.se_atten.ntypes)) or (
tt[1] not in range(self.se_atten.ntypes)
):
raise RuntimeError(
"exclude types"
+ str(tt)
+ " must within the number of atomic types "
+ str(self.se_atten.ntypes)
+ "!"
)
if (
self.se_atten.ntypes * self.se_atten.ntypes
- len(self.se_atten.exclude_types)
== 0
):
raise RuntimeError(
"Empty embedding-nets are not supported in model compression!"
)

if self.se_atten.attn_layer != 0:
raise RuntimeError("Cannot compress model when attention layer is not 0.")

if self.tebd_input_mode != "strip":
raise RuntimeError("Cannot compress model when tebd_input_mode == 'concat'")

data = self.serialize()
self.table = DPTabulate(
self,
data["neuron"],
data["type_one_side"],
data["exclude_types"],
ActivationFn(data["activation_function"]),
)
self.table_config = [
table_extrapolate,
table_stride_1,
table_stride_2,
check_frequency,
]
self.lower, self.upper = self.table.build(
min_nbor_dist, table_extrapolate, table_stride_1, table_stride_2
)

self.se_atten.enable_compression(
self.table.data, self.table_config, self.lower, self.upper
)
self.compress = True
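
A hedged usage sketch for the compression path above (parameter names follow the calls in enable_compression; the numeric values are common choices, not verified defaults):

# descrpt is a trained DescrptDPA1 with tebd_input_mode="strip" and
# attn_layer=0; min_nbor_dist comes from the training-data statistics.
descrpt.enable_compression(
    min_nbor_dist=0.5,
    table_extrapolate=5.0,
    table_stride_1=0.01,
    table_stride_2=0.1,
    check_frequency=-1,
)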

def forward(
self,