From 975d9e4c12e23c2f06e82044c5a4cdec08fd44fa Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Mon, 10 Mar 2025 18:03:22 +0800 Subject: [PATCH 01/26] Init branch --- deepmd/pt/loss/denoise.py | 289 ++++++--- deepmd/pt/model/atomic_model/__init__.py | 4 + .../atomic_model/denoise_atomic_model.py | 61 ++ deepmd/pt/model/model/__init__.py | 9 +- deepmd/pt/model/model/denoise_model.py | 107 ++++ deepmd/pt/model/task/denoise.py | 587 +++++++++++++++--- deepmd/utils/argcheck.py | 154 +++++ 7 files changed, 1055 insertions(+), 156 deletions(-) create mode 100644 deepmd/pt/model/atomic_model/denoise_atomic_model.py create mode 100644 deepmd/pt/model/model/denoise_model.py diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py index 574210adb6..70e657beb6 100644 --- a/deepmd/pt/loss/denoise.py +++ b/deepmd/pt/loss/denoise.py @@ -1,4 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import numpy as np import torch import torch.nn.functional as F @@ -8,102 +9,224 @@ from deepmd.pt.utils import ( env, ) +from deepmd.pt.utils.env import ( + GLOBAL_PT_FLOAT_PRECISION, +) +from deepmd.utils.data import ( + DataRequirementItem, +) +from deepmd.pt.utils.region import ( + phys2inter, + inter2phys, +) +def get_cell_perturb_matrix(cell_pert_fraction: float): + # TODO: user fix some component + if cell_pert_fraction < 0: + raise RuntimeError("cell_pert_fraction can not be negative") + e0 = torch.rand(6) + e = e0 * 2 * cell_pert_fraction - cell_pert_fraction + cell_pert_matrix = torch.tensor( + [ + [1 + e[0], 0, 0], + [e[5], 1 + e[1], 0], + [e[4], e[3], 1 + e[2]], + ], + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.DEVICE + ) + return cell_pert_matrix, e class DenoiseLoss(TaskLoss): def __init__( self, - ntypes, - masked_token_loss=1.0, - masked_coord_loss=1.0, - norm_loss=0.01, - use_l1=True, - beta=1.00, - mask_loss_coord=True, - mask_loss_token=True, + mask_token: bool = False, + mask_coord: bool = True, + mask_cell: bool = False, + token_loss: float = 1.0, + coord_loss: float = 1.0, + cell_loss: float = 1.0, + noise_type: str = "gaussian", + coord_noise: float = 0.2, + cell_pert_fraction: float = 0.0, + noise_mode: str = "prob", + mask_num: int = 1, + mask_prob: float = 0.2, + loss_func: str = "rmse", **kwargs, ) -> None: - """Construct a layer to compute loss on coord, and type reconstruction.""" + r"""Construct a layer to compute loss on token, coord and cell. + + Parameters + ---------- + mask_token: bool + Whether to mask token. + mask_coord: bool + Whether to mask coordinate. + mask_cell: bool + Whether to mask cell. + token_loss: float + The preference factor for token denoise. + coord_loss: float + The preference factor for coordinate denoise. + cell_loss: float + The preference factor for cell denoise. + noise_type : str + The type of noise to add to the coordinate. It can be 'uniform' or 'gaussian'. + coord_noise : float + The magnitude of noise to add to the coordinate. + cell_pert_fraction: float + A value determines how much will cell deform. + noise_mode : str + "'prob' means the noise is added with a probability.'fix_num' means the noise is added with a fixed number." + mask_num : int + The number of atoms to mask coordinates. It is only used when noise_mode is 'fix_num'. + mask_prob : float + The probability of masking coordinates. It is only used when noise_mode is 'prob'. + loss_func: str + The loss function to minimize, it can be 'mae' or 'rmse'. + **kwargs + Other keyword arguments. 
+ """ super().__init__() - self.ntypes = ntypes - self.masked_token_loss = masked_token_loss - self.masked_coord_loss = masked_coord_loss - self.norm_loss = norm_loss - self.has_coord = self.masked_coord_loss > 0.0 - self.has_token = self.masked_token_loss > 0.0 - self.has_norm = self.norm_loss > 0.0 - self.use_l1 = use_l1 - self.beta = beta - self.frac_beta = 1.00 / self.beta - self.mask_loss_coord = mask_loss_coord - self.mask_loss_token = mask_loss_token - - def forward(self, model_pred, label, natoms, learning_rate, mae=False): - """Return loss on coord and type denoise. + self.mask_token = mask_token + self.mask_coord = mask_coord + self.mask_cell = mask_cell + self.token_loss = token_loss + self.coord_loss = coord_loss + self.cell_loss = cell_loss + self.noise_type = noise_type + self.coord_noise = coord_noise + self.cell_pert_fraction = cell_pert_fraction + self.noise_mode = noise_mode + self.mask_num = mask_num + self.mask_prob = mask_prob + self.loss_func = loss_func + + def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): + """Return loss on token,coord and cell. + + Parameters + ---------- + input_dict : dict[str, torch.Tensor] + Model inputs. + model : torch.nn.Module + Model to be used to output the predictions. + label : dict[str, torch.Tensor] + Labels. + natoms : int + The local atom number. Returns ------- - - loss: Loss to minimize. + model_pred: dict[str, torch.Tensor] + Model predictions. + loss: torch.Tensor + Loss for model to minimize. + more_loss: dict[str, torch.Tensor] + Other losses for display. """ - updated_coord = model_pred["updated_coord"] - logits = model_pred["logits"] - clean_coord = label["clean_coord"] - clean_type = label["clean_type"] - coord_mask = label["coord_mask"] - type_mask = label["type_mask"] - loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0] - more_loss = {} - if self.has_coord: - if self.mask_loss_coord: - masked_updated_coord = updated_coord[coord_mask] - masked_clean_coord = clean_coord[coord_mask] - if masked_updated_coord.size(0) > 0: - coord_loss = F.smooth_l1_loss( - masked_updated_coord.view(-1, 3), - masked_clean_coord.view(-1, 3), - reduction="mean", - beta=self.beta, - ) - else: - coord_loss = torch.zeros( - 1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE - )[0] + nloc = input_dict["atype"].shape[1] + nbz = input_dict["atype"].shape[0] + input_dict["box"] = input_dict["box"].cuda() + + # TODO: Change lattice to lower triangular matrix + + label["clean_coord"] = input_dict["coord"].clone().detach() + label["clean_box"] = input_dict["box"].clone().detach() + origin_frac_coord = phys2inter(label["clean_coord"], label["clean_box"].reshape(nbz,3,3)) + label["clean_frac_coord"] = origin_frac_coord.clone().detach() + if self.mask_cell: + strain_components_all = torch.zeros((nbz,3), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + for ii in range(nbz): + cell_perturb_matrix, strain_components = get_cell_perturb_matrix_HEA(self.cell_noise) + # left-multiplied by `cell_perturb_matrix`` to get the noise box + input_dict["box"][ii] = torch.matmul(cell_perturb_matrix, input_dict["box"][ii].reshape(3,3)).reshape(-1) + input_dict["coord"][ii] = torch.matmul(origin_frac_coord[ii].reshape(nloc,3), input_dict["box"][ii].reshape(3,3)) + strain_components_all[ii] = strain_components.reshape(-1) + label["strain_components"] = strain_components_all.clone().detach() + + if self.mask_coord: + # add noise to coordinates and update label['updated_coord'] + mask_num = 0 + if 
self.noise_mode == "fix_num": + mask_num = self.mask_num + if(nloc < mask_num): + mask_num = nloc + elif self.noise_mode == "prob": + mask_num = int(self.mask_prob * nloc) + if mask_num == 0: + mask_num = 1 else: - coord_loss = F.smooth_l1_loss( - updated_coord.view(-1, 3), - clean_coord.view(-1, 3), - reduction="mean", - beta=self.beta, - ) - loss += self.masked_coord_loss * coord_loss - more_loss["coord_l1_error"] = coord_loss.detach() - if self.has_token: - if self.mask_loss_token: - masked_logits = logits[type_mask] - masked_target = clean_type[type_mask] - if masked_logits.size(0) > 0: - token_loss = F.nll_loss( - F.log_softmax(masked_logits, dim=-1), - masked_target, - reduction="mean", + NotImplementedError(f"Unknown noise mode {self.noise_mode}!") + + coord_mask_all = torch.zeros(input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE) + for ii in range(nbz): + noise_on_coord = 0.0 + coord_mask_res = np.random.choice(range(nloc), mask_num, replace=False).tolist() + coord_mask = np.isin(range(nloc), coord_mask_res) # nloc + if self.noise_type == "uniform": + noise_on_coord = np.random.uniform( + low=-self.noise, high=self.noise, size=(mask_num, 3) + ) + elif self.noise_type == "gaussian": + noise_on_coord = np.random.normal( + loc=0.0, scale=self.noise, size=(mask_num, 3) ) else: - token_loss = torch.zeros( - 1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE - )[0] - else: - token_loss = F.nll_loss( - F.log_softmax(logits.view(-1, self.ntypes - 1), dim=-1), - clean_type.view(-1), - reduction="mean", - ) - loss += self.masked_token_loss * token_loss - more_loss["token_error"] = token_loss.detach() - if self.has_norm: - norm_x = model_pred["norm_x"] - norm_delta_pair_rep = model_pred["norm_delta_pair_rep"] - loss += self.norm_loss * (norm_x + norm_delta_pair_rep) - more_loss["norm_loss"] = norm_x.detach() + norm_delta_pair_rep.detach() - - return loss, more_loss + raise NotImplementedError(f"Unknown noise type {self.noise_type}!") + + noise_on_coord = torch.tensor(noise_on_coord, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) # mask_num 3 + input_dict["coord"][ii][coord_mask ,:] += noise_on_coord # nbz mask_num 3 // + coord_mask_all[ii] = torch.tensor(coord_mask, dtype=torch.bool, device=env.DEVICE) + label['coord_mask'] = coord_mask_all + frac_coord = phys2inter(input_dict["coord"], input_dict["box"].reshape(nbz,3,3)) + #label["updated_coord"] = (label["clean_frac_coord"] - frac_coord).clone().detach() + label["updated_coord"] = ((label["clean_frac_coord"] - frac_coord) @ label["clean_box"].reshape(nbz,3,3)).clone().detach() + + if self.mask_token: + # TODO: mask_token + pass + + if (not self.mask_coord) and (not self.mask_cell) and (not self.mask_token): + raise RuntimeError("At least one of mask_coord, mask_cell and mask_token should be True!") + + model_pred = model(**input_dict) + + loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0] + more_loss = {} + + diff_coord = (label["updated_coord"] - model_pred["updated_coord"]).reshape(-1) + diff_cell = (label["strain_components"] - model_pred["strain_components"]).reshape(-1) + if self.loss_func == "rmse": + l2_coord_loss = torch.mean(torch.square(diff_coord)) + l2_cell_loss = torch.mean(torch.square(diff_cell)) + rmse_f = l2_coord_loss.sqrt() + rmse_v = l2_cell_loss.sqrt() + more_loss["rmse_coord"] = rmse_f.detach() + more_loss["rmse_cell"] = rmse_v.detach() + loss += self.coord_loss * l2_coord_loss.to(GLOBAL_PT_FLOAT_PRECISION) + self.cell_loss * 
l2_cell_loss.to(GLOBAL_PT_FLOAT_PRECISION) + elif self.loss_func == "mae": + l1_coord_loss = F.l1_loss(label["updated_coord"], model_pred["updated_coord"], reduction="none") + l1_cell_loss = F.l1_loss(label["strain_components"], model_pred["strain_components"], reduction="none") + more_loss["mae_coord"] = l1_coord_loss.mean().detach() + more_loss["mae_cell"] = l1_cell_loss.mean().detach() + l1_coord_loss = l1_coord_loss.sum(-1).mean(-1).sum() + l1_cell_loss = l1_cell_loss.sum() + loss += self.coord_loss * l1_coord_loss.to(GLOBAL_PT_FLOAT_PRECISION) + self.cell_loss * l1_cell_loss.to(GLOBAL_PT_FLOAT_PRECISION) + else: + raise RuntimeError(f"Unknown loss function {self.loss_func}!") + return model_pred, loss, more_loss + + @property + def label_requirement(self) -> list[DataRequirementItem]: + """Return data label requirements needed for this loss calculation.""" + return [] + + def serialize(self) -> dict: + pass + + @classmethod + def deserialize(cls, data: dict) -> "TaskLoss": + pass \ No newline at end of file diff --git a/deepmd/pt/model/atomic_model/__init__.py b/deepmd/pt/model/atomic_model/__init__.py index 4da9bf781b..beb222e37f 100644 --- a/deepmd/pt/model/atomic_model/__init__.py +++ b/deepmd/pt/model/atomic_model/__init__.py @@ -42,6 +42,9 @@ from .property_atomic_model import ( DPPropertyAtomicModel, ) +from .denoise_atomic_model import ( + DPDenoiseAtomicModel +) __all__ = [ "BaseAtomicModel", @@ -54,4 +57,5 @@ "DPZBLLinearEnergyAtomicModel", "LinearEnergyAtomicModel", "PairTabAtomicModel", + "DPDenoiseAtomicModel", ] diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py new file mode 100644 index 0000000000..69d1f4196a --- /dev/null +++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later + +import torch +import logging + +from deepmd.pt.model.task.denoise import ( + DenoiseNet, +) + +from .dp_atomic_model import ( + DPAtomicModel, +) +from IPython import embed + +log = logging.getLogger(__name__) + +class DPDenoiseAtomicModel(DPAtomicModel): + def __init__(self, descriptor, fitting, type_map, **kwargs): + if not isinstance(fitting, DenoiseNet): + raise TypeError( + "fitting must be an instance of DenoiseNet for DPDenoiseAtomicModel" + ) + super().__init__(descriptor, fitting, type_map, **kwargs) + + def apply_out_stat( + self, + ret: dict[str, torch.Tensor], + atype: torch.Tensor, + ): + # hack !!! 
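+        # NOTE: the divisors below (240 and 29) appear to be hard-coded,
+        # dataset-dependent output-normalization constants; their origin is
+        # not documented here, and they should eventually be replaced by
+        # properly computed output statistics.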
+ ret["virial"] = ret["virial"]/240 + ret["force"] = ret["force"]/29 + + ''' + virial = ret["virial"] # 原始形状 [nbz, nloc, 6] + + # 批量处理所有元素(保留梯度) + # 重塑为二维张量以便处理 [batch_size * nloc, 9] + virial_2d = virial.view(-1, 6) + + # 构建3x3对称矩阵(向量化操作) + # 每个元素的索引对应原始矩阵位置: + # [0, 1, 2] 为对角线元素 + # [3, 4, 5] 对应下三角元素(自动保持对称性) + matrices = torch.zeros(virial_2d.size(0), 3, 3, + dtype=virial.dtype, device=virial.device) + + # 填充对角线元素 + matrices[:, 0, 0] = 1 + virial_2d[:, 0] + matrices[:, 1, 1] = 1 + virial_2d[:, 1] + matrices[:, 2, 2] = 1 + virial_2d[:, 2] + + # 填充对称的非对角线元素 + matrices[:, 0, 1] = matrices[:, 1, 0] = 0.5 * virial_2d[:, 5] # (0,1) & (1,0) + matrices[:, 0, 2] = matrices[:, 2, 0] = 0.5 * virial_2d[:, 4] # (0,2) & (2,0) + matrices[:, 1, 2] = matrices[:, 2, 1] = 0.5 * virial_2d[:, 3] # (1,2) & (2,1) + + # 恢复原始形状 [nbz, nloc, 3, 3] -> [nbz, nloc, 9] + ret["virial"] = matrices.view(virial.shape[0], virial.shape[1], 9) + ''' + return ret diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 37e664e82a..3bf5da5253 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -69,6 +69,9 @@ from .property_model import ( PropertyModel, ) +from .denoise_model import ( + DenoiseModel, +) from .spin_model import ( SpinEnergyModel, SpinModel, @@ -94,6 +97,8 @@ def _get_standard_model_components(model_params, ntypes): fitting_net["embedding_width"] = descriptor.get_dim_emb() fitting_net["dim_descrpt"] = descriptor.get_dim_out() grad_force = "direct" not in fitting_net["type"] + if fitting_net["type"] in ["denoise"]: + fitting_net["out_dim"] = descriptor.get_dim_emb() if not grad_force: fitting_net["out_dim"] = descriptor.get_dim_emb() if "ener" in fitting_net["type"]: @@ -265,6 +270,8 @@ def get_standard_model(model_params): modelcls = EnergyModel elif fitting_net_type == "property": modelcls = PropertyModel + elif fitting_net_type == "denoise": + modelcls = DenoiseModel else: raise RuntimeError(f"Unknown fitting type: {fitting_net_type}") @@ -276,8 +283,6 @@ def get_standard_model(model_params): pair_exclude_types=pair_exclude_types, preset_out_bias=preset_out_bias, ) - if model_params.get("hessian_mode"): - model.enable_hessian() model.model_def_script = json.dumps(model_params_old) return model diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py new file mode 100644 index 0000000000..788724752c --- /dev/null +++ b/deepmd/pt/model/model/denoise_model.py @@ -0,0 +1,107 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import torch + +from deepmd.pt.model.atomic_model import ( + DPDenoiseAtomicModel, +) +from deepmd.pt.model.model.model import ( + BaseModel, +) + +from .dp_model import ( + DPModelCommon, +) +from .make_model import ( + make_model, +) +from IPython import embed + +DPDenoiseModel_ = make_model(DPDenoiseAtomicModel) + + +@BaseModel.register("denoise") +class DenoiseModel(DPModelCommon, DPDenoiseModel_): + model_type = "property" + + def __init__( + self, + *args, + **kwargs, + ) -> None: + DPModelCommon.__init__(self) + DPDenoiseModel_.__init__(self, *args, **kwargs) + + def translated_output_def(self): + pass + ''' + out_def_data = self.model_output_def().get_data() + output_def = { + f"atom_{self.get_var_name()}": out_def_data[self.get_var_name()], + self.get_var_name(): out_def_data[f"{self.get_var_name()}_redu"], + } + if "mask" in out_def_data: + output_def["mask"] = out_def_data["mask"] + return output_def + ''' + + def forward( + self, + coord, + 
atype, + box: Optional[torch.Tensor] = None, + fparam: Optional[torch.Tensor] = None, + aparam: Optional[torch.Tensor] = None, + do_atomic_virial: bool = False, + ) -> dict[str, torch.Tensor]: + model_ret = self.forward_common( + coord, + atype, + box, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + ) + model_predict = {} + model_predict["force"] = model_ret["force"] + model_predict["atom_virial"] = model_ret["virial"] + model_predict["virial"] = model_ret["virial_redu"] + if "mask" in model_ret: + model_predict["mask"] = model_ret["mask"] + return model_predict + + @torch.jit.export + def forward_lower( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[torch.Tensor] = None, + fparam: Optional[torch.Tensor] = None, + aparam: Optional[torch.Tensor] = None, + do_atomic_virial: bool = False, + comm_dict: Optional[dict[str, torch.Tensor]] = None, + ): + pass + ''' + model_ret = self.forward_common_lower( + extended_coord, + extended_atype, + nlist, + mapping, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + comm_dict=comm_dict, + extra_nlist_sort=self.need_sorted_nlist_for_lower(), + ) + model_predict = {} + model_predict[f"atom_{self.get_var_name()}"] = model_ret[self.get_var_name()] + model_predict[self.get_var_name()] = model_ret[f"{self.get_var_name()}_redu"] + if "mask" in model_ret: + model_predict["mask"] = model_ret["mask"] + return model_predict + ''' diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index fc9e8943e9..57ec0c3d3e 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -1,89 +1,284 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +import logging from typing import ( Optional, + Union, ) +import numpy as np import torch +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.pt.model.network.mlp import ( + FittingNet, + NetworkCollection, +) from deepmd.dpmodel import ( FittingOutputDef, OutputVariableDef, fitting_check_output, ) from deepmd.pt.model.network.network import ( - MaskLMHead, - NonLinearHead, + ResidualDeep, ) from deepmd.pt.model.task.fitting import ( Fitting, + GeneralFitting, +) +from deepmd.pt.model.task.invar_fitting import ( + InvarFitting, ) from deepmd.pt.utils import ( env, ) +from deepmd.pt.utils.env import ( + PRECISION_DICT, +) +from deepmd.pt.utils.exclude_mask import ( + AtomExcludeMask, +) +from deepmd.pt.utils.env import ( + DEFAULT_PRECISION, +) +dtype = env.GLOBAL_PT_FLOAT_PRECISION +device = env.DEVICE +@Fitting.register("denoise") @fitting_check_output class DenoiseNet(Fitting): def __init__( self, - feature_dim, ntypes, - attn_head=8, - prefactor=[0.5, 0.5], - activation_function="gelu", + dim_descrpt, + neuron, + bias_atom_e=None, + out_dim=1, + resnet_dt=True, + numb_fparam: int = 0, + numb_aparam: int = 0, + dim_case_embd: int = 0, + activation_function: str = "tanh", + precision: str = DEFAULT_PRECISION, + mixed_types: bool = True, + rcond: Optional[float] = None, + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], + trainable: Union[bool, list[bool]] = True, + type_map: Optional[list[str]] = None, + use_aparam_as_mask: bool = False, **kwargs, ) -> None: - """Construct a denoise net. + """Construct a direct token, coordinate and cell fitting net. - Args: - - ntypes: Element count. - - embedding_width: Embedding width per atom. - - neuron: Number of neurons in each hidden layers of the fitting net. - - bias_atom_e: Average energy per atom for each element. 
- - resnet_dt: Using time-step in the ResNet construction. + Parameters + ---------- + ntypes : int + Element count. + dim_descrpt : int + Embedding width per atom. + neuron : list[int] + Number of neurons in each hidden layers of the fitting net. + bias_atom_e : torch.Tensor, optional + Average energy per atom for each element. + resnet_dt : bool + Using time-step in the ResNet construction. + out_dim : int + The output dimension of the fitting net. + numb_fparam : int + Number of frame parameters. + numb_aparam : int + Number of atomic parameters. + dim_case_embd : int + Dimension of case specific embedding. + activation_function : str + Activation function. + precision : str + Numerical precision. + mixed_types : bool + If true, use a uniform fitting net for all atom types, otherwise use + different fitting nets for different atom types. + rcond : float, optional + The condition number for the regression of atomic energy. + seed : int, optional + Random seed. + exclude_types: list[int] + Atomic contributions of the excluded atom types are set zero. + trainable : Union[list[bool], bool] + If the parameters in the fitting net are trainable. + Now this only supports setting all the parameters in the fitting net at one state. + When in list[bool], the trainable will be True only if all the boolean parameters are True. + type_map: list[str], Optional + A list of strings. Give the name to each type of atoms. + use_aparam_as_mask: bool + If True, the aparam will not be used in fitting net for embedding. """ super().__init__() - self.feature_dim = feature_dim self.ntypes = ntypes - self.attn_head = attn_head - self.prefactor = torch.tensor( - prefactor, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + self.dim_descrpt = dim_descrpt + self.neuron = neuron + self.mixed_types = mixed_types + self.resnet_dt = resnet_dt + self.out_dim = out_dim + self.numb_fparam = numb_fparam + self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.rcond = rcond + self.seed = seed + self.type_map = type_map + self.use_aparam_as_mask = use_aparam_as_mask + # order matters, should be place after the assignment of ntypes + self.reinit_exclude(exclude_types) + self.trainable = trainable + # need support for each layer settings + self.trainable = ( + all(self.trainable) if isinstance(self.trainable, list) else self.trainable ) - self.lm_head = MaskLMHead( - embed_dim=self.feature_dim, - output_dim=ntypes, - activation_fn=activation_function, - weight=None, + # init constants + if bias_atom_e is None: + bias_atom_e = np.zeros([self.ntypes, 1], dtype=np.float64) + bias_atom_e = torch.tensor( + bias_atom_e, device=env.DEVICE, dtype=env.GLOBAL_PT_FLOAT_PRECISION ) + bias_atom_e = bias_atom_e.view([self.ntypes, 1]) + if not self.mixed_types: + assert self.ntypes == bias_atom_e.shape[0], "Element count mismatches!" 
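+        # bias_atom_e is kept (and serialized) for interface compatibility
+        # with the other fitting nets; note that, as written, it is not
+        # applied anywhere in this net's forward() pass.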
+ self.register_buffer("bias_atom_e", bias_atom_e) + + if self.numb_fparam > 0: + self.register_buffer( + "fparam_avg", + torch.zeros(self.numb_fparam, dtype=self.prec, device=device), + ) + self.register_buffer( + "fparam_inv_std", + torch.ones(self.numb_fparam, dtype=self.prec, device=device), + ) + else: + self.fparam_avg, self.fparam_inv_std = None, None + if self.numb_aparam > 0: + self.register_buffer( + "aparam_avg", + torch.zeros(self.numb_aparam, dtype=self.prec, device=device), + ) + self.register_buffer( + "aparam_inv_std", + torch.ones(self.numb_aparam, dtype=self.prec, device=device), + ) + else: + self.aparam_avg, self.aparam_inv_std = None, None - if not isinstance(self.attn_head, list): - self.pair2coord_proj = NonLinearHead( - self.attn_head, 1, activation_fn=activation_function + if self.dim_case_embd > 0: + self.register_buffer( + "case_embd", + torch.zeros(self.dim_case_embd, dtype=self.prec, device=device), + # torch.eye(self.dim_case_embd, dtype=self.prec, device=device)[0], ) else: - self.pair2coord_proj = [] - self.ndescriptor = len(self.attn_head) - for ii in range(self.ndescriptor): - _pair2coord_proj = NonLinearHead( - self.attn_head[ii], 1, activation_fn=activation_function + self.case_embd = None + + in_dim = ( + self.dim_descrpt + + self.numb_fparam + + (0 if self.use_aparam_as_mask else self.numb_aparam) + + self.dim_case_embd + ) + + self.filter_layers_coord = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + self.out_dim, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), ) - self.pair2coord_proj.append(_pair2coord_proj) - self.pair2coord_proj = torch.nn.ModuleList(self.pair2coord_proj) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) + + self.filter_layers_cell = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + 6, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), + ) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) + + # TODO: Type denoise + + # set trainable + for param in self.parameters(): + param.requires_grad = self.trainable + + def reinit_exclude( + self, + exclude_types: list[int] = [], + ) -> None: + self.exclude_types = exclude_types + self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert self.type_map is not None, ( + "'type_map' must be defined when performing type changing!" + ) + assert self.mixed_types, "Only models in mixed types can perform type changing!" 
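+        # NOTE: get_index_between_two_maps and map_atom_exclude_types are
+        # used below but never imported in this file; they would need to be
+        # imported (the other fitting nets take them from
+        # deepmd.utils.finetune) before change_type_map can run.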
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.ntypes = len(type_map) + self.reinit_exclude(map_atom_exclude_types(self.exclude_types, remap_index)) + if has_new_type: + extend_shape = [len(type_map), *list(self.bias_atom_e.shape[1:])] + extend_bias_atom_e = torch.zeros( + extend_shape, + dtype=self.bias_atom_e.dtype, + device=self.bias_atom_e.device, + ) + self.bias_atom_e = torch.cat([self.bias_atom_e, extend_bias_atom_e], dim=0) + self.bias_atom_e = self.bias_atom_e[remap_index] def output_def(self): return FittingOutputDef( [ OutputVariableDef( - "updated_coord", - [3], - reducible=False, + "strain_components", + [6], + reducible=True, r_differentiable=False, c_differentiable=False, + intensive=True, ), OutputVariableDef( - "logits", - [-1], + "updated_coord", + [3], reducible=False, r_differentiable=False, c_differentiable=False, @@ -91,47 +286,297 @@ def output_def(self): ] ) + def serialize(self) -> dict: + """Serialize the fitting to dict.""" + return { + "@class": "Fitting", + "@version": 3, + "ntypes": self.ntypes, + "out_dim": self.out_dim, + "dim_descrpt": self.dim_descrpt, + "neuron": self.neuron, + "resnet_dt": self.resnet_dt, + "numb_fparam": self.numb_fparam, + "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, + "activation_function": self.activation_function, + "precision": self.precision, + "mixed_types": self.mixed_types, + "cell_nets": self.filter_layers_cell.serialize(), + "coord_nets": self.filter_layers_coord.serialize(), + "rcond": self.rcond, + "exclude_types": self.exclude_types, + "@variables": { + "bias_atom_e": to_numpy_array(self.bias_atom_e), + "case_embd": to_numpy_array(self.case_embd), + "fparam_avg": to_numpy_array(self.fparam_avg), + "fparam_inv_std": to_numpy_array(self.fparam_inv_std), + "aparam_avg": to_numpy_array(self.aparam_avg), + "aparam_inv_std": to_numpy_array(self.aparam_inv_std), + }, + "type_map": self.type_map, + # "tot_ener_zero": self.tot_ener_zero , + # "trainable": self.trainable , + # "atom_ener": self.atom_ener , + # "layer_name": self.layer_name , + # "spin": self.spin , + ## NOTICE: not supported by far + "tot_ener_zero": False, + "trainable": [self.trainable] * (len(self.neuron) + 1), + "layer_name": None, + "use_aparam_as_mask": self.use_aparam_as_mask, + "spin": None, + } + + + def deserialize(self) -> "DenoiseNet": + data = data.copy() + variables = data.pop("@variables") + cell_nets = data.pop("cell_nets") + coord_nets = data.pop("coord_nets") + obj = cls(**data) + for kk in variables.keys(): + obj[kk] = to_torch_tensor(variables[kk]) + obj.filter_layers_cell = NetworkCollection.deserialize(cell_nets) + obj.filter_layers_coord = NetworkCollection.deserialize(coord_nets) + return obj + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.numb_fparam + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.numb_aparam + + # make jit happy + exclude_types: list[int] + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. 
+ """ + # make jit happy + sel_type: list[int] = [] + for ii in range(self.ntypes): + if ii not in self.exclude_types: + sel_type.append(ii) + return sel_type + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this fitting net by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + self.case_embd = torch.eye(self.dim_case_embd, dtype=self.prec, device=device)[ + case_idx + ] + + def __setitem__(self, key, value) -> None: + if key in ["bias_atom_e"]: + value = value.view([self.ntypes, 1]) + self.bias_atom_e = value + elif key in ["fparam_avg"]: + self.fparam_avg = value + elif key in ["fparam_inv_std"]: + self.fparam_inv_std = value + elif key in ["aparam_avg"]: + self.aparam_avg = value + elif key in ["aparam_inv_std"]: + self.aparam_inv_std = value + elif key in ["case_embd"]: + self.case_embd = value + elif key in ["scale"]: + self.scale = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ["bias_atom_e"]: + return self.bias_atom_e + elif key in ["fparam_avg"]: + return self.fparam_avg + elif key in ["fparam_inv_std"]: + return self.fparam_inv_std + elif key in ["aparam_avg"]: + return self.aparam_avg + elif key in ["aparam_inv_std"]: + return self.aparam_inv_std + elif key in ["case_embd"]: + return self.case_embd + elif key in ["scale"]: + return self.scale + else: + raise KeyError(key) + + def _extend_f_avg_std(self, xx: torch.Tensor, nb: int) -> torch.Tensor: + return torch.tile(xx.view([1, self.numb_fparam]), [nb, 1]) + + def _extend_a_avg_std(self, xx: torch.Tensor, nb: int, nloc: int) -> torch.Tensor: + return torch.tile(xx.view([1, 1, self.numb_aparam]), [nb, nloc, 1]) + def forward( self, - pair_weights, - diff, - nlist_mask, - features, - sw, - masked_tokens: Optional[torch.Tensor] = None, - ): - """Calculate the updated coord. + descriptor: torch.Tensor, + atype: torch.Tensor, + gr: Optional[torch.Tensor] = None, + g2: Optional[torch.Tensor] = None, + h2: Optional[torch.Tensor] = None, + fparam: Optional[torch.Tensor] = None, + aparam: Optional[torch.Tensor] = None, + ) -> dict[str, torch.Tensor]: + """Based on embedding net output, alculate total energy. + Args: - - coord: Input noisy coord with shape [nframes, nloc, 3]. - - pair_weights: Input pair weights with shape [nframes, nloc, nnei, head]. - - diff: Input pair relative coord list with shape [nframes, nloc, nnei, 3]. - - nlist_mask: Input nlist mask with shape [nframes, nloc, nnei]. + - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.dim_descrpt]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. Returns ------- - - denoised_coord: Denoised updated coord with shape [nframes, nloc, 3]. + - `torch.Tensor`: Total energy with shape [nframes, natoms[0]]. 
""" - # [nframes, nloc, nnei, 1] - logits = self.lm_head(features, masked_tokens=masked_tokens) - if not isinstance(self.attn_head, list): - attn_probs = self.pair2coord_proj(pair_weights) - out_coord = (attn_probs * diff).sum(dim=-2) / ( - sw.sum(dim=-1).unsqueeze(-1) + 1e-6 + # cast the input to internal precsion + xx = descriptor.to(self.prec) + fparam = fparam.to(self.prec) if fparam is not None else None + aparam = aparam.to(self.prec) if aparam is not None else None + + xx_zeros = None + nf, nloc, nd = xx.shape + + if nd != self.dim_descrpt: + raise ValueError( + f"get an input descriptor of dim {nd}," + f"which is not consistent with {self.dim_descrpt}." ) - else: - assert len(self.prefactor) == self.ndescriptor - all_coord_update = [] - assert len(pair_weights) == len(diff) == len(nlist_mask) == self.ndescriptor - for ii in range(self.ndescriptor): - _attn_probs = self.pair2coord_proj[ii](pair_weights[ii]) - _coord_update = (_attn_probs * diff[ii]).sum(dim=-2) / ( - nlist_mask[ii].sum(dim=-1).unsqueeze(-1) + 1e-6 + # check fparam dim, concate to input descriptor + if self.numb_fparam > 0: + assert fparam is not None, "fparam should not be None" + assert self.fparam_avg is not None + assert self.fparam_inv_std is not None + if fparam.shape[-1] != self.numb_fparam: + raise ValueError( + "get an input fparam of dim {fparam.shape[-1]}, ", + "which is not consistent with {self.numb_fparam}.", + ) + fparam = fparam.view([nf, self.numb_fparam]) + nb, _ = fparam.shape + t_fparam_avg = self._extend_f_avg_std(self.fparam_avg, nb) + t_fparam_inv_std = self._extend_f_avg_std(self.fparam_inv_std, nb) + fparam = (fparam - t_fparam_avg) * t_fparam_inv_std + fparam = torch.tile(fparam.reshape([nf, 1, -1]), [1, nloc, 1]) + xx = torch.cat( + [xx, fparam], + dim=-1, + ) + if xx_zeros is not None: + xx_zeros = torch.cat( + [xx_zeros, fparam], + dim=-1, + ) + # check aparam dim, concate to input descriptor + if self.numb_aparam > 0 and not self.use_aparam_as_mask: + assert aparam is not None, "aparam should not be None" + assert self.aparam_avg is not None + assert self.aparam_inv_std is not None + if aparam.shape[-1] != self.numb_aparam: + raise ValueError( + f"get an input aparam of dim {aparam.shape[-1]}, ", + f"which is not consistent with {self.numb_aparam}.", ) - all_coord_update.append(_coord_update) - out_coord = self.prefactor[0] * all_coord_update[0] - for ii in range(self.ndescriptor - 1): - out_coord += self.prefactor[ii + 1] * all_coord_update[ii + 1] + aparam = aparam.view([nf, -1, self.numb_aparam]) + nb, nloc, _ = aparam.shape + t_aparam_avg = self._extend_a_avg_std(self.aparam_avg, nb, nloc) + t_aparam_inv_std = self._extend_a_avg_std(self.aparam_inv_std, nb, nloc) + aparam = (aparam - t_aparam_avg) * t_aparam_inv_std + xx = torch.cat( + [xx, aparam], + dim=-1, + ) + if xx_zeros is not None: + xx_zeros = torch.cat( + [xx_zeros, aparam], + dim=-1, + ) + + if self.dim_case_embd > 0: + assert self.case_embd is not None + case_embd = torch.tile(self.case_embd.reshape([1, 1, -1]), [nf, nloc, 1]) + xx = torch.cat( + [xx, case_embd], + dim=-1, + ) + if xx_zeros is not None: + xx_zeros = torch.cat( + [xx_zeros, case_embd], + dim=-1, + ) + + outs = torch.zeros( + (nf, nloc, 6), + dtype=self.prec, + device=descriptor.device, + ) # jit assertion + if self.mixed_types: + # direct coord fitting + vec_out = self.filter_layers_coord.networks[0](xx) + assert list(vec_out.size()) == [nf, nloc, self.out_dim] + # (nf x nloc) x 1 x od + vec_out = vec_out.view(-1, 1, self.out_dim) + assert gr is not None + 
# (nf x nloc) x od x 3 + gr = gr.view(-1, self.out_dim, 3) + vec_out = ( + torch.bmm(vec_out, gr).squeeze(-2).view(nf, nloc, 3) + ) # Shape is [nf, nloc, 3] + # direct cell fitting + atom_strain_components = self.filter_layers_cell.networks[0](xx) + outs = outs + atom_strain_components # Shape is [nframes, natoms[0], 6] + else: + vec_out = torch.zeros( + (nf, nloc, 3), + dtype=self.prec, + device=descriptor.device, + ) # jit assertion + # direct coord fitting + for type_i, ll in enumerate(self.filter_layers_coord.networks): + mask = (atype == type_i).unsqueeze(-1) + mask = torch.tile(mask, (1, 1, 1)) + vec_out_type = ll(xx) + assert list(vec_out_type.size()) == [nf, nloc, self.out_dim] + # (nf x nloc) x 1 x od + vec_out_type = vec_out_type.view(-1, 1, self.out_dim) + assert gr is not None + # (nf x nloc) x od x 3 + gr = gr.view(-1, self.out_dim, 3) + vec_out_type = ( + torch.bmm(vec_out_type, gr).squeeze(-2).view(nf, nloc, 3) + ) # Shape is [nf, nloc, 3] + vec_out_type = torch.where(mask, vec_out_type, 0.0) + vec_out = ( + vec_out + vec_out_type + ) # Shape is [nframes, natoms[0], 3] + # direct cell fitting + for type_i, ll in enumerate(self.filter_layers_cell.networks): + mask = (atype == type_i).unsqueeze(-1) + mask = torch.tile(mask, (1, 1, 1)) + atom_strain_components = ll(xx) + atom_strain_components = torch.where(mask, atom_strain_components, 0.0) + outs = ( + outs + atom_strain_components + ) # Shape is [nframes, natoms[0], 6] + # nf x nloc + mask = self.emask(atype).to(torch.bool) + # nf x nloc x nod + outs = torch.where(mask[:, :, None], outs, 0.0) + vec_out = torch.where(mask[:, :, None], vec_out, 0.0) return { - "updated_coord": out_coord, - "logits": logits, - } + "strain_components": outs.to(env.GLOBAL_PT_FLOAT_PRECISION), + "updated_coord": vec_out, + } \ No newline at end of file diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py index 1d897ceb57..e16ecd89d1 100644 --- a/deepmd/utils/argcheck.py +++ b/deepmd/utils/argcheck.py @@ -1618,6 +1618,45 @@ def fitting_property(): ), ] +@fitting_args_plugin.register("denoise", doc=doc_only_pt_supported) +def fitting_denoise(): + doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provided the input fparams." + doc_numb_aparam = "The dimension of the atomic parameter. If set to >0, file `aparam.npy` should be included to provided the input aparams." + doc_dim_case_embd = "The dimension of the case embedding embedding. When training or fine-tuning a multitask model with case embedding embeddings, this number should be set to the number of model branches." + doc_neuron = "The number of neurons in each hidden layers of the fitting net. When two hidden layers are of the same size, a skip connection is built" + doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())} Note that "gelu" denotes the custom operator version, and "gelu_tf" denotes the TF standard version. If you set "None" or "none" here, no activation function will be used.' + doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection' + doc_precision = f"The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())} Default follows the interface precision." 
+ doc_seed = "Random seed for parameter initialization of the fitting net" + return [ + Argument("numb_fparam", int, optional=True, default=0, doc=doc_numb_fparam), + Argument("numb_aparam", int, optional=True, default=0, doc=doc_numb_aparam), + Argument( + "dim_case_embd", + int, + optional=True, + default=0, + doc=doc_only_pt_supported + doc_dim_case_embd, + ), + Argument( + "neuron", + list[int], + optional=True, + default=[120, 120, 120], + alias=["n_neuron"], + doc=doc_neuron, + ), + Argument( + "activation_function", + str, + optional=True, + default="tanh", + doc=doc_activation_function, + ), + Argument("resnet_dt", bool, optional=True, default=True, doc=doc_resnet_dt), + Argument("precision", str, optional=True, default="default", doc=doc_precision), + Argument("seed", [int, None], optional=True, doc=doc_seed), + ] @fitting_args_plugin.register("polar", doc=doc_polar) def fitting_polar(): @@ -2523,6 +2562,121 @@ def loss_property(): ), ] +@loss_args_plugin.register("denoise") +def loss_denoise(): + doc_mask_token = "Whether to mask the token" + doc_mask_coord = "Whether to mask the coordinate." + doc_mask_cell = "Whether to mask the cell." + doc_token_loss = "The preference factor for token denoise." + doc_coord_loss = "The preference factor for coordinate denoise." + doc_cell_loss = "The preference factor for cell denoise." + doc_noise_type = "The type of noise to add to the coordinate. It can be 'uniform' or 'gaussian'." + doc_coord_noise = "The magnitude of noise to add to the coordinate." + doc_cell_pert_fraction = "A value determines how much will cell deform." + doc_noise_mode = "'prob' means the noise is added with a probability.'fix_num' means the noise is added with a fixed number." + doc_mask_num = "The number of atoms to mask coordinates. It is only used when noise_mode is 'fix_num'." + doc_mask_prob = "The probability of masking coordinates. It is only used when noise_mode is 'prob'." + doc_loss_func = "The loss function to minimize, it can be 'mae' or 'rmse'." + return [ + Argument( + "mask_token", + bool, + optional=True, + default=False, + doc=doc_mask_token, + ), + Argument( + "mask_coord", + bool, + optional=True, + default=True, + doc=doc_mask_coord, + ), + Argument( + "mask_cell", + bool, + optional=True, + default=False, + doc=doc_mask_cell, + ), + Argument( + "token_loss", + float, + optional=True, + default=1.0, + doc=doc_token_loss, + ), + Argument( + "coord_loss", + float, + optional=True, + default=1.0, + doc=doc_coord_loss, + ), + Argument( + "token_loss", + float, + optional=True, + default=1.0, + doc=doc_token_loss, + ), + Argument( + "cell_loss", + float, + optional=True, + default=1.0, + doc=doc_cell_loss, + ), + Argument( + "noise_type", + str, + optional=True, + default="gaussian", + doc=doc_noise_type, + ), + Argument( + "coord_noise", + float, + optional=True, + default=0.2, + doc=doc_coord_noise, + ), + Argument( + "cell_pert_fraction", + float, + optional=True, + default=0.0, + doc=doc_cell_pert_fraction, + ), + Argument( + "noise_mode", + str, + optional=True, + default="prob", + doc=doc_noise_mode, + ), + Argument( + "mask_num", + int, + optional=True, + default=1, + doc=doc_mask_num, + ), + Argument( + "mask_prob", + float, + optional=True, + default=0.2, + doc=doc_mask_prob, + ), + Argument( + "loss_func", + str, + optional=True, + default="rmse", + doc=doc_loss_func, + ), + ] # YWolfeee: Modified to support tensor type of loss args. 
@loss_args_plugin.register("tensor") From 2e4d94ec45bf829a744ce36724d80953221db12c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Mar 2025 10:23:25 +0000 Subject: [PATCH 02/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/loss/denoise.py | 132 ++++++++++++------ deepmd/pt/model/atomic_model/__init__.py | 8 +- .../atomic_model/denoise_atomic_model.py | 13 +- deepmd/pt/model/model/__init__.py | 6 +- deepmd/pt/model/model/denoise_model.py | 9 +- deepmd/pt/model/task/denoise.py | 46 +++--- deepmd/utils/argcheck.py | 8 +- 7 files changed, 130 insertions(+), 92 deletions(-) diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py index 70e657beb6..78a0a88642 100644 --- a/deepmd/pt/loss/denoise.py +++ b/deepmd/pt/loss/denoise.py @@ -12,14 +12,14 @@ from deepmd.pt.utils.env import ( GLOBAL_PT_FLOAT_PRECISION, ) -from deepmd.utils.data import ( - DataRequirementItem, -) from deepmd.pt.utils.region import ( phys2inter, - inter2phys, +) +from deepmd.utils.data import ( + DataRequirementItem, ) + def get_cell_perturb_matrix(cell_pert_fraction: float): # TODO: user fix some component if cell_pert_fraction < 0: @@ -28,15 +28,16 @@ def get_cell_perturb_matrix(cell_pert_fraction: float): e = e0 * 2 * cell_pert_fraction - cell_pert_fraction cell_pert_matrix = torch.tensor( [ - [1 + e[0], 0, 0], - [e[5], 1 + e[1], 0], - [e[4], e[3], 1 + e[2]], + [1 + e[0], 0, 0], + [e[5], 1 + e[1], 0], + [e[4], e[3], 1 + e[2]], ], dtype=env.GLOBAL_PT_FLOAT_PRECISION, - device=env.DEVICE + device=env.DEVICE, ) return cell_pert_matrix, e + class DenoiseLoss(TaskLoss): def __init__( self, @@ -59,23 +60,23 @@ def __init__( Parameters ---------- - mask_token: bool + mask_token : bool Whether to mask token. - mask_coord: bool + mask_coord : bool Whether to mask coordinate. - mask_cell: bool + mask_cell : bool Whether to mask cell. - token_loss: float + token_loss : float The preference factor for token denoise. - coord_loss: float + coord_loss : float The preference factor for coordinate denoise. - cell_loss: float + cell_loss : float The preference factor for cell denoise. noise_type : str The type of noise to add to the coordinate. It can be 'uniform' or 'gaussian'. coord_noise : float The magnitude of noise to add to the coordinate. - cell_pert_fraction: float + cell_pert_fraction : float A value determines how much will cell deform. noise_mode : str "'prob' means the noise is added with a probability.'fix_num' means the noise is added with a fixed number." @@ -83,7 +84,7 @@ def __init__( The number of atoms to mask coordinates. It is only used when noise_mode is 'fix_num'. mask_prob : float The probability of masking coordinates. It is only used when noise_mode is 'prob'. - loss_func: str + loss_func : str The loss function to minimize, it can be 'mae' or 'rmse'. **kwargs Other keyword arguments. @@ -126,7 +127,6 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): more_loss: dict[str, torch.Tensor] Other losses for display. 
""" - nloc = input_dict["atype"].shape[1] nbz = input_dict["atype"].shape[0] input_dict["box"] = input_dict["box"].cuda() @@ -135,15 +135,26 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): label["clean_coord"] = input_dict["coord"].clone().detach() label["clean_box"] = input_dict["box"].clone().detach() - origin_frac_coord = phys2inter(label["clean_coord"], label["clean_box"].reshape(nbz,3,3)) + origin_frac_coord = phys2inter( + label["clean_coord"], label["clean_box"].reshape(nbz, 3, 3) + ) label["clean_frac_coord"] = origin_frac_coord.clone().detach() if self.mask_cell: - strain_components_all = torch.zeros((nbz,3), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + strain_components_all = torch.zeros( + (nbz, 3), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) for ii in range(nbz): - cell_perturb_matrix, strain_components = get_cell_perturb_matrix_HEA(self.cell_noise) + cell_perturb_matrix, strain_components = get_cell_perturb_matrix_HEA( + self.cell_noise + ) # left-multiplied by `cell_perturb_matrix`` to get the noise box - input_dict["box"][ii] = torch.matmul(cell_perturb_matrix, input_dict["box"][ii].reshape(3,3)).reshape(-1) - input_dict["coord"][ii] = torch.matmul(origin_frac_coord[ii].reshape(nloc,3), input_dict["box"][ii].reshape(3,3)) + input_dict["box"][ii] = torch.matmul( + cell_perturb_matrix, input_dict["box"][ii].reshape(3, 3) + ).reshape(-1) + input_dict["coord"][ii] = torch.matmul( + origin_frac_coord[ii].reshape(nloc, 3), + input_dict["box"][ii].reshape(3, 3), + ) strain_components_all[ii] = strain_components.reshape(-1) label["strain_components"] = strain_components_all.clone().detach() @@ -152,7 +163,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): mask_num = 0 if self.noise_mode == "fix_num": mask_num = self.mask_num - if(nloc < mask_num): + if nloc < mask_num: mask_num = nloc elif self.noise_mode == "prob": mask_num = int(self.mask_prob * nloc) @@ -161,11 +172,15 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): else: NotImplementedError(f"Unknown noise mode {self.noise_mode}!") - coord_mask_all = torch.zeros(input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE) + coord_mask_all = torch.zeros( + input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE + ) for ii in range(nbz): noise_on_coord = 0.0 - coord_mask_res = np.random.choice(range(nloc), mask_num, replace=False).tolist() - coord_mask = np.isin(range(nloc), coord_mask_res) # nloc + coord_mask_res = np.random.choice( + range(nloc), mask_num, replace=False + ).tolist() + coord_mask = np.isin(range(nloc), coord_mask_res) # nloc if self.noise_type == "uniform": noise_on_coord = np.random.uniform( low=-self.noise, high=self.noise, size=(mask_num, 3) @@ -176,29 +191,50 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): ) else: raise NotImplementedError(f"Unknown noise type {self.noise_type}!") - - noise_on_coord = torch.tensor(noise_on_coord, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) # mask_num 3 - input_dict["coord"][ii][coord_mask ,:] += noise_on_coord # nbz mask_num 3 // - coord_mask_all[ii] = torch.tensor(coord_mask, dtype=torch.bool, device=env.DEVICE) - label['coord_mask'] = coord_mask_all - frac_coord = phys2inter(input_dict["coord"], input_dict["box"].reshape(nbz,3,3)) - #label["updated_coord"] = (label["clean_frac_coord"] - frac_coord).clone().detach() - label["updated_coord"] = ((label["clean_frac_coord"] - frac_coord) @ 
label["clean_box"].reshape(nbz,3,3)).clone().detach() - + + noise_on_coord = torch.tensor( + noise_on_coord, + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.DEVICE, + ) # mask_num 3 + input_dict["coord"][ii][coord_mask, :] += ( + noise_on_coord # nbz mask_num 3 // + ) + coord_mask_all[ii] = torch.tensor( + coord_mask, dtype=torch.bool, device=env.DEVICE + ) + label["coord_mask"] = coord_mask_all + frac_coord = phys2inter( + input_dict["coord"], input_dict["box"].reshape(nbz, 3, 3) + ) + # label["updated_coord"] = (label["clean_frac_coord"] - frac_coord).clone().detach() + label["updated_coord"] = ( + ( + (label["clean_frac_coord"] - frac_coord) + @ label["clean_box"].reshape(nbz, 3, 3) + ) + .clone() + .detach() + ) + if self.mask_token: # TODO: mask_token pass if (not self.mask_coord) and (not self.mask_cell) and (not self.mask_token): - raise RuntimeError("At least one of mask_coord, mask_cell and mask_token should be True!") + raise RuntimeError( + "At least one of mask_coord, mask_cell and mask_token should be True!" + ) - model_pred = model(**input_dict) + model_pred = model(**input_dict) loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0] more_loss = {} diff_coord = (label["updated_coord"] - model_pred["updated_coord"]).reshape(-1) - diff_cell = (label["strain_components"] - model_pred["strain_components"]).reshape(-1) + diff_cell = ( + label["strain_components"] - model_pred["strain_components"] + ).reshape(-1) if self.loss_func == "rmse": l2_coord_loss = torch.mean(torch.square(diff_coord)) l2_cell_loss = torch.mean(torch.square(diff_cell)) @@ -206,15 +242,25 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): rmse_v = l2_cell_loss.sqrt() more_loss["rmse_coord"] = rmse_f.detach() more_loss["rmse_cell"] = rmse_v.detach() - loss += self.coord_loss * l2_coord_loss.to(GLOBAL_PT_FLOAT_PRECISION) + self.cell_loss * l2_cell_loss.to(GLOBAL_PT_FLOAT_PRECISION) + loss += self.coord_loss * l2_coord_loss.to( + GLOBAL_PT_FLOAT_PRECISION + ) + self.cell_loss * l2_cell_loss.to(GLOBAL_PT_FLOAT_PRECISION) elif self.loss_func == "mae": - l1_coord_loss = F.l1_loss(label["updated_coord"], model_pred["updated_coord"], reduction="none") - l1_cell_loss = F.l1_loss(label["strain_components"], model_pred["strain_components"], reduction="none") + l1_coord_loss = F.l1_loss( + label["updated_coord"], model_pred["updated_coord"], reduction="none" + ) + l1_cell_loss = F.l1_loss( + label["strain_components"], + model_pred["strain_components"], + reduction="none", + ) more_loss["mae_coord"] = l1_coord_loss.mean().detach() more_loss["mae_cell"] = l1_cell_loss.mean().detach() l1_coord_loss = l1_coord_loss.sum(-1).mean(-1).sum() l1_cell_loss = l1_cell_loss.sum() - loss += self.coord_loss * l1_coord_loss.to(GLOBAL_PT_FLOAT_PRECISION) + self.cell_loss * l1_cell_loss.to(GLOBAL_PT_FLOAT_PRECISION) + loss += self.coord_loss * l1_coord_loss.to( + GLOBAL_PT_FLOAT_PRECISION + ) + self.cell_loss * l1_cell_loss.to(GLOBAL_PT_FLOAT_PRECISION) else: raise RuntimeError(f"Unknown loss function {self.loss_func}!") return model_pred, loss, more_loss @@ -229,4 +275,4 @@ def serialize(self) -> dict: @classmethod def deserialize(cls, data: dict) -> "TaskLoss": - pass \ No newline at end of file + pass diff --git a/deepmd/pt/model/atomic_model/__init__.py b/deepmd/pt/model/atomic_model/__init__.py index beb222e37f..8079678592 100644 --- a/deepmd/pt/model/atomic_model/__init__.py +++ b/deepmd/pt/model/atomic_model/__init__.py @@ -17,6 +17,9 @@ from .base_atomic_model import ( 
BaseAtomicModel, ) +from .denoise_atomic_model import ( + DPDenoiseAtomicModel, +) from .dipole_atomic_model import ( DPDipoleAtomicModel, ) @@ -42,14 +45,12 @@ from .property_atomic_model import ( DPPropertyAtomicModel, ) -from .denoise_atomic_model import ( - DPDenoiseAtomicModel -) __all__ = [ "BaseAtomicModel", "DPAtomicModel", "DPDOSAtomicModel", + "DPDenoiseAtomicModel", "DPDipoleAtomicModel", "DPEnergyAtomicModel", "DPPolarAtomicModel", @@ -57,5 +58,4 @@ "DPZBLLinearEnergyAtomicModel", "LinearEnergyAtomicModel", "PairTabAtomicModel", - "DPDenoiseAtomicModel", ] diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py index 69d1f4196a..3d758c7c55 100644 --- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py +++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py @@ -1,8 +1,9 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import torch import logging +import torch + from deepmd.pt.model.task.denoise import ( DenoiseNet, ) @@ -10,10 +11,10 @@ from .dp_atomic_model import ( DPAtomicModel, ) -from IPython import embed log = logging.getLogger(__name__) + class DPDenoiseAtomicModel(DPAtomicModel): def __init__(self, descriptor, fitting, type_map, **kwargs): if not isinstance(fitting, DenoiseNet): @@ -28,10 +29,10 @@ def apply_out_stat( atype: torch.Tensor, ): # hack !!! - ret["virial"] = ret["virial"]/240 - ret["force"] = ret["force"]/29 + ret["virial"] = ret["virial"] / 240 + ret["force"] = ret["force"] / 29 - ''' + """ virial = ret["virial"] # 原始形状 [nbz, nloc, 6] # 批量处理所有元素(保留梯度) @@ -57,5 +58,5 @@ def apply_out_stat( # 恢复原始形状 [nbz, nloc, 3, 3] -> [nbz, nloc, 9] ret["virial"] = matrices.view(virial.shape[0], virial.shape[1], 9) - ''' + """ return ret diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index ff6618b77d..7b1254c9c2 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -33,6 +33,9 @@ Spin, ) +from .denoise_model import ( + DenoiseModel, +) from .dipole_model import ( DipoleModel, ) @@ -69,9 +72,6 @@ from .property_model import ( PropertyModel, ) -from .denoise_model import ( - DenoiseModel, -) from .spin_model import ( SpinEnergyModel, SpinModel, diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py index 788724752c..93fb7ed496 100644 --- a/deepmd/pt/model/model/denoise_model.py +++ b/deepmd/pt/model/model/denoise_model.py @@ -18,7 +18,6 @@ from .make_model import ( make_model, ) -from IPython import embed DPDenoiseModel_ = make_model(DPDenoiseAtomicModel) @@ -37,7 +36,7 @@ def __init__( def translated_output_def(self): pass - ''' + """ out_def_data = self.model_output_def().get_data() output_def = { f"atom_{self.get_var_name()}": out_def_data[self.get_var_name()], @@ -46,7 +45,7 @@ def translated_output_def(self): if "mask" in out_def_data: output_def["mask"] = out_def_data["mask"] return output_def - ''' + """ def forward( self, @@ -86,7 +85,7 @@ def forward_lower( comm_dict: Optional[dict[str, torch.Tensor]] = None, ): pass - ''' + """ model_ret = self.forward_common_lower( extended_coord, extended_atype, @@ -104,4 +103,4 @@ def forward_lower( if "mask" in model_ret: model_predict["mask"] = model_ret["mask"] return model_predict - ''' + """ diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index 57ec0c3d3e..52da75c5ed 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -1,5 +1,4 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -import 
logging from typing import ( Optional, Union, @@ -8,6 +7,11 @@ import numpy as np import torch +from deepmd.dpmodel import ( + FittingOutputDef, + OutputVariableDef, + fitting_check_output, +) from deepmd.dpmodel.utils.seed import ( child_seed, ) @@ -15,37 +19,24 @@ FittingNet, NetworkCollection, ) -from deepmd.dpmodel import ( - FittingOutputDef, - OutputVariableDef, - fitting_check_output, -) -from deepmd.pt.model.network.network import ( - ResidualDeep, -) from deepmd.pt.model.task.fitting import ( Fitting, - GeneralFitting, -) -from deepmd.pt.model.task.invar_fitting import ( - InvarFitting, ) from deepmd.pt.utils import ( env, ) from deepmd.pt.utils.env import ( + DEFAULT_PRECISION, PRECISION_DICT, ) from deepmd.pt.utils.exclude_mask import ( AtomExcludeMask, ) -from deepmd.pt.utils.env import ( - DEFAULT_PRECISION, -) dtype = env.GLOBAL_PT_FLOAT_PRECISION device = env.DEVICE + @Fitting.register("denoise") @fitting_check_output class DenoiseNet(Fitting): @@ -104,15 +95,15 @@ def __init__( The condition number for the regression of atomic energy. seed : int, optional Random seed. - exclude_types: list[int] + exclude_types : list[int] Atomic contributions of the excluded atom types are set zero. trainable : Union[list[bool], bool] If the parameters in the fitting net are trainable. Now this only supports setting all the parameters in the fitting net at one state. When in list[bool], the trainable will be True only if all the boolean parameters are True. - type_map: list[str], Optional + type_map : list[str], Optional A list of strings. Give the name to each type of atoms. - use_aparam_as_mask: bool + use_aparam_as_mask : bool If True, the aparam will not be used in fitting net for embedding. """ super().__init__() @@ -227,7 +218,7 @@ def __init__( for ii in range(self.ntypes if not self.mixed_types else 1) ], ) - + # TODO: Type denoise # set trainable @@ -328,7 +319,6 @@ def serialize(self) -> dict: "spin": None, } - def deserialize(self) -> "DenoiseNet": data = data.copy() variables = data.pop("@variables") @@ -348,7 +338,7 @@ def get_dim_fparam(self) -> int: def get_dim_aparam(self) -> int: """Get the number (dimension) of atomic parameters of this atomic model.""" return self.numb_aparam - + # make jit happy exclude_types: list[int] @@ -537,7 +527,7 @@ def forward( ) # Shape is [nf, nloc, 3] # direct cell fitting atom_strain_components = self.filter_layers_cell.networks[0](xx) - outs = outs + atom_strain_components # Shape is [nframes, natoms[0], 6] + outs = outs + atom_strain_components # Shape is [nframes, natoms[0], 6] else: vec_out = torch.zeros( (nf, nloc, 3), @@ -559,18 +549,14 @@ def forward( torch.bmm(vec_out_type, gr).squeeze(-2).view(nf, nloc, 3) ) # Shape is [nf, nloc, 3] vec_out_type = torch.where(mask, vec_out_type, 0.0) - vec_out = ( - vec_out + vec_out_type - ) # Shape is [nframes, natoms[0], 3] + vec_out = vec_out + vec_out_type # Shape is [nframes, natoms[0], 3] # direct cell fitting for type_i, ll in enumerate(self.filter_layers_cell.networks): mask = (atype == type_i).unsqueeze(-1) mask = torch.tile(mask, (1, 1, 1)) atom_strain_components = ll(xx) atom_strain_components = torch.where(mask, atom_strain_components, 0.0) - outs = ( - outs + atom_strain_components - ) # Shape is [nframes, natoms[0], 6] + outs = outs + atom_strain_components # Shape is [nframes, natoms[0], 6] # nf x nloc mask = self.emask(atype).to(torch.bool) # nf x nloc x nod @@ -579,4 +565,4 @@ def forward( return { "strain_components": outs.to(env.GLOBAL_PT_FLOAT_PRECISION), "updated_coord": 
-        }
\ No newline at end of file
+        }
diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py
index f68a6e6cb6..29521ff0a4 100644
--- a/deepmd/utils/argcheck.py
+++ b/deepmd/utils/argcheck.py
@@ -1861,6 +1861,7 @@ def fitting_property():
         ),
     ]
 
+
 @fitting_args_plugin.register("denoise", doc=doc_only_pt_supported)
 def fitting_denoise():
     doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provide the input fparams."
@@ -1901,6 +1902,7 @@ def fitting_denoise():
         Argument("seed", [int, None], optional=True, doc=doc_seed),
     ]
 
+
 @fitting_args_plugin.register("polar", doc=doc_polar)
 def fitting_polar():
     doc_numb_fparam = "The dimension of the frame parameter. If set to >0, file `fparam.npy` should be included to provide the input fparams."
@@ -2805,6 +2807,7 @@ def loss_property():
         ),
     ]
 
+
 @loss_args_plugin.register("denoise")
 def loss_denoise():
     doc_mask_token = "Whether to mask the token."
@@ -2813,7 +2816,9 @@ def loss_denoise():
     doc_token_loss = "The preference factor for token denoise."
     doc_coord_loss = "The preference factor for coordinate denoise."
     doc_cell_loss = "The preference factor for cell denoise."
-    doc_noise_type = "The type of noise to add to the coordinate. It can be 'uniform' or 'gaussian'."
+    doc_noise_type = (
+        "The type of noise to add to the coordinate. It can be 'uniform' or 'gaussian'."
+    )
     doc_coord_noise = "The magnitude of noise to add to the coordinate."
     doc_cell_pert_fraction = "A value that determines how much the cell will deform."
     doc_noise_mode = "'prob' means the noise is added with a probability. 'fix_num' means the noise is added with a fixed number."
@@ -2921,6 +2926,7 @@ def loss_denoise():
         ),
     ]
 
+
 # YWolfeee: Modified to support tensor type of loss args.
 @loss_args_plugin.register("tensor")
 def loss_tensor():
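
A note on the per-type fitting pattern used throughout DenoiseNet.forward above: when mixed_types is false there is one network per atom type, and each network's output is masked by atom type and accumulated. A minimal, self-contained sketch of that pattern (the shapes and the `networks` list are illustrative stand-ins, not the real NetworkCollection):

import torch

# Illustrative stand-ins: nf frames, nloc atoms, a 128-dim descriptor,
# and one small per-type "fitting net" per atom type.
nf, nloc, ntypes, nd = 2, 46, 7, 128
xx = torch.randn(nf, nloc, nd)                 # descriptor output
atype = torch.randint(0, ntypes, (nf, nloc))   # per-atom types
networks = [torch.nn.Linear(nd, 6) for _ in range(ntypes)]

# Accumulate each per-type prediction only at the atoms of that type,
# mirroring the masked sum over filter_layers_cell.networks above.
outs = torch.zeros(nf, nloc, 6)
for type_i, net in enumerate(networks):
    mask = (atype == type_i).unsqueeze(-1)     # nf x nloc x 1
    outs = outs + torch.where(mask, net(xx), torch.zeros_like(outs))
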
From e42a860f41d758f582d2f654ad247b3a5f15f062 Mon Sep 17 00:00:00 2001
From: root <2000011006@stu.pku.edu.cn>
Date: Wed, 12 Mar 2025 17:58:56 +0800
Subject: [PATCH 03/26] Add token loss

---
 deepmd/pt/loss/denoise.py                     |  40 ++++-
 .../atomic_model/denoise_atomic_model.py      |  42 ++---
 deepmd/pt/model/model/__init__.py             |   5 +
 deepmd/pt/model/model/denoise_model.py        |  28 +---
 deepmd/pt/model/task/denoise.py               | 145 ++++++++++++------
 deepmd/pt/train/training.py                   |   4 +
 deepmd/utils/argcheck.py                      |  15 +-
 examples/denoise/data/data_0/set.000/box.npy  | Bin 0 -> 3008 bytes
 .../denoise/data/data_0/set.000/coord.npy     | Bin 0 -> 44288 bytes
 .../data/data_0/set.000/real_atom_types.npy   | Bin 0 -> 14848 bytes
 examples/denoise/data/data_0/type.raw         |  46 ++++++
 examples/denoise/data/data_0/type_map.raw     |   7 +
 examples/denoise/data/data_1/set.000/box.npy  | Bin 0 -> 3008 bytes
 .../denoise/data/data_1/set.000/coord.npy     | Bin 0 -> 44288 bytes
 .../data/data_1/set.000/real_atom_types.npy   | Bin 0 -> 14848 bytes
 examples/denoise/data/data_1/type.raw         |  46 ++++++
 examples/denoise/data/data_1/type_map.raw     |   7 +
 examples/denoise/data/data_2/set.000/box.npy  | Bin 0 -> 3008 bytes
 .../denoise/data/data_2/set.000/coord.npy     | Bin 0 -> 44288 bytes
 .../data/data_2/set.000/real_atom_types.npy   | Bin 0 -> 14848 bytes
 examples/denoise/data/data_2/type.raw         |  46 ++++++
 examples/denoise/data/data_2/type_map.raw     |   7 +
 examples/denoise/train/input.json             |  99 ++++++++++++
 23 files changed, 422 insertions(+), 115 deletions(-)
 create mode 100644 examples/denoise/data/data_0/set.000/box.npy
 create mode 100644 examples/denoise/data/data_0/set.000/coord.npy
 create mode 100644 examples/denoise/data/data_0/set.000/real_atom_types.npy
 create mode 100644 examples/denoise/data/data_0/type.raw
 create mode 100644 examples/denoise/data/data_0/type_map.raw
 create mode 100644 examples/denoise/data/data_1/set.000/box.npy
 create mode 100644 examples/denoise/data/data_1/set.000/coord.npy
 create mode 100644 examples/denoise/data/data_1/set.000/real_atom_types.npy
 create mode 100644 examples/denoise/data/data_1/type.raw
 create mode 100644 examples/denoise/data/data_1/type_map.raw
 create mode 100644 examples/denoise/data/data_2/set.000/box.npy
 create mode 100644 examples/denoise/data/data_2/set.000/coord.npy
 create mode 100644 examples/denoise/data/data_2/set.000/real_atom_types.npy
 create mode 100644 examples/denoise/data/data_2/type.raw
 create mode 100644 examples/denoise/data/data_2/type_map.raw
 create mode 100644 examples/denoise/train/input.json

diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py
index 70e657beb6..9d99db1645 100644
--- a/deepmd/pt/loss/denoise.py
+++ b/deepmd/pt/loss/denoise.py
@@ -40,6 +40,7 @@ def get_cell_perturb_matrix(cell_pert_fraction: float):
 class DenoiseLoss(TaskLoss):
     def __init__(
         self,
+        ntypes: int,
         mask_token: bool = False,
         mask_coord: bool = True,
         mask_cell: bool = False,
@@ -52,6 +53,7 @@ def __init__(
         noise_mode: str = "prob",
         mask_num: int = 1,
         mask_prob: float = 0.2,
+        same_mask: bool = False,
         loss_func: str = "rmse",
         **kwargs,
     ) -> None:
@@ -89,6 +91,7 @@ def __init__(
             Other keyword arguments.
         """
         super().__init__()
+        self.mask_type_idx = ntypes - 1
         self.mask_token = mask_token
         self.mask_coord = mask_coord
         self.mask_cell = mask_cell
@@ -101,6 +104,7 @@ def __init__(
         self.noise_mode = noise_mode
         self.mask_num = mask_num
         self.mask_prob = mask_prob
+        self.same_mask = same_mask
         self.loss_func = loss_func
 
     def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
@@ -137,10 +141,11 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
         label["clean_box"] = input_dict["box"].clone().detach()
         origin_frac_coord = phys2inter(label["clean_coord"], label["clean_box"].reshape(nbz,3,3))
         label["clean_frac_coord"] = origin_frac_coord.clone().detach()
+        label["clean_type"] = input_dict["atype"].clone().detach().to(torch.int64)
         if self.mask_cell:
-            strain_components_all = torch.zeros((nbz,3), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)
+            strain_components_all = torch.zeros((nbz,6), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)
             for ii in range(nbz):
-                cell_perturb_matrix, strain_components = get_cell_perturb_matrix_HEA(self.cell_noise)
+                cell_perturb_matrix, strain_components = get_cell_perturb_matrix(self.cell_pert_fraction)
                 # left-multiplied by `cell_perturb_matrix` to get the noisy box
                 input_dict["box"][ii] = torch.matmul(cell_perturb_matrix, input_dict["box"][ii].reshape(3,3)).reshape(-1)
                 input_dict["coord"][ii] = torch.matmul(origin_frac_coord[ii].reshape(nloc,3), input_dict["box"][ii].reshape(3,3))
@@ -165,14 +170,14 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
             for ii in range(nbz):
                 noise_on_coord = 0.0
                 coord_mask_res = np.random.choice(range(nloc), mask_num, replace=False).tolist()
-                coord_mask = np.isin(range(nloc), coord_mask_res) # nloc
+                coord_mask = np.isin(range(nloc), coord_mask_res)
                 if self.noise_type == "uniform":
                     noise_on_coord = np.random.uniform(
-                        low=-self.noise, high=self.noise, size=(mask_num, 3)
+                        low=-self.coord_noise, high=self.coord_noise, size=(mask_num, 3)
                     )
                 elif self.noise_type == "gaussian":
                     noise_on_coord = np.random.normal(
-                        loc=0.0, scale=self.noise, size=(mask_num, 3)
+                        loc=0.0, scale=self.coord_noise, size=(mask_num, 3)
                     )
                 else:
                     raise NotImplementedError(f"Unknown noise type {self.noise_type}!")
@@ -186,8 +191,16 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
             label["updated_coord"] = ((label["clean_frac_coord"] - frac_coord) @ label["clean_box"].reshape(nbz,3,3)).clone().detach()
 
         if self.mask_token:
-            # TODO: mask_token
-            pass
+            type_mask_all = torch.zeros(input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE)
+            for ii in range(nbz):
+                if self.same_mask:
+                    type_mask = coord_mask_all[ii].clone()
+                else:
+                    type_mask_res = np.random.choice(range(nloc), self.mask_num, replace=False).tolist()
+                    type_mask = np.isin(range(nloc), type_mask_res)
+                input_dict["atype"][ii][type_mask] = self.mask_type_idx
+                type_mask_all[ii] = torch.tensor(type_mask, dtype=torch.bool, device=env.DEVICE)
+            label["type_mask"] = type_mask_all
 
         if (not self.mask_coord) and (not self.mask_cell) and (not self.mask_token):
             raise RuntimeError("At least one of mask_coord, mask_cell and mask_token should be True!")
@@ -197,6 +210,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
 
         loss = torch.zeros(1, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)[0]
         more_loss = {}
+        # cell and coord loss
         diff_coord = (label["updated_coord"] - model_pred["updated_coord"]).reshape(-1)
         diff_cell = (label["strain_components"] - model_pred["strain_components"]).reshape(-1)
         if self.loss_func == "rmse":
@@ -217,6 +231,19 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False):
             loss += self.coord_loss * l1_coord_loss.to(GLOBAL_PT_FLOAT_PRECISION) + self.cell_loss * l1_cell_loss.to(GLOBAL_PT_FLOAT_PRECISION)
         else:
             raise RuntimeError(f"Unknown loss function {self.loss_func}!")
+        # token loss (label["type_mask"] only exists when tokens are masked)
+        if self.mask_token:
+            type_mask = label["type_mask"]
+            masked_logits = model_pred["logits"][type_mask]
+            masked_target = label["clean_type"][type_mask]
+            token_loss = F.nll_loss(
+                F.log_softmax(masked_logits, dim=-1),
+                masked_target,
+                reduction="mean",
+            )
+            more_loss["token_loss"] = token_loss.detach()
+            loss += self.token_loss * token_loss.to(GLOBAL_PT_FLOAT_PRECISION)
 
         return model_pred, loss, more_loss
 
     @property
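
A quick sanity check for the token term above (not part of the patch): nll_loss applied to log_softmax outputs equals cross_entropy on the raw logits, so the masked token loss can be validated independently, e.g.:

import torch
import torch.nn.functional as F

# Masked positions x (ntypes - 1) logits; targets are clean type indices.
logits = torch.randn(5, 6)
target = torch.randint(0, 6, (5,))
nll = F.nll_loss(F.log_softmax(logits, dim=-1), target, reduction="mean")
ce = F.cross_entropy(logits, target, reduction="mean")
assert torch.allclose(nll, ce)
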
diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py
index 69d1f4196a..6751b78665 100644
--- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py
+++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py
@@ -10,7 +10,6 @@
 from .dp_atomic_model import (
     DPAtomicModel,
 )
-from IPython import embed
 
 log = logging.getLogger(__name__)
 
@@ -27,35 +26,15 @@ def apply_out_stat(
         ret: dict[str, torch.Tensor],
         atype: torch.Tensor,
     ):
-        # hack !!!
-        ret["virial"] = ret["virial"]/240
-        ret["force"] = ret["force"]/29
-
-        '''
-        virial = ret["virial"]  # original shape [nbz, nloc, 6]
-
-        # process all elements in one batch (keeping gradients)
-        # reshape to a 2D tensor [batch_size * nloc, 9] for processing
-        virial_2d = virial.view(-1, 6)
-
-        # build 3x3 symmetric matrices (vectorized)
-        # the index of each element maps to its position in the original matrix:
-        # [0, 1, 2] are the diagonal elements
-        # [3, 4, 5] correspond to the lower-triangular elements (symmetry kept automatically)
-        matrices = torch.zeros(virial_2d.size(0), 3, 3,
-                            dtype=virial.dtype, device=virial.device)
-
-        # fill the diagonal elements
-        matrices[:, 0, 0] = 1 + virial_2d[:, 0]
-        matrices[:, 1, 1] = 1 + virial_2d[:, 1]
-        matrices[:, 2, 2] = 1 + virial_2d[:, 2]
-
-        # fill the symmetric off-diagonal elements
-        matrices[:, 0, 1] = matrices[:, 1, 0] = 0.5 * virial_2d[:, 5]  # (0,1) & (1,0)
-        matrices[:, 0, 2] = matrices[:, 2, 0] = 0.5 * virial_2d[:, 4]  # (0,2) & (2,0)
-        matrices[:, 1, 2] = matrices[:, 2, 1] = 0.5 * virial_2d[:, 3]  # (1,2) & (2,1)
-
-        # restore the original shape [nbz, nloc, 3, 3] -> [nbz, nloc, 9]
-        ret["virial"] = matrices.view(virial.shape[0], virial.shape[1], 9)
-        '''
+        noise_type = self.fitting_net.get_noise_type()
+        # 1.732 ~ sqrt(3): the standard deviation of U(-a, a) is a / sqrt(3)
+        cell_std = self.fitting_net.get_cell_pert_fraction() / 1.732
+        if noise_type == "gaussian":
+            coord_std = self.fitting_net.get_coord_noise()
+        elif noise_type == "uniform":
+            coord_std = self.fitting_net.get_coord_noise() / 1.732
+        else:
+            raise RuntimeError(f"Unknown noise type {noise_type}")
+        ret["strain_components"] = ret["strain_components"] * cell_std
+        ret["updated_coord"] = ret["updated_coord"] * coord_std
         return ret
diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py
index 3bf5da5253..3606ddffba 100644
--- a/deepmd/pt/model/model/__init__.py
+++ b/deepmd/pt/model/model/__init__.py
@@ -98,7 +98,12 @@ def _get_standard_model_components(model_params, ntypes):
         fitting_net["dim_descrpt"] = descriptor.get_dim_out()
         grad_force = "direct" not in fitting_net["type"]
         if fitting_net["type"] in ["denoise"]:
+            assert model_params["type_map"][-1] == "MASKED_TOKEN", \
+                f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{model_params['type_map'][-1]}'"
             fitting_net["out_dim"] = descriptor.get_dim_emb()
+            fitting_net["coord_noise"] = model_params["coord_noise"]
+            fitting_net["cell_pert_fraction"] = model_params["cell_pert_fraction"]
+            fitting_net["noise_type"] = model_params["noise_type"]
         if not grad_force:
             fitting_net["out_dim"] = descriptor.get_dim_emb()
         if "ener" in fitting_net["type"]:
diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py
index 788724752c..acd271a075 100644
--- a/deepmd/pt/model/model/denoise_model.py
+++ b/deepmd/pt/model/model/denoise_model.py
@@ -18,7 +18,6 @@
 from .make_model import (
     make_model,
 )
-from IPython import embed
 
 DPDenoiseModel_ = make_model(DPDenoiseAtomicModel)
 
@@ -66,9 +65,10 @@ def forward(
             do_atomic_virial=do_atomic_virial,
         )
         model_predict = {}
-        model_predict["force"] = model_ret["force"]
-        model_predict["atom_virial"] = model_ret["virial"]
-        model_predict["virial"] = model_ret["virial_redu"]
+        model_predict["updated_coord"] = model_ret["updated_coord"]
+        model_predict["atom_strain_components"] = model_ret["strain_components"]
+        model_predict["strain_components"] = model_ret["strain_components_redu"]
+        model_predict["logits"] = model_ret["logits"]
         if "mask" in model_ret:
             model_predict["mask"] = model_ret["mask"]
         return model_predict
@@ -85,23 +85,5 @@ def forward_lower(
         do_atomic_virial: bool = False,
         comm_dict: Optional[dict[str, torch.Tensor]] = None,
     ):
+        # TODO: implement forward_lower
         pass
-        '''
-        model_ret = self.forward_common_lower(
-            extended_coord,
-            extended_atype,
-            nlist,
-            
mapping, - fparam=fparam, - aparam=aparam, - do_atomic_virial=do_atomic_virial, - comm_dict=comm_dict, - extra_nlist_sort=self.need_sorted_nlist_for_lower(), - ) - model_predict = {} - model_predict[f"atom_{self.get_var_name()}"] = model_ret[self.get_var_name()] - model_predict[self.get_var_name()] = model_ret[f"{self.get_var_name()}_redu"] - if "mask" in model_ret: - model_predict["mask"] = model_ret["mask"] - return model_predict - ''' diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index 57ec0c3d3e..fb2fa37fcb 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -69,6 +69,9 @@ def __init__( trainable: Union[bool, list[bool]] = True, type_map: Optional[list[str]] = None, use_aparam_as_mask: bool = False, + coord_noise: Optional[float] = None, + cell_pert_fraction: Optional[float] = None, + noise_type: Optional[str] = None, **kwargs, ) -> None: """Construct a direct token, coordinate and cell fitting net. @@ -132,6 +135,9 @@ def __init__( self.seed = seed self.type_map = type_map self.use_aparam_as_mask = use_aparam_as_mask + self.coord_noise = coord_noise + self.cell_pert_fraction = cell_pert_fraction + self.noise_type = noise_type # order matters, should be place after the assignment of ntypes self.reinit_exclude(exclude_types) self.trainable = trainable @@ -227,8 +233,25 @@ def __init__( for ii in range(self.ntypes if not self.mixed_types else 1) ], ) - - # TODO: Type denoise + + self.filter_layers_token = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + self.ntypes-1, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), + ) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) # set trainable for param in self.parameters(): @@ -283,6 +306,13 @@ def output_def(self): r_differentiable=False, c_differentiable=False, ), + OutputVariableDef( + "logits", + [self.ntypes-1], + reducible=False, + r_differentiable=False, + c_differentiable=False, + ), ] ) @@ -304,6 +334,7 @@ def serialize(self) -> dict: "mixed_types": self.mixed_types, "cell_nets": self.filter_layers_cell.serialize(), "coord_nets": self.filter_layers_coord.serialize(), + "token_nets": self.filter_layers_token.serialize(), "rcond": self.rcond, "exclude_types": self.exclude_types, "@variables": { @@ -334,11 +365,13 @@ def deserialize(self) -> "DenoiseNet": variables = data.pop("@variables") cell_nets = data.pop("cell_nets") coord_nets = data.pop("coord_nets") + token_nets = data.pop("token_nets") obj = cls(**data) for kk in variables.keys(): obj[kk] = to_torch_tensor(variables[kk]) obj.filter_layers_cell = NetworkCollection.deserialize(cell_nets) obj.filter_layers_coord = NetworkCollection.deserialize(coord_nets) + obj.filter_layers_token = NetworkCollection.deserialize(token_nets) return obj def get_dim_fparam(self) -> int: @@ -370,6 +403,15 @@ def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map + def get_coord_noise(self): + return self.coord_noise + + def get_cell_pert_fraction(self): + return self.cell_pert_fraction + + def get_noise_type(self): + return self.noise_type + def set_case_embd(self, case_idx: int): """ Set the case embedding of this fitting net by the given case_idx, @@ -518,65 +560,76 @@ def forward( dim=-1, ) - outs = torch.zeros( - (nf, nloc, 6), - dtype=self.prec, - device=descriptor.device, - ) # jit assertion if 
self.mixed_types: - # direct coord fitting - vec_out = self.filter_layers_coord.networks[0](xx) - assert list(vec_out.size()) == [nf, nloc, self.out_dim] - # (nf x nloc) x 1 x od - vec_out = vec_out.view(-1, 1, self.out_dim) + # coord fitting + updated_coord = self.filter_layers_coord.networks[0](xx) + assert list(updated_coord.size()) == [nf, nloc, self.out_dim] + updated_coord = updated_coord.view(-1, 1, self.out_dim) # (nf x nloc) x 1 x od assert gr is not None - # (nf x nloc) x od x 3 - gr = gr.view(-1, self.out_dim, 3) - vec_out = ( - torch.bmm(vec_out, gr).squeeze(-2).view(nf, nloc, 3) - ) # Shape is [nf, nloc, 3] - # direct cell fitting - atom_strain_components = self.filter_layers_cell.networks[0](xx) - outs = outs + atom_strain_components # Shape is [nframes, natoms[0], 6] + gr = gr.view(-1, self.out_dim, 3) # (nf x nloc) x od x 3 + updated_coord = ( + torch.bmm(updated_coord, gr).squeeze(-2).view(nf, nloc, 3) + ) # [nf, nloc, 3] + # cell fitting + strain_components = self.filter_layers_cell.networks[0](xx) # [nframes, natoms[0], 6] + # token fitting + logits = self.filter_layers_token.networks[0](xx) # [nframes, natoms[0], ntypes-1] else: - vec_out = torch.zeros( + strain_components = torch.zeros( + (nf, nloc, 6), + dtype=self.prec, + device=descriptor.device, + ) + updated_coord = torch.zeros( (nf, nloc, 3), dtype=self.prec, device=descriptor.device, - ) # jit assertion - # direct coord fitting + ) + logits = torch.zeros( + (nf, nloc, self.ntypes-1), + dtype=self.prec, + device=descriptor.device, + ) + # coord fitting for type_i, ll in enumerate(self.filter_layers_coord.networks): mask = (atype == type_i).unsqueeze(-1) mask = torch.tile(mask, (1, 1, 1)) - vec_out_type = ll(xx) - assert list(vec_out_type.size()) == [nf, nloc, self.out_dim] - # (nf x nloc) x 1 x od - vec_out_type = vec_out_type.view(-1, 1, self.out_dim) + updated_coord_type = ll(xx) + assert list(updated_coord_type.size()) == [nf, nloc, self.out_dim] + updated_coord_type = updated_coord_type.view(-1, 1, self.out_dim) # (nf x nloc) x 1 x od assert gr is not None - # (nf x nloc) x od x 3 - gr = gr.view(-1, self.out_dim, 3) - vec_out_type = ( - torch.bmm(vec_out_type, gr).squeeze(-2).view(nf, nloc, 3) - ) # Shape is [nf, nloc, 3] - vec_out_type = torch.where(mask, vec_out_type, 0.0) - vec_out = ( - vec_out + vec_out_type - ) # Shape is [nframes, natoms[0], 3] - # direct cell fitting + gr = gr.view(-1, self.out_dim, 3) # (nf x nloc) x od x 3 + updated_coord_type = ( + torch.bmm(updated_coord_type, gr).squeeze(-2).view(nf, nloc, 3) + ) # [nf, nloc, 3] + updated_coord_type = torch.where(mask, updated_coord_type, 0.0) + updated_coord = ( + updated_coord + updated_coord_type + ) # [nframes, natoms[0], 3] + # cell fitting for type_i, ll in enumerate(self.filter_layers_cell.networks): mask = (atype == type_i).unsqueeze(-1) mask = torch.tile(mask, (1, 1, 1)) - atom_strain_components = ll(xx) - atom_strain_components = torch.where(mask, atom_strain_components, 0.0) - outs = ( - outs + atom_strain_components - ) # Shape is [nframes, natoms[0], 6] + strain_components_type = ll(xx) + strain_components_type = torch.where(mask, strain_components_type, 0.0) + strain_components = ( + strain_components + strain_components_type + ) # [nframes, natoms[0], 6] + # token fitting + for type_i, ll in enumerate(self.filter_layers_token.networks): + mask = (atype == type_i).unsqueeze(-1) + mask = torch.tile(mask, (1, 1, 1)) + logits_type = ll(xx) + logits_type = torch.where(mask, logits_type, 0.0) + logits = logits + logits_type # nf x nloc 
mask = self.emask(atype).to(torch.bool)
         # nf x nloc x nod
-        outs = torch.where(mask[:, :, None], outs, 0.0)
-        vec_out = torch.where(mask[:, :, None], vec_out, 0.0)
+        strain_components = torch.where(mask[:, :, None], strain_components, 0.0)
+        updated_coord = torch.where(mask[:, :, None], updated_coord, 0.0)
+        logits = torch.where(mask[:, :, None], logits, 0.0)
         return {
-            "strain_components": outs.to(env.GLOBAL_PT_FLOAT_PRECISION),
-            "updated_coord": vec_out,
+            "strain_components": strain_components,
+            "updated_coord": updated_coord,
+            "logits": logits,
         }
\ No newline at end of file
diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py
index bbf77b4eba..58c15dc5b2 100644
--- a/deepmd/pt/train/training.py
+++ b/deepmd/pt/train/training.py
@@ -1292,6 +1292,10 @@ def get_model_for_wrapper(
     if "model_dict" not in _model_params:
         if _loss_params is not None and whether_hessian(_loss_params):
             _model_params["hessian_mode"] = True
+        if _loss_params is not None and _loss_params.get("type", "ener") == "denoise":
+            _model_params["coord_noise"] = _loss_params.get("coord_noise")
+            _model_params["cell_pert_fraction"] = _loss_params.get("cell_pert_fraction")
+            _model_params["noise_type"] = _loss_params.get("noise_type")
         _model = get_single_model(
             _model_params,
         )
diff --git a/deepmd/utils/argcheck.py b/deepmd/utils/argcheck.py
index e16ecd89d1..3c4366e277 100644
--- a/deepmd/utils/argcheck.py
+++ b/deepmd/utils/argcheck.py
@@ -2576,6 +2576,7 @@ def loss_denoise():
     doc_noise_mode = "'prob' means the noise is added with a probability. 'fix_num' means the noise is added with a fixed number."
     doc_mask_num = "The number of atoms to mask coordinates. It is only used when noise_mode is 'fix_num'."
     doc_mask_prob = "The probability of masking coordinates. It is only used when noise_mode is 'prob'."
+    doc_same_mask = "Whether to mask the same atoms when masking coordinates and tokens."
     doc_loss_func = "The loss function to minimize, it can be 'mae' or 'rmse'."
     return [
         Argument(
@@ -2613,13 +2614,6 @@ def loss_denoise():
             default=1.0,
             doc=doc_coord_loss,
         ),
-        Argument(
-            "token_loss",
-            float,
-            optional=True,
-            default=1.0,
-            doc=doc_token_loss,
-        ),
         Argument(
             "cell_loss",
             float,
@@ -2669,6 +2663,13 @@ def loss_denoise():
             default=0.2,
             doc=doc_mask_prob,
         ),
+        Argument(
+            "same_mask",
+            bool,
+            optional=True,
+            default=False,
+            doc=doc_same_mask,
+        ),
         Argument(
             "loss_func",
             str,
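
For orientation, a loss section using these arguments could be assembled as below. This is a hypothetical sketch, not the examples/denoise/train/input.json added by this patch (that file is listed in the diffstat but its contents are not shown here); the values are illustrative defaults taken from the argument definitions above:

# Hypothetical "loss" section for a denoise run, using the argument
# names registered in loss_denoise() above (values are illustrative).
loss_config = {
    "type": "denoise",
    "mask_coord": True,        # add noise to coordinates
    "mask_cell": False,        # keep the cell unperturbed
    "mask_token": False,       # no type masking
    "coord_loss": 1.0,         # preference factor for coordinate denoise
    "cell_loss": 1.0,          # preference factor for cell denoise
    "noise_type": "gaussian",  # or "uniform"
    "coord_noise": 0.2,        # magnitude of the coordinate noise
    "cell_pert_fraction": 0.0,
    "noise_mode": "prob",      # or "fix_num" together with mask_num
    "mask_prob": 0.2,
    "same_mask": False,        # mask the same atoms for coord and token
    "loss_func": "rmse",       # or "mae"
}
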
diff --git a/examples/denoise/data/data_0/set.000/box.npy b/examples/denoise/data/data_0/set.000/box.npy
new file mode 100644
index 0000000000000000000000000000000000000000..e17441f1a8ef17cb0cdb762f4d5ed009f6bd57d7
GIT binary patch
literal 3008
[binary blob omitted]

diff --git a/examples/denoise/data/data_0/set.000/coord.npy b/examples/denoise/data/data_0/set.000/coord.npy
new file mode 100644
GIT binary patch
[binary blob omitted]

diff --git a/examples/denoise/data/data_0/set.000/real_atom_types.npy b/examples/denoise/data/data_0/set.000/real_atom_types.npy
new file mode 100644
GIT binary patch
[binary blob omitted]
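
The three omitted blobs are plain NumPy arrays in DeePMD-kit's set.000 layout; their sizes in the diffstat (3008, 44288 and 14848 bytes) are consistent with 40 frames of 46 atoms, i.e. 40x9 cell components, 40x138 coordinates and 40x46 integer types, each preceded by a 128-byte .npy header. A small sketch for inspecting them once the patch is applied (the expected shapes are inferences from the byte sizes, not guaranteed):

import numpy as np

# Inspect the example data added by this patch (paths per the diffstat).
base = "examples/denoise/data/data_0/set.000"
box = np.load(f"{base}/box.npy")
coord = np.load(f"{base}/coord.npy")
atypes = np.load(f"{base}/real_atom_types.npy")
print(box.shape)     # expected (40, 9): flattened 3x3 cell per frame
print(coord.shape)   # expected (40, 138): 46 atoms x 3 coordinates
print(atypes.shape)  # expected (40, 46): per-frame real atom types
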
ztNF>-tH<-<(7f|IC)fGr%**5Y>FuGz`g)vKH0S%4r$c(@=|ywRm1%Z-d=gghxDSp>MOqLHQ$``?0fR*{y&trpYMG`IyA?}`H=5?=>9mA zU-@#*F+a`qesXkuP;asO?zzqL>E-T0ht1Qc`|;21(+|y0oDSuod*O0L_oJ70zWX*; zGzaxFW7aq{W*jZdi8Jv>)+etYETo##Wja&!D* zee>0EDA&H}d73NNXCB}8Z4(=IZ}wGv#kHEpr+xE_&hcHq>gM?6)%ooyH&?y9x%QXK z7v&%yFHZL`x9@w~n0*wxPkwd$(tYdmdr;H|`4jK|x8}`3=i{)?Lzj1sekfO*e0_Y_ z`EuveJBRoCp3alk2dkH_?x6=dABX$Cw+(c^<@>(x@5%S=`+@z==tcXT@^m@#bR713 zrcZgk{^sb-;jlUPpu^@TU9R`p_q}bVd;8S;cHV^kE_h#%Zm##7a((}Liu&|GIpp*}rw_zI`I`|>B8&foXF{ib#Q z_E`O~_N{+~{tn1F-}{986}E4hZ*JAgw=ev$@S(la_w8Qxc`mv~^VRK7u0FjeXTB&e ShfjEJ&b+*H=|#Ef_WdtpQY%~l literal 0 HcmV?d00001 diff --git a/examples/denoise/data/data_0/type.raw b/examples/denoise/data/data_0/type.raw new file mode 100644 index 0000000000..e2338f70ca --- /dev/null +++ b/examples/denoise/data/data_0/type.raw @@ -0,0 +1,46 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 diff --git a/examples/denoise/data/data_0/type_map.raw b/examples/denoise/data/data_0/type_map.raw new file mode 100644 index 0000000000..c365ac55fd --- /dev/null +++ b/examples/denoise/data/data_0/type_map.raw @@ -0,0 +1,7 @@ +Ru +Pt +Ir +Pd +O +Ag +H diff --git a/examples/denoise/data/data_1/set.000/box.npy b/examples/denoise/data/data_1/set.000/box.npy new file mode 100644 index 0000000000000000000000000000000000000000..9c0da084cfc07c81d804ca10bdc291fc4b13340a GIT binary patch literal 3008 zcmd6odrXaC9LG7&-~-*@rS*C^#1pL-glpUzrW{s-Zw8QJ~DcZ zi6B*wCYEz*nM%AwBo0#eizOnlLaEZIq&t(9DmnL?hf24rIU`r6NOy6@d+&wwC87Wy zi6~w4zn67WXWsLxuSFpE>mnS%ke{=8#tIke`JdO@o*cK5F2Hd4k)3XKDeWN~f!VOI1R;FTcCu+Xye^hVt;kw#%yR&gQJtBKA-ID)c? zcMQGuY>!;OFwxr^LrLJY>f%b;LpTCkh2bpz!R}?@Yoa;mj3LpwEGPRY?I9e&`u$a3 z^o4AX0R^K^Mvr^^$puyqX%FED=7&8q?J{Hc%5cOjDKq%<6xjrfWG2&j2uC1r)+Y`p zv3sRzHoxCz?8TSI5LZ>fHPU$qN8svF=W<%ep3h^h$b6e|K74sP%uhN7kFlQ#7ecq- z@MrEQJI_$-tVZ8Z4A%m3O|nMlUW6miV)5>!GPdVl?2(kR)%xbCAoN^rU{fytA=J?K#f~&NLa0Icw zf&s}3wnx2vxFp<`KacS7hME34IuGFpx|0um)pfD+4ApyGG#L#7PFYv27^4^A2o|@b zeeCFGd-^@saneW(PFYzFu`zTW!V%Oql$+Xzusv=;KL&R?^5FO8>tI7s7By z&D*eI_CAEd%IpC@495p=cD}2pdl7E@FP}OSG0~3g>9M#nH^vh~%{kNa+Do*Da0Ho8 z_j_%8%ASwT<3Lib4~F)*9aeG8w1;p6I|db@?;P2Ao~+V8kWJ&yGcTyGxqXcNLpTCu z-jTjz|2P*l*)7}3T`-I{&&&_D`R#k@ITMZ`vbsl;n#o?TJtrjckVzQ$^31tkYM82~ z^AL`}?bW2H?sRsZ0MAC@)QjJK+)G#SqeGihf*cC!lsFcW7SyGfjN+?1iAyEifk|k7< zgj8BADf=E{P5<*wbIsjc-B13Heh+>75m4|pZhZS&@cO>-_OI~^ZPUY*>$AyY}R#In74!b zZI^}RPfq+lI1M}YAk#hIEIZKG&+m%|W`6m%U(B4o?>r~`V~6e+GpGB-%$;)Z-upJv zuxcg8n9`i-(vR5Ray%k9N{*4C=mWsVB_vyE8b3uIqIDCOn@h`?%H zc1Mgap2lXEl%DUXra+~mwB*Nh9E68DZcuzbh3O?uJG1mtz^_;|@cL;YTr(xwIn+#H zDLB`R-j5XcmcM)LAx|3C8aO3fWQd0ez2WAub_x`ncbi&0frryP!J50a;$ea3xOHeZ z1&CiN9L}3a0RL^J%m#HlY$|X$N_;_qG%-f`!h~^7Zob(dD;Euwj~h^ z{qGx})hEEF_F+Fc1uFbF(|YvX6bUZIUoR)y6Tp9jt7-QJDtzT#+7@7 z6}yKAW`2)b)j**}{9k_F#qznD4QFJ?af{5Y8Bgn#VN{bJ_x_DTO*UuDjbY*#&wSqr== z6X)r{l-UI>@w{Z%KRdo8=`tSn7M!_tU8fh*9XueSHLL&^m#(oIm=K3cOI4G$o$tlE z3^W!HMid~~Afv%Z7zgY0b9@88@W4t=4(Cra1u%O1z*2iF5!z`2`S&b%z`8zG_vc#$ zh~|>HS9*&CyBdjg`!Df8tlh#A)*b~g_sLzbG71mnyR786FA;$#SkNkfxO3g3TJO)| zFn<2<)=z;%(AxEsn|o3Lwk*4uT znmjY5t z-5s)f#bNP#e!l-*+^Sjd5j1v zi*g@#aEifOQ>oDZc^=*IHMw^}N#M@c>+Y2)3Of#)i(Eu@lr3GJZS}Z`^{M!Z9lRD( zI(UUOx_Cwjgv9!T_HDs|mR%3qwI9``k(I`Q?;4aq8S(?|f`GBVnx zN`>`1Uv~L>PhcNJO`@WT*mP+E{wJB9e$09k`iFGz4r&}sITIssMlH8}V?C;p3@{8~@LROH+1PPsAk1bgMz_HeNPF!Muf zO{Bn6693|V?#qZb<~e;|2A{Vt`TKbooJmetTvpn=rKHLN-9G)l|H&D8@+YTZ$A5mX zsd>x}bWZn+nbZAZ=Jb8%IpH5WbibH6-7jWdm9SuFaS07u9CFrYM6DOAcPZ%g@E}9s zy$!jCSp^_d{ywH!+J`MaVe;dw4H-n+cJs(uN!XJ_ 
zNZnku;^lf?7@O!{^8l?ci1e%wSR?{MFGm$F9OH$XlPqZ|EM({{j_eYiCBgA8nqp-; zc;WKhoFOH3GD!Ocu=|eV;h`?CqymQs?A=^+(qS7JOhQ~mk6{upWL=^70NGny=-8&e zkqlQu54eoHmV%Xv!*h{bA~3$s$5au>@a&k})bn$R*lZPVmR9gI7JFdsS}vC&baGqX zQ7gg0`hwevw-_aY+bNb1H(^n?PTx&je;V zU@gMxj|cKar6udw6d}cx=T7Zj0Z^4=f3AU{IAmdu`z93U6bvq!K6Fk3>I{lZ)z;&o zwv{Wc;5!8hvppNQ_u_zX?owDSMGPJks)a=LQQ+Zd4^HnR5w^vaxSFmIgL~?2!Pi7vHBfd)eWF zyz!Pr4pay~aI-RhjtgGykgH>PMu3}|&jZ;-sPJT%E$_Ys5rhK5Yh*nM@H}ERCuuVk zw8QO+h1W?!a6{Yj#xMfBU07?XqD_UnW0#jWl#9b=)h-UU>qL;+`eI7Xm7ymdl^qAR zbTZ--U7bqjA5J*EAHTRr2po;K}eLqzM}*Nx*%B6ZxxByRiU?)IC&1GUOg`T-H#DgVr&w^V11DP$~1( z+hjc%B;aWOhHN6V3buZ@qQC=FccY88BmGIe8@)~DGYR&^C}uR?;DzsXLkFn&3P95; zT>az&9)zVkTIG%s;Y+~T_ZG%v*lu39@hDLO&g!$Qz7R@;y-s2`m!bG1HbrDzfgZ|t z3}<$bLWpqM)8Lss%D+-~M`{gb=anvd8&k!8dJ;>#Rg_zT-qWvMzu_S!c$#Ql$Pf1K%l2(1DT1?kbd^N7IP`Oljq-iqhciz--+ak)Q~} z-x{Qeg6U1pp(BkH@Zi`I+ivArD!V0BKh@ias_qTg zLcfm{tSFB-ZFt_Z0L61hdb_YzC7)88d!a*NLR4t768Bv$F@;^?pxI6saX@J7n?Czh zRG<#`t(oq}L#RvOwdlj_z-METmuiKos9@PtPaa0`NmH@7 z@=t#PXlmU$GO(BmZni6Ci&heW_RjU(xsN!wo!us>aGs$jb47hBX1NSKvAeRBtwak4 z%-sGLC;p4iuSa>SX+*$7JVQ_T$oXd$J-`DqFB$*n@6;&%FaGDg41JjA^nDq8-ahH~ z^DsDFFX*2EQ7mc@A`5i;f9DK6`ExxD!(L`NYHnc%I;Z=^%pd>ui<#5+o#%vq?9lyU z=5)W9c~G*oS!M_gYY9F7;aOEDX1C;selnXuBU|LpeMV|tA z4xN-c%`E|rO7?q6qhz;$FP#K8&L-{C7w3gzhMZPfNC$K}JJ#mD!^3#W(;uhP zh)|w~KfkD50YvqaZKdysL*>UWYq?OJqmBCzs~EDQ_raxx_oNh5Pfc^OT_(cK@B5W& zzAM1)(0*2--1yQwzLUgeyJ<`+OZ;I`E(OfiL^|!=jDzhGs{8N{r!f1>!+IZ!Dc}rx zdSc#0kP)_7|D|;bdp|OGdLWYm3a>gfHWrLy-!=;vS)=^rxlx63BU<14>eA=Y=Xfv_ zOlr6zhzGUxJ2J1Ke2U1`u-U6i98OJ4h;l|DozY_Z>|R0v?JAxj>Fqc;eAHRNu1pl1 z*41PzK+iLq`(@oMg$R0_;Vgjipw1uS1{qltAQbNJ{a!|b164<>bqsS6*${C-i5)l!mzq0IC_(v4>qlCRc(~^B zExXT&9oWLWZR-|NK}FC-TkDB9+{^x`^Cp5FHY{(r6xyQ%MqgD`Bz$q;maDQP#+(4> zd0&=qK>3%H!52*pCn8MMMvY$-MRChy?C9cdl$R0RejLmuL8R ztdB-Z&j>K|WZ>p{{&u7%%shnm!O@p*#Q(+r+?Syb^PIjfgU{P1{eB(>r|Sj%(}jEa zg_B56==T548G7>PdK#vd_&j*6F3NX)+oAi#%;|nHbNasX9OXNIJP#3=Io&U29$KZ6 zV$C;!B_H$pP#4;Zy*j;QlR6I>Vxy~5^-}O~an1R|&7>a8$?|p@u}cBW2+yRO)5T%c zS6?Tc@7Jjfh3H@eFM z78~C_Em=thYEtv6!D14`yS<8=$>Ra3s5eXZqI#^sU2^*4-FOf(E2J&nMg-rw?Ny5r z-yFQPcq&C4tTaz*A2BCFHycb{5+H+ftX@%vvlKX}h?XgP5#dLAo?S1h^K^8!+hbR9 zur=y9nb_26?6!Q>vdt(SxM8ihRTR}3B$5hQ{r62{d8%0)x#biv*A?gwp`v_6Mz&0?$9WEBjo+!`#}5pBES4!EHtP zMMo6pToq{AJU$~14nKe3xB~gX_Ta*y u!Mb>I?NJ;!)CgbgI3x-uegsJ^M9;G_ zHz=b9>4}&Ahv(%UqOhjYJJRPB(!HDn4g0&Oo}km$R=!shnn@A~!Ic!qJ6!Qtxv&zu zFX88U=4~-Hr(~)iG^PYOQnhOZQQY+@Z1{PPNC;*ZQ!G-j0_n-iT_%r$i7y) zM`-~2s>?MSsW2ufqpf&-98x@wS|F4|*FYfSbJq;V9wN9@TXXuHZ zaPo)lV)T2HiJz7th@AZ<{xAOLz6^bs=k$FUeBM6k_wz6~T`%aLSa7`k+Do!PxBqv} z(33ycqkKo!EutEIk91D=io;4|Jb4X#mwn`G4slT#tR9lH0&VRjwMUH z7lQ<@17*v|kmDj#6Tyx0XoC`y;m5sL{*>JcmI(#mTV~X|V@@0tKg|T~74N|Wbo670 zYGi2B8?v+Ahw|As7aP9Pc))1ri|R4dXUVkw^kGsBpwAo+*c zJZU6>uA*3g1s5;ai%_+;qkUhc-*($ljR)nthaGKih#)(nb?b^U8S<^&Z5>O+VL%gi zStOPSiyAzZ(D=ykNjzrzzN=D@KN_i-l|_UEZ);oQrDP}@lJC&nmW>S-)mhY$r?976 zuH)6}Dd62eI%>g+15G7Q(>}IoZ2wEIPkcx>3N9-QG#(+sL*oEl;Vn~`Wc1fP{zxB2 z%d4_wuyHJ=!|8y=Vmu^yU+ftCLIJB%ZO6hMJg^b4?+J={5S@!`aCl9DhEbw9|D-tl zSZ=Q#hIA&pWhzSv>Bd9nNL3>@94H$t^Os8&g|D7X2f3b6VB?2<2M2Bo!^Xau=nh3O zxNQ-eJ5fggX@Q8LwpbFFd@(K<7!ZZ4ZYyUlR#Bk*>iL$%i-JqJKHmw@3XLr_)6@S94Rf18QoKN~Eh|nQnAb0x$ z0amMB?~D;cd5zD8o5ep#a4@C0yY~bEjy>8Yqrpyvb-mxr);|%0LuFChb=*6#yPA3N z_uc-hCv<(Fzt?})ng1L;;ZxV=XL(4&mLEO-VvRyCc7spJuTFvt3muPs_gWwT&Wq}Q z$ffjR972>iZ=@&Rcj)+^XOn=XPerfgZ0W(?jmLg_j`ZZgx#dR7kKy2jcY5uQ86M!P zww7W;br`#KZ>tw5q2E&q`s$Zic_B3{&fqv3vY)X_<8uQERQ)Xm@3Qm4Vjt(_T2f@V zBk(-m{w*Flho3xbe~;qycAtP2REJ5)3eP4#5(nMZB3KZO>=8ubsuq!9$B43Uj`9E`0sZI%9CNWsCsH-)K&EAD^NjQTiovie#hoS<{8@#Lbp 
zDNKHdbnynNOVCsfuVF=TkH$tnmw*#fShcX8yBAvj=^;E<>>0yO5Vt6W5%3@uB_RI{ z>4SB~8|~^;Jh&fU#Bu@EWA7}gsb@txQ?@0Q^MCqK9Pt&+ONH=Y%DeBKE?V!Hd{6C( z6%I~beqUCK@|WqdC-vm#6yQiznHG!|hTWB#+^MMUdVcKg0ym^HKiP!b_n`hxK?{#p zRi`LyPvg60^^^ia-lIFs*^id)ZdL8s?|!TF&izcUM5HIFF~WORdE;P>_kL}+($vyo z$+MAfkUn(l7LTU-5~1K!98biD)Y69b6x$2jRCqr?+SndJ!;H&4`drl6A>%{n@toyU zxDpVb`rsoT*!_6fE0(i^+$QsOWfbR7>VwyXBb^LQnLRtZlpSzpm-nv`NBS^zb>IFo zobcd1>zO830@Us1dp(NcmYnR>5t&F&2=@Kw_Sz6Yr%HK36!oK3^b9k}EhN~(!Seh( zg#c3bQp{sfz7x9JR87)X3Va6Qn(y5pLWN{jf1DjdPgE~8Ms4|D^^+658Yi<*{p9Z) z^~agkd;VHa!#Y&HmSp^|`bl%H9MHhhO&5 z&-t(CVQ{)G&_C6Say&Pne1~rT@0<~z{JEZnrQdDIRz~#`I;Z=^%;|nHbNasXobZnw zx?jwE-aZMKc_1;TcXH_jmSlW4(mSFT>)rF7?-VD>Bi4$g?s$fW%@Wfg@3!?|Q(U{) z_)%Ym$L*l_C4X^fa96L~vako6RLoJpDX>phSj zn1zm(okRU%(~C1Qai0sZo!;G#cB6Voz&*DupHTb|aE|AlUoxum3?Dyaw|^QN+^#OV z1LYSJZNbXv%0%F^xTJlr+CUc}ks2=s{$vw|`RLL9!=l@}@ZS^7ZIjPxHeunT(A=Zz(`xz5L=zJPt0Y zM0~uzLlhqSchp3FL;AMBDaZ1IFqqw3a7e~d6dc4B&JB&AJnN|N&x)HQC_ff2spu&R zHDz&EW>LJPny}w3CZPa(GK(|Jw7Z2xb9Mzjm{S5JK4(Fe`1? zx}y{iyG%+x$Ofc!4&cp^xYzX{mmO@Ouff_wN;9Y!yzS|&P(1l5h!0)id{XwP`e z$s<68<-=!ttykbcz0##A)`tgZAC`qTI{#Ns==wl^um7$y|2cZHU(C=)b@K$a_w;7c zJNI5J;##VQK8hbkG+Xl4IXBz; zhmMIMJ?Yc2OS~D&1D{>1mn=tmViuyXCe#rRe&@^$&aNi{#;rlztU-pe?$0mWeZyb3z+S~|;e@n8EID{DZgavE z7E*CW;To!YL}h6eUai8x*|DMyE|Y1@-Bsq6GwRp;e0XLe(TE5}H^fX|Y@fykB7_bu zKyk%l<&O?ix|3MN!<08U(rE6fyK(tW6eqN2W^@~P;X(YPBcIS=RKHA6+Leg(#3ICQ zW3`pC=aS_yKWm#LiJ&f)S%;{u*nq*K7;gx`+-cTg?A}d&Ly>@R5A^t z(Qw{7e<(rI6@pVR83$*S3}mGPb1;q@l|4UDot8By(yDDr7#h>7Hx^09VqcC$)rX=u zfif85*?VdNo20tDu;FG0cjJfO)sP)8|F#f|lX!6OD7n5jiXBGoM~^5AQc+(!A|fIT z`E?;6+I%xRq}tB%=Ic{o^RakgD{UN<-X2mNHX}fe!Xo!~cq&w`FWoUUAPix-(PD+x z1mJ(mxs4O?{2sketX?GO+UR|w8c<)dsVP%!4VsIyBQ>ikNkM<0@XIBrPp5Ubh~46U zL-8a#DDSVU+ZbuDk9`^O#yqF*%i#0&Nxz?m!Rfj{|LjwG?XQgLCv^LN=ZyH|&-FBHRre9SSEznM z=XAfAIriHxW=`LCo)iADL-&iB)BR%R8JV|m*g_h{8y37kbbAlxB1!mej`E1}V>(I! z33y1rxrtTX>%sb|M`nys9rfz*rc@3t2@r76!PS51##jtaUn@uNuhqbT2V7JfJpUTZ zz5W>wTn^=TmqPQa$L=R=xo1a&J)Rd@*LLxMiG1!>P8^zd-F%fj3H1&0&It}HS@A&a zo5q1%sJ?1sO^P`+i3bsP<2A2xh>-h&J^u#kQ%5b@J^p4~9CUa*l2)NUOq)SMhytn` zcW8<)d2>YyKHBRUa{3YB;~U@b$4Fc-Rk1NWkprZM5goOkRSD6m>SadVR)5h4qotvntwg~c?!+Z>GYo#H2)CUX8X%=awY z@&wcuNXlI8YWb1^N{VmGgPrkk&)34QSQU@rfY#d9H)uV<=Y|NY1nSo--gaJ!2V2|A zdK@U9Qv0m3H^?0a2R>aJk_;1t5pCAx?a0piJ?-iVJBeV`pB^Xt6zK_{-dXY!3beVe zdp%c20+H9N-HV=!Ld%cdPJU#6ZBcPT$2oUQ;A7JD5zVuuLoKcWAWQ{gUh1ixXr7SV zrkeXOH4#f}J9hjP>eu8=P88ljI(hOin}osbEbPTbhsnDr-XL$%D04eQ!-_2To)(v5 z2gQ*UH;yi$g3c}450hJXF!DAl)eT~Y$#k}4XOvGZZkb8O2}*$FqnLv1Ty}VUSn=2! 
z88nx>ENrWB7AMdwp0CYyBEaG^gXu5X&^$~A&q*QF*K11pUN?6V&B1O?xxoED`BzzY zZ5_&UpI(VE7#4{$O88TP zu@SNIJCL5_l8%SH8We{YQ!SsykZxoqtHPs zT#M!x!a}z1NA(cpJ))jdKg8kcVCe?6<#@QfBC%Qs+1VCi9OP?*gW-x#&Vj+AFn&;5 z`!uS%_WBVvp3)_PPEm9vhV*2#N>}p<(mk1z2S){KNf5@bbvELiD73a-lJQ381>WT8 zC66Uv!mh?NJyYeliy4@!yon@J!OEVbYJ}$WMz*P#oQ=4N5%Sr$jiSCup@ICP&#FXN z?4dSwce#G4NIpl-c~oEB1e_k{>S@?A%Td!uq3qy!`sZQ^5~|ZaxJT-GiHBQho0g1H z+2O0qq^T^@t1qfXnEw(9Sfp6{cr+6I9^CRYViM`e>I&zDrSCbR+)rjX+dAlWz)0P(&%H9n>?qUR=2;`RUos85WgF4YC`*5ceL{UU6-MWyT#NyZOT!-S8JjaFSwH}nF zem7k(=rE0CtBsyth*N|#?};n2-8k4pS}(6Ti|QnAd5?~xyk@rQYN)gq`o8XXFPbx) z#f-kSG;i%k^^nQ|M~heEnBwfdzJUFB=o8)R--qfR!bNohbt!o0HN9?WmyL&5Cz)XD zX$qw4k0>5{DGtLA2FDhlxcaR5g#P1Yis0!dKca~0!20|Tf69i70d3)K-B@Ww7@7Fl zZF`UiVP0$M#?Oku32~+3he+T0jFK+svPi;D!xMtqc49EnP4M1^@}r2Jwi;Lc*H}w~ zgt|%0lhU$v9Q=4v+kB#-4D&cO{_FD9U^a500E!?rbAy5F?Y#OR2c5%RpQo|xe zA1bVgntOHkr35%@X5Wx_iRMytd{?Dkq{64FHj-vB4o(Q@-dwq#0DGj@ystN-g2|P0 zX6_{-@V4Fh(rlOzEaiNB*a+oWRL|ixw?!q*Jw80p)7QmUXy{t~$1 z=d8D|^b1z9Yc9jI2=U+HnoMoIjO|G6(iALcoIUk0DIFZug<7@V#b^v}<7T5B!RWr1%0 z@0_70f3Bxt*3WN>I@hrSozwkd=5oLNV&?RH=Q-gYJ9NL8Io&U2t|AbVyp?kTyIk|` z%awv|tTv#ywauFhX{3R;gtG!LvV4bhlnv?CdK;niZQA{T_#Tz+6S$OE`Xx(gfjks+umX3>Qw=p0AFV!6*7c!BbC&su9O zl)ng?zW<4SuI}L%b-uXA1KtBt+OAGyn46r8F{su7MRWC+B9%^0Iuz z9d;2oS|ykN?KA;c4o#31H>19G?KON;H3`1YJs=8N2?2iJ%hd;0qC7}_fJnI@0E-Rs zcX%H_=PWw1qzWDXt0#1Qpug9D*O~tuJ;`{KHhPn50uvv8B#6iLU~arKCwpAbyqQbS z$(-{7Fcm@~b%lS!E>>l;n4x?pHAVW=sSpXcDF4HF2o>Lkx+cD%o9PKN89Eg$^~h)}jIyzT>(~ouAQ-HA;KM*qe)YU7qu;*bmdBb*jPnI3euCKOJY8Vpg80T zo7LgRttdYEtny63TLgZ%-zi+@NrpupXPyb&D=1agm($%nJb{sy1+hOs@9$@}UE|g0 zIqSFAwltSaV~%7Iy;V|*FqtPgbQtxGOixOk5V|#ktq9;sx`fW}L{S>WtX|PDn|;R5 z`9AW4&gc8GZ4(q2_FI4Y(06ps{nkFW7zY6`m$bfZIZT0;&qd*Gx)QLbR`B6I4?I|$ zBUsp?{K)&dNnTO{4)wE**_ZASgEy_}b~{kKkw39Q<*APdJepkmvLaLroGdvx`&bnr zFy1Zw@@YwU5Np7mJSht4w|^3|}~f`mOgvyk#a| zbAZ-?=?K#Ucd&O`mL3Sm z9VYjwPsoCN5@hv-+w<)bg2y_x31rmw+26Vm;}#&mZvWJ`tnWLpog$Y8fvf3BxtZAu%u9-#A^bWZn+nbZAZ z=Jb8%IpH5WbibH6-7jX&x#t)cwT_1M;Q76hjC-)!h)WznsP5|1rC@Nb1rNneo-spJ z-PoybHnK}meN~d0G%$zenw;3zTK?c9yn0Oqf(9TL$Jtw zmp^z8&9SG;>c^rwE_rX!lz|Z$W|!VSX4D}L-p3P4q%NTOlS_oHchUL5O_D0v9d1%E zo{??3vX=;lJXsfu?nZj39LWcU`B=~@zwV=Hli0R`kv&O*ig4c5?nv}q9Q5W?rW>L8 zSb@t;q7ws12NYsk; zlpmSrB%Wz{gNNg?*S6@V;o);F$BJ;&$C>2!>6u<60mL&|ei<|TAW9S8K7`i452huQ zTcCM1k>exUBcc#%qmW;N)~jkLUky7?1Z5$$a>vi2sBh9Ia15RIY3uCi%tz-DHhM?I zuoQ}d)Wxf!e`!%vmh>&C;;K^whQfiVXXZ9TRg@Y0ww(>6?$39pgmc_wY0x;?B&y2=nEDnY3Lj?CMn5_j-@DCAtqJy36|hbJPzqyt`^e9IBJda(?7~ zS%(85Ea82S3<2af^XiEredyrx@ja@C>amMz5WDQ;yF@hq*5zfccj+YwELxp> z_<;b9bF#wM)>8q-7Im?rdoY@3v~-``6M`)94Oy5ULr?DBan{dIX6VVKeWmva>u~UQ zPW%_Y_-j3?gH>~03Fl(yNpBE;#icuVVCEEc=a}dU@qh8qKlWwl!#t<&%i#0&Nxz?m z!RdNI|NOKtZ~xs3vOu^0ch1n0KiAW+(J z4hgZNb2L1w76h7@|4$ zsB)bb*?v4wFq}ED2-RIpIW4ws4kp2o6NR(7w|L+g?+cEvP3Rm@|JAy$sDASDX?2fWU5zyRPGq1MLQ8cfv_Ef*5{~(&bds6iMC9I`h`3+^v;>&*^yPWOwM)BR%R^nK?!;U7D6znD4QFJ``0 z{j=1zwsEXPV@OqFsv8rQduk`V9L>Xj4A!om#e6wsXl`~U>y@Y?8D0u>Zyhn0 zfFE}>I2)R}v6*nuvp5kl9A_^HPcg$md*aw9fpaO;|h^Q zMbiqvK`vnJL*K7u^Nkg3QAFsf`Mga8^>M`R2zgI6N`Vf?pzO0M6wf4to!@}w*>-gV zt-lZ&giSXqjq0Mg1xxe9$W=m$V6@%2?`9efw)J@|&5NGGd|usrW`gF~*!$)CSEKuz zF3En+9TK0$x+mhkkD_^xNHq(A%)D{zh?ecGBOG{mKW(WSgW`lSxlMYDU!(JZWJ6gw zT|C@>BlRk-2;ILFYU?_P`d{unDIbM4;XyG`X6;(k-?<;9Y_2?j>XvnzX4bA015Qg< zj`0eVzw9%V;Y%dK_on05-Kk>ewvv}dG&KLFt~y06m?J^xz25Bs<)WY^Ud368=47}| z#|NiV?6GqyIY++rxs`5qS~zBn@|1f<8x^Hgxggj_#Z^(r3EQ)7aq&SM6`Z#kYIULW z-9qb+tY2wbUV7k%PMS9Ar?l{-9U)6jVnkW%v|4m;(sRmD1D60QnEMC!{(OW7*@>G) zBMt1p-m)+50LrJfFWID1b4(nL*parf`m;mBVU3ozs2*aL(vWhSlM4b@)q74X5rQpE z675ZB-v0Q_X`B7DloGa7Y6{o;etO0*#**!{$Mpa#XI5%NoxYPF*CgU#w^PN4WI 
zXdPk1(}Dz-x-gEx6?_o=P1#=a^ndk)t`GG0`tLgPpQ9(7?m`nAC&sack^_58?x1?? z>qN2a8#}YsVW$bw z4HN#&S&qAKkaAS^L^!ISgtsZHC5n>4{LtJf$(uyjbu(J`V=ym7Z973bMpqo?N*;*LIDX4Ohz#k?JGOpD=VD$z+a-9inF!}0A>El3#T#{x9#^7%Wm7}! zP&c|~DXJvar5x!=nPo%Mg?CbLZX=Z|x{?S(7Tke*Q9s%*pL5P5EfPCv8Z5S0Vj5dz z?%n8*>Jpu=o?U--9S4hVmEU^$dJ3yNoOmT2y{BDK`5zZPB0^|i?o^lXH1__>@#Kdn zK1mgl?VYxwVbrTnR8Elbu==z6{*SE`*k3TvT-Si|L2T#D)19b~%n~`<{geW1zIOyq zqI)yGp1kb2XEz=erazK@hx#=Nxnt1}U!(KI)i&4Oqq+LLky~B%s9%h^6~>jI`K>C# zjO}wNG4Oe@vg9zD%U%6`S$O()G)I~uw=b4g3{oE5ko7I5K%@V);5?74QuE%Yk=sZw zv9r+^N;A%&_x+kL;aVn&Pfm616Jw3QlJh5X?RlvXeR9n?Bh&{MyKBr}v^co*e3V$& zX>=~n^s4OPl{aXZbZn8}crFJt+FTz_KS_lGo!7k|U*S>RJlH&mmmPQx_<$ojny1Co z{T^LK^T{tx+T2F>8}O)a{P`F6{kZ_n*iN`QnAp`ATNK;uk0WADo79I-MAwUBie^tlai2xQxC> zWp+${|ViD(~=f_%BTlRRNa-7htB;; z)#29g*`xC*_p+PT7x2QWtFOH>kR3g%*0t?>(fjf?s^VrNFQgA|HQb5nGg}pR%od@a zE54p6o>x|5lac^s)mt)t z6qJ|$6cuo-QGlF-{#H7%QgBjjK&eL_#V1)fMJ06q8}3s*JH_oj7A_>+)OKSAyO?!` ztrNZ1G1uca3arL~09jRKm)R^Ka;BSQ#FmMabme?!- zD;9m*yawHWay)9-Y7>e>PFk9Yr7h!vtSMUOrrV-$(lNn@=6At}qbQzo)YZ7uaU%TGMP;NHTJ z+-4;pZx7OwMf0L&uMfP+GI@%9c>S*4Wee)hDr%eQp!&?}jw}2I&V^X)=UdyAkUroo zOsk8h$FL9kXN?{!bHLlyqG}~{-h%d!pGzwcb6p&xP@7p|dm0dB>QThdUTE6>$75cNzDxX0U?Poj9~t@X}>PN>hm{Ymmb=w@L!Jygj4 z*@&Sh*%4dbExy6Xcg7@}8spIS_;*hH7a#q#o`&fcUkqHs#?TYb=GOS}TX&KY|0=Xx4eb>Abg zL75%sobDGhr~Ads>HE%e!asKCelc^pU(8%@`FMEtZ&!7Pt~4q@qTiaDhi`--b#?f|bHO|iV!OcG1kKOqWM6%-0G;D#uCQCV z@DvXu6|+jZq4;Do{^-mAnm64#8>AMw8_myGuc!$`I_BNkIyjjv4k;f3_0}NHA9Tgk z2jxd01r3?^(0uGjT&1BCx>wTKl^|?3ssIhLux?-1q0&*EomEFSPGYxq94@aJqrjbA z%d%Zna3DUFvm?!a5}P@pd)ux9o!gCXHm?7lb8{~@?q;`{#uiqb?cqXkNO<#18m@jE zTX~3+#KOss?rkxBqL1n@TB=H#!RWo4U9+sUoPvkJACJYd(fOeGBPqu3H=*;_F$ZS0 zPw_)?wsI9L?2yCJr#ymYIPV=UiBDk#6cyk-4UVR$2}#(7jb91CkCkEEjWJB^o*aE_bM zuzh<^D$fOTz*kH_rT#Fw@5qyT=Jh8$2yEQ@Qc#-%GFVDE%2?4{y{8BFB2{rH&(~C7 zvEYESYfC$WQ67{r$XdN|4-TZ()YvNRB*3{#22saQz9aX2Z>_8Z${zz|wrBkmgzHjm zz47Qg(M7vZ`$SY1ipkJ^s%(z#n>BhD{|xC&O-ov*=D8nO=OpKznZ{16agBYJqR)Tz zgsuM0^KG~<(vz{B7Z!BObz{A8 zQ7H>oqdG~*vW>6Rio21XgnD@SI`r`Xp0MyT8;TQx zjBVW`-wH$JPvd8|PV>MC6NUQkXnoo1B;Daa61WZ4`-l1RK=~_Yd2b?`Gg(`aH;(kg z{4OU?BdR0HdcAX6jOOaE7n&|nNA=jdH3Dm%B0YJ)SGQsjs>1}^$(SV|J<-!J7oef| zg8jOniYB`MWR|kJB>Sj;>D?Edjx)BXPT-bYEj^9m3WCSdZfhLOL_LU{+}%R>HReAAeHYTf|>ly}smuV8ujU>q}B_k;W$-@1>K>p_K1uJ*w;H7-cgnVmbNlvCQYxez_`w|2$q=UN;P6LEZEj!1>HHFmP4n^Awq z_jAnHb^@IEP?Lh*}1R}=K`Kyy7wJi(r1Tf#mqAJ3L>N(aQF9|1+K-6XW-s6J= zC~i+UwI9Vx=1-d+k-rH8cRj}1EWis~%Po#_|F8QV<5>#x7CAHGlVCxC$B%J1VCF%; zIP#Zi{XC~(PhsW$h8O%W|6WQ%SM5u^MW~<1%vYK1RB*Z={x5#vk9`^O#-IB#c;_$s zD6jwZJPc0P1^VZS4#7qiKUtW!!^n63_Pmb=7E!)Puoxl!sPWOwM)BR%R^nK?! 
z`X2vy9#qHtWuL+4?W6lznYeZEtpH4nhIw{3#}!9)W5%aq3LMym&I4w|j{4wllc3c+bG_FJURZx~@rKy} zbbe6QDrvJnx~IsnDC^FwFo<^q3ALg6NqZV^|6)sVc+{Fz#g910u>?h0g#y^F%dO@( zjqXz_SlNd|{pIY+r58)DD!>QQhlWc*Nu|3-@?N^kPGV`V3Rc%wpm`6CpcNz$9ORiB zufd<1!p@MQg@kTV;83xQ>blLS&a;7Apb6ar_R8KMZSp?KgEoFTu{d`eBXRg_P2@yz zh*`xqTsZ|gA-HJQ2s+n)D$(E?c+@@UxdMtu)X@8v?yHDtKFD~bY{!Mz?oO* z??)`y!i76P6jW};I%kBT`&g{@5o=rXO4$PnJv0y8#_FCoC_F^-9wCl5os6P6!LZ20 z*~n!RCi$5Z9{NHFCig+7b+|Cdf6>D0eDcAzdaRxKfZ~T0p2AkI3?{H#FR5*5-&w)# zTzJ>JZ%RDWE`Ya4Xc6+yE=@7uZ>%LYUI&T=3V14Qc zEBZM!X|~OaCO|I5>J>N2n@)5(=yT}{!eMpEWjU?r9ATg^zv3ZAd?J)1cI&bMV?I{q z?7}knYNTU-<*5G5#HD|&r(yk>hE;{f82OHb6feFF&4DxX)!*_ztvsI|l^T_d9YgJf5NJTDkjhjBEZd6GT4j^!z@NuboV%1avkJ~H*P*s^gu zvZF@M*iXa5gUQDkQmBrb`$CQ2gXZ~JI9ul)ua-f@#fz60^Jk8D4JGdh~lVkgEFhi9%9$Cj_j02_YxeIM~N1iYh}4mBUp&~5jp+s zdGWnd7=Foz1VF#{Q){<$+zKYb+6{pp&0MFjZ@wRSZY)xSjqaSyi=-zot1I$uPqw1} zU&hEiYBHLG<+_o>jHkLbqsyf ziTX+oLWR{+z36^6p`+Het)dWU5_R27OcA_;@thKIXl{WODv$Sw!oU|=Kp?vBI^KGg z!19y?6EW+o)>MhYRHDqHG!$PsbcvF>ZG*9c9(4uF+izgL9D9`oJgK1OK6N_4o(sG$ zHUtP<&cbRARUCVzMFp+yxqMI52Y0w19)7^=0oL;34n+u^({Yf`_wo0h#%g^U`D~xF z!y`(FM<-ChN%QP!FBbtYersMal8Nqtbc@{3x`X=v+WXRID%&<};}#h<+f125$XJr$ zzM@P?gQ6lmDPzbGjY6c!9LiXxWS){K?II!y(Ik}2BJ-Sl=bLB0`?cS`pWj;Vx7PE^ zTF1Jd>$WJ4W)Wu}2{ZSdoOxHbc7tk&)$ zFc6S0+kDpS4rP@3Go2<9`ucJ$w6@q36OhJe^bB4O`io^x5;$=p$U8CWoJxnTV#s!s9V4X06xv`w?V5dCXkv=aoZ_PmG?vn99<`p+Do`i$(Fg z;qT{XunC5vZmcY1J}C*lUA#R9k0|lZk!eNV*8=~A?^?AlIo?>oN&AxVmGy=HejhSU z;tSGGe8T0ksyC$&Y5hOrWIkE_{0w%3@x5rLHZ3CIq;pZ?q;pZ?q^U;gpVN9EMTbL)*Yh?4? zG6d5FP?E>HWAit^V_ioYU1P=Nko%1@QbuPXPJ10cR2#^MSmKz@tp5W31_cb|j|aIB z?{uP$x+^1kaPavB708p=rZnxFe;|yaX&y}Iw}RirS1lQpMOnCN;SXj){Q2w)4bgL$ z3%OZcFj@wGwqB3$+JlJ#NC0X~$#4%ce`F@!2=AkKteU-RJJgGhoYJve=0ajh&+;rm zUl$!nC(d_@h>1mwH;MWzU}K!o>Q5mr!F5P9UK)IdS|0Q^3dk>D7I!Q^kAtqmo!w54 zlM3ejOi(ee)nC8}-@Y?0K%RD&-P%IF;u-99R?v>IW;SG1AikIdb)Qw55Qp1;T8UvQia_17N3WNQ@=-r=FXvkMOSJjmhH+XeB- zQGSCis0;7s%V|}nf&LxBZSz|f`H(R4(HU2Wqmu9;jWH3z=(Ho7Wi0p=_&YpHKGq6# zBKw{0DpULe&#v~l#k{M@xDHM)PN0-|XCX`T{*LcRa zwcJR^z2Vg{@Ry3?py3f3fjA@~EKey3=72c5J8ansyiqbc*a&)+$7e#Gw0#CXAChwV zu1kx|FBAPc;NG+eujiU^LjdiuIecmk{0a^l6$kb9E1_Jbrk%3jyON`xFJBG%G-~z^ z{s{1EvOVg+Is)@YYCE;ISBMi(QM96<5$IUxi?m&(&wzjIzRwlQn+P!HBji&y_|TSz zng|RALH+AM*a6`h_&a&)oO|;>^0Acs@DHE-C;$BK@Ch^Sz1eL19OfigBl z!}8?;S+sMrUj;wJg+sL)%vI)xvD(s|`-geK7v-rD^Iq`PdA+=+l{e%&CUcut{IIkf zGUBwW>jyrGQpulkxy6W{hk@1Gdqf5?wNb|MT=B<#Q$-Kd2@HVp?WBTyyX3F*)OflU3yei~Z zU$=Z7iBH5KJ%5X%Edwc-1@8>A0r1Ai){ZrvoZKkV_TzrxL%EpQ#mXo__}+QzwA3Cp z%wk43(L{xtwCK>Cfjfba_mC142@{UTqh{_iVHW3T(Fos{h!$99ynV~&;4lI77eil& zjv+0&olfjI1$k6^vu^gT^WcAy#o1K(g#%fCuc>?odDjnz%&54;xY2f7iF50@38!6QnX*|U81 zsHEV3;mND^CEs6GaMHeHd}V#`+57!IWSkU#kbd4b?$n*WD}_kw{~0I8C##>I!CqPa z#cJT$DKJT$DIz-xZwj?{!G$qQpt(qQw0gCPHG$#^#x2@I-frQe=R1fKqAii?`CvTS{=K=T6?7@@E$v;jAP#Z8ix-+!5=3(9 zR#|0$D<*Zj>4CnbZXhT3P!Hr|2MO1OSox9h_}Q->zz;M7J>Cl2!Kd=O2G`34KBOYU zq?9xa^EKq;K5v4)xe`rzW?CJ7B+7Tjr#w*s3Gi^9dlLGL82Y7~yV&wAmgX0CZ#U#u zONz%>c(cK;Y4{#pn(bTS>^I`w3KeA}F`$=@+`tc^v|sMy={H0xgL1V=O=T3HF*+<# zK7%E2>*=d-(;!bOv%`4cjaGj-y?Xe)$9JVPaLdymX%CIc7T|~GlgG@jT@^q&OaYGU z&eX`u4RuDPyBa6+$!dHCyIt2o z?QRA7*uU2yor@ADor@AD?Yn}5K6cgna3e~bbS_GqpJw3lm8u!ci1+EIhVns7ny;5O z41A5s6|9)|6yVXA$mB)iqCu=x(Al*Nbniz?ZjZUK3nC|W<0=-#A#B~kt>+GK@7U_T zlRg^y$2VtC8_l(8;HDaOM!;9&u03G{!U&<_L{3Y864fDVFutW%n8IXKUi)34u5| zQ*++g4D!Y-`lSX-L1KvE1he%Ef8d|PI{b0q-y(O|_n9brII%)NS*Qi_Cw^0(jxIwy zFcEsixZ50bs-b<^9~$Pd_X1lU)Wi3lNww8(6y7t^?ovUB@&ZSvIykyl&p^_ z$l}q)Ym!bD1qvu+vOHP}!y$VPHM0ch@2gDqjZp)g2^IF`2J+@cBJ|TQb~J!4Wi-Ul z4tS}~&G&)?_}A^Ece84E#fLU?xkuQ&gnl2*p77g*N5o%xVMia|^(QVk%FvC1&k4&f 
zB`h-!hh}%t4N6!>VWLeZ1hNRA1L?w9?(+aXDIiF)h7}NB#w_L)fp6Q>_Ujsq4l`Ip zL8Hnz72J2;KI}uFH_O_+&biwGk7Q0*x#CS}k&$Yu*F#ZdbbPt}a1!LN+6A9xX!p?| zxsTq=^nknP@zBsY;E<9>jI{oH@O#9Mw(J=PUdlS|cEN}ny^_^-7=}E_5AA|fRd{|4 ze&hGicf!be@nWisE!@)&jD6>YK0$6jrZSN0fM3Ro1p<7Qz3*BUbH8 z=7-h$lJS-Gh5vpZGEU+V($D>2M7cW9v#qQ{=9ASpnNL>ZGuQ*$UhksQw1|Y0&P9on z&P9on_FcijM`G3ca3e~bbS_GKM^e{$JHuJ5<_mG1?(rck$$Fi}A-G30GiEttL;YgQ zDVZr{kwNU&4UT)vI61T|uRCr8I-ljf+Tg|7A?%a|6aE9}RCmAmxaA)czl%ppLUuig!Uf8|D|Q<07QCF(Kx2bDOC; zWRZk~q?;1xA3Wz-`hG#bZ_WAVr8;omq3-)UeMnjmNtI2CJ%Ia8{xD86AJ%El&y09z z3VsFcrtN73FfX*Xn4J#l+GDSJ=0{E|VOKY%*G|5GJ3TO!uQMgHadZBGdFsj_s~3R!#sB7sX@vZ*|LRA3`wYfdAW@$Paq_P8 zJlV4qkPl(nFSG=GA8BV_$I3B+uTo2?#^+ZGNU*x!cjr3+^u3JTQCl337{)(Sm%{Ut znm?iw2Ix__ot51*%-{Lt{7x|y;-boG{;i6p5T8tZ6|~CaLt)E?S$l!s<_PCC1p0&# z-o))mTst2+EiSK}2mJH!ST%2EYZYcx$G}bB6^5zx+U*^J?=-92uyT(C16sCB0Ams@GJeE* zxexLf0y`u=t!u#}u}uOjo3GI#Uo9m?BIHjx+*16)rNEc3qwQq)F*-zhQN!*h^y4T9 z*9O^u?_71Y0kJ}#fbQBjiqU{BS72Vll{p9gJus&&kM{>3dz9#>3%p^gekdvy^2~`F z?ufU8{>?rxc0WDbgZR5$zaPIKfZD46I<|AuAQln(Q`Kh=nNN0zZa-v7B=gCmgLCsb zMmR)?7yibfubJZc6&(5ji=<{FdB}XCi!LlGl|nxt1ulHbbMC+w=oh2F3s&t*=7-h$ zlJS-Gh5vpZGEU+V($8V3zAuVFQfOrzGM}u*$$YXJhdJ+`6e@l8&>|8}Iu|8QIu|8Q z+IIyf{CgeJxhQecxhV1Af%c=DMQ5>Rht^)6vIoEVP?5_&K(F2#eWT;TO_-y}dStth z%Lq2O-h$o`?j7!LF~KonX6;+awtU5=1nW;V_#`J8`6Y)tixlr zgv=>fbp8uhM2;)Wr*nEA%dN(UlEg)e)~3K5z>Cqk>O|0!dsvqy-3A|n<=4-1XJnE6 z%?WA!r_eX#9~_?zb)k=*oeIx@A7Tay)xLl~+iuR;E4xmEpUV1OXJqcnqPR~L+rNSS zr2On4o7rQ~PpJkCID)T&nO_}opbK-a(k?>>jUod4LKvk1QQtO8n*P-j^ac4p&9 z{~UI#P5Lsefatw&z*BgQYYqFU$u0NoauFR43H0Z6EVLD-%W! zN^q`MUHFiq%;duih_4hBuL>=nEyCh@f~B5ErC`)AGLI5JI5NCDJy>GSfA5LAvjf~ZBetSI?1mY^8)#A89^94pP!GCTQ?a}iFNh}HykFcBp+(9so+kVS^_p!~ z9UHN)^ysc;x6M!}0c~x*z^w{(VYwt5hEi#6W?ecx9tUeoywvnRfRAL zyF%0-IZQwqDlY@%Y(Q^6Cp^G%j0;_7GCg6Ky#_hW;8W-w|KSr7KajrbKl9K34xboG zvwT;VnZ+c(QnL^r3}TWa>$OBdS5^|Mrs4=Xkj4(Z$&%9}*b`GxcX@~hWL|yJX{HiH zht=iJ?JgX|L?n&fsFmfAvsCw8qhmN^#9C>5DwGig9acWI9r)xDF|i}x8GPr9b@L)Q-d{;IYMF<4qleh|1OC0h<7Mlw!rX*c5{8GV zA>aE*JS$gu9OgARUGlC0K56*XPh*uJixlKT+pk85A+6D7k6pkgs`FGK+l=O)NOotAMDPo>!W;&S1G$Eo{?S!S}swJm4_I3AKZ%VIHqxZn(;TZi)ch zYb^a(9zmSbYkKA^{fGb(*XInP^J4>*%J@Pj@KV#rsj~*qU(8ls?^c<|j|`(sQm?>V z(L=mSlM)u-m)b*{I2+H0luQ@ivVp#if6I;cT;PYfBY2xw669%rys?XePd;VU+0Q%`nvIgBC{s|PfQZPSMjXh{%DYOQ^wJptTaao;QNM!j| zMABCm;uvSCnHtn{Z*WJ=(q5azB5ii4RB6(pkGR{jS(h|1QYGzijMw>@$P=j@s+7oN&AxVmGy=HejhSU;sw&ruA1J> z?chgETK~^DIX+qa{0#QZ`Fvm)+)qh3>0FdJ>0FdJY2Otb<_@fSAJAv~UZ0GU&P9ov zI>>fJHBMtXVG(%R!9MKGtl!nMkSCdZWNEEg4BtzBR#d3+cnqPb^xG(r}9>Gy`Ky+_>0@A0>5 z9H2|ldh*(z6Y4vIX>3K|>B8teVZ-8$2u2jUxM6Y%YQd-f;y9xS->J}R*5+i$hXfw! zXjBpapIAPh77YB==Ur$jyTOn8gNKk{2E-xBCd`k{ofShre|;m~KEsW8_)WBIAU_!abEiCvjQ674fvq~_kD>C#AH9h0(SAWb?GK5MWo5JoOJRW z_>dgnUov~Xh_NK>UF3#1!N5RF{%0TL88$IGdLTS{;~99~@IlpqvJ@P8JA8G6G~`1<3>^(S zp^l@!IKn-60DO23RS%B_gHQE|l22a1Z#S~xiUPhv##sjQJ&?aO*U+;YZ-;!ZA4}P@ zXAd!Vj=tc%sujet;CSW@kav|9%1gMpb`A8^i1gcJp za!4rn2u`UC;`s61-h99tTN}&Y7y+LU*X(dMcnNd0%q*w+VXpBti$g8d&@aQ_t2g!o z<`=UzJd<5_j}e{K75>TybGUp>Y*;Haxlt!?${1ZUBl6T>IV%Kul_;B`BkNOzkyv7u z4OJA>eSW#JCIioG(~~YLs>37OKy_(Fn7eCd>Z=_Ib9Tg<*NxS}obiVBRjzH|JN;tM zJ@=guhdB0X`R?=-Lwvbc_u}E+FivF~Tnqh){98552Tx>Uk)oZ00)_L~mO-(qK&bPo zuZ;`&40Y`}{1wOVjq})oNPs;1qyj3-!Fd%y-crCJr_wfJ5i4&joQZ_^WV)uZrR)Rv zg!FE@D5C}aOz*PkbiM!&++AwmZN;PYjh!0{HsX=!AaTE92jok2INb#*1W?P?I`05^ zJm^bUFCFWGKH{?UpJPxj7H6F<@u}oParbr^{%hXfwzd7z0s7p?VXjv@_bMOsx9l<+ z1U?x!W|_J>2Yj2K?r%|>=0h&kcp7{uIMP36R=?2G{8R)x~Q`(eIs42ug=w7

BEHucg%5x2CY# z54?@ncG97RfT7?g8&Kw(Jore6F#C=kZDRb<+<_60OfehBB^JoHoa*N7h2IBt(I|760nCTW)H=@z z@sjD#u1>z^!st|Mle9N80eQOKvUUPqDhX}zjkyc^zT5(RUfk%g=MU-E`~TsS|KIq; zHK_GAPxUmGh%@^%Ue$+LuiJH967sRGelkyn)A1-^EV8*;brAE%Ej+#tKK8eNuH*KH zxmu;#TaIsgGKifVE;Km!FMWR9b=8KAI25=o^Nu9ccUt97{$&C2$)e}ehuovf#op>NK?;GEv_S3YDP{8(DF zOaZM|JpIgk2lP>ki7buW0Y0g3GnR$>)I3sNssVqtZ&xPHanbOjFC*sX(+0pVF7a}> zC6=i(5pn4j3=$9FO7eqZBVH$lHz#$Un1!h`w+ z^>Iocvaqr%A+;UACjkjQVU4BJ82)m&SU)`->MQsW(ygJ49FM4p9Dum%V#v0NyDhXR zs{ZanHPFolI2L<4HwmDV%9Ekokf+tZ6y)j#`Pez9gW*QLP%ld?aIg#`AXR53)5lOh z%02t>y`V1C-`ieE9X&=s>t-%7l|kHcx^iu#4)m4NF<j-PxJLLNg-K*x59BHU0QsA_|apj;Y`JDQ^8@ezQ~E5wZV31lTyzXocsqoN#CGPCfzN=QjxW=F ze*v`l0&7->(GXVg_=Iq=4)`N6URt{&5`3OJHdLoTeP_aws;3~{H5ankT|qsni&7pA4; z&@O^`nq6NmwvGF`+%`0iCDHHFWE})Q>Tf%84Q}9&YyN1Etlk1gbxUPp^g zLSN86`-hI=P+#8t@toI321V5M!a-XC`dmiu&=oMfXG7n!uiw26-}Rtg*L>P_JYw`# zNl|~zhQcH<<4CA4zmJsB!@<44vahyoE$C*A#0GJkEs$?PG}m`RzItatkh$tXK9t+d z@!kb=CLejU9tFVszQdzJ#+5ty5M4)lUp(;M5^;#(*ezjH*&v%Sa)}RR?Hvmg0=#6R zP$;xN5es$Pn0NI;K9Ra1B=|MtF}^+d=CG@Z9;t_Z$@$@1L5!WeVATfo;xi|vep;Aw zBRXDT`Cy&<#2EK#jh8sc(>iiqx@0zu^|iR=f8d})oZHvscqsvI+|n3#PXoVArXD9R zd0O;tu7dekCg@2xqvX~?e0V03#q0>wgJip9mALVcSBcBpvIXut=^3NHOqB?zBhsQ| z7UGRodVL;a6}W#2Fg!?se!#A-p5AnrLpn4^#Xbu8lWC?3r^A~GXhUBb8#9A4>b==1 z_0wkt`?zs;=WWeF?C{mMCpI1XhfgTK>p%0){|=uF9dSCk40`gY2lh`a6bG?<`=W_P z(3QzFtoO_e!XsH0vr@OZA?&^O5pyHBU)aQ0(oK5_pp9+&#rUDVO#Cvj?2XAGpXJN* zCZRY~Ih3BD4*3uf-MD2Zm|I%TSdui`$A#W?+}i9e$%LG6+uB=!2U@K~E57nXM}&x zb?*N-2KDImVK>(11D}iv?G4$f7QWNeAftGmf`9!ohgudfI9 z6p2eRN6aBkAS~kcJVXmkti<0mYm_urUIkAE7kbg@- zJ6jqX@@$X~Yyw^VF#BE&;axZsNer>DH-mYj9%e3s5dUyWHC1pea-pr_UwL-w@S*IY z+kqu;-^pu_2{lq(&+7s60iy;_(84+c1#`2u7aw9?%Xwe?y|RgVYxel4LB3S& zrx(qhBzlDV@PJEWYbo(iq)bvf72G>ie)K$mJjTR{LR@EV9x<`(x>*K%ztc5367?Ua zv5(`!&-*N4zIk34)35cAZ)qu%-~m3NotZhD4Eb11&LzgU9Az|ic`0CTzW_40z3F0k z2@Tp{YeH)fQ~fUs<3Ww{(<%jH=hJ z;e+{N3_&-9#e%_?FZ^QiW*q`*%8{>pB&>|2R6eX@@&o@*iw@_Vs!WLI!l$YOu4F#> zMwiE^>`jhOIEeu+C7d|)XB^^MisuQxpP#`5aDolXo#gmr51aLYoAB=@CH~3zVPIc0 z^mkA^zj|MCys?6l_9f#h>qDIR`+dkbi5Eye0Fffnc&*{_2BooMqYDrtZ@kQw$b=@ za7Y#<^!rNbmj*bl2*0PMg98F|QL28cUEDUADSi z7hXh<7VmN@%&%iYH9so8F1(dRUpzkZ615ozWk!O z7VaGu>u0Ng_oU0|os6E$W5P#^U-)Jzpi5SlsKyV1zuV{T7oE@_Y5(C6jpT>|itOIH z(`FX>!9(O|U(@1|-J;xb80c)b4|;pw%*G>ij|PrQ%6P<-oujVzRRKL1Qsw`U4Rd}K zMyWUx*pSBls~JYXGxUGGl-vn%QO^z|doSoOo|O{tVE-3y{Lo??*#&+|EyqgVAL0Z3 zaF=Z~ekvaY55enSe@?z5DY)|K^t7Rm?;U{&bq&igq01n4+KRzWe_% z&x?{DNPqu-#!25bnScJ_lNG-Df6XUyOa|40Kc+F;z4r7!#)hyri!a#A`(@EhAU59Le18h+DbCmMs=nFuNb7`@ z)1n9yDtgc6_VWYysGRn_U&96U;^>=HzplVNh}!0SA>=~}wXEl~fKUFCCU_rZWJHmY z>n>J9ex`2C)5e{kd*OMNqb%Mzuf}cRHd#HXQEVXC3zsz~?06{4Lu^sQVmm3=he= zEsVmcb{a@p^P%sB@#AOUzOyDZ;uB5RZR}$~$fu9U8FOwX#4$UAe+XX^)kp*m-L(iK zT2N_YCaI;ZlHZl!es$gDRRr+K^D%X1^(^ebH0{z$_lZ)hMkIoBu&QR5`)8=9B8WV$w)5niKsGO^e4JGzcuPZ{^vp4#GCo+!f2>WYixfbBidR~Xx^yyPkcfdC;T~X`QM38HpFkr zGXnqE?8X!M+uTDsI5c=S|VrjlQe8?BdqZjlOk z+G(ErI}ac}8E6Y0ZchT;8kgEuiD_9Rme9McPFoD+nTeJ?5`uoL6j$oQa6d}kdFS=F zv4_~c?Jdr3!V8$5+|Alh(3O2=e7g%>0-wO{Nk=YG@L~H{Ci^s70R^)zb?mSQeJs7a z+9U(aFK*~@%Z7YQZca_{?V%aW+c%_tJX8%QEO&9M-1rjWxkxeNDY4Mb|%pe@*zHn71Z%0!268i z9F2Xzzp`R#mRaW%AL`(B%=rrO%&x+qu(>e!?u6QzEdbYb(sm_49wu!-y{#cC9xL2J zC%MC@8vFWEwaWtHldUnFugbb{C`=(pF>pQwa}?!}(1CbEH2T$7Bk(^F$ljxUc~>UZ zO1*pA1;~r~R%U+Q3;wZL2ZK*<7@Z&p5 zY+on%lm&vHQsj`B`ve^wI=XvL?7wvWJ+odSjUDvp*K%B>9K)=OgJT@KaS%v#hm+IWO<*?OA|-*i@FIN?t-AP0J0f zrU*{dW6PC<`;U2E#t{du)!Zca1Lhj9t8n{?Lx09Wzen+W&hO`Eu>AH2&L>f1{R!>j zI~G$-c0`FENjm8m0`rC_@zwj1`}WAf!64O4Sr5rmV+)tQ|TRVu=So8Bd#j5Aq=6=Pe6&K}YBPsH%rmP#hI`XSvw(^B~rdk8{%S{z3MqH^mW}ur*!> zHsx#sJ&A(PlF$qAna{8g5He$c`642Df0b3YZ3^z~nj$%lB5Q0c@Q#%=I- 
zYbmHiy#RTSl*RMXmu9iyGhV56P)FU4?YFZ7AC*j9%ldo`c68y!_ZwpQc+|Nz%cZ3k zd>ZF=4-CvfKj0NQ{$|LpXpP1D)^{qPb@Z+;FW+K77GG3r;!F4uLG=gv$fAg{-smUC z>G7c0@(3OUm^Z4dkbM01GV~W~ad-4;i=f)~P1fbM{3x%!qpDgK^lvf8(``&LG3V1F zC*6+~Vw$JJ85@3FiD<$e46CEnvWpKj@^sdo)tQ%Qio*IuxwMj)B9lE1G7^VyH z!>>;}LZskaT@O68J_pdDyFR{+`!*}1a|_n??$r#)#Xws?tcQTOGR&PeUj#qup+)X* zhCIlp-eu#4lLVAKV;AWU^`qtP^J3C;BH$}|s$J+h)OXD9_PfFRk7h|o)DuM!UV9Br QHJA(LlhyvpX;wl12eaK^8~^|S literal 0 HcmV?d00001 diff --git a/examples/denoise/data/data_1/set.000/real_atom_types.npy b/examples/denoise/data/data_1/set.000/real_atom_types.npy new file mode 100644 index 0000000000000000000000000000000000000000..19f40a37f8d3f024d09b7c19d9ad7b15da0f655d GIT binary patch literal 14848 zcmeIyy^0h85XABIEAc5dTZIb@L`00u6cYm@IrKz9Jh>AS@hN;^mGKw&hh|pcjOLP} zs;jGedS>tYvlmaFzr42lvio{?b9VXq;_zsHc>Mn1;beb!dw%ix;`HOI^NTlUzw{@k zA1=>+>X+|MKb`&LckbUi+24O~_hkQV|BrvSZ|s&ocrBd%x7ZxK*xY05zt`Ly>*t&1 z9jo&-Z;jvlz3}zmt#J9C4~Ol?&4+Y3_*xEMu{rAWqB(TDs4h>P?ml$L&sXl=`rQSt z>FP!OP#w32J?W6HuUtPr9}em5SBHG~8uG*X?FZG}8!t|I-XG+{C#(;*m%i#av}gU3 zu8+Sy+#dXNeCnseDW~`5H)ldVxqNuhzSY}@Px~t z(4jg$`RGvJgnahK`Ehfrm-Ey0(Q)(Y!}h4XSeDYhTcMm>w`H&9j%_(ng z`*D48>2T89H_hXdS8N^M?!8>zz1+q7xaxcR{g`i_`Q3*Or#f9e)Ms9OI3Ki^x;#02 zbl5&VIu7OI6Y`l;pL)MROhEdbEkTJxLh1I zhmUR^Kdi4@KYy{g<@%xi`sn7X7q@#amvk@p!QC0s>!TlgZ$5qUpx+go=F4dxE+6uj z)7wWk-#&b}I+U*t^{LCJ>!(jX_4bwfzUlh9k2s@q3S{ncA;tEKIr{kY741W>ABXbG=}<0Szy0{kFY2dn_jZv#(H*H*b#HT;kE?rM_JQ6zUX%k{ z$ETcWuD+sup!_M_xt3y7#xf7@RUh(G8&B5)n#^srVw_lDrr0d7k>9~8-p?Q!FJs(`t zTbHZ9s1DoDKh^1cP)<=Ds^hRaUgWn2obIh(AMX9(knTOwp?a}(9P;DPUajMhA6B2Z z`%Lv|zBxrX?ycV(cjLq1nyy|nr+T?P=;l_()#c;*>3I9<$02|Fk6z!D!`HeTy61%I&>pb4?JJj~51(+mw@WYR zZhc?wx7~ZWlY7A4Yr4~)*WT^^*S+m$J`O#HeQ@Y`)b-JEem=Z&)uEipKk52A-#mS- z)AjQg`Ru1o$D#ix=FxGe-+UaJr>>8V^Yh`Is}AK%{z=#0`R3_sovxq1$Y(!wIu6|% zFUn~@eezc?mj~7Rd^jKMd^(f|Cx7ea(P8`P6Ha?<_g*gMp4+{bd-z_U`}n@-kPat* x>;LQC_VItrTpZHj)Ta*hL%M!ky?5YKFQ?0aa@6%fKD>Br-FvPdw}<~HKLB`HDU|>K literal 0 HcmV?d00001 diff --git a/examples/denoise/data/data_1/type.raw b/examples/denoise/data/data_1/type.raw new file mode 100644 index 0000000000..e2338f70ca --- /dev/null +++ b/examples/denoise/data/data_1/type.raw @@ -0,0 +1,46 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 diff --git a/examples/denoise/data/data_1/type_map.raw b/examples/denoise/data/data_1/type_map.raw new file mode 100644 index 0000000000..c365ac55fd --- /dev/null +++ b/examples/denoise/data/data_1/type_map.raw @@ -0,0 +1,7 @@ +Ru +Pt +Ir +Pd +O +Ag +H diff --git a/examples/denoise/data/data_2/set.000/box.npy b/examples/denoise/data/data_2/set.000/box.npy new file mode 100644 index 0000000000000000000000000000000000000000..f6ed7286b83c88d11ea61af3922751ffea7c68d7 GIT binary patch literal 3008 zcmd6nYevW!E0=y1No`(fwX`+Hx0{Gb0h=dFy{9vzp& z=N;f3k!9%e(~YuViAIqbPcZNd(RPz+poDx0R>q?gfo z2uE;zceQUt8++fyC8iDai%Qt$l@Q$N>`m|96^!a$GfnCJ=f#@DxuI316Q80nfI=XLv$X(5uAv(j5sUUdD_Af^gI19*qcY2 zko(!&l|Da&3t-dpJ+10;_FSgy)AtPi7`XBjSX80o7P@D`5tz-FG6H4nJReT_gw}{K z%+>buea_Ky5iWp%xcIB!mQP2B!!aBjG(^oln9?d?DX4xd2hj0Yy*q?8O zYW7@*yFc+<6&SekG-8F`R!HX|96|KW#o@$@?0)Vj+v6OU?gdvKt*N}PtDVk6I08}3 Sjq-sRcAiPksi)^xVfY2*nlxDe literal 0 HcmV?d00001 diff --git a/examples/denoise/data/data_2/set.000/coord.npy b/examples/denoise/data/data_2/set.000/coord.npy new file mode 100644 index 0000000000000000000000000000000000000000..3683dd0ccaa48f6e24fbe49ba0e110b620b7099c GIT binary patch literal 44288 zcmeFac|6qX`#&zrXb@u?*+*o{Xqbq^>k<*7h%8ZAvXre2QIb$3g@i&;k&?>3q>&aa 
zmMF58imVYaw&;7mPiKx_&Uydy`|JIAJWhX{d7S6DAJ=_fbU)`>p4WZi&8$qf+Ad-8 zV+xXU*>lkOfZRHg+&6(evOQgdcQw^xcF0U49Kk ziKIf+P*5TrBmMvT!@>0DpUo~eHrO7S#(w>)4CqgXY|G-o!D3wR51ja4a5}cL!Yuyo zJ04)z87sRQTycf~i}4F<9pz)=MgN9x_-$X|zwG?J?*jhIKIzZ%Ea1QPU-;*pk+o+V zL*(I?9o(OGnV5dZiT?$sW15^lMwvxffU%z8*J7OE*J7Np?=PJHFFOps7UK-R7UQ2S z2ELC>OklJK_{KeDhn2c zYZBXc&jk%&p4zFOtXasw*D=L@a5)Yv<2$nNp2b6p%kuf~C1g;XF1e|uL4+XNSC@5y5oO(C9BH&Orz~!dFxWIF*?(9VbSDbBsjnz7nIsT(FO3A1>ynPAof3xkYvvw3D5XH6$A|Bn zs<*J~RaAq!RVT1h?$nruURCIcczLKxnho4)rpiX{Uc>B0Z3m;?tAbEUX#AcjK{(hh zu6gy@eav%5>SZ4hDkxhVTVlC^jy0N{*!%i8Gw_zliurv}1?$TWCO@|kAY+xRCc8Ha zRP4F<^l7ImY>HoT^T`oW$VgV5*rLG#fm19y4d+y$K3_kQ!x;z0hkQbheCGqHWO1oj zHY&`N>psC61%cBpYDuF3KU_cQH|O_R6$;KYHT3Qz!762NZ6#IYxY3JkXLRJo9!6IUj>)VxJ$fa*6G*A>aIcIMw*fxM!>86bw9aMs( zBl)SKI2>^3xrNy#;Ni;P=5(8%O0df(s!vIU2zFBHXE6%gK-Il$Rt&+Qo>1?{1+xWHTb{ z?8*7eI-vx&3gg+2AwIEdrFf255usj9WWD^j64dD9fA|E2(H@Lx8LtnV#wI+s8a+U~ zQOYG-HN=a9IN#@XXJn?aT9w?yPQ(M-E*|#l#}T2C*~Xjjc^We`F_!bsr+|}wz;>U( z35>JSg~XZ41M1&+iVKk)3L8XQrxQTZxaAdHiU7|YkMir4P+-%#bx})Ch{8Ubtj2xS zJbUX#jy&hI9#P==?CLW14nf#^tuQb&NEnXPWQ7~l zQb4m+>}B#L5}b5%(Q}CrhN?KvDve?a?3wd=(;|Kyb9N&-NHwO=6sy+oF%KX<*>^Hx z-DWme%l$%}D4Ix9ENbm6{GbXZ5f0p2i9~opQSy#Rcf&B=_Tgx5Dnw{dq+Q+Um@8Sj zZZMV^GCvd(WoK2Ph~A{xVoiYgt${1%?O34YTcdz0;u$CF32X8xQ8<&4oL;b;1)7Xv z_ph5*h2#$&-Qk;YP@Gi5H1dNF3LewXKKq8&8#aw;@(>}|O1Jc;@|M0Ki1PRZXc~N>(zumc}fOU5A>b;+(3ZE_)UK0jX_Jr{)T`3 zZQq4>;}_1@cLD!ppY-Q>7I3CTf7V>Kkr;@V2ZnvdfBqd`h);gU=~zI?ofRyeEWp4S zel5lsel5ls`~JfD|FXmIYcbC7YcW0_td)G&brLJMRyfNS+J~|6OjUQ0$q-^ikcf*Q zfB~$M8+<>2DLZnj|A!yCp?~-hhZxY8uj)@5?#I?0UE^x6K?aY{OhSe@cJM#_C~Jix z2P~r)MSeo@iNRJ!-CRy0EH$fnCV2x7*z5Tg`88z7)S9ZmI!G`{p$F6!;i32uYs8PW zWDxp3%l#i7YhId`{=tI?`e#P(tD2EP;0%`ml`aaUkFRQdY$5`A$wT^_3W}q)E{lDT zD-D`*`00~@MEGiK>SQ8Mh9I6FLAdcE+WTAT?V4OOnDx-G^=uynz7y?l#N%-=-BYpk z=%yL$`LWWqRWB)Ep%fX!J@J1BoADpH4rt!cPv)wz25O0*k zzFK|H9S3`TbLtCfg+XDD=h>Df3bakM$?PyBLX`amBQI7F;22!-MEMN`IiNj)V& zl40VCN6o?zP2lqrLUGZV@~eX5FN$dwH{D2;RgR@Gk?Ue&6{wKs+RgHA2nYJim8&=v zu45l_Xm8%hP{F>q_oCDuBH+TC9T2seo(lKBRBe=fh<@)0 z>1k^D{Lm7qPR|jiLb?srS4bKUv*s6zFb_`Heu?DrVBf!d!r%wSz5ZvO`QPCagD<7} z%he{a{T*RemcD)1Nd;DI>T1LTFUZZI2MKUjcDUIiegJ#yTa)8KSAxx|fDy(-p<-Et z^3HwzSZ>XfJulnCMbEtlgH@z5>M zvASA@48(^IGy+>m;P>fc(3fj?czt#Wiy(yzwY@K%=|3VsSlG-(%#jGJg+1O@K!!fE zgOiO+V(^?cM@VfYg81H!(5q@>=+X|4EXkIJ^x~xRh5LvQUr%}KjQE6BW)v5jP(i!@ zHjA9FX9i=RiFR1ph4L5a>D|kg;-I47^c&qBGnkG_Gw^Od@tU0ocBb2rfsw zG-U9QYfZH%%n8_P6(;b&x_%ZDb(E)E`k>A6ZIyZ-qhbYU;%muMwZ9u_=aG z5@F5O?ZwY=BEarpxA$>91=#9cR$Qni0ay1!8`A<|@Ub$5_2`_08{=M%4*J+x!*$;% z*6Eo05~=aC=svbT4USsl!w!9uHmrgJ+1Nz9;U_7?Cx_mTyL(0xp{enm^rx$(*qHsU z4cie9+&Y(ZIm2}tEAPh$&QUDzw(`-Z&F)k%ELD_R#|Q4E*({|(&8*=Q4^b)D-LIEc$t?ic*pu`5-t5pLszevisDeeL-DWe`ot*GbC%#bN6*sx5shJJ@>WkQqna^N0 zhy0bI5N`;tk!U*OLWC7vQf5YLUY3ap#ZsZBxkleolsXj1a928z}JLJ!Na%D-vY5A2-VT zDh&69Zl`BeQ{d?dXKn53eYBYH18<4(8JG%Z&pJ0LD(oy6dAH;_4uWkYsw8ixV<%!> z?XE$5^8L~~y+=VPPAGnOZs)3MEc5K}TsJ=zaI1G?{t0yK2vZpslRFD!MsZ!{LVR-c zQ-$TBaRNBG#j-aKu)tm1Y~TvSC$}}#E6$~fqI@*tjiEUUoIdAiIFItIm4dmtsnhJB z<PtQy$Drob= z)%wvZl~gKp+nKB1VU1tBn360h5%w!kMf(Ye(gt7rbI$G(U8qucJ_U@_kL z2TuGiI34>bR4bO@u#oR$-g}rjzl0YS;{pxZ_BU`3S!>`5ouk+*J z7ta5e9fn_vafV-uaYtd@P4jM(SVHrS-F>P9nA}e1Q;Pa1?;yRn+|JAk!rd$@zn&hz z_IGXctwM1^2majN3O6y}xJoFv6Fh*eIoqxvjPCokBZd(zb!@OYxuc?2fdeEUrehr#9Y-g|Qmd!Bt%)Yf?hv#_1!<@`wjTrzcvb zu39UEFsZ;MezPt6pAtcpPb5v-Wd=LL)>rB971dj0Q@4HcqGLO}$ktt32rw8e#~e6^ z>Le%oN1x0PAgNf+tj~)89H-v^_KgB-{q)p|jKqLu=hUHgi~uoG6B3<06o}YllbukD zgG>*PL~Mr$eAL0;*-J-ph|sa4SMC$x=c|&O`#VK|9?QBsu$Ka|^c z6M-MA?j1RY&cnnV_fE&a6?5(!R2DA1Ml;{vyw!aR$`@4bQ-Y##Wf$0*%{pste@R|QpdcU(6DEsY;9SdNEXSok;oVBQME>dfcuL};= 
zh9{lVe!>rp>B+B8=ulz2kS=WO?y9LZ$qntmnQwoCk%dI-0OelpZ^^`*=ZX~Pux0* z3DJie`zi;p-3LS^KkASnPeuOQAR8}8r)tqISnEiydlTw#{&B?cdI0-k;B z8^D%Z>Wn@zB!lMLqZcjF_x=e7%$eDM14xcF{jau@VY-POH++r=J$qbU$sOZ>_kMo6 zwj=x1#!cyiKS+?^+FYd^$N|D#Z0jZw{}k^P+-yQ8fXjS}4(%Bc{IZCV7c9uYrt{v) zV_FmpoNLd=OcCKrgy(1x;uB0;>eZ5G(y%{)ICw1;-OD}9(aY`0uz@AR_j1`~Ec1wz zb^_uP;qKKMfp`_@cpo9Z=Qs|MPE4LK+dPZ$l8iTQoI>%5jX0-r9T6nQLi5*o&S2`V zZW?k8QQ&hai(f-N9aD%1sMJJ#kK&-BA89-)unZs0T_?Z`P8V4!q(ca>Vu;%Nh)o4v zc%&~&*As&WA~Cr}X9=)PJhUWwf&z+FJ!d}O!oij7K;2ed5m4>5$+YdKKy@qig-SjV z-roHg(YHqg9=tM)xQX&I7ee}(=aVE*m6|h|(-eWj8n5+-5neVr`Xb|2EGFab!Rhci zkOo3q;^uX!Ab%*uMcteoa>;BGd`EB6CZ1f2lUqxLWr@qoCliR^@+4#Lmu*LAo;A-> zC)ZHHE=|X?TXhn9K9oiHahL@R`uE}QBv8RLE_b=xR$j2arWtC8>Lfl=0`+T zxA;IYsC~>7Uai6kq+E)Pu09n`hDvgn{$K;viVV$7m-*p`vMQz4k_rK2#Z{N=gQRa>T=^koru>lsZs2`D$L7Ol(pg3Z zvCE=2*fW^OaD2<{7fHbc*psrP4jb*qN);y`9voJJhG#c;`a(q^l(j8$MMyu!EU=xU z4AoT{ZYRZT>OlRmXhT~(f9hvrei7ox+s6yc5D13%Cky3k8$!mME%Mg zLsw1g(Y^0mUG$TS3||y#+jI=1foBc==K9S<*#CaWNpeaFaC$#Ywcnn`OqX)8D~?ZL z(N9_gc<)mnOfo)0x19|HbsW6dUQJ=4lob7{JPK@8r)gbbCc?A4lAYiDrme&foImvTPE`nzeHM`-t?Xaj@~lTNV`jcV<9_C zIJ`t*`H{h}39y5bH6o=AD^4n0JIIm$KDmBGRgbR%VgTtR&k0x8Dj>+)U(@;AY5$I^@qau zMPV?6zi>?`3;YP$Ce78X3SU3nA9GrQ_}T8AE#8|S0>oBn%dk+Pe$sMft1A)ipFAvS zILZg-^)=FDQNL-~@XNuRJtPRMs%>5`#}CU7>aB=G=V9*ZPM-h71_KH~Hu#6^fa`R% z%2>O=Ck6Ku+V!Fq@||e5dvQyC;NYKe;@|M7Ki1PRU+x+Ay%!hq9eGw(qz$d@2r(>t| zcXj9WvH$~T__Y{k__Y{k?E4Gn|H}@;uf;gSuf@3ch8&Vn^b{u8bLSzy&k&aR=IQ4# z6hElpHdRpVc|k}q_t3-J1DL8=t=4;IROkF8-SYOP7#OtIoRyav#B|vvYddzLyn`+H z<2Ek1P3x+9y#ER;-c`|C3`ciX|}o|$={F6U_-^udrCM} zfbzQko(4K+B`qw-+2#jUItir?ZaYV)a|=t zjR*RB>o)4NQbYyZgjZX6GPA?}sGkbf`$S;4`|FIifC^l;9#AYm@s@I30{-%85jY41 zOquf(czm$Ps%?uD#Ai>QKO-OlmalDhf0R;z+X?x0vx?f?@+k>@zasbMYR0eiu(wD2)Ai0O5vQs z%iByHUL*>$`go%Hy`xNNmnj zXN9;A#>Wi9schp|fBA&L z4~%>L&ph+L!zUM=tG4N6PGJ^D%#V%+3}Fep!~0E9UK4#e$!Nxz7gTD=_Veijm_7&Z zX&zLUC|6u*uau7R9Z^sJrO^Xe=*Zo;78f$y$@Yt3VPc20#P-=2`#B&hhGw9Nc;ko9 zT1^WfA!ILW(n*~IbQ|{W*@E!wksX`$H%P(Ks*qkg4-R;GWLW#=UNQu9)W`4|^TNJc zdZ}@uLa5I9zHhlN8ES+!c}LcYfs)qwqgOeF;E2jq%F|tlPrkpFAaKb5w@0;AC%X`w zX_3#!KR|}CFB~s?(j=2sT+)F9z0k9X&ShL;dCAWUbfyDll-_cg6ZIICy#8 zmi3#72yAM)w!>k$3UqDTetJLR6PYK%$4(p(fs~jv<=&`1bDr<1h|oqU*vZN)lg=dq ziP5GC57GMD4G*QQRiJ?kKD7Wo*9jfj`?3DhwrbtvgXd$C$^=$G1+hK*1>?v)w4K(UsiU zZ`?qD+0dTUNi|k5TDe?AY7e^a8*r@g8^j=Cw-(!QKMQ2*q|!=Vs32px`6%aB96WW< zSjHL15A$0TcaCqMf{=C4&>*VA*Z11L?CD2w%Qt89on}zh==?-;0r}8Q8HhZ&W`*k|BAhq8mp^#*Uq1Q2#wRIUS~@Q;PGP*dqz)0pCmbQn zH-=Gsa#ub#5iQ*jMr)KAK=$;DRe7vJ(sSvEU z>YI{4`_gudZOiA70Uhqzy;)pB5LBqD(uC}*af;rY);W!h%iG6jU7o@6T=lN+L-7gK zf9kFP7dsraFL@yl_ygPQRUSFQs{+sO34Yvpn+S^Q-n8x(pTRb9^PDtB@xUwp+&aUQ z8SMP&*QVTl1R$zbWKD~zKsg~VV164fg!t?`?zM{mf_v#ht%#RytJ}Uj^i>Shf*$?c zht`*$nTW|n@kvB*?e+F)9H?wRR@fOT0;KA4MGkZxwsg`~?w>>$xwN4v=#&W3r+!T= zM|_eKU9a^?R|>S5D)uyqh`_lYVX0jqw4}^%J+Wg~u{-stUWMnVpgNXW zf9x?1Hff80xZ_uWeKL!G6Q)N6_GBEVdbz=!h=K&R!|z*hI{Qm1+@x|SrKh9P~H7?zW6f> zh!uAY@giPoUL~Eya|j0()e+*7s4v{|yvdzafC||)7wmSC5l>#;W!yBu4};>Dl*dh| zU}6**=!|qV19BX}1OovOJ|z-*ITzJq1zGH-cS*tbrdm5EpQXS#a;9MB-U6Q-PN1iC zg)h`k_MLZ}4H3b?KjXx|;d}pBkMzLn3ZB{YF4Rx9Dn&l%iYLHgJYrmPom9H$-|*k} zU8slr!WsK6;J@sX{yfhD&WJM@pWSXRBk?EYfnooj@r8Wn_w{sa`8N%JcNrF7;0(VO z;|#wR0B{ztKngLM|#@m>ty6}MJrizL| zR7dquNr)M5V}oszUtDr0g<)0!G=RPZWMN~UZQ1j$1Q#;11Q!z9|YPy9zWBJwWL{E7HDmhN95kw9dD zm}QY3Q#@4I8~!$&b({w}%IJklPceg5OLpo3Nh(}WlP@Lh6osYj>*gZ-Sm4A^Z?!+- z8O?Jql~^*_U>5()tRNEgi@hnvcTxYycIL2;CyMtr6*LFGtmKE;^8v@d^P>JvqN$%b z>KE^2!51+ho~eC-bEri+H}-ctA)!s|AdGniQF4~TB`>R9iR1tB34~Pol1f)eMgd&i}CQL|GRRjI2qiDXIYdz2{5_NUPtyk5x$wb z)D)vW&di7O@8q+huw(6-Dv?kk_?Bs~Ek!)0&ic@?1M$f|#cw~Zq59u0AB%%$70A%U 
ztXjGJcqwh|qlTdp?nx~4&f1fqtrYM&^_Xir(#Jl2S?-!QJ%wEx6B%JceBu(}5H!Jo z`r1YGv9IsxSQgJdJ`CwYXwJ#%4f+$<&xSWsPda&^)muBK7xjPraXtR)4GB;xVSlYo zhyZFUJe3_$|JP>a3(MPAMS)4{GQXW84|tz0JaFVA1$3^v<(1{3e!3fhdIafDx?T#n z<EK4Ca@v!nhPid#~W zx6BatR?&>9Ap5%NKFzgE`33U@6c0Q)UVl=C9d6f3?Jbr*PxFzK>Ar#bwzfVFIpg~U z!MkGEVA?Q%rZMWB9*d*Gi8irl_7!yOeC1NtybKnImu%VXElmZE);qFk{b+t5pzPKQ zITm>B@1eeBLKPI0L-TtYL?P^ewtv_z7LeR*(SObj^`oy~o&SKG zfY%&`?@(!1ofyIhFMn()4O-w635Nonr;`iuN%ZqIROw?l_-CB>H$3W(^>mC{_FAER z-9kOq{i2&|TPFb);}_1@cLD!ppY-Q>7H|eHFg{zPvWr(G zqI&F~_Wv1Qh);fBPsiSEtbX<82%3-h(+)Em7UK-R7UK^h zs&rnP(6PLc{f!~Z2Ql2m^LJU1o^6T3t#3RX1mL;&PTi)cAA6sj=*px-hIeg>5ltPU z;QnUi{+_fi7}ud&XR8t3M|q+cZH4CDo>n@{y~abYB@`?Ydb5JF(Oop*Hq^r9(#Fp*&+Al zLsw1xA8#plIM=;M(kW60_KHcqED*OJPwc6 z%8*^?=cC4pQ{xr^&w}{;QDjGX#YRo{4AO}xzJGo1CfbEymcbHnD(tylEY0+q2;UEVQtQaQ zMjK2RYuMPM3i75}W8Rii*wq}jq8b7VL|K}@PFzEUF4kst#~A|ngs&e|ZDNKE+03rX zP@ibyt)13u?~1~bt2f+D+L%Ge^-aP}ODc?Ax7Sfk$H6)ya*F0{ez;<%lKxYX3T^q; zSldn_OmjzIbFTc*sgYb_qm1H{{%9N7ZW442-J1ULjUVU__iR4AnF{JwF^bGzNsxO= z?!d!AJXGx=3r^Vl%O{KP^*{5>{|=wXf8X+uXFVNDo1xoy4i8`l28xZ3uOx%k#+6Cc zT}VgHO>s*-*pCT%O2$yo_x`BGp=8G|qHwzIjPj!cU$EDB_W(V_C*O*{=a1UsV3kEN z*Rw7>2pWCZ&57cfj46-I<5!5lrntgq`2#dxtTi_^Es6Rs7mm^1AU=6vc_SpE9uLv0 z@93UG`BZCEaaAg+2PU2sDtcCl_(8EQGIj+Sx;5_}Ig09mERRd7QxTu66SQwRf#!@3 zCO6;YLVVKktv;D+2z@`)4$41>Q0*?-oNk8GeDgeVZ%^aC8q` zZ~a7pPn=uk9lo(aAAM=naY+%7)*7*Ht)xH?edkXR)aTZHzD;n0un3q@75tB){OI_- zY(uj;5-RaVTm+@Kg!>x7im=jUlLQD!x z89}^pTy78k6UvWL9L;B4kUrM+lJbfael+)SOJA)&vJdm!Tr&53*S~!7e~nLkxQ=cr zLHDJ|Tk|}gFo;R{Jh|v1igd6z_L0M12rzPC`=|(tPrl0B>wbvh7UiYe{XIU4Li=#M zVa3renEsA0^Q}lf-)mFRZ03ma9o7A{*@#a9_wcC;Av=Wzl#$qLXnv$!F1R8c4^3Yb z*BhhxnwLM+?+mmfeMR^|Vh$Y-`}tSal%n{gd=H`A`xS~mho&-z^N3*MXQcfT)kBsH z-JDlkD+bwmD#VA#-bn!?X$`cm%C=LPFWRJ$Ue}iV{VNex-8lUGHR2Py=emLY+-I?z z?O7+yP(8IFZK6@G6V)Ydc9;*0;y|GPw2GqDG-eqnD?meYd=;3l^3nhzY{{{csdAXX zPH<8h3(;JjuO~;9J<_G`lz;Dc9SC5|lXT|;igUzo`e$9cO#rbK_?m~iQQqT}`;_{g z0^B|OeODtLFFtQ9FT;WWXNC)_I}tzZJMlHa9OFzO{O4XY?{iIlY9kfZbG&sXbai5~X_eK^ zF0(NSv^rb%bqb;J!=*Z(PUDBojx*N=k)0jOqgU6nE`^<1 z28AYD@Zc7wQ9fh0z$XdE(ppY4E$B~Vga>z(h2h|zapK=_=|9%fv18oMr>xNL@z?jf z476K(t3LCY0E=<24?UB8E5-hX|Gw{np5zzK*mnW{WgqD_|2Y2w&WJM@pABoCI+P^I z1H=A5;|uxD@9XIp<-K2qYX}Q4aE4!tafV-uamK#CaMa)b?L6omk3Z}$;0(VO6JBJMobK%08u69guFV*7{K02t6 z8Y3%4?=$9k@aAaS)4V8Yxkq^bw;2C*+>Ob_*O zQde#CL%LJpC0ftNk)A?4R<3%@W-{EGxjS(3jWjfAyl6J>MY_tkRnxvGj+)9>>DzWX zo3=(%M(=6P4E8c`H1Gh5D~|C!ZqQO@2MrG8_5jH#%xz01U2}p0p%J~ywULfD>~>bg z__-NO=&(-kf9fqkLBrPGcPFrB7BxBE{iyHJq@o%yKmjeL$CQ+31o$}CG&e6pfGUOY zzAH%2|21sAn0u-iG*~)0Bwi)JeszTr9aO)qslVLZn!*Na)OHkQ>Wjd$$m8OE!xT`R zaTod^ON8NMu@{t8A}~_SZ1)J&Nl3l?9=xa?o1b11dx2R5PRM?2x{dOtzCe#?^5At^ z4ZfDi_S<ic(;-oGX$wHO&5BnhxBZLKbKwm;g9reMX6gJ zp*(1g<-?Q7<6_W8_x^P9JPQblF<;2x4EgeSq^MvaexWwER}cgp zCyfhBmqO6UAwh+I`Gjflz5ZvO`QPD_wjYN4Y}@G=lg!>j>L~-5+C#%>1=LS@q8Zk> z4ArSjvM21A4g0YUr^5qyM>4$mnI3%1R1CIen*6+ZY5;TIM3XiUCPNl!dDgZl9Mqd& z%2vW0Kz!D8DF@YQ*WYpxQbK&fd!WH41>=CnM_mq6mC5kob5@#X9|=CU>f-YBIKb{0 zxcQ^%A4D?o^_Y>$IiMM<1&<-}0gcwt2u;$fMzCTdBQ6F2m`zU%> z!E%?LcAU->HlomFcIF2KLM~~C%%T2JgZyk(M%D}#ymGA=0r5%3v&{6h$0jh1=T-5; zNYD0kF1VWy@yV9=(rpDf1n57U5Vul^07h(vDrTtvBBS@jJtjg7CXB|y3=yAfo1+~2 zfbtZzE>_JF6*e@SEwiu6Py{x7@Jf6*OM$qc5UzckM40DufA1_S0?r{D#zm2y?S7Fx z`w2AHfB3Zio^MPdV4r=9{uRYV-Q!Dg+uz-%$tJNKix9(R9=3YwR=yv$5Ge2NeJtT^+UP#iMw z-M38ZBMCGGHD}omp?PHgi8dh_#7iRg%A~D%!Q}NjNoJl=OkS_1CO={!K6x}Ze&pHx zg?em9Edjy-2mg!{|Ax2yv7U}4NoaD3@hrqAE|<8zIp0HbSc`CrC4%QOUyJ??Z~txI zh5G6*oU!i${>wh;&+{zc3|?S-j#ctW)kMhy!~Q?x3-QVC>*<*JL;rTmjV!>x8GbFs z8GbFs8Tp!lk$3lgX(-4rTArd zxWU8Q6s}7K=hKRcZ{>*K@m!{z6ZHYBFFAS3q4my(YljRANU;0!xx?H|c(Cy*Z7_bP 
z1Ulnih4oRsBic|ln5T%oSJ!!;*F^kgH@5wPYQHFm^Esx?BYl?X+fx^-h^Vex7>@S} zlLp7RjLtI|M9`}Ed}cX;4B8uf&c3~H8M`v@MD9TRG*+>fDi5Zy11es#$7(51DeHfi?=2mZTh{M5`hf@5 zUa1pyd_aN0EN=msRjAH0y#AiZ7!RzB^Q%)rykUg9o-c#m;klC+l+yTry0n&e%&&t3aU3JwRmr`!G{)j zpZhk4))17?^>Yc*r#ZRUy~#y+ply+fMOgt&x<|v0hWad@zWOVEu^Gp7`E;aI1X*Cf z0h?LPM+Kik?!yjfZp$ys<&n%bX868&%Z^1z$c`O))!}H7V@1W z$HtH}2X=(}5u`l-L>_+GVgK`dOiaJyLjMJy#FFkk`lhv>6&UM( z?aK>`@wq?!T8uOH{e=tsWryL{Vw~aEVqEAhdF%4%NsP92IS9)RVHfPolrs~_aB)uT z>gGsZNWX8t>yX_5Rx@|MrV#1PzO}C6o!}J*>GzpQ_@n`BW8la8F;QfAI}pciCpvp*Vy)f2|kNubwTG`SdVY3c|y8Mp^Z8 z0Gy8-Ulv1#kGH%6(-U~1k#IpG?GgIR%nzCxG4w z`PqHks)+#DF44;58PsQaznUgm#tSE|A2+aiig=~mJwXxa^9@tL-_$}JBx^@leM?cj zg`EC4UseVDxMez5N3+9@5T$BmiYPP@6K}gAo$bZT?AKG#zN4-@_rl4daMoqJ6}DOh zyyf;a_7_XRk-QBOwI4-5@A-UF4ARB5q@VlTqmqM#6dit-@T{0d<_%MGEHCqgXetIqyvO<12DJLtP3O$l)vJ`G{kXJgZQscu0(i@D9 z`aMAJIPp}nU5sFdOLSPhQ(ORU_2h4VVoC*^dG8yUl+pR|DeWm&^^*Xoej<$dKguxGtd;QP+ z^S{FP6|7x$f6H*Ymm5aZ1hy(B@&)sU`$#8E^RF6{%FUVeD+xnzf z2x6k12kk_>@t(eF&nk*ITyg02J`*4W^bExYmuRH3m2wQ?kCOqH%NtgzW9Z&!nKuHO z-;sI7DYpG!EVjR})iNM%7VGoZOL9_EfiFJxdcl{_TtIfz(5J(**x?5g8=tIDfh61G znkol`AX)FZaf0bA#%0lHEP!}`m$i1cb@vqJZPqFtHbQ`xZu`o#a4HbUrumps$P2EM ztJI&qBfw9b0C^7a!|Wkr+!H-<2oauhs%{~G>ksnM8;UC6KNG^2dEDaxKG*mU_Qw&7i{ZM`2rCAFxASTf6Pm#FI3K2Ww*Xp?P6iT0-pS z-9q3K7uS=mej2;aH4u9m>Ez9R#wc49Ok$i8LeH2x&^*rllG_`?&^rTsMotCEykOuK zz-hLG72G~+xtJk7S*C%lE?^Z09j;r!qVus+(7T8*o8gMZO4JZCE?>|5pUd~ zK5{p#_35+ScRZlvVI7&zu#wb`$;bxRYz?H<6Gm9#AQ4x;y^aiwQBilFZ)_KUfn-VR`|DG#2mLA-IbYfEO^ zb#};j?8jWRpA!yqR^7OS;+dLnJSHpAdurn^>oqkRIe_kcAoqYj>hpR%rfLL9!PS)5 zj=n)ukL~I-&%8i}@kzxq#R*%)Ck|S~ z@d*^qSkLS>%|h`>xWrTThk8P=qsj4@Ez;+6s2@78bA<=y?-JtG=rN1!YS6YjhvE}W z%}nO!Wk_#tP=C19U>56NN0w_xJfLmwKh^Gq=Ifc3q}FT9V*VQUM`~B0`5Nh$DNeG} zSjKYgp73e{9In~x?uF_tg_hoZJt#gI`*h}jcqakAIr_L1AUltXX``j<#Nn||e}{8D z0nFc4aT=+pK=;}fGo@4PAf|9Tw@gVC%$%=RZbWlEVs{3jJPrv#VnAJ7@J|sqIuoVe zhj>p^og-rk@yWcVl;3Ex2w1)4b&ElBh_VwcU*cY6&@LUnrx7=Nk5-u7Rves)bY<_N zxE;{s|2miHj0U57w6=N6k+(5ahzn&t8G-zAH&xU*qnt-`6qTdeqd0-NyWH6&V-ibl zP24)3!wR+65A{TZQ(^Q|O}?BhFEp7MygrBKh*vG8<%`~;qBy0*=j%>!m^d21nPkfX z+9C1$9(PfnTKvF8-WnVn^we;t+Y3NyLb_rS(vz^X7-}1#`q%xKC!hU71>l|K%EN7O zXy0_(Q}(G+AnB)9Cja@8TA;cNt`OecZXVbAeB8vS?Yv zwJg+6&b=H8lyE}x+W&|X|Axo>v7U|@eR4UtLvA788BA2^xcCCqzZT)Db6LWfGopXP zf8TeZ9`Xxk?7M*fvQPT+JPSA@&R~2VYT#+jij@b3{eQ+6>Lb>S z__Y|P{^{3ZoU!jOoc}L748IoR48IoRDYs3XjJHl;{F-lcZ-n(@navz7_eo?}zP>)p zKA8Y-yQizeEc-E?7D0UydS6kk+Ig2`4br_&6bQ_a`Z3Bk<%fO}NI(C8sWwyv2S0|? zXw+|b=&XGG&J5L8Cq=xCx<>_}%O@;8ZwwC~{LS31K34+m?-CuC3P}(lXd?3w^_NW@ z(qkSW9qhFa^8wdNke-8>0?M0Kd2BE%lpp>}-om zDO@AKhnc$kt*GwWa!4SpzJdT}-)$bgxsL#Ss@pz3Mtz)nXAJmXDT~2*+U9Q!Q3O~! 
z)0Oug)mIIwa5qeb*}zrWmh>@R7}P_nh(4&Un?LkMxee*kkHx22siVH|m^<}r3|c?U zecJRI`uPN$3uNAzDhz$4wo8W4{IZIy^N{$)IPCR=N~}s<5N)4T$forYsJ}DEjy**0 z%x`G1opvyNM9WysJ-dS$>0=`X>NB_||^DZ4$x{)g#v^cTt`8_4oX1lZi;Td}QQu$~AUK*y>fd>h!;S!r%wS zz5ZvO`QPD_YX^>A=0ACV4!4JlLeG~%D(qKd7pkwHyO*N%7xilo3XHQs>JhJt z^%qb*wsk7@UN;58*Ts>?(EI=s-_*Emg)j(zm8)e%@5MEr48*^>PXbo_{sE^%H1Bgq zHS{{-lb4le1O#|?V%fTt{cVL8X&&oO+;)1rFt+RS5vXf7-eu>64K9i(4%QQ|!g0$2VJ zqdD@!+!$f#Bzg~_W4Tm8Jw*t{uL#=YIHP{CQ}u=sG4$S}lu_u)E)v*A^L%nh z{jr{onLOOUp|xZoKIz=M_RM5D>SHd#Mf2{*rId*N4gY=Lg?Qr^&e(SW|79P&Yx>9e z7jOnIFh09Nnpb~_kOzkSf5sQ$li%0Vu}8)yu{fPpjoT8uONT8uOH{e|=YWryL{ zVw~aEV*HKd{nvVLCNRMt1y8>dda=^4)_v3H{i1J=)4!ZKO#t_I9u3Kd`>}Y()koYo zP<~O^EFPVS=4j?BvIL$FVuv>lxtE}R%3ZPS?Ocb^I{?Nzo+us1LrS^pgJh&L>Eql( zj!H&voJZzC6TZTj^gv>>_7m=}& zLP(_~Dv>gmlw?k!fl>&Wg%Xh=p@}F_k&?(%D%s}z*ZX+(vwwN|e}6x`pB%?|+{bm@ zSGiB?I@em~>SZ&0x6_P5bbJ?Z@xVURD;nA7kD`7eQ)<=blZf^7&d=Jx6YKD=3fLU^ ziS?^pwh9jT-FfR!oQEQ~LEA|DRX^sb*%p!CJXb(p#A_R$hIk9YPYLrE9q$xjzg`?>(zmQ|D7sO!iCqsdLeL<)>+7Yy)8f?~^uu4N7xkb3{hIWz^XtwOQiwAq z=XZX7(M4<~afE>D|M-N;57h7a&ph+r!zXDh)9=@njS?h1)6aCDdx?s{SKfyhFg~eW z*LN|B8)Vazy6ye@iE5?T{1q&ie+m7$ujiI9xVf1cJNBTTlFzxLTo^wb+so*_9Q#|L zTB3Y;B0J>&%4aUaI{c+qt~Z>!Oopz5MZE8W*}+D7_2Vk+ubGm`JVZQ3-iE$=Z0Kix zccODN5bNigEq?fnoZ$w;&e`(qdf4yoo?5DbykT2Z(A$Z8!dou$lWrq^e_oQl=P<9$ zn{!Y8#x@Da=4Ues!+k8na>_4Yeg1xxO$CBW&l3lYE3y{ZO%T3=$*d_@2U}em(|G{r z?{H|ya7CL=5=9Npks8e61?htRK}ep9%dXpthwr25w^jHVFxf-bkj zBcGfNf~XJJM?8J5H?j%uJNJ(>n^hs7tR0Uo%tJm=iu9;D5re)!cQ*a-cE$NTN9#4N zV;(v9`lvbURu)j(ZuN=hupsbEu$*o|yv5L+1RTW z841MPt8C@Ww=w&Pb+Eb40R#Q~Fws&s(5W3uFb8C2DKMe`($jlu_8E>6yd|eNweB*& z)WU-|yRm-tWtP>+=sa#{&U!J%E5HahWee6g;r*y(f{8)vCe~vdY!f)PmJv3Uh=w`NTm0nK;U|SAp+MW#geZn=S*Z9C`pl$y{67~sR4848S zS_~GcnXZnq!v5k9Ux2D|^mCr$4*8wx4V^2N7z z9UoT2d)A+E^8W{?5Gk^;2b;Fj;*$`zYekaiM?D|!D%rkL+gjvb`0xAD^1pL9bzd4j zx4!t_??c0>yg>cwd|)!XE=d}w>;D<2#V5Z%Pa(ED9lsNceVtUCdan66^<49D>b`S0 z{-^%-K4h4WQ_nRYe;~Yg-}*@G$7dcZxO#MeAQ>HfD1!dd4rGbj_JTYxt+U$O7k$_& zf2_UQfWEByRq5@gj6|TSPO~%O_yDmq%db~Ad3&tN@yb<5?OQY7$BVlWLgKpnVwnWzHR zKO9eYzqoH{*)_LmUy(zQCtx;JhxiUD!I47PoN$~P7DA)Lb^Kq_exyL_d0b#E;&&~MzX{)Pv zjMS$HOG)%!Ow&=92@=fzihk7Q+22l<;{AwW)^`8!3kng&*~1)diSvjKb?oQ)t_bv} zZmCH#HHwhIL`+lyxO1zC8I_!||UU|l#K*|y8L%zOy(nz1dg8Jvk3Vo4S!XTEo zwM-lJ(&h=3C+Ep9Bp4{YThfAO&`cP})Ev%<)ob)K#f_^gl1zSrLFmCy>h2ah1jCxM+YK3R$AGs7XPV}XS-aphzH8Q%6e*AVqZ^YS%`o=e!uFv4a(*sP}6$0{zcFL z;kuJ7%@^Yje*+vmS~^w5D!g5(e%y9VTeSUE3Gdqy4*x8T~GhzPr9sP#Eh^W*>z;cAX?Tk8R!Cf%W$F)4A_B&a`4*4X~ zF#W!g68bhz(`P-y{=w6(?ItI&4%RsT!sjgP!(F=J+Z$)}XPYgNP-#IvDcH1$`5^MG z>h{@72MHnA#kNIU1D~I@O*gY+5{J}Q`z2)|<1bSVZ3&!Xe zpT1WF_ShYP63ioT=|7U~F+yFXDd%ns)snLfpGc{^imTX9?uWmE`8V0%%}9Zn z&siFuES4H{y2VV3PsS~)C6&Th;LkYuU-;Nx&r^t1V-|3B9W6fbqdVXJsh1n(<6ha{ zU5}#6z3cCFsOOrGQ_nRYr|vt4^Zm09^<49D>bd6Qc6q&zOj0R?Lu$XY&PC*h?Ig8Q z^a0S4jyTi#lN&BNz}evIeMBJX#J6a?j~FzqFtNq_MJ`3noMJpk$ZiRh2}FNLXUouC zUXv^!b|hlWjZhLWJCunxtd3s*xAJrV?zmwA|0p!9whbEk>Idc24h?S%~T#DHxcCa?PQoTyFo02t2q~&bgwFd7O zT4N9B`oFNiTK-DYmyT0J`1rvcFY%s2e^()etseV}ll*6H#ZD3DPUY-w#ruUOlz7rF z9V1j{&-M-~aYJaP(PuT(TY~(=rVg9%fc;X(?8DkPpOa6|K@0U6-PUWoUFzts+r+(k z-3H_tdSQWr$BJ{ei*3Qhq_8%kIAm% zdwvqRxuM5~F_gO#c-URElJ-mccwr zuhl~~6`WsuP^r%P`55{pRh&|sK;Fnn8XluxBo51u%T1Tb;QX(9qoZGtXI8&l(K?2F zvh`Zn&uY|TwM+Ob)73F^kv9&MatJN{C;+NzlcLS0+op)*jVC(OP*=Is-jx0PCxuvY*iR%zh8sAD*VpMjDq{ai zf*&3FJvK^yS(d5B4Og{7PwmEf{%%=%Wzi)fAkp=C-wG3~kKM{7Kww?H<3r9>4-;76 z^izjG*Iq%`VHjbMk38dUzS^M&ef$GXuwAud$M}T32b{33o@~6}i^`}NbY1-7u^#)8 zhue31EXDqv#f>>W?Y7aCt=~&ZW=d`mPHW1O88MHuHs6LxpNSbZ7#z!z9zI_w{_)U) zHslAZo2N7wpQDfEHPfDY^{d3i{p{ac_*6i4Jn-q#x}U@^x32onwhWM?%rfbP`s&Hl 
z-M2{;tlK+YqkJEA*8~RnegB#B^+D{8`uFuBz@C&LAQ8z3()llre-+32SjX0xkb5lP z&s)O5i+P+k=_)_YBcHIgs=d5gNruE#`&Zl*Tm;;GWxXFzw<)|4<2r&onQb@fHlfA` zCzjZ0>{7%Sm(_%FgRV3^{1*IWSobX3+lInel|$vU0Y*n}3J@HzDh#6QIQ&L45q zG3MdCe?3njK2qvaT@TUXld}tkn?3k=U_O4ivfIAW6XW4|&;PzJE#8>Jsr%CKx%E*$ z`RjdXIF%QuKhu+JzE`J719kmB4Hu{$7WAuK76iT=Q}2zH>O< zLw|c8GR()R=bDelDwmkBB#shx@jqE~QU{0yoas#yMpy@{Yg_$bH4pGl$=hqT3=;39 zv>K|BPd-Zbb*uV|K=6fUL&=-^i3N_wWHRr_!69--vyCtMqw;5OP3|H=z_DLm8!^AU z!g}LmA%6emYOR-tPmrMPeq%}n`WT!O|5abqEe4$L7x>gDl7L6Dd0ivBJl@~kWfqKM zzB1JDjv32h5V3aiOS6#&)|FacgOE=$*550r*(Ly0^l!DfcB5}C#|m$gatUz#RFKqzLEKG&ej?`_h8??e@{0eZ7j{v(t+ZK|dwN z6PEd$GeY2@wuzMTOc6SwthWi_{Jv$bY-(!3LNNG|bEX^bWg^BBZLv*aK-aK*fr+Cq zw8{405yblu6dsa4V&76}pPUtT>BmjtO!69;1RK;JYSusfxReFfafD3%+*MIIwQpNJ z`*y68cf73ZkN3^oD)Q=!;Z>EA0Ui9|f+|q)`mxl8Jvg^|fMej&9wwY8?U31PjJnGA zEaLJ6=Jj;8<@;mbv1(*{`HTnp*zfDKmcV(hS!#JPyM-A*#NEj<4Edq#=g5s!8t9+p z&c_c|7J*LZ0*Slm=O%u%+Cd-hpXH8U*T1{R3nx2WM+P)iV2!un!39giLHa&%v<3a# z1U)+75!TlQj`|(mK>k96wJ(|eZu6G-Ui)n5TpEo}f~}Jm+pE#|0;iC-ZL{29l& zvGbnq{OfrNQPk?V%GQC#C;MvXNgb%y&Bv|2F*phC6!{na`@S@On8T_2((t+Uv9IK> z_o3lbzM%dLWbl$$m?90-_5Y01_~iHJDMTv0?+fM(Mxf%U1o}me#>91KL(qkzkSxGuUmFvzPLpG zUeJ3#G8~Bf^6i5*I~)>C&^UoU14RPc{o@D4AaUX1;QOf4?*F{-lq1%yb%ynNgzH<#G`kpNbSn4@Jl3XSG9HDMe=b=F@oWdgzMx<9$tRliA^3N8X=Ylq!{vpL@F0p_c!d%KN4joR5@UtE zm-G9`se+Ji`ZCnM34N3H_&E1q9iATB{*VZ)Gcn74I+}|9*ehNzm~2M>lSN*~S-2|& z;h|9n>vz=zZ-S z8n?%~j<1r{w-^Lq=^`#8c79jlwR1+c7n=&C31m6H%^xScx(irb?2%7ADW`X1ywPr8 zRPmoVWt-X93n||jK%6mj2`|peP*8V!c(+Rg>gp`IbTDqY^I%ulcqRIlEZ^*wh`x(! zg*McFs>S<`R{$#m`l!@!NvmnQAdkf>hiz6_1ijx`9GBMO9MT(otAe+PLk>${X6O(f zFdeZy{ZygHwpH11l>Pm1%rJQC9l-8hL*{uAeo1(FOfp=Hpj?+m}{P^83Ctd~SWQzu$+3 zQ~84W^Uy{~-I6$Im|KT7&+B)b#wWkyIB!l^wb0cKeJB22hkCC0IQ3lfaq7NvINv|( zP|r0Vr=Dv*Zm@9KqlCy&;@Im#y(e^o#A4lzXK&2Nfza@~+Zy=2c<3G!ti*b@)X1}L z(_HeRC}!AkH>Jy;ode!?97&$Vc%yjd zqg0J`WH9jU?n)3N0ZF6ZutpH)2@ah;QHAk|+0~oc>o#Kj(x!|b17nqxcQW15Lu4?Y->&TPDiqURrnd-73j0Xw!lvI zD&~cQIjt|;M4dtC3)@Dc)G1=`f^=dv=9gb{>h{%Q{qTLYwwgL_Zm4%785ra$!sM@3 z=Sm?SsO4S5r&+NOezImph!rb>r8#fKFZ87Z)=ox|_=Qjy@RV;|CDxOaPzvuUvA`?0 zb-OLmH)-MGl~#WV71JSE+NU?C7LvV`|7t=V<86^jLF;)6QJ}BW`q3NnjElCv2_IJmt@S!j zSXS@=uNe~f6Rh7hW%BZVq73v!)u*q>;T#aBsDr`UjPT8ofvF0gUr$bLYQee5@ki?_ zZ?5En$lN-Hi5JS?D@h4Uz2yUNi`)SpuM3KU9>0)<^&viZqBAVf zwO$3L*uJ(z4+ud|pR;1|dYqFYEOk-TgvKX+Xd?b`gvKY572LkH7!S|KrT)TE*Zt%3 z6e5RVc%ADS8lPM_`%GwSJ2%Y7^S*3tQ(r9dFZ}m?Y5XvUQ}?CebL->$vcKMkhEsWj z`ZHqKt=az~`YHW=u0P{6KKcE53USq?bd6Q z)N{?pkL+SA*tlwp(9piJ$}hd2=#iF@wZ{DH`nzu5%GUG1H4UkLBofg;ZT-0Y)s>w1uek1{6_77AJTqT3A2E$|m z>M(CL>ffDu6+?X8axJ)ob($zu*k!ktqy%t&_=p;b1v(SHFZ*zOida9z_^2NBlk3s! 
zk_+%&mLJ;cGtx3i@PFdm?}2q?3>P+n9M0v;_ejenw78*ANT)Oe?@iN7od`zsb9ixy z>DNdM`a0YR_}q>69S+vJ&(+=t1KefT$fx6mo|wWiqF)gLRJKw4H?Y8&H=nM2cqj-; z?4n0lFduZ!>i)7?jEmMakteFq*O4vTO1=!^kTwz38#(7aKtwgP>s4aj>=d;dpI zHdy{>(D@uevb1bU+kTM1K|EJO@E+kQx4eJD3RbuXJ10CEtB7?Q4W?SfP%oVgj9?z& z2WN#1mO8#wM4gYvmgo3g-zYAZanv3sjGnLFq4%_eH5q}NM zBt}>l%Q&cjb+Ahk8)IXAgkfd#I~^_W*Z;{E|Ct~D$0u{&`+pCgbWY4(u|ofM_rp;$;fMQ)Bs*bs zN6fcw5A^5KR>gT$L!Y;u-ZMb(dR{Y6Mm`CqFFDL5kN3(89>Jxk-!2-?kvLV^pp9IIqZEwpo<)LANOY_baFV@Hx44uq z1k(2o6MN)`wVvj)BhT^Qho9fK?TR1-^{CE%dZh@aXU4bmVt%Jt*`GfSee47B{0y_u z4?*q9=Zlx?#o%drnIC7PAh^Hiv9v{GbJA^>z21#=Xtj*jtHT9FpuU+bqK)&5 z9hOyX>$S$b%tme2k9sT+;kdX3v!4?>bc#KiGESIWDrXe|_iPXt}T+T1?ER!@KD?bEdQMAxV0 zRmLiU1iY4OSL&r=Ac1dF7A zL1Dvkke$CZ-wWpmxIC)YO(pdcZS^lO2{S)C#cReJ7GnCMC&>3{|&pUdp-84({qD2P7Awexk`v_nJ!_(p?-< z%TcG5YKo8+nv#RUz*$Eo?2EZ{n1ALbYAyiU2a7DnZsLjML@Q^EKZkM09+%J zgPdN#g8Ahq4-MkxBqzWY2N8;t(3rb;qm>Q#A_jyL=0j8i8GGTB|t8g-AS za>=&#`KTLrq(!vj+&`NpyMvZ@5bwzA4dq6CUQu>4XZ;v)Q{x;-0`EbO`@=rnZ5$)E z-w{b)Y|Rbo3*(*z)+hqEx&ABpV)QX94Im5ez<#3`lb6z%=M`|QZre48bF{Bm@p+4* ze;e1LyMrxQufjQ8&WCe3$G@2`i9;Xz0{ixl8Mw}t4p(D}IQD&HDAk2y-I{4r$I!Yw zai@g+*x;R4v-XM0=$ z7+Iksl3AVZ#r4XWYPhnC{o!#QTOB?64r4lk`JO0NfRDPg- z*MH`j{~kUu)a8@VB{Dbhr^pr6<=YtPd@ z@*iD=x!e$Ss;do}PwW!=h+CV|ba!LkmZhWONS6fOcM=Nh46HFPyyMg-M+SMYYne3L z5kQ8r3eO3fVs=ndQC7W=x<@+CrO(w1#6c*#-^UZt;35&LvW8YBSOM+LEz&g6vZ!97>Z*&QltC~OZ39o`#kPGrj_lBbN;~Ch$Bb}!o zjPu!v?XNxMZs%8ycWB?SE1x8E3+)VYus($G_;uML)DNF>tvR>Rc9O^|@*lrnh;_4+ z;V_wQerWJM&?~ujoJb54IwFjE#R@Ur&*q8fJLI}9E^-6T2fXHf-K7kD9W|Rihv#9v zn!d0Hpsr%FBG{(}^_gCGCWAw)SReaUQqBVTq=OW0{_Y9tz{_|wE|;*tl{%d)Mh+o} zTD4(zv|15njjwE^FLsPsIV13D0GV; zqehFv3GYYu4_UvDzm82x z0SkYx$2p52cp>M=h#fl!|9TR%;Q)T;vaC#&f(Uwo!4auwFkZ&uw* zm&+{zUte#ixBos!#4M{=7IzZs$7B@5685oz`6iD&f(#^RNnR1aXNmK4YOl7)<)YqB z^8V7joCH4uIO)WW$-}7hhr}}-Vo-X(V0}X`JB0RHT@S}Rjzgw|$r|*93nILnUBYml zSA?sv9QN<*o_Qz`jXJQy;+Z?U@m^l0Kz{WQ{hd^HJJ_^jNx)Z!F5Z0@2c<9zJ6!NY zpX!etdsOQm67`lbu6J=Bx1~+`qGj%;r8Ji{v*<+u#ZJowA#qN_~)+Q^m3VGw2qVpY9f%;wlnP>ic_(W4JmEP2HoOrAv?3jLXfUrJQ zLT_y%4}28I{i~5roFURS%5o6!HcCC2;N1lA#(Xebc_6D7W}ST zU(D3Iqn~GzC+jT_4iea$_+E2-7xGfYL$4)0IDFX&{Y;YM>4XpA$? zC;DWcs$XRoLf>g3?7@#i%#gX@-15X!Uf>VT%?>C=9r%3X@sw$P_~QK{_fj|? 
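The binary files above follow the standard DeePMD-kit system layout: type.raw lists one integer type index per atom, type_map.raw maps those indices to element names, and each set.000/ directory holds per-frame arrays such as box.npy (nine flattened cell components per frame), coord.npy (flattened Cartesian coordinates), and real_atom_types.npy (the true per-atom types, which the denoise task presumably recovers after token masking). Below is a minimal inspection sketch in NumPy; it is not part of the patch, it assumes data_1 also carries box.npy and coord.npy as data_2 does, and the shapes in the comments are inferred from the layout rather than read from the omitted payloads.

import numpy as np

# Hypothetical helper for browsing one example system.
root = "examples/denoise/data/data_1"
atype = np.loadtxt(f"{root}/type.raw", dtype=int, ndmin=1)  # (natoms,) type indices
with open(f"{root}/type_map.raw") as f:
    type_map = [line.strip() for line in f if line.strip()]  # ["Ru", "Pt", ...]
real_types = np.load(f"{root}/set.000/real_atom_types.npy")  # (nframes, natoms)
box = np.load(f"{root}/set.000/box.npy")  # (nframes, 9) flattened 3x3 cells
coord = np.load(f"{root}/set.000/coord.npy")  # (nframes, natoms * 3)

print(f"{len(atype)} atoms, {real_types.shape[0]} frames")
print("elements:", sorted({type_map[t] for t in real_types[0]}))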
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 12 Mar 2025 10:06:11 +0000
Subject: [PATCH 04/26] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 deepmd/pt/loss/denoise.py                     | 28 +++++++++++++------
 .../atomic_model/denoise_atomic_model.py      |  4 +--
 deepmd/pt/model/model/__init__.py             |  3 +-
 deepmd/pt/model/model/denoise_model.py        |  2 +-
 deepmd/pt/model/task/denoise.py               | 28 ++++++++++++-------
 5 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py
index 2ca672d624..eac3ee06f1 100644
--- a/deepmd/pt/loss/denoise.py
+++ b/deepmd/pt/loss/denoise.py
@@ -92,7 +92,7 @@ def __init__(
             Other keyword arguments.
""" super().__init__() - self.mask_type_idx = ntypes-1 + self.mask_type_idx = ntypes - 1 self.mask_token = mask_token self.mask_coord = mask_coord self.mask_cell = mask_cell @@ -145,9 +145,13 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): label["clean_frac_coord"] = origin_frac_coord.clone().detach() label["clean_type"] = input_dict["atype"].clone().detach().to(torch.int64) if self.mask_cell: - strain_components_all = torch.zeros((nbz,6), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + strain_components_all = torch.zeros( + (nbz, 6), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) for ii in range(nbz): - cell_perturb_matrix, strain_components = get_cell_perturb_matrix(self.cell_pert_fraction) + cell_perturb_matrix, strain_components = get_cell_perturb_matrix( + self.cell_pert_fraction + ) # left-multiplied by `cell_perturb_matrix`` to get the noise box input_dict["box"][ii] = torch.matmul( cell_perturb_matrix, input_dict["box"][ii].reshape(3, 3) @@ -178,7 +182,9 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): ) for ii in range(nbz): noise_on_coord = 0.0 - coord_mask_res = np.random.choice(range(nloc), mask_num, replace=False).tolist() + coord_mask_res = np.random.choice( + range(nloc), mask_num, replace=False + ).tolist() coord_mask = np.isin(range(nloc), coord_mask_res) if self.noise_type == "uniform": noise_on_coord = np.random.uniform( @@ -217,15 +223,21 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): ) if self.mask_token: - type_mask_all = torch.zeros(input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE) + type_mask_all = torch.zeros( + input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE + ) for ii in range(nbz): if self.same_mask: type_mask = coord_mask_all[ii].clone() else: - type_mask_res = np.random.choice(range(nloc), self.mask_num, replace=False).tolist() + type_mask_res = np.random.choice( + range(nloc), self.mask_num, replace=False + ).tolist() type_mask = np.isin(range(nloc), type_mask_res) input_dict["atype"][ii][type_mask] = self.mask_type_idx - type_mask_all[ii] = torch.tensor(type_mask, dtype=torch.bool, device=env.DEVICE) + type_mask_all[ii] = torch.tensor( + type_mask, dtype=torch.bool, device=env.DEVICE + ) label["type_mask"] = type_mask_all if (not self.mask_coord) and (not self.mask_cell) and (not self.mask_token): @@ -282,7 +294,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): ) more_loss["token_loss"] = token_loss.detach() loss += self.token_loss * token_loss.to(GLOBAL_PT_FLOAT_PRECISION) - + return model_pred, loss, more_loss @property diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py index 70a1339712..5ff7246a91 100644 --- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py +++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py @@ -29,11 +29,11 @@ def apply_out_stat( atype: torch.Tensor, ): noise_type = self.fitting_net.get_noise_type() - cell_std = self.fitting_net.get_cell_pert_fraction()/1.732 + cell_std = self.fitting_net.get_cell_pert_fraction() / 1.732 if noise_type == "gaussian": coord_std = self.fitting_net.get_coord_noise() elif noise_type == "uniform": - coord_std = self.fitting_net.get_coord_noise()/1.732 + coord_std = self.fitting_net.get_coord_noise() / 1.732 else: raise RuntimeError(f"Unknown noise type {noise_type}") ret["strain_components"] = ret["strain_components"] * cell_std diff --git 
a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 67123c8446..8134db201f 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -98,8 +98,9 @@ def _get_standard_model_components(model_params, ntypes): fitting_net["dim_descrpt"] = descriptor.get_dim_out() grad_force = "direct" not in fitting_net["type"] if fitting_net["type"] in ["denoise"]: - assert model_params["type_map"][-1] == "MASKED_TOKEN", \ + assert model_params["type_map"][-1] == "MASKED_TOKEN", ( f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{model_params['type_map'][-1]}'" + ) fitting_net["out_dim"] = descriptor.get_dim_emb() fitting_net["coord_noise"] = model_params["coord_noise"] fitting_net["cell_pert_fraction"] = model_params["cell_pert_fraction"] diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py index 61d1de238a..0f76b58300 100644 --- a/deepmd/pt/model/model/denoise_model.py +++ b/deepmd/pt/model/model/denoise_model.py @@ -85,5 +85,5 @@ def forward_lower( do_atomic_virial: bool = False, comm_dict: Optional[dict[str, torch.Tensor]] = None, ): - #TODO: implement forward_lower + # TODO: implement forward_lower pass diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index e30e15559b..a8b89aec39 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -232,7 +232,7 @@ def __init__( networks=[ FittingNet( in_dim, - self.ntypes-1, + self.ntypes - 1, self.neuron, self.activation_function, self.resnet_dt, @@ -299,7 +299,7 @@ def output_def(self): ), OutputVariableDef( "logits", - [self.ntypes-1], + [self.ntypes - 1], reducible=False, r_differentiable=False, c_differentiable=False, @@ -398,7 +398,7 @@ def get_coord_noise(self): def get_cell_pert_fraction(self): return self.cell_pert_fraction - + def get_noise_type(self): return self.noise_type @@ -554,16 +554,22 @@ def forward( # coord fitting updated_coord = self.filter_layers_coord.networks[0](xx) assert list(updated_coord.size()) == [nf, nloc, self.out_dim] - updated_coord = updated_coord.view(-1, 1, self.out_dim) # (nf x nloc) x 1 x od + updated_coord = updated_coord.view( + -1, 1, self.out_dim + ) # (nf x nloc) x 1 x od assert gr is not None - gr = gr.view(-1, self.out_dim, 3) # (nf x nloc) x od x 3 + gr = gr.view(-1, self.out_dim, 3) # (nf x nloc) x od x 3 updated_coord = ( torch.bmm(updated_coord, gr).squeeze(-2).view(nf, nloc, 3) ) # [nf, nloc, 3] # cell fitting - strain_components = self.filter_layers_cell.networks[0](xx) # [nframes, natoms[0], 6] + strain_components = self.filter_layers_cell.networks[0]( + xx + ) # [nframes, natoms[0], 6] # token fitting - logits = self.filter_layers_token.networks[0](xx) # [nframes, natoms[0], ntypes-1] + logits = self.filter_layers_token.networks[0]( + xx + ) # [nframes, natoms[0], ntypes-1] else: strain_components = torch.zeros( (nf, nloc, 6), @@ -576,7 +582,7 @@ def forward( device=descriptor.device, ) logits = torch.zeros( - (nf, nloc, self.ntypes-1), + (nf, nloc, self.ntypes - 1), dtype=self.prec, device=descriptor.device, ) @@ -586,9 +592,11 @@ def forward( mask = torch.tile(mask, (1, 1, 1)) updated_coord_type = ll(xx) assert list(updated_coord_type.size()) == [nf, nloc, self.out_dim] - updated_coord_type = updated_coord_type.view(-1, 1, self.out_dim) # (nf x nloc) x 1 x od + updated_coord_type = updated_coord_type.view( + -1, 1, self.out_dim + ) # (nf x nloc) x 1 x od assert gr is not None - gr = gr.view(-1, self.out_dim, 3) 
# (nf x nloc) x od x 3 + gr = gr.view(-1, self.out_dim, 3) # (nf x nloc) x od x 3 updated_coord_type = ( torch.bmm(updated_coord_type, gr).squeeze(-2).view(nf, nloc, 3) ) # [nf, nloc, 3] From 8d622bf9c72ab07e2d683e86db88a6f2b7f7d3fc Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Thu, 13 Mar 2025 16:48:28 +0800 Subject: [PATCH 05/26] Fix pre-commit and Code Scanning --- deepmd/pt/loss/denoise.py | 10 +++---- deepmd/pt/model/model/denoise_model.py | 20 ++++++++++++-- deepmd/pt/model/task/denoise.py | 36 +++++++------------------- 3 files changed, 32 insertions(+), 34 deletions(-) diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py index 2ca672d624..f0ced4dcb7 100644 --- a/deepmd/pt/loss/denoise.py +++ b/deepmd/pt/loss/denoise.py @@ -131,6 +131,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): more_loss: dict[str, torch.Tensor] Other losses for display. """ + rng = np.random.default_rng() nloc = input_dict["atype"].shape[1] nbz = input_dict["atype"].shape[0] input_dict["box"] = input_dict["box"].cuda() @@ -177,15 +178,14 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE ) for ii in range(nbz): - noise_on_coord = 0.0 - coord_mask_res = np.random.choice(range(nloc), mask_num, replace=False).tolist() + coord_mask_res = rng.choice(range(nloc), mask_num, replace=False).tolist() coord_mask = np.isin(range(nloc), coord_mask_res) if self.noise_type == "uniform": - noise_on_coord = np.random.uniform( + noise_on_coord = rng.uniform( low=-self.noise, high=self.coord_noise, size=(mask_num, 3) ) elif self.noise_type == "gaussian": - noise_on_coord = np.random.normal( + noise_on_coord = rng.normal( loc=0.0, scale=self.coord_noise, size=(mask_num, 3) ) else: @@ -222,7 +222,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): if self.same_mask: type_mask = coord_mask_all[ii].clone() else: - type_mask_res = np.random.choice(range(nloc), self.mask_num, replace=False).tolist() + type_mask_res = rng.choice(range(nloc), self.mask_num, replace=False).tolist() type_mask = np.isin(range(nloc), type_mask_res) input_dict["atype"][ii][type_mask] = self.mask_type_idx type_mask_all[ii] = torch.tensor(type_mask, dtype=torch.bool, device=env.DEVICE) diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py index 61d1de238a..c073d8fe5e 100644 --- a/deepmd/pt/model/model/denoise_model.py +++ b/deepmd/pt/model/model/denoise_model.py @@ -85,5 +85,21 @@ def forward_lower( do_atomic_virial: bool = False, comm_dict: Optional[dict[str, torch.Tensor]] = None, ): - #TODO: implement forward_lower - pass + model_ret = self.forward_common_lower( + extended_coord, + extended_atype, + nlist, + mapping, + fparam=fparam, + aparam=aparam, + do_atomic_virial=do_atomic_virial, + extra_nlist_sort=self.need_sorted_nlist_for_lower(), + ) + model_predict = {} + model_predict["updated_coord"] = model_ret["updated_coord"] + model_predict["atom_strain_components"] = model_ret["strain_components"] + model_predict["strain_components"] = model_ret["strain_components_redu"] + model_predict["logits"] = model_ret["logits"] + if "mask" in model_ret: + model_predict["mask"] = model_ret["mask"] + return model_predict diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index e30e15559b..d0429bc327 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -32,6 +32,14 @@ from 
deepmd.pt.utils.exclude_mask import ( AtomExcludeMask, ) +from deepmd.pt.utils.utils import ( + to_numpy_array, + to_torch_tensor, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, +) dtype = env.GLOBAL_PT_FLOAT_PRECISION device = env.DEVICE @@ -350,7 +358,7 @@ def serialize(self) -> dict: "spin": None, } - def deserialize(self) -> "DenoiseNet": + def deserialize(cls, data: dict) -> "DenoiseNet": data = data.copy() variables = data.pop("@variables") cell_nets = data.pop("cell_nets") @@ -464,22 +472,11 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, ) -> dict[str, torch.Tensor]: - """Based on embedding net output, alculate total energy. - - Args: - - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.dim_descrpt]. - - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. - - Returns - ------- - - `torch.Tensor`: Total energy with shape [nframes, natoms[0]]. - """ # cast the input to internal precsion xx = descriptor.to(self.prec) fparam = fparam.to(self.prec) if fparam is not None else None aparam = aparam.to(self.prec) if aparam is not None else None - xx_zeros = None nf, nloc, nd = xx.shape if nd != self.dim_descrpt: @@ -507,11 +504,6 @@ def forward( [xx, fparam], dim=-1, ) - if xx_zeros is not None: - xx_zeros = torch.cat( - [xx_zeros, fparam], - dim=-1, - ) # check aparam dim, concate to input descriptor if self.numb_aparam > 0 and not self.use_aparam_as_mask: assert aparam is not None, "aparam should not be None" @@ -531,11 +523,6 @@ def forward( [xx, aparam], dim=-1, ) - if xx_zeros is not None: - xx_zeros = torch.cat( - [xx_zeros, aparam], - dim=-1, - ) if self.dim_case_embd > 0: assert self.case_embd is not None @@ -544,11 +531,6 @@ def forward( [xx, case_embd], dim=-1, ) - if xx_zeros is not None: - xx_zeros = torch.cat( - [xx_zeros, case_embd], - dim=-1, - ) if self.mixed_types: # coord fitting From 34e647dc38517b08b0cd0fe8711cedb55b3cc302 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 08:52:25 +0000 Subject: [PATCH 06/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/loss/denoise.py | 8 ++++++-- deepmd/pt/model/model/denoise_model.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py index 8c1d471c4e..997d697f51 100644 --- a/deepmd/pt/loss/denoise.py +++ b/deepmd/pt/loss/denoise.py @@ -182,7 +182,9 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE ) for ii in range(nbz): - coord_mask_res = rng.choice(range(nloc), mask_num, replace=False).tolist() + coord_mask_res = rng.choice( + range(nloc), mask_num, replace=False + ).tolist() coord_mask = np.isin(range(nloc), coord_mask_res) if self.noise_type == "uniform": noise_on_coord = rng.uniform( @@ -228,7 +230,9 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): if self.same_mask: type_mask = coord_mask_all[ii].clone() else: - type_mask_res = rng.choice(range(nloc), self.mask_num, replace=False).tolist() + type_mask_res = rng.choice( + range(nloc), self.mask_num, replace=False + ).tolist() type_mask = np.isin(range(nloc), type_mask_res) input_dict["atype"][ii][type_mask] = self.mask_type_idx type_mask_all[ii] = torch.tensor( diff --git 
a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py index c073d8fe5e..c9422da842 100644 --- a/deepmd/pt/model/model/denoise_model.py +++ b/deepmd/pt/model/model/denoise_model.py @@ -102,4 +102,4 @@ def forward_lower( model_predict["logits"] = model_ret["logits"] if "mask" in model_ret: model_predict["mask"] = model_ret["mask"] - return model_predict + return model_predict From b2e1419730351ef594229054e80039be36ddd637 Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Thu, 13 Mar 2025 18:54:54 +0800 Subject: [PATCH 07/26] Add denoise rot,trans,permutation,smooth UT --- .../atomic_model/denoise_atomic_model.py | 4 +- deepmd/pt/model/model/__init__.py | 6 +- deepmd/pt/model/task/denoise.py | 1 + source/tests/pt/common.py | 55 ++++++- source/tests/pt/model/test_permutation.py | 46 +++++- .../pt/model/test_permutation_denoise.py | 99 ------------ source/tests/pt/model/test_rot.py | 24 ++- source/tests/pt/model/test_rot_denoise.py | 130 ---------------- source/tests/pt/model/test_smooth.py | 19 ++- source/tests/pt/model/test_smooth_denoise.py | 141 ------------------ source/tests/pt/model/test_trans.py | 16 +- source/tests/pt/model/test_trans_denoise.py | 89 ----------- 12 files changed, 151 insertions(+), 479 deletions(-) delete mode 100644 source/tests/pt/model/test_permutation_denoise.py delete mode 100644 source/tests/pt/model/test_rot_denoise.py delete mode 100644 source/tests/pt/model/test_smooth_denoise.py delete mode 100644 source/tests/pt/model/test_trans_denoise.py diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py index 5ff7246a91..f147471b6c 100644 --- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py +++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py @@ -36,6 +36,6 @@ def apply_out_stat( coord_std = self.fitting_net.get_coord_noise() / 1.732 else: raise RuntimeError(f"Unknown noise type {noise_type}") - ret["strain_components"] = ret["strain_components"] * cell_std - ret["updated_coord"] = ret["updated_coord"] * coord_std + ret["strain_components"] = ret["strain_components"] * cell_std if cell_std>0 else ret["strain_components"] + ret["updated_coord"] = ret["updated_coord"] * coord_std if coord_std>0 else ret["updated_coord"] return ret diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 8134db201f..75d025f832 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -102,9 +102,9 @@ def _get_standard_model_components(model_params, ntypes): f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{model_params['type_map'][-1]}'" ) fitting_net["out_dim"] = descriptor.get_dim_emb() - fitting_net["coord_noise"] = model_params["coord_noise"] - fitting_net["cell_pert_fraction"] = model_params["cell_pert_fraction"] - fitting_net["noise_type"] = model_params["noise_type"] + fitting_net["coord_noise"] = model_params.get("coord_noise", 0.2) + fitting_net["cell_pert_fraction"] = model_params.get("cell_pert_fraction", 0.0) + fitting_net["noise_type"] = model_params.get("noise_type", "gaussian") if not grad_force: fitting_net["out_dim"] = descriptor.get_dim_emb() if "ener" in fitting_net["type"]: diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index 1762661127..52f9b88dd4 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -358,6 +358,7 @@ def serialize(self) -> dict: "spin": None, } + 
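The `/ 1.732` in `apply_out_stat` above is 1/sqrt(3): a uniform variable on (-a, a) has standard deviation a/sqrt(3), so dividing the uniform noise magnitude by 1.732 yields the standard deviation used to rescale the raw network output, whereas for gaussian noise the magnitude already is the standard deviation. A quick numeric check of that factor:

    import numpy as np

    a = 0.2
    samples = np.random.default_rng(0).uniform(-a, a, size=1_000_000)
    print(samples.std())       # ~0.1155
    print(a / np.sqrt(3))      # 0.11547..., i.e. a / 1.732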
@classmethod def deserialize(cls, data: dict) -> "DenoiseNet": data = data.copy() variables = data.pop("@variables") diff --git a/source/tests/pt/common.py b/source/tests/pt/common.py index 8709c8b4f9..6029c3eef7 100644 --- a/source/tests/pt/common.py +++ b/source/tests/pt/common.py @@ -66,6 +66,8 @@ def eval_model( force_mag_out = [] virial_out = [] atomic_virial_out = [] + strain_components_out = [] + atom_strain_components_out = [] updated_coord_out = [] logits_out = [] err_msg = ( @@ -162,6 +164,14 @@ def eval_model( atomic_virial_out.append( batch_output["atom_virial"].detach().cpu().numpy() ) + if "strain_components" in batch_output: + strain_components_out.append( + batch_output["strain_components"].detach().cpu().numpy() + ) + if "atom_strain_components" in batch_output: + atom_strain_components_out.append( + batch_output["atom_strain_components"].detach().cpu().numpy() + ) if "updated_coord" in batch_output: updated_coord_out.append( batch_output["updated_coord"].detach().cpu().numpy() @@ -181,6 +191,10 @@ def eval_model( virial_out.append(batch_output["virial"]) if "atom_virial" in batch_output: atomic_virial_out.append(batch_output["atom_virial"]) + if "strain_components" in batch_output: + strain_components_out.append(batch_output["strain_components"]) + if "atom_strain_components" in batch_output: + atom_strain_components_out.append(batch_output["atom_strain_components"]) if "updated_coord" in batch_output: updated_coord_out.append(batch_output["updated_coord"]) if "logits" in batch_output: @@ -210,8 +224,18 @@ def eval_model( if atomic_virial_out else np.zeros([nframes, natoms, 3, 3]) # pylint: disable=no-explicit-dtype ) + strain_components_out = ( + np.concatenate(strain_components_out) + if strain_components_out + else np.zeros([nframes, 6]) # pylint: disable=no-explicit-dtype + ) + atom_strain_components_out = ( + np.concatenate(atom_strain_components_out) + if atom_strain_components_out + else np.zeros([nframes, natoms, 6]) # pylint: disable=no-explicit-dtype + ) updated_coord_out = ( - np.concatenate(updated_coord_out) if updated_coord_out else None + np.concatenate(updated_coord_out) if updated_coord_out else np.zeros([nframes, natoms, 3]) ) logits_out = np.concatenate(logits_out) if logits_out else None else: @@ -257,10 +281,35 @@ def eval_model( [nframes, natoms, 3, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE ) ) - updated_coord_out = torch.cat(updated_coord_out) if updated_coord_out else None + strain_components_out = ( + torch.cat(strain_components_out) + if strain_components_out + else torch.zeros( + [nframes, 6], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + atom_component_out = ( + torch.cat(atom_strain_components_out) + if atom_strain_components_out + else torch.zeros( + [nframes, natoms, 6], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) + updated_coord_out = ( + torch.cat(updated_coord_out) + if updated_coord_out + else torch.zeros( + [nframes, natoms, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE + ) + ) logits_out = torch.cat(logits_out) if logits_out else None if denoise: - return updated_coord_out, logits_out + results_dict = { + "strain_components": strain_components_out, + "updated_coord": updated_coord_out, + "logits": logits_out, + } + return results_dict else: results_dict = { "energy": energy_out, diff --git a/source/tests/pt/model/test_permutation.py b/source/tests/pt/model/test_permutation.py index 0354336e37..2cbcba77e3 100644 --- a/source/tests/pt/model/test_permutation.py +++ 
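Every optional head handled in `eval_model` above follows the same collect-or-zero pattern: append the tensor when the batch output provides it, otherwise substitute zeros of the documented shape so callers can index the result dict unconditionally. Reduced to its essence (shapes hypothetical):

    import numpy as np

    def collect(batches, key, fallback_shape):
        """Concatenate `key` across batch outputs, or zero-fill if absent."""
        found = [b[key] for b in batches if key in b]
        return np.concatenate(found) if found else np.zeros(fallback_shape)

    batches = [{"strain_components": np.ones((2, 6))},
               {"strain_components": np.ones((1, 6))}]
    strain = collect(batches, "strain_components", (3, 6))   # concatenated, (3, 6)
    coords = collect(batches, "updated_coord", (3, 5, 3))    # zero fallback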
b/source/tests/pt/model/test_permutation.py @@ -377,6 +377,35 @@ }, } +model_denoise = { + "type_map": ["H", "C", "N", "O", "MASKED_TOKEN"], + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 64, + "attn_layer": 2, + "attn_dotr": True, + "attn_mask": False, + "activation_function": "tanh", + "scaling_factor": 1.0, + "normalize": False, + "temperature": 1.0, + "set_davg_zero": True, + "type_one_side": True, + "seed": 1, + }, + "fitting_net": { + "type": "denoise", + "neuron": [24,24,24], + "resnet_dt": True, + "seed": 1, + "_comment": " that's all" + }, +} class PermutationTest: def test( @@ -396,7 +425,10 @@ def test( atype = torch.tensor([0, 0, 0, 1, 1], dtype=torch.int32, device=env.DEVICE) idx_perm = [1, 0, 4, 3, 2] test_spin = getattr(self, "test_spin", False) - if not test_spin: + test_denoise = getattr(self, "test_denoise", False) + if test_denoise: + test_keys = ["strain_components", "updated_coord", "logits"] + elif not test_spin: test_keys = ["energy", "force", "virial"] else: test_keys = ["energy", "force", "force_mag", "virial"] @@ -406,6 +438,7 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret0 = {key: result_0[key].squeeze(0) for key in test_keys} result_1 = eval_model( @@ -414,13 +447,14 @@ def test( cell.unsqueeze(0), atype[idx_perm], spins=spin[idx_perm].unsqueeze(0), + denoise=test_denoise, ) ret1 = {key: result_1[key].squeeze(0) for key in test_keys} prec = 1e-10 for key in test_keys: - if key in ["energy"]: + if key in ["energy", "strain_components"]: torch.testing.assert_close(ret0[key], ret1[key], rtol=prec, atol=prec) - elif key in ["force", "force_mag"]: + elif key in ["force", "force_mag", "updated_coord", "logits"]: torch.testing.assert_close( ret0[key][idx_perm], ret1[key], rtol=prec, atol=prec ) @@ -500,6 +534,12 @@ def setUp(self) -> None: self.test_spin = True self.model = get_model(model_params).to(env.DEVICE) +class TestDenoiseModelDPA1(unittest.TestCase, PermutationTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_denoise) + self.type_split = False + self.test_denoise = True + self.model = get_model(model_params).to(env.DEVICE) # class TestEnergyFoo(unittest.TestCase): # def test(self): diff --git a/source/tests/pt/model/test_permutation_denoise.py b/source/tests/pt/model/test_permutation_denoise.py deleted file mode 100644 index 389520daa3..0000000000 --- a/source/tests/pt/model/test_permutation_denoise.py +++ /dev/null @@ -1,99 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import copy -import unittest - -import torch - -from deepmd.pt.model.model import ( - get_model, -) -from deepmd.pt.utils import ( - env, -) - -from ...seed import ( - GLOBAL_SEED, -) -from ..common import ( - eval_model, -) -from .test_permutation import ( # model_dpau, - model_dpa1, - model_dpa2, - model_hybrid, -) - -dtype = torch.float64 - -model_dpa1 = copy.deepcopy(model_dpa1) -model_dpa2 = copy.deepcopy(model_dpa2) -model_hybrid = copy.deepcopy(model_hybrid) -model_dpa1["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] -model_dpa1.pop("fitting_net") -model_dpa2["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] -model_dpa2.pop("fitting_net") -model_hybrid["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] -model_hybrid.pop("fitting_net") - - -class PermutationDenoiseTest: - def test( - self, - ) -> None: - generator = torch.Generator(device=env.DEVICE).manual_seed(GLOBAL_SEED) - natoms = 5 - cell = torch.rand([3, 3], 
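The permutation test above splits the denoise outputs by symmetry: frame-level quantities (`energy`, `strain_components`) must be unchanged when atoms are reordered, while per-atom quantities (`force`, `updated_coord`, `logits`) must be reordered along with them. The check as a standalone helper (the toy model below is hypothetical):

    import torch

    def check_permutation(f, coord, idx_perm, atomwise_keys):
        ret0, ret1 = f(coord), f(coord[idx_perm])
        for key in ret0:
            if key in atomwise_keys:   # rows follow the atoms
                torch.testing.assert_close(ret0[key][idx_perm], ret1[key])
            else:                      # frame-level: invariant
                torch.testing.assert_close(ret0[key], ret1[key])

    f = lambda c: {"energy": c.sum(), "updated_coord": c.clone()}
    check_permutation(f, torch.rand(5, 3), [1, 0, 4, 3, 2], {"updated_coord"})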
dtype=dtype, generator=generator).to(env.DEVICE) - cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) - coord = torch.rand([natoms, 3], dtype=dtype, generator=generator).to(env.DEVICE) - coord = torch.matmul(coord, cell) - atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) - idx_perm = [1, 0, 4, 3, 2] - updated_c0, logits0 = eval_model( - self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - ret0 = {"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)} - updated_c1, logits1 = eval_model( - self.model, - coord[idx_perm].unsqueeze(0), - cell.unsqueeze(0), - atype[idx_perm], - denoise=True, - ) - ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)} - prec = 1e-10 - torch.testing.assert_close( - ret0["updated_coord"][idx_perm], ret1["updated_coord"], rtol=prec, atol=prec - ) - torch.testing.assert_close( - ret0["logits"][idx_perm], ret1["logits"], rtol=prec, atol=prec - ) - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA1(unittest.TestCase, PermutationDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa1) - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA2(unittest.TestCase, PermutationDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa2) - self.type_split = True - self.model = get_model( - model_params, - ).to(env.DEVICE) - - -# @unittest.skip("hybrid not supported at the moment") -# class TestDenoiseModelHybrid(unittest.TestCase, TestPermutationDenoise): -# def setUp(self): -# model_params = copy.deepcopy(model_hybrid_denoise) -# self.type_split = True -# self.model = get_model(model_params).to(env.DEVICE) - - -if __name__ == "__main__": - unittest.main() diff --git a/source/tests/pt/model/test_rot.py b/source/tests/pt/model/test_rot.py index 283dbb31d7..c0dd5f41bc 100644 --- a/source/tests/pt/model/test_rot.py +++ b/source/tests/pt/model/test_rot.py @@ -25,6 +25,7 @@ model_se_e2_a, model_spin, model_zbl, + model_denoise, ) dtype = torch.float64 @@ -51,7 +52,10 @@ def test( ) test_spin = getattr(self, "test_spin", False) - if not test_spin: + test_denoise = getattr(self, "test_denoise", False) + if test_denoise: + test_keys = ["strain_components", "updated_coord", "logits"] + elif not test_spin: test_keys = ["energy", "force", "virial"] else: test_keys = ["energy", "force", "force_mag"] @@ -66,6 +70,7 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret0 = {key: result_0[key].squeeze(0) for key in test_keys} result_1 = eval_model( @@ -74,12 +79,13 @@ def test( cell.unsqueeze(0), atype, spins=spin_rot.unsqueeze(0), + denoise=test_denoise, ) ret1 = {key: result_1[key].squeeze(0) for key in test_keys} for key in test_keys: - if key in ["energy"]: + if key in ["energy", "strain_components", "logits"]: torch.testing.assert_close(ret0[key], ret1[key], rtol=prec, atol=prec) - elif key in ["force", "force_mag"]: + elif key in ["force", "force_mag", "updated_coord"]: torch.testing.assert_close( torch.matmul(ret0[key], rmat), ret1[key], rtol=prec, atol=prec ) @@ -116,6 +122,7 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret0 = {key: result_0[key].squeeze(0) for key in test_keys} result_1 = eval_model( @@ -124,12 +131,13 @@ def test( cell_rot.unsqueeze(0), atype, spins=spin_rot.unsqueeze(0), + denoise=test_denoise, ) 
ret1 = {key: result_1[key].squeeze(0) for key in test_keys} for key in test_keys: - if key in ["energy"]: + if key in ["energy", "strain_components", "logits"]: torch.testing.assert_close(ret0[key], ret1[key], rtol=prec, atol=prec) - elif key in ["force", "force_mag"]: + elif key in ["force", "force_mag", "updated_coord"]: torch.testing.assert_close( torch.matmul(ret0[key], rmat), ret1[key], rtol=prec, atol=prec ) @@ -212,6 +220,12 @@ def setUp(self) -> None: self.test_spin = True self.model = get_model(model_params).to(env.DEVICE) +class TestDenoiseModelDPA1(unittest.TestCase, RotTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_denoise) + self.type_split = False + self.test_denoise = True + self.model = get_model(model_params).to(env.DEVICE) if __name__ == "__main__": unittest.main() diff --git a/source/tests/pt/model/test_rot_denoise.py b/source/tests/pt/model/test_rot_denoise.py deleted file mode 100644 index fcae4b23d7..0000000000 --- a/source/tests/pt/model/test_rot_denoise.py +++ /dev/null @@ -1,130 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import copy -import unittest - -import torch - -from deepmd.pt.model.model import ( - get_model, -) -from deepmd.pt.utils import ( - env, -) - -from ...seed import ( - GLOBAL_SEED, -) -from ..common import ( - eval_model, -) -from .test_permutation_denoise import ( - model_dpa1, - model_dpa2, -) - -dtype = torch.float64 - - -class RotDenoiseTest: - def test( - self, - ) -> None: - generator = torch.Generator(device=env.DEVICE).manual_seed(GLOBAL_SEED) - prec = 1e-10 - natoms = 5 - cell = 10.0 * torch.eye(3, dtype=dtype).to(env.DEVICE) - coord = 2 * torch.rand( - [natoms, 3], dtype=dtype, generator=generator, device=env.DEVICE - ) - shift = torch.tensor([4, 4, 4], dtype=dtype).to(env.DEVICE) - atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) - from scipy.stats import ( - special_ortho_group, - ) - - rmat = torch.tensor(special_ortho_group.rvs(3), dtype=dtype).to(env.DEVICE) - - # rotate only coord and shift to the center of cell - coord_rot = torch.matmul(coord, rmat) - update_c0, logits0 = eval_model( - self.model, - (coord + shift).unsqueeze(0), - cell.unsqueeze(0), - atype, - denoise=True, - ) - update_c0 = update_c0 - (coord + shift).unsqueeze(0) - ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} - update_c1, logits1 = eval_model( - self.model, - (coord_rot + shift).unsqueeze(0), - cell.unsqueeze(0), - atype, - denoise=True, - ) - update_c1 = update_c1 - (coord_rot + shift).unsqueeze(0) - ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} - torch.testing.assert_close( - torch.matmul(ret0["updated_coord"], rmat), - ret1["updated_coord"], - rtol=prec, - atol=prec, - ) - torch.testing.assert_close(ret0["logits"], ret1["logits"], rtol=prec, atol=prec) - - # rotate coord and cell - torch.manual_seed(0) - cell = torch.rand([3, 3], dtype=dtype, generator=generator).to(env.DEVICE) - cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) - coord = torch.rand([natoms, 3], dtype=dtype, generator=generator).to(env.DEVICE) - coord = torch.matmul(coord, cell) - atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) - coord_rot = torch.matmul(coord, rmat) - cell_rot = torch.matmul(cell, rmat) - update_c0, logits0 = eval_model( - self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} - update_c1, logits1 = eval_model( - self.model, - coord_rot.unsqueeze(0), - 
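The rotation test applies the analogous split under an orthogonal matrix: `energy`, `strain_components`, and `logits` compare directly, while vector outputs must satisfy f(xR) = f(x)R, which the test expresses as `torch.matmul(ret0[key], rmat)`. Condensed (using scipy's `special_ortho_group`, as the deleted standalone test did; the model stub `f` is hypothetical):

    import torch
    from scipy.stats import special_ortho_group

    rmat = torch.tensor(special_ortho_group.rvs(3, random_state=0),
                        dtype=torch.float64)

    def check_rotation(f, coord, vector_keys):
        ret0, ret1 = f(coord), f(coord @ rmat)
        for key in ret0:
            if key in vector_keys:
                torch.testing.assert_close(ret0[key] @ rmat, ret1[key])
            else:
                torch.testing.assert_close(ret0[key], ret1[key])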
cell_rot.unsqueeze(0), - atype, - denoise=True, - ) - ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} - torch.testing.assert_close(ret0["logits"], ret1["logits"], rtol=prec, atol=prec) - torch.testing.assert_close( - torch.matmul(ret0["updated_coord"], rmat), - ret1["updated_coord"], - rtol=prec, - atol=prec, - ) - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA1(unittest.TestCase, RotDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa1) - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA2(unittest.TestCase, RotDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa2) - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - - -# @unittest.skip("hybrid not supported at the moment") -# class TestEnergyModelHybrid(unittest.TestCase, TestRotDenoise): -# def setUp(self): -# model_params = copy.deepcopy(model_hybrid_denoise) -# self.type_split = True -# self.model = get_model(model_params).to(env.DEVICE) - - -if __name__ == "__main__": - unittest.main() diff --git a/source/tests/pt/model/test_smooth.py b/source/tests/pt/model/test_smooth.py index 1c6303d14c..4d1b28c19d 100644 --- a/source/tests/pt/model/test_smooth.py +++ b/source/tests/pt/model/test_smooth.py @@ -25,6 +25,7 @@ model_se_e2_a, model_spin, model_zbl, + model_denoise, ) dtype = torch.float64 @@ -83,7 +84,10 @@ def test( coord3[1][0] += epsilon coord3[2][1] += epsilon test_spin = getattr(self, "test_spin", False) - if not test_spin: + test_denoise = getattr(self, "test_denoise", False) + if test_denoise: + test_keys = ["strain_components", "updated_coord", "logits"] + elif not test_spin: test_keys = ["energy", "force", "virial"] else: test_keys = ["energy", "force", "force_mag", "virial"] @@ -94,6 +98,7 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret0 = {key: result_0[key].squeeze(0) for key in test_keys} result_1 = eval_model( @@ -102,6 +107,7 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret1 = {key: result_1[key].squeeze(0) for key in test_keys} result_2 = eval_model( @@ -110,6 +116,7 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret2 = {key: result_2[key].squeeze(0) for key in test_keys} result_3 = eval_model( @@ -118,12 +125,13 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret3 = {key: result_3[key].squeeze(0) for key in test_keys} def compare(ret0, ret1) -> None: for key in test_keys: - if key in ["energy"]: + if key in ["energy", "strain_components", "updated_coord", "logits"]: torch.testing.assert_close( ret0[key], ret1[key], rtol=rprec, atol=aprec ) @@ -250,6 +258,13 @@ def setUp(self) -> None: self.model = get_model(model_params).to(env.DEVICE) self.epsilon, self.aprec = None, None +class TestDenoiseModelDPA1(unittest.TestCase, SmoothTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_denoise) + self.type_split = False + self.test_denoise = True + self.model = get_model(model_params).to(env.DEVICE) + self.epsilon, self.aprec = None, None # class TestEnergyFoo(unittest.TestCase): # def test(self): diff --git a/source/tests/pt/model/test_smooth_denoise.py b/source/tests/pt/model/test_smooth_denoise.py deleted file mode 100644 index 199d6664a1..0000000000 --- 
a/source/tests/pt/model/test_smooth_denoise.py +++ /dev/null @@ -1,141 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import copy -import unittest - -import torch - -from deepmd.pt.model.model import ( - get_model, -) -from deepmd.pt.utils import ( - env, -) - -from ...seed import ( - GLOBAL_SEED, -) -from ..common import ( - eval_model, -) -from .test_permutation_denoise import ( - model_dpa2, -) - -dtype = torch.float64 - - -class SmoothDenoiseTest: - def test( - self, - ) -> None: - # displacement of atoms - epsilon = 1e-5 if self.epsilon is None else self.epsilon - # required prec. relative prec is not checked. - rprec = 0 - aprec = 1e-5 if self.aprec is None else self.aprec - - natoms = 10 - cell = 8.6 * torch.eye(3, dtype=dtype).to(env.DEVICE) - generator = torch.Generator(device=env.DEVICE).manual_seed(GLOBAL_SEED) - atype = torch.randint(0, 3, [natoms], generator=generator, device=env.DEVICE) - coord0 = ( - torch.tensor( - [ - 0.0, - 0.0, - 0.0, - 4.0 - 0.5 * epsilon, - 0.0, - 0.0, - 0.0, - 4.0 - 0.5 * epsilon, - 0.0, - ], - dtype=dtype, - ) - .view([-1, 3]) - .to(env.DEVICE) - ) - coord1 = torch.rand( - [natoms - coord0.shape[0], 3], dtype=dtype, generator=generator - ).to(env.DEVICE) - coord1 = torch.matmul(coord1, cell) - coord = torch.concat([coord0, coord1], dim=0) - - coord0 = torch.clone(coord) - coord1 = torch.clone(coord) - coord1[1][0] += epsilon - coord2 = torch.clone(coord) - coord2[2][1] += epsilon - coord3 = torch.clone(coord) - coord3[1][0] += epsilon - coord3[2][1] += epsilon - - update_c0, logits0 = eval_model( - self.model, coord0.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} - update_c1, logits1 = eval_model( - self.model, coord1.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} - update_c2, logits2 = eval_model( - self.model, coord2.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - ret2 = {"updated_coord": update_c2.squeeze(0), "logits": logits2.squeeze(0)} - update_c3, logits3 = eval_model( - self.model, coord3.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - ret3 = {"updated_coord": update_c3.squeeze(0), "logits": logits3.squeeze(0)} - - def compare(ret0, ret1) -> None: - torch.testing.assert_close( - ret0["updated_coord"], ret1["updated_coord"], rtol=rprec, atol=aprec - ) - torch.testing.assert_close( - ret0["logits"], ret1["logits"], rtol=rprec, atol=aprec - ) - - compare(ret0, ret1) - compare(ret1, ret2) - compare(ret0, ret3) - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA2(unittest.TestCase, SmoothDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa2) - model_params["descriptor"]["sel"] = 8 - model_params["descriptor"]["rcut_smth"] = 3.5 - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - self.epsilon, self.aprec = None, None - self.epsilon = 1e-7 - self.aprec = 1e-5 - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA2_1(unittest.TestCase, SmoothDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa2) - # model_params["descriptor"]["combine_grrg"] = True - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - self.epsilon, self.aprec = None, None - self.epsilon = 1e-7 - self.aprec = 1e-5 - - -# @unittest.skip("hybrid not supported at the moment") -# class 
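What the deleted smoothness test checked survives in `test_smooth.py` above: two atoms sit just inside the cutoff transition (`4.0 - 0.5 * epsilon`), each is displaced by `epsilon`, and a smooth model's outputs may differ only within `aprec`. The structure of that check, reduced (tolerances and the model stub `f` are illustrative):

    import torch

    def check_smooth(f, coord, eps=1e-5, aprec=1e-5):
        variants = []
        for dx1, dy2 in ((0, 0), (eps, 0), (0, eps), (eps, eps)):
            c = coord.clone()
            c[1, 0] += dx1      # nudge atom 1 along x
            c[2, 1] += dy2      # nudge atom 2 along y
            variants.append(f(c))
        ref = variants[0]
        for other in variants[1:]:
            for key in ref:
                torch.testing.assert_close(ref[key], other[key], rtol=0, atol=aprec)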
TestDenoiseModelHybrid(unittest.TestCase, TestSmoothDenoise): -# def setUp(self): -# model_params = copy.deepcopy(model_hybrid_denoise) -# self.type_split = True -# self.model = get_model(model_params).to(env.DEVICE) -# self.epsilon, self.aprec = None, None -# self.epsilon = 1e-7 -# self.aprec = 1e-5 - - -if __name__ == "__main__": - unittest.main() diff --git a/source/tests/pt/model/test_trans.py b/source/tests/pt/model/test_trans.py index 2e39cc4bd5..a3d7ab98e8 100644 --- a/source/tests/pt/model/test_trans.py +++ b/source/tests/pt/model/test_trans.py @@ -25,6 +25,7 @@ model_se_e2_a, model_spin, model_zbl, + model_denoise, ) dtype = torch.float64 @@ -54,7 +55,10 @@ def test( cell, ) test_spin = getattr(self, "test_spin", False) - if not test_spin: + test_denoise = getattr(self, "test_denoise", False) + if test_denoise: + test_keys = ["strain_components", "updated_coord", "logits"] + elif not test_spin: test_keys = ["energy", "force", "virial"] else: test_keys = ["energy", "force", "force_mag", "virial"] @@ -64,6 +68,7 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret0 = {key: result_0[key].squeeze(0) for key in test_keys} result_1 = eval_model( @@ -72,11 +77,12 @@ def test( cell.unsqueeze(0), atype, spins=spin.unsqueeze(0), + denoise=test_denoise, ) ret1 = {key: result_1[key].squeeze(0) for key in test_keys} prec = 1e-7 for key in test_keys: - if key in ["energy", "force", "force_mag"]: + if key in ["energy", "force", "force_mag", "strain_components", "updated_coord", "logits"]: torch.testing.assert_close(ret0[key], ret1[key], rtol=prec, atol=prec) elif key == "virial": if not hasattr(self, "test_virial") or self.test_virial: @@ -154,6 +160,12 @@ def setUp(self) -> None: self.test_spin = True self.model = get_model(model_params).to(env.DEVICE) +class TestDenoiseModelDPA1(unittest.TestCase, TransTest): + def setUp(self) -> None: + model_params = copy.deepcopy(model_denoise) + self.type_split = False + self.test_denoise = True + self.model = get_model(model_params).to(env.DEVICE) if __name__ == "__main__": unittest.main() diff --git a/source/tests/pt/model/test_trans_denoise.py b/source/tests/pt/model/test_trans_denoise.py deleted file mode 100644 index 77bff7980a..0000000000 --- a/source/tests/pt/model/test_trans_denoise.py +++ /dev/null @@ -1,89 +0,0 @@ -# SPDX-License-Identifier: LGPL-3.0-or-later -import copy -import unittest - -import torch - -from deepmd.pt.model.model import ( - get_model, -) -from deepmd.pt.utils import ( - env, -) - -from ...seed import ( - GLOBAL_SEED, -) -from ..common import ( - eval_model, -) -from .test_permutation_denoise import ( - model_dpa1, - model_dpa2, - model_hybrid, -) - -dtype = torch.float64 - - -class TransDenoiseTest: - def test( - self, - ) -> None: - natoms = 5 - generator = torch.Generator(device=env.DEVICE).manual_seed(GLOBAL_SEED) - cell = torch.rand([3, 3], dtype=dtype, generator=generator).to(env.DEVICE) - cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) - coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) - coord = torch.matmul(coord, cell) - atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) - shift = (torch.rand([3], dtype=dtype, generator=generator) - 0.5).to( - env.DEVICE - ) * 2.0 - coord_s = torch.matmul( - torch.remainder(torch.matmul(coord + shift, torch.linalg.inv(cell)), 1.0), - cell, - ) - updated_c0, logits0 = eval_model( - self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - updated_c0 = updated_c0 - coord.unsqueeze(0) - ret0 = 
{"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)} - updated_c1, logits1 = eval_model( - self.model, coord_s.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True - ) - updated_c1 = updated_c1 - coord_s.unsqueeze(0) - ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)} - prec = 1e-10 - torch.testing.assert_close( - ret0["updated_coord"], ret1["updated_coord"], rtol=prec, atol=prec - ) - torch.testing.assert_close(ret0["logits"], ret1["logits"], rtol=prec, atol=prec) - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA1(unittest.TestCase, TransDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa1) - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - - -@unittest.skip("support of the denoise is temporally disabled") -class TestDenoiseModelDPA2(unittest.TestCase, TransDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_dpa2) - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - - -@unittest.skip("hybrid not supported at the moment") -class TestDenoiseModelHybrid(unittest.TestCase, TransDenoiseTest): - def setUp(self) -> None: - model_params = copy.deepcopy(model_hybrid) - self.type_split = True - self.model = get_model(model_params).to(env.DEVICE) - - -if __name__ == "__main__": - unittest.main() From b9e8528360031b0b8b1772b31c7e73203cf5f0c0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 13 Mar 2025 10:57:02 +0000 Subject: [PATCH 08/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../pt/model/atomic_model/denoise_atomic_model.py | 10 ++++++++-- source/tests/pt/common.py | 12 ++++++++---- source/tests/pt/model/test_permutation.py | 7 +++++-- source/tests/pt/model/test_rot.py | 4 +++- source/tests/pt/model/test_smooth.py | 4 +++- source/tests/pt/model/test_trans.py | 13 +++++++++++-- 6 files changed, 38 insertions(+), 12 deletions(-) diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py index f147471b6c..c6f600b3da 100644 --- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py +++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py @@ -36,6 +36,12 @@ def apply_out_stat( coord_std = self.fitting_net.get_coord_noise() / 1.732 else: raise RuntimeError(f"Unknown noise type {noise_type}") - ret["strain_components"] = ret["strain_components"] * cell_std if cell_std>0 else ret["strain_components"] - ret["updated_coord"] = ret["updated_coord"] * coord_std if coord_std>0 else ret["updated_coord"] + ret["strain_components"] = ( + ret["strain_components"] * cell_std + if cell_std > 0 + else ret["strain_components"] + ) + ret["updated_coord"] = ( + ret["updated_coord"] * coord_std if coord_std > 0 else ret["updated_coord"] + ) return ret diff --git a/source/tests/pt/common.py b/source/tests/pt/common.py index 6029c3eef7..1f68cbcd2c 100644 --- a/source/tests/pt/common.py +++ b/source/tests/pt/common.py @@ -66,7 +66,7 @@ def eval_model( force_mag_out = [] virial_out = [] atomic_virial_out = [] - strain_components_out = [] + strain_components_out = [] atom_strain_components_out = [] updated_coord_out = [] logits_out = [] @@ -194,7 +194,9 @@ def eval_model( if "strain_components" in batch_output: strain_components_out.append(batch_output["strain_components"]) if "atom_strain_components" in batch_output: - 
atom_strain_components_out.append(batch_output["atom_strain_components"]) + atom_strain_components_out.append( + batch_output["atom_strain_components"] + ) if "updated_coord" in batch_output: updated_coord_out.append(batch_output["updated_coord"]) if "logits" in batch_output: @@ -235,7 +237,9 @@ def eval_model( else np.zeros([nframes, natoms, 6]) # pylint: disable=no-explicit-dtype ) updated_coord_out = ( - np.concatenate(updated_coord_out) if updated_coord_out else np.zeros([nframes, natoms, 3]) + np.concatenate(updated_coord_out) + if updated_coord_out + else np.zeros([nframes, natoms, 3]) ) logits_out = np.concatenate(logits_out) if logits_out else None else: @@ -301,7 +305,7 @@ def eval_model( else torch.zeros( [nframes, natoms, 3], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE ) - ) + ) logits_out = torch.cat(logits_out) if logits_out else None if denoise: results_dict = { diff --git a/source/tests/pt/model/test_permutation.py b/source/tests/pt/model/test_permutation.py index 2cbcba77e3..f2443cd15e 100644 --- a/source/tests/pt/model/test_permutation.py +++ b/source/tests/pt/model/test_permutation.py @@ -400,13 +400,14 @@ }, "fitting_net": { "type": "denoise", - "neuron": [24,24,24], + "neuron": [24, 24, 24], "resnet_dt": True, "seed": 1, - "_comment": " that's all" + "_comment": " that's all", }, } + class PermutationTest: def test( self, @@ -534,6 +535,7 @@ def setUp(self) -> None: self.test_spin = True self.model = get_model(model_params).to(env.DEVICE) + class TestDenoiseModelDPA1(unittest.TestCase, PermutationTest): def setUp(self) -> None: model_params = copy.deepcopy(model_denoise) @@ -541,6 +543,7 @@ def setUp(self) -> None: self.test_denoise = True self.model = get_model(model_params).to(env.DEVICE) + # class TestEnergyFoo(unittest.TestCase): # def test(self): # model_params = model_dpau diff --git a/source/tests/pt/model/test_rot.py b/source/tests/pt/model/test_rot.py index c0dd5f41bc..c1e1632dfe 100644 --- a/source/tests/pt/model/test_rot.py +++ b/source/tests/pt/model/test_rot.py @@ -18,6 +18,7 @@ eval_model, ) from .test_permutation import ( # model_dpau, + model_denoise, model_dos, model_dpa1, model_dpa2, @@ -25,7 +26,6 @@ model_se_e2_a, model_spin, model_zbl, - model_denoise, ) dtype = torch.float64 @@ -220,6 +220,7 @@ def setUp(self) -> None: self.test_spin = True self.model = get_model(model_params).to(env.DEVICE) + class TestDenoiseModelDPA1(unittest.TestCase, RotTest): def setUp(self) -> None: model_params = copy.deepcopy(model_denoise) @@ -227,5 +228,6 @@ def setUp(self) -> None: self.test_denoise = True self.model = get_model(model_params).to(env.DEVICE) + if __name__ == "__main__": unittest.main() diff --git a/source/tests/pt/model/test_smooth.py b/source/tests/pt/model/test_smooth.py index 4d1b28c19d..8eb430cf3d 100644 --- a/source/tests/pt/model/test_smooth.py +++ b/source/tests/pt/model/test_smooth.py @@ -18,6 +18,7 @@ eval_model, ) from .test_permutation import ( # model_dpau, + model_denoise, model_dos, model_dpa1, model_dpa2, @@ -25,7 +26,6 @@ model_se_e2_a, model_spin, model_zbl, - model_denoise, ) dtype = torch.float64 @@ -258,6 +258,7 @@ def setUp(self) -> None: self.model = get_model(model_params).to(env.DEVICE) self.epsilon, self.aprec = None, None + class TestDenoiseModelDPA1(unittest.TestCase, SmoothTest): def setUp(self) -> None: model_params = copy.deepcopy(model_denoise) @@ -266,6 +267,7 @@ def setUp(self) -> None: self.model = get_model(model_params).to(env.DEVICE) self.epsilon, self.aprec = None, None + # class TestEnergyFoo(unittest.TestCase): 
# def test(self): # model_params = model_dpau diff --git a/source/tests/pt/model/test_trans.py b/source/tests/pt/model/test_trans.py index a3d7ab98e8..916dce8bff 100644 --- a/source/tests/pt/model/test_trans.py +++ b/source/tests/pt/model/test_trans.py @@ -18,6 +18,7 @@ eval_model, ) from .test_permutation import ( # model_dpau, + model_denoise, model_dos, model_dpa1, model_dpa2, @@ -25,7 +26,6 @@ model_se_e2_a, model_spin, model_zbl, - model_denoise, ) dtype = torch.float64 @@ -82,7 +82,14 @@ def test( ret1 = {key: result_1[key].squeeze(0) for key in test_keys} prec = 1e-7 for key in test_keys: - if key in ["energy", "force", "force_mag", "strain_components", "updated_coord", "logits"]: + if key in [ + "energy", + "force", + "force_mag", + "strain_components", + "updated_coord", + "logits", + ]: torch.testing.assert_close(ret0[key], ret1[key], rtol=prec, atol=prec) elif key == "virial": if not hasattr(self, "test_virial") or self.test_virial: @@ -160,6 +167,7 @@ def setUp(self) -> None: self.test_spin = True self.model = get_model(model_params).to(env.DEVICE) + class TestDenoiseModelDPA1(unittest.TestCase, TransTest): def setUp(self) -> None: model_params = copy.deepcopy(model_denoise) @@ -167,5 +175,6 @@ def setUp(self) -> None: self.test_denoise = True self.model = get_model(model_params).to(env.DEVICE) + if __name__ == "__main__": unittest.main() From 1246658f11bb9652f406bd2827397fdc8e693368 Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Thu, 13 Mar 2025 19:21:04 +0800 Subject: [PATCH 09/26] Fix pre-commit --- deepmd/pt/model/atomic_model/denoise_atomic_model.py | 1 + deepmd/pt/model/model/__init__.py | 2 ++ source/tests/pt/common.py | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py index c6f600b3da..d61ae6168a 100644 --- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py +++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py @@ -28,6 +28,7 @@ def apply_out_stat( ret: dict[str, torch.Tensor], atype: torch.Tensor, ): + # Scale values to appropriate magnitudes noise_type = self.fitting_net.get_noise_type() cell_std = self.fitting_net.get_cell_pert_fraction() / 1.732 if noise_type == "gaussian": diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index 75d025f832..ed5c663829 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -291,6 +291,8 @@ def get_standard_model(model_params): preset_out_bias=preset_out_bias, data_stat_protect=data_stat_protect, ) + if model_params.get("hessian_mode"): + model.enable_hessian() model.model_def_script = json.dumps(model_params_old) return model diff --git a/source/tests/pt/common.py b/source/tests/pt/common.py index 1f68cbcd2c..b081e4df89 100644 --- a/source/tests/pt/common.py +++ b/source/tests/pt/common.py @@ -292,7 +292,7 @@ def eval_model( [nframes, 6], dtype=GLOBAL_PT_FLOAT_PRECISION, device=DEVICE ) ) - atom_component_out = ( + atom_strain_components_out = ( torch.cat(atom_strain_components_out) if atom_strain_components_out else torch.zeros( From 5e5abaa32f501c09f95e7f4140ce1be438b3fbda Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Fri, 14 Mar 2025 16:51:43 +0800 Subject: [PATCH 10/26] Support dpmodel denoise fitting --- deepmd/dpmodel/fitting/denoise_fitting.py | 511 ++++++++++++++++++++++ deepmd/pt/model/task/denoise.py | 63 +-- source/tests/pt/common.py | 2 + 3 files changed, 551 insertions(+), 25 
deletions(-) create mode 100644 deepmd/dpmodel/fitting/denoise_fitting.py diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py new file mode 100644 index 0000000000..3869c5e47a --- /dev/null +++ b/deepmd/dpmodel/fitting/denoise_fitting.py @@ -0,0 +1,511 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from abc import ( + abstractmethod, +) +from typing import ( + Any, + Optional, + Union, +) + +import array_api_compat +import numpy as np + +from deepmd.dpmodel import ( + DEFAULT_PRECISION, + PRECISION_DICT, + NativeOP, +) +from deepmd.dpmodel.common import ( + get_xp_precision, + to_numpy_array, +) +from deepmd.dpmodel.utils import ( + AtomExcludeMask, + FittingNet, + NetworkCollection, +) +from deepmd.dpmodel.utils.seed import ( + child_seed, +) +from deepmd.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.utils.finetune import ( + get_index_between_two_maps, + map_atom_exclude_types, +) + +from .base_fitting import ( + BaseFitting, +) + + +class DenoiseFitting(NativeOP, BaseFitting): + r"""Deoise fitting class. + + Parameters + ---------- + var_name + The name of the output variable. + ntypes + The number of atom types. + dim_descrpt + The dimension of the input descriptor. + neuron + Number of neurons :math:`N` in each hidden layer of the fitting net + bias_atom_e + Average energy per atom for each element. + resnet_dt + Time-step `dt` in the resnet construction: + :math:`y = x + dt * \phi (Wx + b)` + numb_fparam + Number of frame parameter + numb_aparam + Number of atomic parameter + trainable + If the weights of fitting net are trainable. + Suppose that we have :math:`N_l` hidden layers in the fitting net, + this list is of length :math:`N_l + 1`, specifying if the hidden layers and the output layer are trainable. + activation_function + The activation function :math:`\boldsymbol{\phi}` in the embedding net. Supported options are |ACTIVATION_FN| + precision + The precision of the embedding net parameters. Supported options are |PRECISION| + use_aparam_as_mask: bool, optional + If True, the atomic parameters will be used as a mask that determines the atom is real/virtual. + And the aparam will not be used as the atomic parameters for embedding. + mixed_types + If true, use a uniform fitting net for all atom types, otherwise use + different fitting nets for different atom types. + exclude_types: list[int] + Atomic contributions of the excluded atom types are set zero. + type_map: list[str], Optional + A list of strings. Give the name to each type of atoms. + seed: Optional[Union[int, list[int]]] + Random seed for initializing the network parameters. 
+ """ + + def __init__( + self, + ntypes: int, + dim_descrpt: int, + neuron: list[int] = [120, 120, 120], + bias_atom_e: Optional[np.ndarray] = None, + out_dim: int = 1, + resnet_dt: bool = True, + numb_fparam: int = 0, + numb_aparam: int = 0, + dim_case_embd: int = 0, + activation_function: str = "tanh", + precision: str = DEFAULT_PRECISION, + mixed_types: bool = True, + seed: Optional[Union[int, list[int]]] = None, + exclude_types: list[int] = [], + trainable: Optional[list[bool]] = None, + type_map: Optional[list[str]] = None, + use_aparam_as_mask: bool = False, + coord_noise: Optional[float] = None, + cell_pert_fraction: Optional[float] = None, + noise_type: Optional[str] = None, + ) -> None: + self.ntypes = ntypes + self.dim_descrpt = dim_descrpt + self.neuron = neuron + self.out_dim = out_dim + self.resnet_dt = resnet_dt + self.numb_fparam = numb_fparam + self.numb_aparam = numb_aparam + self.dim_case_embd = dim_case_embd + self.trainable = trainable + self.type_map = type_map + if self.trainable is None: + self.trainable = [True for ii in range(len(self.neuron) + 1)] + if isinstance(self.trainable, bool): + self.trainable = [self.trainable] * (len(self.neuron) + 1) + self.activation_function = activation_function + self.precision = precision + if self.precision.lower() not in PRECISION_DICT: + raise ValueError( + f"Unsupported precision '{self.precision}'. Supported options are: {list(PRECISION_DICT.keys())}" + ) + self.prec = PRECISION_DICT[self.precision.lower()] + self.use_aparam_as_mask = use_aparam_as_mask + self.mixed_types = mixed_types + # order matters, should be place after the assignment of ntypes + self.reinit_exclude(exclude_types) + + # init constants + if bias_atom_e is None: + self.bias_atom_e = np.zeros( + [self.ntypes, self.out_dim], dtype=GLOBAL_NP_FLOAT_PRECISION + ) + else: + assert bias_atom_e.shape == (self.ntypes, self.out_dim) + self.bias_atom_e = bias_atom_e.astype(GLOBAL_NP_FLOAT_PRECISION) + if self.numb_fparam > 0: + self.fparam_avg = np.zeros(self.numb_fparam, dtype=self.prec) + self.fparam_inv_std = np.ones(self.numb_fparam, dtype=self.prec) + else: + self.fparam_avg, self.fparam_inv_std = None, None + if self.numb_aparam > 0: + self.aparam_avg = np.zeros(self.numb_aparam, dtype=self.prec) + self.aparam_inv_std = np.ones(self.numb_aparam, dtype=self.prec) + else: + self.aparam_avg, self.aparam_inv_std = None, None + if self.dim_case_embd > 0: + self.case_embd = np.zeros(self.dim_case_embd, dtype=self.prec) + else: + self.case_embd = None + # init networks + in_dim = ( + self.dim_descrpt + + self.numb_fparam + + (0 if self.use_aparam_as_mask else self.numb_aparam) + + self.dim_case_embd + ) + self.coord_nets = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + self.out_dim, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(seed, ii), + ) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) + self.cell_nets = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + networks=[ + FittingNet( + in_dim, + 6, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), + ) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) + self.token_nets = NetworkCollection( + 1 if not self.mixed_types else 0, + self.ntypes, + network_type="fitting_network", + 
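Each of the three heads built above (`coord_nets`, `cell_nets`, `token_nets`) is a `NetworkCollection` that holds one network per atom type, or a single shared network when `mixed_types` is true; the forward pass later indexes them as `nets[(type_i,)]` or `nets[()]`. The indexing convention in miniature (toy stand-ins, not the deepmd classes):

    class ToyCollection:
        """One net per type (keys like (0,)) or one shared net (key ())."""
        def __init__(self, ntypes, mixed_types, make_net):
            keys = [()] if mixed_types else [(t,) for t in range(ntypes)]
            self._nets = {k: make_net(k) for k in keys}
        def __getitem__(self, key):
            return self._nets[key]

    coll = ToyCollection(3, mixed_types=False, make_net=lambda k: (lambda x: x))
    out = coll[(1,)]("descriptor rows of type-1 atoms")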
networks=[ + FittingNet( + in_dim, + self.ntypes - 1, + self.neuron, + self.activation_function, + self.resnet_dt, + self.precision, + bias_out=True, + seed=child_seed(self.seed, ii), + ) + for ii in range(self.ntypes if not self.mixed_types else 1) + ], + ) + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this atomic model.""" + return self.numb_fparam + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this atomic model.""" + return self.numb_aparam + + def get_sel_type(self) -> list[int]: + """Get the selected atom types of this model. + + Only atoms with selected atom types have atomic contribution + to the result of the model. + If returning an empty list, all atom types are selected. + """ + return [ii for ii in range(self.ntypes) if ii not in self.exclude_types] + + def get_type_map(self) -> list[str]: + """Get the name to each type of atoms.""" + return self.type_map + + def set_case_embd(self, case_idx: int): + """ + Set the case embedding of this fitting net by the given case_idx, + typically concatenated with the output of the descriptor and fed into the fitting net. + """ + self.case_embd = np.eye(self.dim_case_embd, dtype=self.prec)[case_idx] + + def change_type_map( + self, type_map: list[str], model_with_new_type_stat=None + ) -> None: + """Change the type related params to new ones, according to `type_map` and the original one in the model. + If there are new types in `type_map`, statistics will be updated accordingly to `model_with_new_type_stat` for these new types. + """ + assert self.type_map is not None, ( + "'type_map' must be defined when performing type changing!" + ) + assert self.mixed_types, "Only models in mixed types can perform type changing!" 
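`set_case_embd` above simply takes one row of an identity matrix, i.e. a one-hot vector identifying the training case, which the forward pass will tile across atoms and concatenate onto each descriptor row. In isolation:

    import numpy as np

    dim_case_embd, case_idx = 4, 2
    case_embd = np.eye(dim_case_embd)[case_idx]          # [0., 0., 1., 0.]

    nf, nloc, nd = 2, 5, 7
    xx = np.zeros((nf, nloc, nd))                        # descriptor block
    tiled = np.tile(case_embd.reshape(1, 1, -1), (nf, nloc, 1))
    xx = np.concatenate([xx, tiled], axis=-1)            # (nf, nloc, nd + 4)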
+ remap_index, has_new_type = get_index_between_two_maps(self.type_map, type_map) + self.type_map = type_map + self.ntypes = len(type_map) + self.reinit_exclude(map_atom_exclude_types(self.exclude_types, remap_index)) + if has_new_type: + extend_shape = [len(type_map), *list(self.bias_atom_e.shape[1:])] + extend_bias_atom_e = np.zeros(extend_shape, dtype=self.bias_atom_e.dtype) + self.bias_atom_e = np.concatenate( + [self.bias_atom_e, extend_bias_atom_e], axis=0 + ) + self.bias_atom_e = self.bias_atom_e[remap_index] + + def __setitem__(self, key, value) -> None: + if key in ["bias_atom_e"]: + self.bias_atom_e = value + elif key in ["fparam_avg"]: + self.fparam_avg = value + elif key in ["fparam_inv_std"]: + self.fparam_inv_std = value + elif key in ["aparam_avg"]: + self.aparam_avg = value + elif key in ["aparam_inv_std"]: + self.aparam_inv_std = value + elif key in ["case_embd"]: + self.case_embd = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ["bias_atom_e"]: + return self.bias_atom_e + elif key in ["fparam_avg"]: + return self.fparam_avg + elif key in ["fparam_inv_std"]: + return self.fparam_inv_std + elif key in ["aparam_avg"]: + return self.aparam_avg + elif key in ["aparam_inv_std"]: + return self.aparam_inv_std + elif key in ["case_embd"]: + return self.case_embd + else: + raise KeyError(key) + + def reinit_exclude( + self, + exclude_types: list[int] = [], + ) -> None: + self.exclude_types = exclude_types + self.emask = AtomExcludeMask(self.ntypes, self.exclude_types) + + def serialize(self) -> dict: + """Serialize the fitting to dict.""" + return { + "@class": "Fitting", + "@version": 3, + "type": "denoise", + "ntypes": self.ntypes, + "out_dim": self.out_dim, + "dim_descrpt": self.dim_descrpt, + "neuron": self.neuron, + "resnet_dt": self.resnet_dt, + "numb_fparam": self.numb_fparam, + "numb_aparam": self.numb_aparam, + "dim_case_embd": self.dim_case_embd, + "activation_function": self.activation_function, + "precision": self.precision, + "mixed_types": self.mixed_types, + "cell_nets": self.cell_nets.serialize(), + "coord_nets": self.coord_nets.serialize(), + "token_nets": self.token_nets.serialize(), + "exclude_types": self.exclude_types, + "@variables": { + "bias_atom_e": to_numpy_array(self.bias_atom_e), + "case_embd": to_numpy_array(self.case_embd), + "fparam_avg": to_numpy_array(self.fparam_avg), + "fparam_inv_std": to_numpy_array(self.fparam_inv_std), + "aparam_avg": to_numpy_array(self.aparam_avg), + "aparam_inv_std": to_numpy_array(self.aparam_inv_std), + }, + "type_map": self.type_map, + } + + @classmethod + def deserialize(cls, data: dict) -> "DenoiseFitting": + data = data.copy() + data.pop("@class") + data.pop("type") + variables = data.pop("@variables") + cell_nets = data.pop("cell_nets") + coord_nets = data.pop("coord_nets") + token_nets = data.pop("token_nets") + obj = cls(**data) + for kk in variables.keys(): + obj[kk] = variables[kk] + obj.cell_nets = NetworkCollection.deserialize(cell_nets) + obj.coord_nets = NetworkCollection.deserialize(coord_nets) + obj.token_nets = NetworkCollection.deserialize(token_nets) + return obj + + def _call_common( + self, + descriptor: np.ndarray, + atype: np.ndarray, + gr: Optional[np.ndarray] = None, + g2: Optional[np.ndarray] = None, + h2: Optional[np.ndarray] = None, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + ) -> dict[str, np.ndarray]: + """Calculate the fitting. + + Parameters + ---------- + descriptor + input descriptor. 
shape: nf x nloc x nd + atype + the atom type. shape: nf x nloc + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + fparam + The frame parameter. shape: nf x nfp. nfp being `numb_fparam` + aparam + The atomic parameter. shape: nf x nloc x nap. nap being `numb_aparam` + + """ + xp = array_api_compat.array_namespace(descriptor, atype) + nf, nloc, nd = descriptor.shape + # check input dim + if nd != self.dim_descrpt: + raise ValueError( + "get an input descriptor of dim {nd}," + "which is not consistent with {self.dim_descrpt}." + ) + xx = descriptor + + # check fparam dim, concate to input descriptor + if self.numb_fparam > 0: + assert fparam is not None, "fparam should not be None" + if fparam.shape[-1] != self.numb_fparam: + raise ValueError( + f"get an input fparam of dim {fparam.shape[-1]}, " + f"which is not consistent with {self.numb_fparam}." + ) + fparam = (fparam - self.fparam_avg) * self.fparam_inv_std + fparam = xp.tile( + xp.reshape(fparam, [nf, 1, self.numb_fparam]), (1, nloc, 1) + ) + xx = xp.concat( + [xx, fparam], + axis=-1, + ) + # check aparam dim, concate to input descriptor + if self.numb_aparam > 0 and not self.use_aparam_as_mask: + assert aparam is not None, "aparam should not be None" + if aparam.shape[-1] != self.numb_aparam: + raise ValueError( + f"get an input aparam of dim {aparam.shape[-1]}, " + f"which is not consistent with {self.numb_aparam}." + ) + aparam = xp.reshape(aparam, [nf, nloc, self.numb_aparam]) + aparam = (aparam - self.aparam_avg) * self.aparam_inv_std + xx = xp.concat( + [xx, aparam], + axis=-1, + ) + + if self.dim_case_embd > 0: + assert self.case_embd is not None + case_embd = xp.tile(xp.reshape(self.case_embd, [1, 1, -1]), [nf, nloc, 1]) + xx = xp.concat( + [xx, case_embd], + axis=-1, + ) + + # calculate the prediction + if not self.mixed_types: + strain_components = xp.zeros( + [nf, nloc, 6], dtype=get_xp_precision(xp, self.precision) + ) + updated_coord = xp.zeros( + [nf, nloc, 3], dtype=get_xp_precision(xp, self.precision) + ) + logits = xp.zeros( + [nf, nloc, self.ntypes - 1], dtype=get_xp_precision(xp, self.precision) + ) + # coord fitting + for type_i in range(self.ntypes): + mask = xp.tile( + xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 3) + ) + updated_coord_type = self.coord_nets[(type_i,)](xx) + assert list(updated_coord_type.shape) == [nf, nloc, self.out_dim] + updated_coord_type = xp.reshape(updated_coord_type, (-1, 1, self.out_dim)) # (nf * nloc, 1, out_dim) + gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3) + updated_coord_type = updated_coord_type @ gr # (nf, nloc, 3) + updated_coord_type = xp.reshape(updated_coord_type, (nframes, nloc, 3)) + updated_coord_type = xp.where( + mask, updated_coord_type, xp.zeros_like(updated_coord_type) + ) + updated_coord = updated_coord + updated_coord_type # Shape is [nf, nloc, 3] + # cell fitting + for type_i in range(self.ntypes): + mask = xp.tile( + xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 6) + ) + strain_components_type = self.cell_nets[(type_i,)](xx) + strain_components_type = xp.where( + mask, strain_components_type, xp.zeros_like(strain_components_type) + ) + strain_components = strain_components + strain_components_type + # token fitting + for type_i in range(self.ntypes): + mask = 
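The non-mixed-types branch above accumulates every head over atom types: run the type-i network on all rows, zero the rows belonging to other types with `xp.where`, and sum the per-type contributions. A NumPy-only sketch of that accumulation for the coordinate head (toy nets; `nf` is used consistently where the loop in the diff also spells it `nframes`, and `gr` is reshaped once outside the loop):

    import numpy as np

    nf, nloc, nd, od, ntypes = 2, 4, 6, 3, 2
    xx = np.random.rand(nf, nloc, nd)
    atype = np.random.randint(0, ntypes, size=(nf, nloc))
    gr = np.random.rand(nf * nloc, od, 3)
    nets = [lambda x, W=np.random.rand(nd, od): x @ W for _ in range(ntypes)]

    updated = np.zeros((nf, nloc, 3))
    for t in range(ntypes):
        head = nets[t](xx).reshape(-1, 1, od)            # (nf*nloc, 1, od)
        disp = (head @ gr).reshape(nf, nloc, 3)
        mask = (atype == t)[..., None]                   # broadcasts over xyz
        updated = updated + np.where(mask, disp, 0.0)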
xp.tile( + xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, self.ntypes - 1) + ) + logits_type = self.token_nets[(type_i,)](xx) + logits_type = xp.where( + mask, logits_type, xp.zeros_like(logits_type) + ) + logits = logits + logits_type + else: + # coord fitting + updated_coord = self.coord_nets[()](xx) + assert list(updated_coord.shape) == [nf, nloc, self.out_dim] + updated_coord = xp.reshape(updated_coord, (-1, 1, self.out_dim)) # (nf * nloc, 1, out_dim) + gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3) + updated_coord = updated_coord @ gr # (nf, nloc, 3) + updated_coord = xp.reshape(updated_coord, (nframes, nloc, 3)) + # cell fitting + strain_components = self.cell_nets[()](xx) # [nframes, nloc, 6] + # token fitting + logits = self.token_nets[()](xx) # [nframes, natoms[0], ntypes-1] + # nf x nloc + exclude_mask = self.emask.build_type_exclude_mask(atype) + exclude_mask = xp.astype(exclude_mask, xp.bool) + # nf x nloc x od + strain_components = xp.where(exclude_mask[:, :, None], strain_components, xp.zeros_like(strain_components)) + updated_coord = xp.where(exclude_mask[:, :, None], updated_coord, xp.zeros_like(updated_coord)) + logits = xp.where(exclude_mask[:, :, None], logits, xp.zeros_like(logits)) + + return { + "strain_components": strain_components, + "updated_coord": updated_coord, + "logits": logits, + } diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index 52f9b88dd4..855b8a9c69 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -50,19 +50,18 @@ class DenoiseNet(Fitting): def __init__( self, - ntypes, - dim_descrpt, - neuron, - bias_atom_e=None, - out_dim=1, - resnet_dt=True, + ntypes: int, + dim_descrpt: int, + neuron: list[int] = [128, 128, 128], + bias_atom_e: Optional[torch.Tensor] = None, + out_dim: int = 1, + resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, dim_case_embd: int = 0, activation_function: str = "tanh", precision: str = DEFAULT_PRECISION, mixed_types: bool = True, - rcond: Optional[float] = None, seed: Optional[Union[int, list[int]]] = None, exclude_types: list[int] = [], trainable: Union[bool, list[bool]] = True, @@ -102,8 +101,6 @@ def __init__( mixed_types : bool If true, use a uniform fitting net for all atom types, otherwise use different fitting nets for different atom types. - rcond : float, optional - The condition number for the regression of atomic energy. seed : int, optional Random seed. 
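After the per-type sums, the type-exclusion mask zeroes every output row belonging to an excluded atom type, broadcasting one boolean per atom across the last axis. The same zeroing without the `AtomExcludeMask` machinery (toy values):

    import numpy as np

    exclude_types = [2]
    atype = np.array([[0, 2, 1, 2]])                 # (nf, nloc)
    keep = ~np.isin(atype, exclude_types)            # one bool per atom

    out = np.ones((1, 4, 6))                         # e.g. strain components
    out = np.where(keep[:, :, None], out, 0.0)       # excluded rows -> 0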
exclude_types : list[int] @@ -130,7 +127,6 @@ def __init__( self.activation_function = activation_function self.precision = precision self.prec = PRECISION_DICT[self.precision] - self.rcond = rcond self.seed = seed self.type_map = type_map self.use_aparam_as_mask = use_aparam_as_mask @@ -320,6 +316,7 @@ def serialize(self) -> dict: return { "@class": "Fitting", "@version": 3, + "type": "denoise", "ntypes": self.ntypes, "out_dim": self.out_dim, "dim_descrpt": self.dim_descrpt, @@ -334,7 +331,6 @@ def serialize(self) -> dict: "cell_nets": self.filter_layers_cell.serialize(), "coord_nets": self.filter_layers_coord.serialize(), "token_nets": self.filter_layers_token.serialize(), - "rcond": self.rcond, "exclude_types": self.exclude_types, "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), @@ -345,22 +341,13 @@ def serialize(self) -> dict: "aparam_inv_std": to_numpy_array(self.aparam_inv_std), }, "type_map": self.type_map, - # "tot_ener_zero": self.tot_ener_zero , - # "trainable": self.trainable , - # "atom_ener": self.atom_ener , - # "layer_name": self.layer_name , - # "spin": self.spin , - ## NOTICE: not supported by far - "tot_ener_zero": False, - "trainable": [self.trainable] * (len(self.neuron) + 1), - "layer_name": None, - "use_aparam_as_mask": self.use_aparam_as_mask, - "spin": None, } @classmethod def deserialize(cls, data: dict) -> "DenoiseNet": data = data.copy() + data.pop("@class") + data.pop("type") variables = data.pop("@variables") cell_nets = data.pop("cell_nets") coord_nets = data.pop("coord_nets") @@ -403,12 +390,15 @@ def get_type_map(self) -> list[str]: return self.type_map def get_coord_noise(self): + """Get the noise level of the coordinates.""" return self.coord_noise def get_cell_pert_fraction(self): + """Get the fraction of the cell perturbation.""" return self.cell_pert_fraction def get_noise_type(self): + """Get the noise type.""" return self.noise_type def set_case_embd(self, case_idx: int): @@ -473,6 +463,29 @@ def forward( fparam: Optional[torch.Tensor] = None, aparam: Optional[torch.Tensor] = None, ) -> dict[str, torch.Tensor]: + """Calculate the fitting. + + Parameters + ---------- + descriptor + input descriptor. shape: nf x nloc x nd + atype + the atom type. shape: nf x nloc + gr + The rotationally equivariant and permutationally invariant single particle + representation. shape: nf x nloc x ng x 3 + g2 + The rotationally invariant pair-partical representation. + shape: nf x nloc x nnei x ng + h2 + The rotationally equivariant pair-partical representation. + shape: nf x nloc x nnei x 3 + fparam + The frame parameter. shape: nf x nfp. nfp being `numb_fparam` + aparam + The atomic parameter. shape: nf x nloc x nap. 
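The serialize/deserialize pair above follows the usual deepmd contract: `serialize` emits a flat dict tagged with `@class` and `type` plus an `@variables` sub-dict of arrays, and the classmethod pops exactly those keys before handing the remainder to `cls(**data)`. A toy round-trip showing the shape of that contract (not the real class):

    class ToyFitting:
        def __init__(self, ntypes, neuron):
            self.ntypes, self.neuron = ntypes, neuron
            self.bias = [0.0] * ntypes

        def serialize(self):
            return {"@class": "Fitting", "type": "toy",
                    "ntypes": self.ntypes, "neuron": self.neuron,
                    "@variables": {"bias": list(self.bias)}}

        @classmethod
        def deserialize(cls, data):
            data = data.copy()
            data.pop("@class")
            data.pop("type")
            variables = data.pop("@variables")
            obj = cls(**data)
            obj.bias = variables["bias"]
            return obj

    restored = ToyFitting.deserialize(ToyFitting(2, [24, 24]).serialize())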
nap being `numb_aparam` + + """ # cast the input to internal precsion xx = descriptor.to(self.prec) fparam = fparam.to(self.prec) if fparam is not None else None @@ -572,7 +585,7 @@ def forward( # coord fitting for type_i, ll in enumerate(self.filter_layers_coord.networks): mask = (atype == type_i).unsqueeze(-1) - mask = torch.tile(mask, (1, 1, 1)) + mask = torch.tile(mask, (1, 1, 3)) updated_coord_type = ll(xx) assert list(updated_coord_type.size()) == [nf, nloc, self.out_dim] updated_coord_type = updated_coord_type.view( @@ -590,7 +603,7 @@ def forward( # cell fitting for type_i, ll in enumerate(self.filter_layers_cell.networks): mask = (atype == type_i).unsqueeze(-1) - mask = torch.tile(mask, (1, 1, 1)) + mask = torch.tile(mask, (1, 1, 6)) strain_components_type = ll(xx) strain_components_type = torch.where(mask, strain_components_type, 0.0) strain_components = ( @@ -599,7 +612,7 @@ def forward( # token fitting for type_i, ll in enumerate(self.filter_layers_token.networks): mask = (atype == type_i).unsqueeze(-1) - mask = torch.tile(mask, (1, 1, 1)) + mask = torch.tile(mask, (1, 1, self.ntypes - 1)) logits_type = ll(xx) logits_type = torch.where(mask, logits_type, 0.0) logits = logits + logits_type diff --git a/source/tests/pt/common.py b/source/tests/pt/common.py index b081e4df89..415954662f 100644 --- a/source/tests/pt/common.py +++ b/source/tests/pt/common.py @@ -313,6 +313,8 @@ def eval_model( "updated_coord": updated_coord_out, "logits": logits_out, } + if atomic: + results_dict["atom_strain_components"] = atom_strain_components_out return results_dict else: results_dict = { From ebef6c1ef68cc37edda5e6a515e96cefd777f603 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 14 Mar 2025 08:53:23 +0000 Subject: [PATCH 11/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/dpmodel/fitting/denoise_fitting.py | 55 ++++++++++++----------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py index 3869c5e47a..f9059971cb 100644 --- a/deepmd/dpmodel/fitting/denoise_fitting.py +++ b/deepmd/dpmodel/fitting/denoise_fitting.py @@ -1,9 +1,5 @@ # SPDX-License-Identifier: LGPL-3.0-or-later -from abc import ( - abstractmethod, -) from typing import ( - Any, Optional, Union, ) @@ -106,7 +102,7 @@ def __init__( use_aparam_as_mask: bool = False, coord_noise: Optional[float] = None, cell_pert_fraction: Optional[float] = None, - noise_type: Optional[str] = None, + noise_type: Optional[str] = None, ) -> None: self.ntypes = ntypes self.dim_descrpt = dim_descrpt @@ -451,24 +447,24 @@ def _call_common( ) # coord fitting for type_i in range(self.ntypes): - mask = xp.tile( - xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 3) - ) + mask = xp.tile(xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 3)) updated_coord_type = self.coord_nets[(type_i,)](xx) assert list(updated_coord_type.shape) == [nf, nloc, self.out_dim] - updated_coord_type = xp.reshape(updated_coord_type, (-1, 1, self.out_dim)) # (nf * nloc, 1, out_dim) - gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3) - updated_coord_type = updated_coord_type @ gr # (nf, nloc, 3) + updated_coord_type = xp.reshape( + updated_coord_type, (-1, 1, self.out_dim) + ) # (nf * nloc, 1, out_dim) + gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3) + updated_coord_type = updated_coord_type @ gr # (nf, 
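The three `torch.tile` changes in the hunks above widen each per-type mask from a trailing dimension of 1 to the true output width (3 for coordinates, 6 for strain components, `ntypes - 1` for logits). A minimal PyTorch sketch of the per-type masking pattern, with placeholder sizes and stand-in networks:

```python
import torch

nf, nloc, ntypes, nd, width = 2, 4, 3, 10, 6  # toy sizes; width=6 mimics strain components
atype = torch.randint(0, ntypes, (nf, nloc))
nets = [torch.nn.Linear(nd, width) for _ in range(ntypes)]  # stand-ins for the fitting nets
xx = torch.rand(nf, nloc, nd)

out = torch.zeros(nf, nloc, width)
for type_i, net in enumerate(nets):
    mask = (atype == type_i).unsqueeze(-1)  # nf x nloc x 1
    mask = torch.tile(mask, (1, 1, width))  # nf x nloc x width, matching the head output
    out = out + torch.where(mask, net(xx), 0.0)
```

Since `torch.where` broadcasts a trailing dimension of 1 anyway, the explicit tile is mostly documentation of the intended shape; it does, however, make a width mismatch fail loudly instead of broadcasting silently.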
From ebef6c1ef68cc37edda5e6a515e96cefd777f603 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 14 Mar 2025 08:53:23 +0000
Subject: [PATCH 11/26] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 deepmd/dpmodel/fitting/denoise_fitting.py | 55 ++++++++++++-----------
 1 file changed, 29 insertions(+), 26 deletions(-)

diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py
index 3869c5e47a..f9059971cb 100644
--- a/deepmd/dpmodel/fitting/denoise_fitting.py
+++ b/deepmd/dpmodel/fitting/denoise_fitting.py
@@ -1,9 +1,5 @@
 # SPDX-License-Identifier: LGPL-3.0-or-later
-from abc import (
-    abstractmethod,
-)
 from typing import (
-    Any,
     Optional,
     Union,
 )
@@ -106,7 +102,7 @@ def __init__(
         use_aparam_as_mask: bool = False,
         coord_noise: Optional[float] = None,
         cell_pert_fraction: Optional[float] = None,
-        noise_type: Optional[str] = None, 
+        noise_type: Optional[str] = None,
     ) -> None:
         self.ntypes = ntypes
         self.dim_descrpt = dim_descrpt
@@ -451,24 +447,24 @@ def _call_common(
             )
             # coord fitting
             for type_i in range(self.ntypes):
-                mask = xp.tile(
-                    xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 3)
-                )
+                mask = xp.tile(xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 3))
                 updated_coord_type = self.coord_nets[(type_i,)](xx)
                 assert list(updated_coord_type.shape) == [nf, nloc, self.out_dim]
-                updated_coord_type = xp.reshape(updated_coord_type, (-1, 1, self.out_dim)) # (nf * nloc, 1, out_dim)
-                gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3)
-                updated_coord_type = updated_coord_type @ gr # (nf, nloc, 3)
+                updated_coord_type = xp.reshape(
+                    updated_coord_type, (-1, 1, self.out_dim)
+                )  # (nf * nloc, 1, out_dim)
+                gr = xp.reshape(gr, (nframes * nloc, -1, 3))  # (nf * nloc, out_dim, 3)
+                updated_coord_type = updated_coord_type @ gr  # (nf, nloc, 3)
                 updated_coord_type = xp.reshape(updated_coord_type, (nframes, nloc, 3))
                 updated_coord_type = xp.where(
                     mask, updated_coord_type, xp.zeros_like(updated_coord_type)
                 )
-                updated_coord = updated_coord + updated_coord_type # Shape is [nf, nloc, 3]
+                updated_coord = (
+                    updated_coord + updated_coord_type
+                )  # Shape is [nf, nloc, 3]
             # cell fitting
             for type_i in range(self.ntypes):
-                mask = xp.tile(
-                    xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 6)
-                )
+                mask = xp.tile(xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 6))
                 strain_components_type = self.cell_nets[(type_i,)](xx)
                 strain_components_type = xp.where(
                     mask, strain_components_type, xp.zeros_like(strain_components_type)
@@ -477,31 +473,38 @@ def _call_common(
             # token fitting
             for type_i in range(self.ntypes):
                 mask = xp.tile(
-                    xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, self.ntypes - 1)
+                    xp.reshape((atype == type_i), [nf, nloc, 1]),
+                    (1, 1, self.ntypes - 1),
                 )
                 logits_type = self.token_nets[(type_i,)](xx)
-                logits_type = xp.where(
-                    mask, logits_type, xp.zeros_like(logits_type)
-                )
+                logits_type = xp.where(mask, logits_type, xp.zeros_like(logits_type))
                 logits = logits + logits_type
         else:
             # coord fitting
             updated_coord = self.coord_nets[()](xx)
             assert list(updated_coord.shape) == [nf, nloc, self.out_dim]
-            updated_coord = xp.reshape(updated_coord, (-1, 1, self.out_dim)) # (nf * nloc, 1, out_dim)
-            gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3)
-            updated_coord = updated_coord @ gr # (nf, nloc, 3)
+            updated_coord = xp.reshape(
+                updated_coord, (-1, 1, self.out_dim)
+            )  # (nf * nloc, 1, out_dim)
+            gr = xp.reshape(gr, (nframes * nloc, -1, 3))  # (nf * nloc, out_dim, 3)
+            updated_coord = updated_coord @ gr  # (nf, nloc, 3)
             updated_coord = xp.reshape(updated_coord, (nframes, nloc, 3))
             # cell fitting
-            strain_components = self.cell_nets[()](xx) # [nframes, nloc, 6]
+            strain_components = self.cell_nets[()](xx)  # [nframes, nloc, 6]
             # token fitting
-            logits = self.token_nets[()](xx) # [nframes, natoms[0], ntypes-1]
+            logits = self.token_nets[()](xx)  # [nframes, natoms[0], ntypes-1]
         # nf x nloc
         exclude_mask = self.emask.build_type_exclude_mask(atype)
         exclude_mask = xp.astype(exclude_mask, xp.bool)
         # nf x nloc x od
-        strain_components = xp.where(exclude_mask[:, :, None], strain_components, xp.zeros_like(strain_components))
-        updated_coord = xp.where(exclude_mask[:, :, None], updated_coord, xp.zeros_like(updated_coord))
+        strain_components = xp.where(
+            exclude_mask[:, :, None],
+            strain_components,
+            xp.zeros_like(strain_components),
+        )
+        updated_coord = xp.where(
+            exclude_mask[:, :, None], updated_coord, xp.zeros_like(updated_coord)
+        )
         logits = xp.where(exclude_mask[:, :, None], logits, xp.zeros_like(logits))

         return {
From b20dd588a4fe47d8fc99e106b73e2043a9a72e4a Mon Sep 17 00:00:00 2001
From: root <2000011006@stu.pku.edu.cn>
Date: Mon, 17 Mar 2025 14:09:28 +0800
Subject: [PATCH 12/26] Add universal denoise fitting UT

---
 deepmd/dpmodel/fitting/__init__.py            |  4 +
 deepmd/dpmodel/fitting/denoise_fitting.py     | 75 ++++++++++++-----
 .../atomic_model/denoise_atomic_model.py      |  6 +-
 deepmd/pt/model/model/__init__.py             |  2 +-
 deepmd/pt/model/task/__init__.py              |  4 +-
 deepmd/pt/model/task/denoise.py               | 27 +++----
 .../universal/common/cases/fitting/utils.py   | 60 ++++++-------
 .../universal/dpmodel/fitting/test_fitting.py | 43 +++++++++++
 .../universal/pt/fitting/test_fitting.py      |  3 +
 9 files changed, 162 insertions(+), 62 deletions(-)

diff --git a/deepmd/dpmodel/fitting/__init__.py
b/deepmd/dpmodel/fitting/__init__.py index 5bdfff2571..91f961379a 100644 --- a/deepmd/dpmodel/fitting/__init__.py +++ b/deepmd/dpmodel/fitting/__init__.py @@ -20,6 +20,9 @@ from .property_fitting import ( PropertyFittingNet, ) +from .denoise_fitting import ( + DenoiseFitting, +) __all__ = [ "DOSFittingNet", @@ -28,5 +31,6 @@ "InvarFitting", "PolarFitting", "PropertyFittingNet", + "DenoiseFitting", "make_base_fitting", ] diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py index 3869c5e47a..8150482682 100644 --- a/deepmd/dpmodel/fitting/denoise_fitting.py +++ b/deepmd/dpmodel/fitting/denoise_fitting.py @@ -16,9 +16,15 @@ PRECISION_DICT, NativeOP, ) +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableDef, + fitting_check_output, +) from deepmd.dpmodel.common import ( get_xp_precision, to_numpy_array, + cast_precision, ) from deepmd.dpmodel.utils import ( AtomExcludeMask, @@ -35,6 +41,9 @@ get_index_between_two_maps, map_atom_exclude_types, ) +from deepmd.utils.version import ( + check_version_compatibility, +) from .base_fitting import ( BaseFitting, @@ -89,9 +98,9 @@ def __init__( self, ntypes: int, dim_descrpt: int, + embedding_width: int, neuron: list[int] = [120, 120, 120], bias_atom_e: Optional[np.ndarray] = None, - out_dim: int = 1, resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, @@ -111,13 +120,15 @@ def __init__( self.ntypes = ntypes self.dim_descrpt = dim_descrpt self.neuron = neuron - self.out_dim = out_dim + self.embedding_width = embedding_width self.resnet_dt = resnet_dt self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam self.dim_case_embd = dim_case_embd self.trainable = trainable self.type_map = type_map + self.seed = seed + self.var_name = ["strain_components", "updated_coord", "logits"] if self.trainable is None: self.trainable = [True for ii in range(len(self.neuron) + 1)] if isinstance(self.trainable, bool): @@ -137,10 +148,10 @@ def __init__( # init constants if bias_atom_e is None: self.bias_atom_e = np.zeros( - [self.ntypes, self.out_dim], dtype=GLOBAL_NP_FLOAT_PRECISION + [self.ntypes, self.embedding_width], dtype=GLOBAL_NP_FLOAT_PRECISION ) else: - assert bias_atom_e.shape == (self.ntypes, self.out_dim) + assert bias_atom_e.shape == (self.ntypes, self.embedding_width) self.bias_atom_e = bias_atom_e.astype(GLOBAL_NP_FLOAT_PRECISION) if self.numb_fparam > 0: self.fparam_avg = np.zeros(self.numb_fparam, dtype=self.prec) @@ -170,7 +181,7 @@ def __init__( networks=[ FittingNet( in_dim, - self.out_dim, + self.embedding_width, self.neuron, self.activation_function, self.resnet_dt, @@ -314,7 +325,7 @@ def serialize(self) -> dict: "@version": 3, "type": "denoise", "ntypes": self.ntypes, - "out_dim": self.out_dim, + "embedding_width": self.embedding_width, "dim_descrpt": self.dim_descrpt, "neuron": self.neuron, "resnet_dt": self.resnet_dt, @@ -344,6 +355,7 @@ def deserialize(cls, data: dict) -> "DenoiseFitting": data = data.copy() data.pop("@class") data.pop("type") + check_version_compatibility(data.pop("@version"), 3, 1) variables = data.pop("@variables") cell_nets = data.pop("cell_nets") coord_nets = data.pop("coord_nets") @@ -356,7 +368,36 @@ def deserialize(cls, data: dict) -> "DenoiseFitting": obj.token_nets = NetworkCollection.deserialize(token_nets) return obj - def _call_common( + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef( + "strain_components", + [6], + reducible=True, + r_differentiable=False, + c_differentiable=False, + intensive=True, 
+ ), + OutputVariableDef( + "updated_coord", + [3], + reducible=False, + r_differentiable=False, + c_differentiable=False, + ), + OutputVariableDef( + "logits", + [self.ntypes - 1], + reducible=False, + r_differentiable=False, + c_differentiable=False, + ), + ] + ) + + @cast_precision + def call( self, descriptor: np.ndarray, atype: np.ndarray, @@ -455,11 +496,11 @@ def _call_common( xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 3) ) updated_coord_type = self.coord_nets[(type_i,)](xx) - assert list(updated_coord_type.shape) == [nf, nloc, self.out_dim] - updated_coord_type = xp.reshape(updated_coord_type, (-1, 1, self.out_dim)) # (nf * nloc, 1, out_dim) - gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3) + assert list(updated_coord_type.shape) == [nf, nloc, self.embedding_width] + updated_coord_type = xp.reshape(updated_coord_type, (-1, 1, self.embedding_width)) # (nf * nloc, 1, embedding_width) + gr = xp.reshape(gr, (nf * nloc, -1, 3)) # (nf * nloc, embedding_width, 3) updated_coord_type = updated_coord_type @ gr # (nf, nloc, 3) - updated_coord_type = xp.reshape(updated_coord_type, (nframes, nloc, 3)) + updated_coord_type = xp.reshape(updated_coord_type, (nf, nloc, 3)) updated_coord_type = xp.where( mask, updated_coord_type, xp.zeros_like(updated_coord_type) ) @@ -487,15 +528,15 @@ def _call_common( else: # coord fitting updated_coord = self.coord_nets[()](xx) - assert list(updated_coord.shape) == [nf, nloc, self.out_dim] - updated_coord = xp.reshape(updated_coord, (-1, 1, self.out_dim)) # (nf * nloc, 1, out_dim) - gr = xp.reshape(gr, (nframes * nloc, -1, 3)) # (nf * nloc, out_dim, 3) + assert list(updated_coord.shape) == [nf, nloc, self.embedding_width] + updated_coord = xp.reshape(updated_coord, (-1, 1, self.embedding_width)) # (nf * nloc, 1, embedding_width) + gr = xp.reshape(gr, (nf * nloc, -1, 3)) # (nf * nloc, embedding_width, 3) updated_coord = updated_coord @ gr # (nf, nloc, 3) - updated_coord = xp.reshape(updated_coord, (nframes, nloc, 3)) + updated_coord = xp.reshape(updated_coord, (nf, nloc, 3)) # cell fitting - strain_components = self.cell_nets[()](xx) # [nframes, nloc, 6] + strain_components = self.cell_nets[()](xx) # [nf, nloc, 6] # token fitting - logits = self.token_nets[()](xx) # [nframes, natoms[0], ntypes-1] + logits = self.token_nets[()](xx) # [nf, natoms[0], ntypes-1] # nf x nloc exclude_mask = self.emask.build_type_exclude_mask(atype) exclude_mask = xp.astype(exclude_mask, xp.bool) diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py index d61ae6168a..0054a6c559 100644 --- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py +++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py @@ -5,7 +5,7 @@ import torch from deepmd.pt.model.task.denoise import ( - DenoiseNet, + DenoiseFittingNet, ) from .dp_atomic_model import ( @@ -17,9 +17,9 @@ class DPDenoiseAtomicModel(DPAtomicModel): def __init__(self, descriptor, fitting, type_map, **kwargs): - if not isinstance(fitting, DenoiseNet): + if not isinstance(fitting, DenoiseFittingNet): raise TypeError( - "fitting must be an instance of DenoiseNet for DPDenoiseAtomicModel" + "fitting must be an instance of DenoiseFittingNet for DPDenoiseAtomicModel" ) super().__init__(descriptor, fitting, type_map, **kwargs) diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py index ed5c663829..53f76c3ece 100644 --- a/deepmd/pt/model/model/__init__.py +++ b/deepmd/pt/model/model/__init__.py @@ -101,7 +101,7 @@ def 
_get_standard_model_components(model_params, ntypes): assert model_params["type_map"][-1] == "MASKED_TOKEN", ( f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{model_params['type_map'][-1]}'" ) - fitting_net["out_dim"] = descriptor.get_dim_emb() + fitting_net["embedding_width"] = descriptor.get_dim_emb() fitting_net["coord_noise"] = model_params.get("coord_noise", 0.2) fitting_net["cell_pert_fraction"] = model_params.get("cell_pert_fraction", 0.0) fitting_net["noise_type"] = model_params.get("noise_type", "gaussian") diff --git a/deepmd/pt/model/task/__init__.py b/deepmd/pt/model/task/__init__.py index 37ffec2725..a142b69c65 100644 --- a/deepmd/pt/model/task/__init__.py +++ b/deepmd/pt/model/task/__init__.py @@ -3,7 +3,7 @@ BaseFitting, ) from .denoise import ( - DenoiseNet, + DenoiseFittingNet, ) from .dipole import ( DipoleFittingNet, @@ -31,7 +31,7 @@ __all__ = [ "BaseFitting", "DOSFittingNet", - "DenoiseNet", + "DenoiseFittingNet", "DipoleFittingNet", "EnergyFittingNet", "EnergyFittingNetDirect", diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index 855b8a9c69..15c2e617c1 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -47,14 +47,14 @@ @Fitting.register("denoise") @fitting_check_output -class DenoiseNet(Fitting): +class DenoiseFittingNet(Fitting): def __init__( self, ntypes: int, dim_descrpt: int, + embedding_width: int, neuron: list[int] = [128, 128, 128], bias_atom_e: Optional[torch.Tensor] = None, - out_dim: int = 1, resnet_dt: bool = True, numb_fparam: int = 0, numb_aparam: int = 0, @@ -86,7 +86,7 @@ def __init__( Average energy per atom for each element. resnet_dt : bool Using time-step in the ResNet construction. - out_dim : int + embedding_width : int The output dimension of the fitting net. numb_fparam : int Number of frame parameters. 
@@ -120,7 +120,7 @@ def __init__( self.neuron = neuron self.mixed_types = mixed_types self.resnet_dt = resnet_dt - self.out_dim = out_dim + self.embedding_width = embedding_width self.numb_fparam = numb_fparam self.numb_aparam = numb_aparam self.dim_case_embd = dim_case_embd @@ -128,6 +128,7 @@ def __init__( self.precision = precision self.prec = PRECISION_DICT[self.precision] self.seed = seed + self.var_name = ["strain_components", "updated_coord", "logits"] self.type_map = type_map self.use_aparam_as_mask = use_aparam_as_mask self.coord_noise = coord_noise @@ -198,7 +199,7 @@ def __init__( networks=[ FittingNet( in_dim, - self.out_dim, + self.embedding_width, self.neuron, self.activation_function, self.resnet_dt, @@ -318,7 +319,7 @@ def serialize(self) -> dict: "@version": 3, "type": "denoise", "ntypes": self.ntypes, - "out_dim": self.out_dim, + "embedding_width": self.embedding_width, "dim_descrpt": self.dim_descrpt, "neuron": self.neuron, "resnet_dt": self.resnet_dt, @@ -344,7 +345,7 @@ def serialize(self) -> dict: } @classmethod - def deserialize(cls, data: dict) -> "DenoiseNet": + def deserialize(cls, data: dict) -> "DenoiseFittingNet": data = data.copy() data.pop("@class") data.pop("type") @@ -549,12 +550,12 @@ def forward( if self.mixed_types: # coord fitting updated_coord = self.filter_layers_coord.networks[0](xx) - assert list(updated_coord.size()) == [nf, nloc, self.out_dim] + assert list(updated_coord.size()) == [nf, nloc, self.embedding_width] updated_coord = updated_coord.view( - -1, 1, self.out_dim + -1, 1, self.embedding_width ) # (nf x nloc) x 1 x od assert gr is not None - gr = gr.view(-1, self.out_dim, 3) # (nf x nloc) x od x 3 + gr = gr.view(-1, self.embedding_width, 3) # (nf x nloc) x od x 3 updated_coord = ( torch.bmm(updated_coord, gr).squeeze(-2).view(nf, nloc, 3) ) # [nf, nloc, 3] @@ -587,12 +588,12 @@ def forward( mask = (atype == type_i).unsqueeze(-1) mask = torch.tile(mask, (1, 1, 3)) updated_coord_type = ll(xx) - assert list(updated_coord_type.size()) == [nf, nloc, self.out_dim] + assert list(updated_coord_type.size()) == [nf, nloc, self.embedding_width] updated_coord_type = updated_coord_type.view( - -1, 1, self.out_dim + -1, 1, self.embedding_width ) # (nf x nloc) x 1 x od assert gr is not None - gr = gr.view(-1, self.out_dim, 3) # (nf x nloc) x od x 3 + gr = gr.view(-1, self.embedding_width, 3) # (nf x nloc) x od x 3 updated_coord_type = ( torch.bmm(updated_coord_type, gr).squeeze(-2).view(nf, nloc, 3) ) # [nf, nloc, 3] diff --git a/source/tests/universal/common/cases/fitting/utils.py b/source/tests/universal/common/cases/fitting/utils.py index de6b12c3a2..2a654e1362 100644 --- a/source/tests/universal/common/cases/fitting/utils.py +++ b/source/tests/universal/common/cases/fitting/utils.py @@ -76,33 +76,38 @@ def test_exclude_types( ) self.module = self.module.deserialize(serialize_dict) ff = self.forward_wrapper(self.module) - var_name = self.module.var_name - if var_name == "polar": - var_name = "polarizability" + var_names = self.module.var_name + if isinstance(var_names, str): + var_names = [var_names] + var_names = ["polarizability" if v == "polar" else v for v in var_names] for em in [[0], [1]]: ex_pair = AtomExcludeMask(self.nt, em) atom_mask = ex_pair.build_type_exclude_mask(atype_device) # exclude neighbors in the output - rd = ff( + result = ff( self.mock_descriptor, self.atype_ext[:, : self.nloc], gr=self.mock_gr, - )[var_name] - for _ in range(len(rd.shape) - len(atom_mask.shape)): - atom_mask = atom_mask[..., None] - rd = rd * atom_mask + ) + 
for var in var_names: + rd = result[var] + _atom_mask = atom_mask.copy() + for _ in range(len(rd.shape) - len(_atom_mask.shape)): + _atom_mask = _atom_mask[..., None] + rd_masked = rd * _atom_mask + # normal nlist but use exclude_types params + serialize_dict_em = deepcopy(serialize_dict) + serialize_dict_em.update({"exclude_types": em}) + ff_ex = self.forward_wrapper(self.module.deserialize(serialize_dict_em)) + result_ex = ff_ex( + self.mock_descriptor, + self.atype_ext[:, : self.nloc], + gr=self.mock_gr, + ) + rd_ex = result_ex[var] - # normal nlist but use exclude_types params - serialize_dict_em = deepcopy(serialize_dict) - serialize_dict_em.update({"exclude_types": em}) - ff_ex = self.forward_wrapper(self.module.deserialize(serialize_dict_em)) - rd_ex = ff_ex( - self.mock_descriptor, - self.atype_ext[:, : self.nloc], - gr=self.mock_gr, - )[var_name] - np.testing.assert_allclose(rd, rd_ex) + np.testing.assert_allclose(rd_masked, rd_ex) def test_change_type_map(self) -> None: if not self.module.mixed_types: @@ -168,23 +173,26 @@ def test_change_type_map(self) -> None: size=serialize_dict["@variables"]["bias_atom_e"].shape ) old_tm_module = old_tm_module.deserialize(serialize_dict) - var_name = old_tm_module.var_name - if var_name == "polar": - var_name = "polarizability" + var_names = old_tm_module.var_name + if isinstance(var_names, str): + var_names = [var_names] + var_names = ["polarizability" if v == "polar" else v for v in var_names] old_tm_ff = self.forward_wrapper(old_tm_module) - rd_old_tm = old_tm_ff( + + result_old = old_tm_ff( self.mock_descriptor, old_tm_index[atype_device], gr=self.mock_gr, - )[var_name] + ) old_tm_module.change_type_map(new_tm) new_tm_ff = self.forward_wrapper(old_tm_module) - rd_new_tm = new_tm_ff( + result_new = new_tm_ff( self.mock_descriptor, new_tm_index[atype_device], gr=self.mock_gr, - )[var_name] - np.testing.assert_allclose(rd_old_tm, rd_new_tm) + ) + for var in var_names: + np.testing.assert_allclose(result_old[var], result_new[var],) def remap_exclude_types(exclude_types, ori_tm, new_tm): diff --git a/source/tests/universal/dpmodel/fitting/test_fitting.py b/source/tests/universal/dpmodel/fitting/test_fitting.py index 90b0668d20..25caa875ac 100644 --- a/source/tests/universal/dpmodel/fitting/test_fitting.py +++ b/source/tests/universal/dpmodel/fitting/test_fitting.py @@ -10,6 +10,7 @@ EnergyFittingNet, PolarFitting, PropertyFittingNet, + DenoiseFitting, ) from ....consistent.common import ( @@ -234,6 +235,47 @@ def FittingParamProperty( FittingParamProperty = FittingParamPropertyList[0] +def FittingParamDenoise( + ntypes, + dim_descrpt, + mixed_types, + type_map, + exclude_types=[], + precision="float64", + embedding_width=None, + numb_param=0, # test numb_fparam, numb_aparam and dim_case_embd together +): + assert embedding_width is not None, "embedding_width for denoise fitting is required." 
+ input_dict = { + "ntypes": ntypes, + "dim_descrpt": dim_descrpt, + "mixed_types": mixed_types, + "type_map": type_map, + "embedding_width": embedding_width, + "exclude_types": exclude_types, + "seed": GLOBAL_SEED, + "precision": precision, + "numb_fparam": numb_param, + "numb_aparam": numb_param, + "dim_case_embd": numb_param, + } + return input_dict + + +FittingParamDenoiseList = parameterize_func( + FittingParamDenoise, + OrderedDict( + { + "exclude_types": ([], [0]), + "precision": ("float64",), + "numb_param": (0, 2), + } + ), +) +# to get name for the default function +FittingParamDenoise = FittingParamDenoiseList[0] + + @parameterized( ( (FittingParamEnergy, EnergyFittingNet), @@ -241,6 +283,7 @@ def FittingParamProperty( (FittingParamDipole, DipoleFitting), (FittingParamPolar, PolarFitting), (FittingParamProperty, PropertyFittingNet), + (FittingParamDenoise, DenoiseFitting) ), # class_param & class (True, False), # mixed_types ) diff --git a/source/tests/universal/pt/fitting/test_fitting.py b/source/tests/universal/pt/fitting/test_fitting.py index efda3b9619..a2209e2f11 100644 --- a/source/tests/universal/pt/fitting/test_fitting.py +++ b/source/tests/universal/pt/fitting/test_fitting.py @@ -7,6 +7,7 @@ EnergyFittingNet, PolarFittingNet, PropertyFittingNet, + DenoiseFittingNet, ) from ....consistent.common import ( @@ -21,6 +22,7 @@ FittingParamEnergy, FittingParamPolar, FittingParamProperty, + FittingParamDenoise, ) from ..backend import ( PTTestCase, @@ -34,6 +36,7 @@ (FittingParamDipole, DipoleFittingNet), (FittingParamPolar, PolarFittingNet), (FittingParamProperty, PropertyFittingNet), + (FittingParamDenoise, DenoiseFittingNet) ), # class_param & class (True, False), # mixed_types ) From cdcb74ee7f095b93c172fe1612d12ca590c47aa8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 06:14:38 +0000 Subject: [PATCH 13/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/dpmodel/fitting/__init__.py | 8 ++-- deepmd/dpmodel/fitting/denoise_fitting.py | 39 ++++++++++++------- deepmd/pt/model/task/denoise.py | 6 ++- .../universal/common/cases/fitting/utils.py | 5 ++- .../universal/dpmodel/fitting/test_fitting.py | 8 ++-- .../universal/pt/fitting/test_fitting.py | 6 +-- 6 files changed, 45 insertions(+), 27 deletions(-) diff --git a/deepmd/dpmodel/fitting/__init__.py b/deepmd/dpmodel/fitting/__init__.py index 91f961379a..fcc41ba232 100644 --- a/deepmd/dpmodel/fitting/__init__.py +++ b/deepmd/dpmodel/fitting/__init__.py @@ -1,4 +1,7 @@ # SPDX-License-Identifier: LGPL-3.0-or-later +from .denoise_fitting import ( + DenoiseFitting, +) from .dipole_fitting import ( DipoleFitting, ) @@ -20,17 +23,14 @@ from .property_fitting import ( PropertyFittingNet, ) -from .denoise_fitting import ( - DenoiseFitting, -) __all__ = [ "DOSFittingNet", + "DenoiseFitting", "DipoleFitting", "EnergyFittingNet", "InvarFitting", "PolarFitting", "PropertyFittingNet", - "DenoiseFitting", "make_base_fitting", ] diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py index 3ee61acad4..5eca7fcfca 100644 --- a/deepmd/dpmodel/fitting/denoise_fitting.py +++ b/deepmd/dpmodel/fitting/denoise_fitting.py @@ -12,15 +12,14 @@ PRECISION_DICT, NativeOP, ) -from deepmd.dpmodel.output_def import ( - FittingOutputDef, - OutputVariableDef, - fitting_check_output, -) from deepmd.dpmodel.common import ( + cast_precision, get_xp_precision, to_numpy_array, - 
cast_precision, +) +from deepmd.dpmodel.output_def import ( + FittingOutputDef, + OutputVariableDef, ) from deepmd.dpmodel.utils import ( AtomExcludeMask, @@ -490,10 +489,18 @@ def call( for type_i in range(self.ntypes): mask = xp.tile(xp.reshape((atype == type_i), [nf, nloc, 1]), (1, 1, 3)) updated_coord_type = self.coord_nets[(type_i,)](xx) - assert list(updated_coord_type.shape) == [nf, nloc, self.embedding_width] - updated_coord_type = xp.reshape(updated_coord_type, (-1, 1, self.embedding_width)) # (nf * nloc, 1, embedding_width) - gr = xp.reshape(gr, (nf * nloc, -1, 3)) # (nf * nloc, embedding_width, 3) - updated_coord_type = updated_coord_type @ gr # (nf, nloc, 3) + assert list(updated_coord_type.shape) == [ + nf, + nloc, + self.embedding_width, + ] + updated_coord_type = xp.reshape( + updated_coord_type, (-1, 1, self.embedding_width) + ) # (nf * nloc, 1, embedding_width) + gr = xp.reshape( + gr, (nf * nloc, -1, 3) + ) # (nf * nloc, embedding_width, 3) + updated_coord_type = updated_coord_type @ gr # (nf, nloc, 3) updated_coord_type = xp.reshape(updated_coord_type, (nf, nloc, 3)) updated_coord_type = xp.where( mask, updated_coord_type, xp.zeros_like(updated_coord_type) @@ -522,14 +529,16 @@ def call( # coord fitting updated_coord = self.coord_nets[()](xx) assert list(updated_coord.shape) == [nf, nloc, self.embedding_width] - updated_coord = xp.reshape(updated_coord, (-1, 1, self.embedding_width)) # (nf * nloc, 1, embedding_width) - gr = xp.reshape(gr, (nf * nloc, -1, 3)) # (nf * nloc, embedding_width, 3) - updated_coord = updated_coord @ gr # (nf, nloc, 3) + updated_coord = xp.reshape( + updated_coord, (-1, 1, self.embedding_width) + ) # (nf * nloc, 1, embedding_width) + gr = xp.reshape(gr, (nf * nloc, -1, 3)) # (nf * nloc, embedding_width, 3) + updated_coord = updated_coord @ gr # (nf, nloc, 3) updated_coord = xp.reshape(updated_coord, (nf, nloc, 3)) # cell fitting - strain_components = self.cell_nets[()](xx) # [nf, nloc, 6] + strain_components = self.cell_nets[()](xx) # [nf, nloc, 6] # token fitting - logits = self.token_nets[()](xx) # [nf, natoms[0], ntypes-1] + logits = self.token_nets[()](xx) # [nf, natoms[0], ntypes-1] # nf x nloc exclude_mask = self.emask.build_type_exclude_mask(atype) exclude_mask = xp.astype(exclude_mask, xp.bool) diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index 15c2e617c1..0e3bb26a1c 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -588,7 +588,11 @@ def forward( mask = (atype == type_i).unsqueeze(-1) mask = torch.tile(mask, (1, 1, 3)) updated_coord_type = ll(xx) - assert list(updated_coord_type.size()) == [nf, nloc, self.embedding_width] + assert list(updated_coord_type.size()) == [ + nf, + nloc, + self.embedding_width, + ] updated_coord_type = updated_coord_type.view( -1, 1, self.embedding_width ) # (nf x nloc) x 1 x od diff --git a/source/tests/universal/common/cases/fitting/utils.py b/source/tests/universal/common/cases/fitting/utils.py index 2a654e1362..55d1e14247 100644 --- a/source/tests/universal/common/cases/fitting/utils.py +++ b/source/tests/universal/common/cases/fitting/utils.py @@ -192,7 +192,10 @@ def test_change_type_map(self) -> None: gr=self.mock_gr, ) for var in var_names: - np.testing.assert_allclose(result_old[var], result_new[var],) + np.testing.assert_allclose( + result_old[var], + result_new[var], + ) def remap_exclude_types(exclude_types, ori_tm, new_tm): diff --git a/source/tests/universal/dpmodel/fitting/test_fitting.py 
b/source/tests/universal/dpmodel/fitting/test_fitting.py
index 25caa875ac..1faa894c9f 100644
--- a/source/tests/universal/dpmodel/fitting/test_fitting.py
+++ b/source/tests/universal/dpmodel/fitting/test_fitting.py
@@ -5,12 +5,12 @@
 )

 from deepmd.dpmodel.fitting import (
+    DenoiseFitting,
     DipoleFitting,
     DOSFittingNet,
     EnergyFittingNet,
     PolarFitting,
     PropertyFittingNet,
-    DenoiseFitting,
 )

 from ....consistent.common import (
@@ -245,7 +245,9 @@ def FittingParamDenoise(
     embedding_width=None,
     numb_param=0,  # test numb_fparam, numb_aparam and dim_case_embd together
 ):
-    assert embedding_width is not None, "embedding_width for denoise fitting is required."
+    assert embedding_width is not None, (
+        "embedding_width for denoise fitting is required."
+    )
     input_dict = {
         "ntypes": ntypes,
         "dim_descrpt": dim_descrpt,
@@ -283,7 +285,7 @@ def FittingParamDenoise(
         (FittingParamDipole, DipoleFitting),
         (FittingParamPolar, PolarFitting),
         (FittingParamProperty, PropertyFittingNet),
-        (FittingParamDenoise, DenoiseFitting)
+        (FittingParamDenoise, DenoiseFitting),
     ),  # class_param & class
     (True, False),  # mixed_types
 )
diff --git a/source/tests/universal/pt/fitting/test_fitting.py b/source/tests/universal/pt/fitting/test_fitting.py
index a2209e2f11..ee44afb4ca 100644
--- a/source/tests/universal/pt/fitting/test_fitting.py
+++ b/source/tests/universal/pt/fitting/test_fitting.py
@@ -2,12 +2,12 @@
 import unittest

 from deepmd.pt.model.task import (
+    DenoiseFittingNet,
     DipoleFittingNet,
     DOSFittingNet,
     EnergyFittingNet,
     PolarFittingNet,
     PropertyFittingNet,
-    DenoiseFittingNet,
 )

 from ....consistent.common import (
@@ -17,12 +17,12 @@
     FittingTest,
 )
 from ...dpmodel.fitting.test_fitting import (
+    FittingParamDenoise,
     FittingParamDipole,
     FittingParamDos,
     FittingParamEnergy,
     FittingParamPolar,
     FittingParamProperty,
-    FittingParamDenoise,
 )
 from ..backend import (
     PTTestCase,
 )
@@ -36,7 +36,7 @@
     (FittingParamDipole, DipoleFittingNet),
     (FittingParamPolar, PolarFittingNet),
     (FittingParamProperty, PropertyFittingNet),
-    (FittingParamDenoise, DenoiseFittingNet)
+    (FittingParamDenoise, DenoiseFittingNet),
 ),  # class_param & class
 (True, False),  # mixed_types
 )
From 96bd72adb283d9ab7c7b7bf7548efcaebe73304e Mon Sep 17 00:00:00 2001
From: root <2000011006@stu.pku.edu.cn>
Date: Mon, 17 Mar 2025 14:17:14 +0800
Subject: [PATCH 14/26] Add dtype and device to strain_components

---
 deepmd/pt/loss/denoise.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py
index 997d697f51..45dc37ae77 100644
--- a/deepmd/pt/loss/denoise.py
+++ b/deepmd/pt/loss/denoise.py
@@ -24,7 +24,7 @@ def get_cell_perturb_matrix(cell_pert_fraction: float):
     # TODO: user fix some component
     if cell_pert_fraction < 0:
         raise RuntimeError("cell_pert_fraction can not be negative")
-    e0 = torch.rand(6)
+    e0 = torch.rand(6, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE)
     e = e0 * 2 * cell_pert_fraction - cell_pert_fraction
     cell_pert_matrix = torch.tensor(
         [
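Patch 14's one-liner matters more than it looks: without explicit arguments, `torch.rand(6)` samples in the global default dtype (usually float32) on the CPU, so the derived strain components could silently disagree in precision and placement with a float64 pipeline. A small illustration of the difference (placeholder fraction value; run on CPU if no GPU is available):

```python
import torch

f = 0.05  # cell_pert_fraction, placeholder value

# without explicit arguments the factory uses the global defaults (float32, CPU)
e_default = torch.rand(6) * 2 * f - f
# pinning the dtype keeps the strain components consistent with a float64 pipeline
e_fixed = torch.rand(6, dtype=torch.float64) * 2 * f - f

print(e_default.dtype, e_fixed.dtype)  # torch.float32 torch.float64
```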
From 13eba8c5fe7e0aabcb8768a20f9e99a423021969 Mon Sep 17 00:00:00 2001
From: root <2000011006@stu.pku.edu.cn>
Date: Mon, 17 Mar 2025 16:42:57 +0800
Subject: [PATCH 15/26] Add denoise universal atomic_model UT

---
 deepmd/dpmodel/atomic_model/__init__.py       |  4 ++
 .../atomic_model/denoise_atomic_model.py      | 55 ++++++++++++++
 deepmd/dpmodel/fitting/__init__.py            |  4 ++
 deepmd/dpmodel/fitting/denoise_fitting.py     | 21 +++++-
 deepmd/dpmodel/model/__init__.py              |  4 ++
 deepmd/dpmodel/model/denoise_model.py         | 27 ++++++++
 deepmd/dpmodel/model/model.py                 | 13 ++++
 .../atomic_model/denoise_atomic_model.py      | 12 ++++
 .../common/cases/atomic_model/atomic_model.py | 18 +++++
 .../dpmodel/atomc_model/test_atomic_model.py  | 67 +++++++++++++++++++
 .../pt/atomc_model/test_atomic_model.py       | 67 +++++++++++++++++++
 11 files changed, 289 insertions(+), 3 deletions(-)
 create mode 100644 deepmd/dpmodel/atomic_model/denoise_atomic_model.py
 create mode 100644 deepmd/dpmodel/model/denoise_model.py

diff --git a/deepmd/dpmodel/atomic_model/__init__.py b/deepmd/dpmodel/atomic_model/__init__.py
index 4d882d5e4b..0c941977a8 100644
--- a/deepmd/dpmodel/atomic_model/__init__.py
+++ b/deepmd/dpmodel/atomic_model/__init__.py
@@ -45,6 +45,9 @@ from .property_atomic_model import (
     DPPropertyAtomicModel,
 )
+from .denoise_atomic_model import (
+    DPDenoiseAtomicModel,
+)

 __all__ = [
     "BaseAtomicModel",
@@ -54,6 +57,7 @@
     "DPEnergyAtomicModel",
     "DPPolarAtomicModel",
     "DPPropertyAtomicModel",
+    "DPDenoiseAtomicModel",
     "DPZBLLinearEnergyAtomicModel",
     "LinearEnergyAtomicModel",
     "PairTabAtomicModel",
diff --git a/deepmd/dpmodel/atomic_model/denoise_atomic_model.py b/deepmd/dpmodel/atomic_model/denoise_atomic_model.py
new file mode 100644
index 0000000000..71b5ad90a7
--- /dev/null
+++ b/deepmd/dpmodel/atomic_model/denoise_atomic_model.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import numpy as np
+
+from deepmd.dpmodel.fitting.denoise_fitting import (
+    DenoiseFitting,
+)
+
+from .dp_atomic_model import (
+    DPAtomicModel,
+)
+
+
+class DPDenoiseAtomicModel(DPAtomicModel):
+    def __init__(self, descriptor, fitting, type_map, **kwargs):
+        if not isinstance(fitting, DenoiseFitting):
+            raise TypeError(
+                "fitting must be an instance of DenoiseFitting for DPDenoiseAtomicModel"
+            )
+        super().__init__(descriptor, fitting, type_map, **kwargs)
+
+    def apply_out_stat(
+        self,
+        ret: dict[str, np.ndarray],
+        atype: np.ndarray,
+    ):
+        """Apply the stat to each atomic output.
+
+        In denoise fitting, each output will be multiplied by label std.
+
+        Parameters
+        ----------
+        ret
+            The returned dict by the forward_atomic method
+        atype
+            The atom types. nf x nloc. It is useless in denoise fitting.
+ + """ + # Scale values to appropriate magnitudes + noise_type = self.fitting_net.get_noise_type() + cell_std = self.fitting_net.get_cell_pert_fraction() / 1.732 + if noise_type == "gaussian": + coord_std = self.fitting_net.get_coord_noise() + elif noise_type == "uniform": + coord_std = self.fitting_net.get_coord_noise() / 1.732 + else: + raise RuntimeError(f"Unknown noise type {noise_type}") + ret["strain_components"] = ( + ret["strain_components"] * cell_std + if cell_std > 0 + else ret["strain_components"] + ) + ret["updated_coord"] = ( + ret["updated_coord"] * coord_std if coord_std > 0 else ret["updated_coord"] + ) + return ret diff --git a/deepmd/dpmodel/fitting/__init__.py b/deepmd/dpmodel/fitting/__init__.py index fcc41ba232..2b2370ad86 100644 --- a/deepmd/dpmodel/fitting/__init__.py +++ b/deepmd/dpmodel/fitting/__init__.py @@ -23,6 +23,9 @@ from .property_fitting import ( PropertyFittingNet, ) +from .denoise_fitting import ( + DenoiseFitting, +) __all__ = [ "DOSFittingNet", @@ -32,5 +35,6 @@ "InvarFitting", "PolarFitting", "PropertyFittingNet", + "DenoiseFitting", "make_base_fitting", ] diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py index 5eca7fcfca..976e672e3f 100644 --- a/deepmd/dpmodel/fitting/denoise_fitting.py +++ b/deepmd/dpmodel/fitting/denoise_fitting.py @@ -44,7 +44,7 @@ BaseFitting, ) - +@BaseFitting.register("denoise") class DenoiseFitting(NativeOP, BaseFitting): r"""Deoise fitting class. @@ -124,6 +124,9 @@ def __init__( self.type_map = type_map self.seed = seed self.var_name = ["strain_components", "updated_coord", "logits"] + self.coord_noise = coord_noise + self.cell_pert_fraction = cell_pert_fraction + self.noise_type = noise_type if self.trainable is None: self.trainable = [True for ii in range(len(self.neuron) + 1)] if isinstance(self.trainable, bool): @@ -245,6 +248,18 @@ def get_type_map(self) -> list[str]: """Get the name to each type of atoms.""" return self.type_map + def get_coord_noise(self): + """Get the noise level of the coordinates.""" + return self.coord_noise + + def get_cell_pert_fraction(self): + """Get the fraction of the cell perturbation.""" + return self.cell_pert_fraction + + def get_noise_type(self): + """Get the noise type.""" + return self.noise_type + def set_case_embd(self, case_idx: int): """ Set the case embedding of this fitting net by the given case_idx, @@ -430,8 +445,8 @@ def call( # check input dim if nd != self.dim_descrpt: raise ValueError( - "get an input descriptor of dim {nd}," - "which is not consistent with {self.dim_descrpt}." + f"get an input descriptor of dim {nd}," + f"which is not consistent with {self.dim_descrpt}." 
diff --git a/deepmd/dpmodel/fitting/__init__.py b/deepmd/dpmodel/fitting/__init__.py
index fcc41ba232..2b2370ad86 100644
--- a/deepmd/dpmodel/fitting/__init__.py
+++ b/deepmd/dpmodel/fitting/__init__.py
@@ -23,6 +23,9 @@ from .property_fitting import (
     PropertyFittingNet,
 )
+from .denoise_fitting import (
+    DenoiseFitting,
+)

 __all__ = [
     "DOSFittingNet",
@@ -32,5 +35,6 @@
     "InvarFitting",
     "PolarFitting",
     "PropertyFittingNet",
+    "DenoiseFitting",
     "make_base_fitting",
 ]
diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py
index 5eca7fcfca..976e672e3f 100644
--- a/deepmd/dpmodel/fitting/denoise_fitting.py
+++ b/deepmd/dpmodel/fitting/denoise_fitting.py
@@ -44,7 +44,7 @@
     BaseFitting,
 )

-
+@BaseFitting.register("denoise")
 class DenoiseFitting(NativeOP, BaseFitting):
     r"""Denoise fitting class.
@@ -124,6 +124,9 @@ def __init__(
         self.type_map = type_map
         self.seed = seed
         self.var_name = ["strain_components", "updated_coord", "logits"]
+        self.coord_noise = coord_noise
+        self.cell_pert_fraction = cell_pert_fraction
+        self.noise_type = noise_type
         if self.trainable is None:
             self.trainable = [True for ii in range(len(self.neuron) + 1)]
         if isinstance(self.trainable, bool):
@@ -245,6 +248,18 @@ def get_type_map(self) -> list[str]:
         """Get the name to each type of atoms."""
         return self.type_map

+    def get_coord_noise(self):
+        """Get the noise level of the coordinates."""
+        return self.coord_noise
+
+    def get_cell_pert_fraction(self):
+        """Get the fraction of the cell perturbation."""
+        return self.cell_pert_fraction
+
+    def get_noise_type(self):
+        """Get the noise type."""
+        return self.noise_type
+
     def set_case_embd(self, case_idx: int):
         """
         Set the case embedding of this fitting net by the given case_idx,
@@ -430,8 +445,8 @@ def call(
         # check input dim
         if nd != self.dim_descrpt:
             raise ValueError(
-                "get an input descriptor of dim {nd},"
-                "which is not consistent with {self.dim_descrpt}."
+                f"get an input descriptor of dim {nd},"
+                f"which is not consistent with {self.dim_descrpt}."
             )

         xx = descriptor
diff --git a/deepmd/dpmodel/model/__init__.py b/deepmd/dpmodel/model/__init__.py
index 37ef57b38b..2131af332c 100644
--- a/deepmd/dpmodel/model/__init__.py
+++ b/deepmd/dpmodel/model/__init__.py
@@ -24,6 +24,9 @@ from .property_model import (
     PropertyModel,
 )
+from .denoise_model import (
+    DenoiseModel,
+)
 from .spin_model import (
     SpinModel,
 )
@@ -32,6 +35,7 @@
     "DPModelCommon",
     "EnergyModel",
     "PropertyModel",
+    "DenoiseModel",
     "SpinModel",
     "make_model",
 ]
diff --git a/deepmd/dpmodel/model/denoise_model.py b/deepmd/dpmodel/model/denoise_model.py
new file mode 100644
index 0000000000..df61645990
--- /dev/null
+++ b/deepmd/dpmodel/model/denoise_model.py
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from deepmd.dpmodel.atomic_model import (
+    DPDenoiseAtomicModel,
+)
+from deepmd.dpmodel.model.base_model import (
+    BaseModel,
+)
+
+from .dp_model import (
+    DPModelCommon,
+)
+from .make_model import (
+    make_model,
+)
+
+DPDenoiseModel_ = make_model(DPDenoiseAtomicModel)
+
+
+@BaseModel.register("denoise")
+class DenoiseModel(DPModelCommon, DPDenoiseModel_):
+    def __init__(
+        self,
+        *args,
+        **kwargs,
+    ) -> None:
+        DPModelCommon.__init__(self)
+        DPDenoiseModel_.__init__(self, *args, **kwargs)
\ No newline at end of file
diff --git a/deepmd/dpmodel/model/model.py b/deepmd/dpmodel/model/model.py
index 1d18b70e8e..069d9e96bf 100644
--- a/deepmd/dpmodel/model/model.py
+++ b/deepmd/dpmodel/model/model.py
@@ -37,6 +37,9 @@ from deepmd.dpmodel.model.property_model import (
     PropertyModel,
 )
+from deepmd.dpmodel.model.denoise_model import (
+    DenoiseModel,
+)
 from deepmd.dpmodel.model.spin_model import (
     SpinModel,
 )
@@ -60,6 +63,14 @@ def _get_standard_model_components(data, ntypes):
     fitting_net["embedding_width"] = descriptor.get_dim_emb()
     fitting_net["dim_descrpt"] = descriptor.get_dim_out()
     grad_force = "direct" not in fitting_net["type"]
+    if fitting_net["type"] in ["denoise"]:
+        assert model_params["type_map"][-1] == "MASKED_TOKEN", (
+            f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{model_params['type_map'][-1]}'"
+        )
+        fitting_net["embedding_width"] = descriptor.get_dim_emb()
+        fitting_net["coord_noise"] = model_params.get("coord_noise", 0.2)
+        fitting_net["cell_pert_fraction"] = model_params.get("cell_pert_fraction", 0.0)
+        fitting_net["noise_type"] = model_params.get("noise_type", "gaussian")
     if not grad_force:
         fitting_net["out_dim"] = descriptor.get_dim_emb()
     if "ener" in fitting_net["type"]:
@@ -96,6 +107,8 @@ def get_standard_model(data: dict) -> EnergyModel:
         modelcls = EnergyModel
     elif fitting_net_type == "property":
         modelcls = PropertyModel
+    elif fitting_net_type == "denoise":
+        modelcls = DenoiseModel
     else:
         raise RuntimeError(f"Unknown fitting type: {fitting_net_type}")
diff --git a/deepmd/pt/model/atomic_model/denoise_atomic_model.py b/deepmd/pt/model/atomic_model/denoise_atomic_model.py
index 0054a6c559..3dd32809e9 100644
--- a/deepmd/pt/model/atomic_model/denoise_atomic_model.py
+++ b/deepmd/pt/model/atomic_model/denoise_atomic_model.py
@@ -28,6 +28,18 @@ def apply_out_stat(
         ret: dict[str, torch.Tensor],
         atype: torch.Tensor,
     ):
+        """Apply the stat to each atomic output.
+
+        In denoise fitting, each output will be multiplied by label std.
+
+        Parameters
+        ----------
+        ret
+            The returned dict by the forward_atomic method
+        atype
+            The atom types. nf x nloc. It is useless in denoise fitting.
+ + """ # Scale values to appropriate magnitudes noise_type = self.fitting_net.get_noise_type() cell_std = self.fitting_net.get_cell_pert_fraction() / 1.732 diff --git a/source/tests/universal/common/cases/atomic_model/atomic_model.py b/source/tests/universal/common/cases/atomic_model/atomic_model.py index 499a313a32..94596ed422 100644 --- a/source/tests/universal/common/cases/atomic_model/atomic_model.py +++ b/source/tests/universal/common/cases/atomic_model/atomic_model.py @@ -127,3 +127,21 @@ def setUpClass(cls) -> None: cls.aprec_dict = {} cls.rprec_dict = {} cls.epsilon_dict = {} + +class DenoiseAtomicModelTest(AtomicModelTestCase): + @classmethod + def setUpClass(cls) -> None: + cls.expected_rcut = 5.0 + cls.expected_type_map = ["O", "H"] + cls.expected_dim_fparam = 0 + cls.expected_dim_aparam = 0 + cls.expected_sel_type = [0, 1] + cls.expected_aparam_nall = False + cls.expected_model_output_type = ["strain_components", "updated_coord", "logits", "mask"] + cls.model_output_equivariant = ["updated_coord"] + cls.expected_sel = [46, 92] + cls.expected_sel_mix = sum(cls.expected_sel) + cls.expected_has_message_passing = False + cls.aprec_dict = {} + cls.rprec_dict = {} + cls.epsilon_dict = {} diff --git a/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py b/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py index 7b579ae82c..83b73e0be6 100644 --- a/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py +++ b/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py @@ -20,6 +20,7 @@ EnergyFittingNet, PolarFitting, PropertyFittingNet, + DenoiseFitting, ) from ....consistent.common import ( @@ -36,6 +37,7 @@ PolarAtomicModelTest, PropertyAtomicModelTest, ZBLAtomicModelTest, + DenoiseAtomicModelTest, ) from ...dpmodel.descriptor.test_descriptor import ( DescriptorParamDPA1, @@ -69,6 +71,8 @@ FittingParamPolarList, FittingParamProperty, FittingParamPropertyList, + FittingParamDenoise, + FittingParamDenoiseList, ) @@ -469,3 +473,66 @@ def setUpClass(cls) -> None: cls.expected_sel_type = ft.get_sel_type() cls.expected_dim_fparam = ft.get_dim_fparam() cls.expected_dim_aparam = ft.get_dim_aparam() + + +@parameterized( + des_parameterized=( + ( + *[(param_func, DescrptDPA1) for param_func in DescriptorParamDPA1List], + *[(param_func, DescrptDPA2) for param_func in DescriptorParamDPA2List], + (DescriptorParamHybridMixed, DescrptHybrid), + ), # descrpt_class_param & class + ((FittingParamDenoise, DenoiseFitting),), # fitting_class_param & class + ), + fit_parameterized=( + ( + (DescriptorParamDPA1, DescrptDPA1), + (DescriptorParamDPA2, DescrptDPA2), + ), # descrpt_class_param & class + ( + *[ + (param_func, DenoiseFitting) + for param_func in FittingParamDenoiseList + ], + ), # fitting_class_param & class + ), +) +@unittest.skipIf(TEST_DEVICE != "cpu" and CI, "Only test on CPU.") +class TestDenoiseAtomicModelDP(unittest.TestCase, DenoiseAtomicModelTest, DPTestCase): + @classmethod + def setUpClass(cls) -> None: + DenoiseAtomicModelTest.setUpClass() + (DescriptorParam, Descrpt) = cls.param[0] + (FittingParam, Fitting) = cls.param[1] + cls.input_dict_ds = DescriptorParam( + len(cls.expected_type_map), + cls.expected_rcut, + cls.expected_rcut / 2, + cls.expected_sel, + cls.expected_type_map, + ) + # set skip tests + skiptest, skip_reason = skip_model_tests(cls) + if skiptest: + raise cls.skipTest(cls, skip_reason) + ds = Descrpt(**cls.input_dict_ds) + cls.input_dict_ft = FittingParam( + ntypes=len(cls.expected_type_map), + dim_descrpt=ds.get_dim_out(), + 
mixed_types=ds.mixed_types(), + type_map=cls.expected_type_map, + embedding_width=ds.get_dim_emb(), + ) + ft = Fitting( + **cls.input_dict_ft, + ) + cls.module = DPAtomicModel( + ds, + ft, + type_map=cls.expected_type_map, + ) + cls.output_def = cls.module.atomic_output_def().get_data() + cls.expected_has_message_passing = ds.has_message_passing() + cls.expected_sel_type = ft.get_sel_type() + cls.expected_dim_fparam = ft.get_dim_fparam() + cls.expected_dim_aparam = ft.get_dim_aparam() \ No newline at end of file diff --git a/source/tests/universal/pt/atomc_model/test_atomic_model.py b/source/tests/universal/pt/atomc_model/test_atomic_model.py index f41d384b6b..276f5f50b0 100644 --- a/source/tests/universal/pt/atomc_model/test_atomic_model.py +++ b/source/tests/universal/pt/atomc_model/test_atomic_model.py @@ -20,6 +20,7 @@ EnergyFittingNet, PolarFittingNet, PropertyFittingNet, + DenoiseFittingNet, ) from ....consistent.common import ( @@ -31,6 +32,7 @@ EnerAtomicModelTest, PolarAtomicModelTest, PropertyAtomicModelTest, + DenoiseAtomicModelTest, ZBLAtomicModelTest, ) from ...dpmodel.descriptor.test_descriptor import ( @@ -59,6 +61,8 @@ FittingParamPolarList, FittingParamProperty, FittingParamPropertyList, + FittingParamDenoise, + FittingParamDenoiseList, ) from ...dpmodel.model.test_model import ( skip_model_tests, @@ -459,3 +463,66 @@ def setUpClass(cls) -> None: cls.expected_sel_type = ft.get_sel_type() cls.expected_dim_fparam = ft.get_dim_fparam() cls.expected_dim_aparam = ft.get_dim_aparam() + + +@parameterized( + des_parameterized=( + ( + *[(param_func, DescrptDPA1) for param_func in DescriptorParamDPA1List], + *[(param_func, DescrptDPA2) for param_func in DescriptorParamDPA2List], + (DescriptorParamHybrid, DescrptHybrid), + (DescriptorParamHybridMixed, DescrptHybrid), + ), # descrpt_class_param & class + ((FittingParamDenoise, DenoiseFittingNet),), # fitting_class_param & class + ), + fit_parameterized=( + ( + (DescriptorParamDPA1, DescrptDPA1), + (DescriptorParamDPA2, DescrptDPA2), + ), # descrpt_class_param & class + ( + *[ + (param_func, DenoiseFittingNet) + for param_func in FittingParamDenoiseList + ], + ), # fitting_class_param & class + ), +) +class TestDenoiseAtomicModelPT(unittest.TestCase, DenoiseAtomicModelTest, PTTestCase): + @classmethod + def setUpClass(cls) -> None: + DenoiseAtomicModelTest.setUpClass() + (DescriptorParam, Descrpt) = cls.param[0] + (FittingParam, Fitting) = cls.param[1] + cls.input_dict_ds = DescriptorParam( + len(cls.expected_type_map), + cls.expected_rcut, + cls.expected_rcut / 2, + cls.expected_sel, + cls.expected_type_map, + ) + # set skip tests + skiptest, skip_reason = skip_model_tests(cls) + if skiptest: + raise cls.skipTest(cls, skip_reason) + ds = Descrpt(**cls.input_dict_ds) + cls.input_dict_ft = FittingParam( + ntypes=len(cls.expected_type_map), + dim_descrpt=ds.get_dim_out(), + mixed_types=ds.mixed_types(), + type_map=cls.expected_type_map, + embedding_width=ds.get_dim_emb(), + ) + ft = Fitting( + **cls.input_dict_ft, + ) + cls.module = DPAtomicModel( + ds, + ft, + type_map=cls.expected_type_map, + ) + cls.output_def = cls.module.atomic_output_def().get_data() + cls.expected_has_message_passing = ds.has_message_passing() + cls.expected_sel_type = ft.get_sel_type() + cls.expected_dim_fparam = ft.get_dim_fparam() + cls.expected_dim_aparam = ft.get_dim_aparam() \ No newline at end of file From ae98c15041eae97169950cbc2feb4e1b4c43d362 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: 
Mon, 17 Mar 2025 08:44:51 +0000
Subject: [PATCH 16/26] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 deepmd/dpmodel/atomic_model/__init__.py            |  8 ++++----
 deepmd/dpmodel/fitting/__init__.py                 |  4 ----
 deepmd/dpmodel/fitting/denoise_fitting.py          |  1 +
 deepmd/dpmodel/model/__init__.py                   |  8 ++++----
 deepmd/dpmodel/model/denoise_model.py              |  2 +-
 deepmd/dpmodel/model/model.py                      |  6 +++---
 .../common/cases/atomic_model/atomic_model.py      |  8 +++++++-
 .../dpmodel/atomc_model/test_atomic_model.py       | 15 ++++++---------
 .../universal/pt/atomc_model/test_atomic_model.py  | 10 +++++-----
 9 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/deepmd/dpmodel/atomic_model/__init__.py b/deepmd/dpmodel/atomic_model/__init__.py
index 0c941977a8..ef9ea9d7e3 100644
--- a/deepmd/dpmodel/atomic_model/__init__.py
+++ b/deepmd/dpmodel/atomic_model/__init__.py
@@ -17,6 +17,9 @@ from .base_atomic_model import (
     BaseAtomicModel,
 )
+from .denoise_atomic_model import (
+    DPDenoiseAtomicModel,
+)
 from .dipole_atomic_model import (
     DPDipoleAtomicModel,
 )
@@ -45,19 +48,16 @@ from .property_atomic_model import (
     DPPropertyAtomicModel,
 )
-from .denoise_atomic_model import (
-    DPDenoiseAtomicModel,
-)

 __all__ = [
     "BaseAtomicModel",
     "DPAtomicModel",
     "DPDOSAtomicModel",
+    "DPDenoiseAtomicModel",
     "DPDipoleAtomicModel",
     "DPEnergyAtomicModel",
     "DPPolarAtomicModel",
     "DPPropertyAtomicModel",
-    "DPDenoiseAtomicModel",
     "DPZBLLinearEnergyAtomicModel",
     "LinearEnergyAtomicModel",
     "PairTabAtomicModel",
diff --git a/deepmd/dpmodel/fitting/__init__.py b/deepmd/dpmodel/fitting/__init__.py
index 2b2370ad86..21b5928275 100644
--- a/deepmd/dpmodel/fitting/__init__.py
+++ b/deepmd/dpmodel/fitting/__init__.py
@@ -23,18 +23,14 @@ from .property_fitting import (
     PropertyFittingNet,
 )
-from .denoise_fitting import (
-    DenoiseFitting,
-)

 __all__ = [
     "DOSFittingNet",
     "DenoiseFitting",
     "DipoleFitting",
     "EnergyFittingNet",
     "InvarFitting",
     "PolarFitting",
     "PropertyFittingNet",
-    "DenoiseFitting",
     "make_base_fitting",
 ]
diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py
index 976e672e3f..9c614b9274 100644
--- a/deepmd/dpmodel/fitting/denoise_fitting.py
+++ b/deepmd/dpmodel/fitting/denoise_fitting.py
@@ -44,6 +44,7 @@
     BaseFitting,
 )

+
 @BaseFitting.register("denoise")
 class DenoiseFitting(NativeOP, BaseFitting):
     r"""Denoise fitting class.
diff --git a/deepmd/dpmodel/model/__init__.py b/deepmd/dpmodel/model/__init__.py
index 2131af332c..31277fdaed 100644
--- a/deepmd/dpmodel/model/__init__.py
+++ b/deepmd/dpmodel/model/__init__.py
@@ -12,6 +12,9 @@

 Models generated by `make_model` have already done it.
""" +from .denoise_model import ( + DenoiseModel, +) from .dp_model import ( DPModelCommon, ) @@ -24,18 +27,15 @@ from .property_model import ( PropertyModel, ) -from .denoise_model import ( - DenoiseModel, -) from .spin_model import ( SpinModel, ) __all__ = [ "DPModelCommon", + "DenoiseModel", "EnergyModel", "PropertyModel", - "DenoiseModel", "SpinModel", "make_model", ] diff --git a/deepmd/dpmodel/model/denoise_model.py b/deepmd/dpmodel/model/denoise_model.py index df61645990..ddde836469 100644 --- a/deepmd/dpmodel/model/denoise_model.py +++ b/deepmd/dpmodel/model/denoise_model.py @@ -24,4 +24,4 @@ def __init__( **kwargs, ) -> None: DPModelCommon.__init__(self) - DPDenoiseModel_.__init__(self, *args, **kwargs) \ No newline at end of file + DPDenoiseModel_.__init__(self, *args, **kwargs) diff --git a/deepmd/dpmodel/model/model.py b/deepmd/dpmodel/model/model.py index 069d9e96bf..dec1be5e9b 100644 --- a/deepmd/dpmodel/model/model.py +++ b/deepmd/dpmodel/model/model.py @@ -19,6 +19,9 @@ from deepmd.dpmodel.model.base_model import ( BaseModel, ) +from deepmd.dpmodel.model.denoise_model import ( + DenoiseModel, +) from deepmd.dpmodel.model.dipole_model import ( DipoleModel, ) @@ -37,9 +40,6 @@ from deepmd.dpmodel.model.property_model import ( PropertyModel, ) -from deepmd.dpmodel.model.denoise_model import ( - DenoiseModel, -) from deepmd.dpmodel.model.spin_model import ( SpinModel, ) diff --git a/source/tests/universal/common/cases/atomic_model/atomic_model.py b/source/tests/universal/common/cases/atomic_model/atomic_model.py index 94596ed422..1969404c4f 100644 --- a/source/tests/universal/common/cases/atomic_model/atomic_model.py +++ b/source/tests/universal/common/cases/atomic_model/atomic_model.py @@ -128,6 +128,7 @@ def setUpClass(cls) -> None: cls.rprec_dict = {} cls.epsilon_dict = {} + class DenoiseAtomicModelTest(AtomicModelTestCase): @classmethod def setUpClass(cls) -> None: @@ -137,7 +138,12 @@ def setUpClass(cls) -> None: cls.expected_dim_aparam = 0 cls.expected_sel_type = [0, 1] cls.expected_aparam_nall = False - cls.expected_model_output_type = ["strain_components", "updated_coord", "logits", "mask"] + cls.expected_model_output_type = [ + "strain_components", + "updated_coord", + "logits", + "mask", + ] cls.model_output_equivariant = ["updated_coord"] cls.expected_sel = [46, 92] cls.expected_sel_mix = sum(cls.expected_sel) diff --git a/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py b/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py index 83b73e0be6..03d0871be6 100644 --- a/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py +++ b/source/tests/universal/dpmodel/atomc_model/test_atomic_model.py @@ -15,12 +15,12 @@ DescrptSeT, ) from deepmd.dpmodel.fitting import ( + DenoiseFitting, DipoleFitting, DOSFittingNet, EnergyFittingNet, PolarFitting, PropertyFittingNet, - DenoiseFitting, ) from ....consistent.common import ( @@ -31,13 +31,13 @@ TEST_DEVICE, ) from ...common.cases.atomic_model.atomic_model import ( + DenoiseAtomicModelTest, DipoleAtomicModelTest, DosAtomicModelTest, EnerAtomicModelTest, PolarAtomicModelTest, PropertyAtomicModelTest, ZBLAtomicModelTest, - DenoiseAtomicModelTest, ) from ...dpmodel.descriptor.test_descriptor import ( DescriptorParamDPA1, @@ -61,6 +61,8 @@ DPTestCase, ) from ..fitting.test_fitting import ( + FittingParamDenoise, + FittingParamDenoiseList, FittingParamDipole, FittingParamDipoleList, FittingParamDos, @@ -71,8 +73,6 @@ FittingParamPolarList, FittingParamProperty, FittingParamPropertyList, - 
FittingParamDenoise, - FittingParamDenoiseList, ) @@ -490,10 +490,7 @@ def setUpClass(cls) -> None: (DescriptorParamDPA2, DescrptDPA2), ), # descrpt_class_param & class ( - *[ - (param_func, DenoiseFitting) - for param_func in FittingParamDenoiseList - ], + *[(param_func, DenoiseFitting) for param_func in FittingParamDenoiseList], ), # fitting_class_param & class ), ) @@ -535,4 +532,4 @@ def setUpClass(cls) -> None: cls.expected_has_message_passing = ds.has_message_passing() cls.expected_sel_type = ft.get_sel_type() cls.expected_dim_fparam = ft.get_dim_fparam() - cls.expected_dim_aparam = ft.get_dim_aparam() \ No newline at end of file + cls.expected_dim_aparam = ft.get_dim_aparam() diff --git a/source/tests/universal/pt/atomc_model/test_atomic_model.py b/source/tests/universal/pt/atomc_model/test_atomic_model.py index 276f5f50b0..8269942fd8 100644 --- a/source/tests/universal/pt/atomc_model/test_atomic_model.py +++ b/source/tests/universal/pt/atomc_model/test_atomic_model.py @@ -15,24 +15,24 @@ DescrptSeT, ) from deepmd.pt.model.task import ( + DenoiseFittingNet, DipoleFittingNet, DOSFittingNet, EnergyFittingNet, PolarFittingNet, PropertyFittingNet, - DenoiseFittingNet, ) from ....consistent.common import ( parameterized, ) from ...common.cases.atomic_model.atomic_model import ( + DenoiseAtomicModelTest, DipoleAtomicModelTest, DosAtomicModelTest, EnerAtomicModelTest, PolarAtomicModelTest, PropertyAtomicModelTest, - DenoiseAtomicModelTest, ZBLAtomicModelTest, ) from ...dpmodel.descriptor.test_descriptor import ( @@ -51,6 +51,8 @@ DescriptorParamSeTList, ) from ...dpmodel.fitting.test_fitting import ( + FittingParamDenoise, + FittingParamDenoiseList, FittingParamDipole, FittingParamDipoleList, FittingParamDos, @@ -61,8 +63,6 @@ FittingParamPolarList, FittingParamProperty, FittingParamPropertyList, - FittingParamDenoise, - FittingParamDenoiseList, ) from ...dpmodel.model.test_model import ( skip_model_tests, @@ -525,4 +525,4 @@ def setUpClass(cls) -> None: cls.expected_has_message_passing = ds.has_message_passing() cls.expected_sel_type = ft.get_sel_type() cls.expected_dim_fparam = ft.get_dim_fparam() - cls.expected_dim_aparam = ft.get_dim_aparam() \ No newline at end of file + cls.expected_dim_aparam = ft.get_dim_aparam() From 491eabdc673b1ef71d683e2b8ce4e08e491a8f91 Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Mon, 17 Mar 2025 17:27:36 +0800 Subject: [PATCH 17/26] Fix pre-commit --- deepmd/dpmodel/model/model.py | 8 ++--- deepmd/pt/loss/__init__.py | 4 +++ .../tests/universal/dpmodel/loss/test_loss.py | 32 +++++++++++++++++++ source/tests/universal/pt/loss/test_loss.py | 2 ++ 4 files changed, 42 insertions(+), 4 deletions(-) diff --git a/deepmd/dpmodel/model/model.py b/deepmd/dpmodel/model/model.py index dec1be5e9b..4102ee893b 100644 --- a/deepmd/dpmodel/model/model.py +++ b/deepmd/dpmodel/model/model.py @@ -64,13 +64,13 @@ def _get_standard_model_components(data, ntypes): fitting_net["dim_descrpt"] = descriptor.get_dim_out() grad_force = "direct" not in fitting_net["type"] if fitting_net["type"] in ["denoise"]: - assert model_params["type_map"][-1] == "MASKED_TOKEN", ( + assert data["type_map"][-1] == "MASKED_TOKEN", ( f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{model_params['type_map'][-1]}'" ) fitting_net["embedding_width"] = descriptor.get_dim_emb() - fitting_net["coord_noise"] = model_params.get("coord_noise", 0.2) - fitting_net["cell_pert_fraction"] = model_params.get("cell_pert_fraction", 0.0) - 
fitting_net["noise_type"] = model_params.get("noise_type", "gaussian") + fitting_net["coord_noise"] = data.get("coord_noise", 0.2) + fitting_net["cell_pert_fraction"] = data.get("cell_pert_fraction", 0.0) + fitting_net["noise_type"] = data.get("noise_type", "gaussian") if not grad_force: fitting_net["out_dim"] = descriptor.get_dim_emb() if "ener" in fitting_net["type"]: diff --git a/deepmd/pt/loss/__init__.py b/deepmd/pt/loss/__init__.py index 1d25c1e52f..7cb4095785 100644 --- a/deepmd/pt/loss/__init__.py +++ b/deepmd/pt/loss/__init__.py @@ -18,6 +18,9 @@ from .property import ( PropertyLoss, ) +from .denoise import ( + DenoiseLoss, +) from .tensor import ( TensorLoss, ) @@ -29,6 +32,7 @@ "EnergySpinLoss", "EnergyStdLoss", "PropertyLoss", + "DenoiseLoss", "TaskLoss", "TensorLoss", ] diff --git a/source/tests/universal/dpmodel/loss/test_loss.py b/source/tests/universal/dpmodel/loss/test_loss.py index 79c67cdba4..ffd427180c 100644 --- a/source/tests/universal/dpmodel/loss/test_loss.py +++ b/source/tests/universal/dpmodel/loss/test_loss.py @@ -204,3 +204,35 @@ def LossParamProperty(): LossParamPropertyList = [LossParamProperty] # to get name for the default function LossParamProperty = LossParamPropertyList[0] + + +def LossParamDenoise(): + key_to_pref_map = { + "strain_components": 1.0, + "updated_coord": 1.0, + "logits": 1.0, + } + input_dict = { + "key_to_pref_map": key_to_pref_map, + "ntypes": 1, + "mask_token": False, + "mask_coord": True, + "mask_cell": False, + "token_loss": 1.0, + "coord_loss": 1.0, + "cell_loss": 1.0, + "noise_type": "gaussian", + "coord_noise": 0.2, + "cell_pert_fraction": 0.0, + "noise_mode": "prob", + "mask_num": 1, + "mask_prob": 0.2, + "same_mask": False, + "loss_func": "rmse", + } + return input_dict + + +LossParamDenoiseList = [LossParamDenoise] +# to get name for the default function +LossParamDenoise = LossParamDenoiseList[0] diff --git a/source/tests/universal/pt/loss/test_loss.py b/source/tests/universal/pt/loss/test_loss.py index 47c2d06fbc..91064cafd6 100644 --- a/source/tests/universal/pt/loss/test_loss.py +++ b/source/tests/universal/pt/loss/test_loss.py @@ -7,6 +7,7 @@ EnergyStdLoss, PropertyLoss, TensorLoss, + DenoiseLoss, ) from ....consistent.common import ( @@ -21,6 +22,7 @@ LossParamEnergySpinList, LossParamPropertyList, LossParamTensorList, + LossParamDenoiseList, ) from ..backend import ( PTTestCase, From 2a2b707f88d625e694aa9967dd61da44f91dd2c5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 09:29:25 +0000 Subject: [PATCH 18/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/loss/__init__.py | 5 +---- source/tests/universal/pt/loss/test_loss.py | 2 -- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/deepmd/pt/loss/__init__.py b/deepmd/pt/loss/__init__.py index 7cb4095785..01942a414f 100644 --- a/deepmd/pt/loss/__init__.py +++ b/deepmd/pt/loss/__init__.py @@ -18,9 +18,6 @@ from .property import ( PropertyLoss, ) -from .denoise import ( - DenoiseLoss, -) from .tensor import ( TensorLoss, ) @@ -28,11 +25,11 @@ __all__ = [ "DOSLoss", "DenoiseLoss", + "DenoiseLoss", "EnergyHessianStdLoss", "EnergySpinLoss", "EnergyStdLoss", "PropertyLoss", - "DenoiseLoss", "TaskLoss", "TensorLoss", ] diff --git a/source/tests/universal/pt/loss/test_loss.py b/source/tests/universal/pt/loss/test_loss.py index 91064cafd6..47c2d06fbc 100644 --- a/source/tests/universal/pt/loss/test_loss.py +++ 
b/source/tests/universal/pt/loss/test_loss.py @@ -7,7 +7,6 @@ EnergyStdLoss, PropertyLoss, TensorLoss, - DenoiseLoss, ) from ....consistent.common import ( @@ -22,7 +21,6 @@ LossParamEnergySpinList, LossParamPropertyList, LossParamTensorList, - LossParamDenoiseList, ) from ..backend import ( PTTestCase, From 993ed2fc4a548a59cd4722ae20c08ce8198dcf25 Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Wed, 19 Mar 2025 18:01:43 +0800 Subject: [PATCH 19/26] Add universial denoise model UT --- deepmd/dpmodel/fitting/denoise_fitting.py | 3 + deepmd/dpmodel/model/model.py | 2 +- deepmd/pt/model/model/denoise_model.py | 9 +- deepmd/pt/model/task/denoise.py | 3 + .../universal/common/cases/model/model.py | 20 ++++ .../universal/dpmodel/fitting/test_fitting.py | 3 + source/tests/universal/pt/model/test_model.py | 98 +++++++++++++++++++ 7 files changed, 132 insertions(+), 6 deletions(-) diff --git a/deepmd/dpmodel/fitting/denoise_fitting.py b/deepmd/dpmodel/fitting/denoise_fitting.py index 9c614b9274..e88a5f8d3e 100644 --- a/deepmd/dpmodel/fitting/denoise_fitting.py +++ b/deepmd/dpmodel/fitting/denoise_fitting.py @@ -350,6 +350,9 @@ def serialize(self) -> dict: "coord_nets": self.coord_nets.serialize(), "token_nets": self.token_nets.serialize(), "exclude_types": self.exclude_types, + "coord_noise": self.coord_noise, + "cell_pert_fraction": self.cell_pert_fraction, + "noise_type": self.noise_type, "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), "case_embd": to_numpy_array(self.case_embd), diff --git a/deepmd/dpmodel/model/model.py b/deepmd/dpmodel/model/model.py index 4102ee893b..e49773dbca 100644 --- a/deepmd/dpmodel/model/model.py +++ b/deepmd/dpmodel/model/model.py @@ -65,7 +65,7 @@ def _get_standard_model_components(data, ntypes): grad_force = "direct" not in fitting_net["type"] if fitting_net["type"] in ["denoise"]: assert data["type_map"][-1] == "MASKED_TOKEN", ( - f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{model_params['type_map'][-1]}'" + f"When using denoise fitting, the last element in `type_map` must be 'MASKED_TOKEN', but got '{data['type_map'][-1]}'" ) fitting_net["embedding_width"] = descriptor.get_dim_emb() fitting_net["coord_noise"] = data.get("coord_noise", 0.2) diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py index c9422da842..411eb023d7 100644 --- a/deepmd/pt/model/model/denoise_model.py +++ b/deepmd/pt/model/model/denoise_model.py @@ -35,17 +35,16 @@ def __init__( DPDenoiseModel_.__init__(self, *args, **kwargs) def translated_output_def(self): - pass - """ out_def_data = self.model_output_def().get_data() output_def = { - f"atom_{self.get_var_name()}": out_def_data[self.get_var_name()], - self.get_var_name(): out_def_data[f"{self.get_var_name()}_redu"], + "strain_components": out_def_data["strain_components_redu"], + "atom_strain_components": out_def_data["strain_components"], + "updated_coord": out_def_data["updated_coord"], + "logits": out_def_data["logits"] } if "mask" in out_def_data: output_def["mask"] = out_def_data["mask"] return output_def - """ def forward( self, diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py index 0e3bb26a1c..72906494c4 100644 --- a/deepmd/pt/model/task/denoise.py +++ b/deepmd/pt/model/task/denoise.py @@ -333,6 +333,9 @@ def serialize(self) -> dict: "coord_nets": self.filter_layers_coord.serialize(), "token_nets": self.filter_layers_token.serialize(), "exclude_types": self.exclude_types, + 
"coord_noise": self.coord_noise, + "cell_pert_fraction": self.cell_pert_fraction, + "noise_type": self.noise_type, "@variables": { "bias_atom_e": to_numpy_array(self.bias_atom_e), "case_embd": to_numpy_array(self.case_embd), diff --git a/source/tests/universal/common/cases/model/model.py b/source/tests/universal/common/cases/model/model.py index 06ddd90970..1a6faea48c 100644 --- a/source/tests/universal/common/cases/model/model.py +++ b/source/tests/universal/common/cases/model/model.py @@ -174,3 +174,23 @@ def setUpClass(cls) -> None: cls.rprec_dict = {} cls.epsilon_dict = {} cls.skip_test_autodiff = True + + +class DenoiseModelTest(ModelTestCase): + @classmethod + def setUpClass(cls) -> None: + cls.expected_rcut = 5.0 + cls.expected_type_map = ["O", "H"] + cls.expected_dim_fparam = 0 + cls.expected_dim_aparam = 0 + cls.expected_sel_type = [0, 1] + cls.expected_aparam_nall = False + cls.expected_model_output_type = ["strain_components", "updated_coord", "logits", "mask"] + cls.model_output_equivariant = ["updated_coord"] + cls.expected_sel = [46, 92] + cls.expected_sel_mix = sum(cls.expected_sel) + cls.expected_has_message_passing = False + cls.aprec_dict = {} + cls.rprec_dict = {} + cls.epsilon_dict = {} + cls.skip_test_autodiff = True diff --git a/source/tests/universal/dpmodel/fitting/test_fitting.py b/source/tests/universal/dpmodel/fitting/test_fitting.py index 1faa894c9f..451cd7e4de 100644 --- a/source/tests/universal/dpmodel/fitting/test_fitting.py +++ b/source/tests/universal/dpmodel/fitting/test_fitting.py @@ -260,6 +260,9 @@ def FittingParamDenoise( "numb_fparam": numb_param, "numb_aparam": numb_param, "dim_case_embd": numb_param, + "coord_noise": 0.2, + "cell_pert_fraction": 0.008, + "noise_type": "gaussian", } return input_dict diff --git a/source/tests/universal/pt/model/test_model.py b/source/tests/universal/pt/model/test_model.py index 867fa48b87..069863cb57 100644 --- a/source/tests/universal/pt/model/test_model.py +++ b/source/tests/universal/pt/model/test_model.py @@ -25,6 +25,7 @@ LinearEnergyModel, PolarModel, PropertyModel, + DenoiseModel, SpinEnergyModel, ) from deepmd.pt.model.task import ( @@ -33,6 +34,7 @@ EnergyFittingNet, PolarFittingNet, PropertyFittingNet, + DenoiseFittingNet, ) from deepmd.utils.spin import ( Spin, @@ -48,6 +50,7 @@ LinearEnerModelTest, PolarModelTest, PropertyModelTest, + DenoiseModelTest, SpinEnerModelTest, ZBLModelTest, ) @@ -81,6 +84,8 @@ FittingParamPolarList, FittingParamProperty, FittingParamPropertyList, + FittingParamDenoise, + FittingParamDenoiseList, ) from ...dpmodel.model.test_model import ( skip_model_tests, @@ -106,6 +111,7 @@ FittingParamDipole, FittingParamPolar, FittingParamProperty, + FittingParamDenoise, ] @@ -919,3 +925,95 @@ def setUpClass(cls) -> None: cls.expected_dim_fparam = ft1.get_dim_fparam() cls.expected_dim_aparam = ft1.get_dim_aparam() cls.expected_sel_type = ft1.get_sel_type() + + +@parameterized( + des_parameterized=( + ( + *[(param_func, DescrptDPA1) for param_func in DescriptorParamDPA1List], + *[(param_func, DescrptDPA2) for param_func in DescriptorParamDPA2List], + *[(param_func, DescrptDPA3) for param_func in DescriptorParamDPA3List], + (DescriptorParamHybrid, DescrptHybrid), + (DescriptorParamHybridMixed, DescrptHybrid), + ), # descrpt_class_param & class + ((FittingParamDenoise, DenoiseFittingNet),), # fitting_class_param & class + ), + fit_parameterized=( + ( + (DescriptorParamDPA1, DescrptDPA1), + (DescriptorParamDPA2, DescrptDPA2), + (DescriptorParamDPA3, DescrptDPA3), + ), # descrpt_class_param & 
class + ( + *[ + (param_func, DenoiseFittingNet) + for param_func in FittingParamDenoiseList + ], + ), # fitting_class_param & class + ), +) +class TestDenoiseModelPT(unittest.TestCase, DenoiseModelTest, PTTestCase): + @property + def modules_to_test(self): + skip_test_jit = getattr(self, "skip_test_jit", False) + modules = PTTestCase.modules_to_test.fget(self) + if not skip_test_jit: + # for Model, we can test script module API + modules += [ + self._script_module + if hasattr(self, "_script_module") + else self.script_module + ] + return modules + + @classmethod + def setUpClass(cls) -> None: + DenoiseModelTest.setUpClass() + (DescriptorParam, Descrpt) = cls.param[0] + (FittingParam, Fitting) = cls.param[1] + # set special precision + #if Descrpt in [DescrptDPA2]: + # cls.epsilon_dict["test_smooth"] = 1e-8 + cls.input_dict_ds = DescriptorParam( + len(cls.expected_type_map), + cls.expected_rcut, + cls.expected_rcut / 2, + cls.expected_sel, + cls.expected_type_map, + ) + + # set skip tests + skiptest, skip_reason = skip_model_tests(cls) + if skiptest: + raise cls.skipTest(cls, skip_reason) + + ds = Descrpt(**cls.input_dict_ds) + cls.input_dict_ft = FittingParam( + ntypes=len(cls.expected_type_map), + dim_descrpt=ds.get_dim_out(), + mixed_types=ds.mixed_types(), + type_map=cls.expected_type_map, + embedding_width=ds.get_dim_emb(), + ) + ft = Fitting( + **cls.input_dict_ft, + ) + cls.module = DenoiseModel( + ds, + ft, + type_map=cls.expected_type_map, + ) + # only test jit API once for different models + if ( + DescriptorParam not in defalut_des_param + or FittingParam not in defalut_fit_param + ): + cls.skip_test_jit = True + else: + with torch.jit.optimized_execution(False): + cls._script_module = torch.jit.script(cls.module) + cls.output_def = cls.module.translated_output_def() + cls.expected_has_message_passing = ds.has_message_passing() + cls.expected_sel_type = ft.get_sel_type() + cls.expected_dim_fparam = ft.get_dim_fparam() + cls.expected_dim_aparam = ft.get_dim_aparam() From acbbdea30938749b6d84e97003bdd44a56671fc5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 10:03:23 +0000 Subject: [PATCH 20/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/model/model/denoise_model.py | 2 +- source/tests/universal/common/cases/model/model.py | 7 ++++++- source/tests/universal/pt/model/test_model.py | 12 ++++++------ 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/deepmd/pt/model/model/denoise_model.py b/deepmd/pt/model/model/denoise_model.py index 411eb023d7..e2bbf38241 100644 --- a/deepmd/pt/model/model/denoise_model.py +++ b/deepmd/pt/model/model/denoise_model.py @@ -40,7 +40,7 @@ def translated_output_def(self): "strain_components": out_def_data["strain_components_redu"], "atom_strain_components": out_def_data["strain_components"], "updated_coord": out_def_data["updated_coord"], - "logits": out_def_data["logits"] + "logits": out_def_data["logits"], } if "mask" in out_def_data: output_def["mask"] = out_def_data["mask"] diff --git a/source/tests/universal/common/cases/model/model.py b/source/tests/universal/common/cases/model/model.py index 1a6faea48c..791e00fdb5 100644 --- a/source/tests/universal/common/cases/model/model.py +++ b/source/tests/universal/common/cases/model/model.py @@ -185,7 +185,12 @@ def setUpClass(cls) -> None: cls.expected_dim_aparam = 0 cls.expected_sel_type = [0, 1] cls.expected_aparam_nall = False - 
cls.expected_model_output_type = ["strain_components", "updated_coord", "logits", "mask"] + cls.expected_model_output_type = [ + "strain_components", + "updated_coord", + "logits", + "mask", + ] cls.model_output_equivariant = ["updated_coord"] cls.expected_sel = [46, 92] cls.expected_sel_mix = sum(cls.expected_sel) diff --git a/source/tests/universal/pt/model/test_model.py b/source/tests/universal/pt/model/test_model.py index 069863cb57..bade6a9390 100644 --- a/source/tests/universal/pt/model/test_model.py +++ b/source/tests/universal/pt/model/test_model.py @@ -18,6 +18,7 @@ DescrptSeTTebd, ) from deepmd.pt.model.model import ( + DenoiseModel, DipoleModel, DOSModel, DPZBLModel, @@ -25,16 +26,15 @@ LinearEnergyModel, PolarModel, PropertyModel, - DenoiseModel, SpinEnergyModel, ) from deepmd.pt.model.task import ( + DenoiseFittingNet, DipoleFittingNet, DOSFittingNet, EnergyFittingNet, PolarFittingNet, PropertyFittingNet, - DenoiseFittingNet, ) from deepmd.utils.spin import ( Spin, @@ -44,13 +44,13 @@ parameterized, ) from ...common.cases.model.model import ( + DenoiseModelTest, DipoleModelTest, DosModelTest, EnerModelTest, LinearEnerModelTest, PolarModelTest, PropertyModelTest, - DenoiseModelTest, SpinEnerModelTest, ZBLModelTest, ) @@ -74,6 +74,8 @@ DescriptorParamSeTTebdList, ) from ...dpmodel.fitting.test_fitting import ( + FittingParamDenoise, + FittingParamDenoiseList, FittingParamDipole, FittingParamDipoleList, FittingParamDos, @@ -84,8 +86,6 @@ FittingParamPolarList, FittingParamProperty, FittingParamPropertyList, - FittingParamDenoise, - FittingParamDenoiseList, ) from ...dpmodel.model.test_model import ( skip_model_tests, @@ -972,7 +972,7 @@ def setUpClass(cls) -> None: (DescriptorParam, Descrpt) = cls.param[0] (FittingParam, Fitting) = cls.param[1] # set special precision - #if Descrpt in [DescrptDPA2]: + # if Descrpt in [DescrptDPA2]: # cls.epsilon_dict["test_smooth"] = 1e-8 cls.input_dict_ds = DescriptorParam( len(cls.expected_type_map), From 5c58054322a6f0d05eb119e0726c1bdc90244633 Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Wed, 26 Mar 2025 16:01:27 +0800 Subject: [PATCH 21/26] delete special precision --- source/tests/universal/pt/model/test_model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/source/tests/universal/pt/model/test_model.py b/source/tests/universal/pt/model/test_model.py index 069863cb57..fc522dacfd 100644 --- a/source/tests/universal/pt/model/test_model.py +++ b/source/tests/universal/pt/model/test_model.py @@ -971,9 +971,6 @@ def setUpClass(cls) -> None: DenoiseModelTest.setUpClass() (DescriptorParam, Descrpt) = cls.param[0] (FittingParam, Fitting) = cls.param[1] - # set special precision - #if Descrpt in [DescrptDPA2]: - # cls.epsilon_dict["test_smooth"] = 1e-8 cls.input_dict_ds = DescriptorParam( len(cls.expected_type_map), cls.expected_rcut, From ebb2c345026fcd8eba738061f22146ea7c68615a Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Wed, 26 Mar 2025 19:19:42 +0800 Subject: [PATCH 22/26] Add universial denoise loss UT --- deepmd/pt/loss/denoise.py | 35 ++++++++++++++----- deepmd/pt/utils/stat.py | 4 +++ .../universal/common/cases/loss/utils.py | 34 +++++++++++++----- source/tests/universal/pt/loss/test_loss.py | 3 ++ 4 files changed, 59 insertions(+), 17 deletions(-) diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py index 45dc37ae77..7ee8dcdfed 100644 --- a/deepmd/pt/loss/denoise.py +++ b/deepmd/pt/loss/denoise.py @@ -92,7 +92,8 @@ def __init__( Other keyword arguments. 
""" super().__init__() - self.mask_type_idx = ntypes - 1 + self.ntypes = ntypes + self.mask_type_idx = self.ntypes - 1 self.mask_token = mask_token self.mask_coord = mask_coord self.mask_cell = mask_cell @@ -300,11 +301,27 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): @property def label_requirement(self) -> list[DataRequirementItem]: """Return data label requirements needed for this loss calculation.""" - return [] - - def serialize(self) -> dict: - pass - - @classmethod - def deserialize(cls, data: dict) -> "TaskLoss": - pass + label_requirement = [ + DataRequirementItem( + "strain_components", + ndof=6, + atomic=False, + must=False, + high_prec=False, + ), + DataRequirementItem( + "updated_coord", + ndof=3, + atomic=True, + must=False, + high_prec=False, + ), + DataRequirementItem( + "logits", + ndof=self.ntypes - 1, + atomic=True, + must=False, + high_prec=False, + ), + ] + return label_requirement \ No newline at end of file diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py index cf6892b49d..c23f34929e 100644 --- a/deepmd/pt/utils/stat.py +++ b/deepmd/pt/utils/stat.py @@ -281,6 +281,10 @@ def compute_output_stats( intensive : bool, optional Whether the fitting target is intensive. """ + # in denoise mode, label is created in loss, so we don't need to compute the bias + if ("strain_components" in keys) or ("updated_coord" in keys) or ("logits" in keys): + keys = [] + # try to restore the bias from stat file bias_atom_e, std_atom_e = _restore_from_file(stat_file_path, keys) diff --git a/source/tests/universal/common/cases/loss/utils.py b/source/tests/universal/common/cases/loss/utils.py index 63e6e3ed27..a8f31ae0dc 100644 --- a/source/tests/universal/common/cases/loss/utils.py +++ b/source/tests/universal/common/cases/loss/utils.py @@ -35,7 +35,7 @@ def test_forward(self): natoms = 5 nframes = 2 - def fake_model(): + def fake_model(**kwargs): model_predict = { data_key: fake_input( label_dict[data_key], natoms=natoms, nframes=nframes @@ -55,13 +55,31 @@ def fake_model(): } labels.update({"find_" + data_key: 1.0 for data_key in label_keys}) - _, loss, more_loss = module( - {}, - fake_model, - labels, - natoms, - 1.0, - ) + if "updated_coord" in self.key_to_pref_map: + import torch + from deepmd.pt.utils import ( + env, + ) + labels.update({"type_mask": torch.tensor([[False]*natoms, [False]*natoms], dtype=torch.bool, device=env.DEVICE)}) + input_dict = {} + input_dict["box"] = torch.tensor([[1,0,0,0,1,0,0,0,1]] * nframes,dtype=env.GLOBAL_PT_FLOAT_PRECISION) + input_dict["atype"] = torch.tensor([[0]*natoms,[0]*natoms],dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + input_dict["coord"] = torch.tensor([[[0]*3]*natoms]*nframes,dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + _, loss, more_loss = module( + input_dict, + fake_model, + labels, + natoms, + 1.0, + ) + else: + _, loss, more_loss = module( + {}, + fake_model, + labels, + natoms, + 1.0, + ) def fake_input(data_item: DataRequirementItem, natoms=5, nframes=2) -> np.ndarray: diff --git a/source/tests/universal/pt/loss/test_loss.py b/source/tests/universal/pt/loss/test_loss.py index 47c2d06fbc..20b694d8e2 100644 --- a/source/tests/universal/pt/loss/test_loss.py +++ b/source/tests/universal/pt/loss/test_loss.py @@ -6,6 +6,7 @@ EnergySpinLoss, EnergyStdLoss, PropertyLoss, + DenoiseLoss, TensorLoss, ) @@ -20,6 +21,7 @@ LossParamEnergyList, LossParamEnergySpinList, LossParamPropertyList, + LossParamDenoiseList, LossParamTensorList, ) from ..backend import ( @@ -34,6 +36,7 @@ 
*[(param_func, DOSLoss) for param_func in LossParamDosList], *[(param_func, TensorLoss) for param_func in LossParamTensorList], *[(param_func, PropertyLoss) for param_func in LossParamPropertyList], + *[(param_func, DenoiseLoss) for param_func in LossParamDenoiseList], ) # class_param & class ) class TestLossPT(unittest.TestCase, LossTest, PTTestCase): From 9e1e1f40a18661c9eb70875df83dc5f106f0d6e9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 10:45:44 +0000 Subject: [PATCH 23/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/pt/loss/denoise.py | 2 +- .../universal/common/cases/loss/utils.py | 29 ++++++++++++++++--- source/tests/universal/pt/loss/test_loss.py | 4 +-- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py index 7ee8dcdfed..fda716eaa3 100644 --- a/deepmd/pt/loss/denoise.py +++ b/deepmd/pt/loss/denoise.py @@ -324,4 +324,4 @@ def label_requirement(self) -> list[DataRequirementItem]: high_prec=False, ), ] - return label_requirement \ No newline at end of file + return label_requirement diff --git a/source/tests/universal/common/cases/loss/utils.py b/source/tests/universal/common/cases/loss/utils.py index a8f31ae0dc..374a43ebdc 100644 --- a/source/tests/universal/common/cases/loss/utils.py +++ b/source/tests/universal/common/cases/loss/utils.py @@ -57,14 +57,35 @@ def fake_model(**kwargs): if "updated_coord" in self.key_to_pref_map: import torch + from deepmd.pt.utils import ( env, ) - labels.update({"type_mask": torch.tensor([[False]*natoms, [False]*natoms], dtype=torch.bool, device=env.DEVICE)}) + + labels.update( + { + "type_mask": torch.tensor( + [[False] * natoms, [False] * natoms], + dtype=torch.bool, + device=env.DEVICE, + ) + } + ) input_dict = {} - input_dict["box"] = torch.tensor([[1,0,0,0,1,0,0,0,1]] * nframes,dtype=env.GLOBAL_PT_FLOAT_PRECISION) - input_dict["atype"] = torch.tensor([[0]*natoms,[0]*natoms],dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) - input_dict["coord"] = torch.tensor([[[0]*3]*natoms]*nframes,dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + input_dict["box"] = torch.tensor( + [[1, 0, 0, 0, 1, 0, 0, 0, 1]] * nframes, + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + ) + input_dict["atype"] = torch.tensor( + [[0] * natoms, [0] * natoms], + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.DEVICE, + ) + input_dict["coord"] = torch.tensor( + [[[0] * 3] * natoms] * nframes, + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.DEVICE, + ) _, loss, more_loss = module( input_dict, fake_model, diff --git a/source/tests/universal/pt/loss/test_loss.py b/source/tests/universal/pt/loss/test_loss.py index 20b694d8e2..c3f4cdce26 100644 --- a/source/tests/universal/pt/loss/test_loss.py +++ b/source/tests/universal/pt/loss/test_loss.py @@ -2,11 +2,11 @@ import unittest from deepmd.pt.loss import ( + DenoiseLoss, DOSLoss, EnergySpinLoss, EnergyStdLoss, PropertyLoss, - DenoiseLoss, TensorLoss, ) @@ -17,11 +17,11 @@ LossTest, ) from ...dpmodel.loss.test_loss import ( + LossParamDenoiseList, LossParamDosList, LossParamEnergyList, LossParamEnergySpinList, LossParamPropertyList, - LossParamDenoiseList, LossParamTensorList, ) from ..backend import ( From ecf937deb516fd4e9cfa53759e9e87901fb74a03 Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Fri, 28 Mar 2025 15:31:19 +0800 Subject: [PATCH 24/26] Fix torch cuda UT --- 
deepmd/pt/loss/denoise.py | 8 +++++--- source/tests/universal/common/cases/loss/utils.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py index 7ee8dcdfed..c812bb4aa4 100644 --- a/deepmd/pt/loss/denoise.py +++ b/deepmd/pt/loss/denoise.py @@ -135,7 +135,8 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): rng = np.random.default_rng() nloc = input_dict["atype"].shape[1] nbz = input_dict["atype"].shape[0] - input_dict["box"] = input_dict["box"].cuda() + if torch.cuda.is_available(): + input_dict["box"] = input_dict["box"].cuda() # TODO: Change lattice to lower triangular matrix @@ -177,7 +178,7 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): if mask_num == 0: mask_num = 1 else: - NotImplementedError(f"Unknown noise mode {self.noise_mode}!") + raise NotImplementedError(f"Unknown noise mode {self.noise_mode}!") coord_mask_all = torch.zeros( input_dict["atype"].shape, dtype=torch.bool, device=env.DEVICE @@ -231,8 +232,9 @@ def forward(self, input_dict, model, label, natoms, learning_rate, mae=False): if self.same_mask: type_mask = coord_mask_all[ii].clone() else: + mask_count = min(self.mask_num, nloc) type_mask_res = rng.choice( - range(nloc), self.mask_num, replace=False + range(nloc), mask_count, replace=False ).tolist() type_mask = np.isin(range(nloc), type_mask_res) input_dict["atype"][ii][type_mask] = self.mask_type_idx diff --git a/source/tests/universal/common/cases/loss/utils.py b/source/tests/universal/common/cases/loss/utils.py index a8f31ae0dc..a59ecc2130 100644 --- a/source/tests/universal/common/cases/loss/utils.py +++ b/source/tests/universal/common/cases/loss/utils.py @@ -62,7 +62,7 @@ def fake_model(**kwargs): ) labels.update({"type_mask": torch.tensor([[False]*natoms, [False]*natoms], dtype=torch.bool, device=env.DEVICE)}) input_dict = {} - input_dict["box"] = torch.tensor([[1,0,0,0,1,0,0,0,1]] * nframes,dtype=env.GLOBAL_PT_FLOAT_PRECISION) + input_dict["box"] = torch.tensor([[1,0,0,0,1,0,0,0,1]] * nframes,dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) input_dict["atype"] = torch.tensor([[0]*natoms,[0]*natoms],dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) input_dict["coord"] = torch.tensor([[[0]*3]*natoms]*nframes,dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) _, loss, more_loss = module( From d4689173794cf9c633dac86175a62720748e4b88 Mon Sep 17 00:00:00 2001 From: root <2000011006@stu.pku.edu.cn> Date: Fri, 28 Mar 2025 17:06:19 +0800 Subject: [PATCH 25/26] Add DeepDenoise part, but not complete --- deepmd/entrypoints/test.py | 5 ++ deepmd/infer/deep_denoise.py | 138 +++++++++++++++++++++++++++++++++++ deepmd/pt/infer/deep_eval.py | 5 ++ 3 files changed, 148 insertions(+) create mode 100644 deepmd/infer/deep_denoise.py diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 919d23f757..40d85725a0 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -34,6 +34,9 @@ from deepmd.infer.deep_property import ( DeepProperty, ) +from deepmd.infer.deep_denoise import ( + DeepDenoise, +) from deepmd.infer.deep_wfc import ( DeepWFC, ) @@ -174,6 +177,8 @@ def test( err = test_polar( dp, data, numb_test, detail_file, atomic=False ) # YWolfeee: downward compatibility + elif isinstance(dp, DeepDenoise): + raise NotImplementedError("DeepDenoise is not supported in test mode.") log.info("# ----------------------------------------------- ") err_coll.append(err) diff --git 
a/deepmd/infer/deep_denoise.py b/deepmd/infer/deep_denoise.py
new file mode 100644
index 0000000000..0f71fec87e
--- /dev/null
+++ b/deepmd/infer/deep_denoise.py
@@ -0,0 +1,138 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    Any,
+    Optional,
+    Union,
+)
+
+import numpy as np
+
+from deepmd.dpmodel.output_def import (
+    FittingOutputDef,
+    ModelOutputDef,
+    OutputVariableDef,
+)
+
+from .deep_eval import (
+    DeepEval,
+)
+from deepmd.pt.utils.region import (
+    phys2inter,
+    inter2phys,
+)
+
+class DeepDenoise(DeepEval):
+    """Given structures with noise, denoising them to get relaxed structures.
+
+    Parameters
+    ----------
+    model_file : Path
+        The name of the frozen model file.
+    *args : list
+        Positional arguments.
+    auto_batch_size : bool or int or AutoBatchSize, default: True
+        If True, automatic batch size will be used. If int, it will be used
+        as the initial batch size.
+    neighbor_list : ase.neighborlist.NewPrimitiveNeighborList, optional
+        The ASE neighbor list class to produce the neighbor list. If None, the
+        neighbor list will be built natively in the model.
+    **kwargs : dict
+        Keyword arguments.
+    """
+
+    @property
+    def output_def(self) -> ModelOutputDef:
+        """
+        Get the output definition of this model.
+        """
+        return ModelOutputDef(
+            FittingOutputDef(
+                [
+                    OutputVariableDef(
+                        "strain_components",
+                        [6],
+                        reducible=True,
+                        r_differentiable=False,
+                        c_differentiable=False,
+                        intensive=True,
+                    ),
+                    OutputVariableDef(
+                        "updated_coord",
+                        [3],
+                        reducible=False,
+                        r_differentiable=False,
+                        c_differentiable=False,
+                    ),
+                    OutputVariableDef(
+                        "logits",
+                        [-1],
+                        reducible=False,
+                        r_differentiable=False,
+                        c_differentiable=False,
+                    ),
+                ]
+            )
+        )
+
+    def eval(
+        self,
+        coords: np.ndarray,
+        cells: Optional[np.ndarray],
+        atom_types: Union[list[int], np.ndarray],
+        atomic: bool = False,
+        fparam: Optional[np.ndarray] = None,
+        aparam: Optional[np.ndarray] = None,
+        mixed_type: bool = False,
+        **kwargs: dict[str, Any],
+    ) -> tuple[np.ndarray, ...]:
+        """Evaluate the denoise model. If atomic is True, also return per-atom outputs.
+
+        Parameters
+        ----------
+        coords : np.ndarray
+            The coordinates of the atoms, in shape (nframes, natoms, 3).
+        cells : np.ndarray
+            The cell vectors of the system, in shape (nframes, 9). If the system
+            is not periodic, set it to None.
+        atom_types : list[int] or np.ndarray
+            The types of the atoms. If mixed_type is False, the shape is (natoms,);
+            otherwise, the shape is (nframes, natoms).
+        atomic : bool, optional
+            Whether to return atomic property, by default False.
+        fparam : np.ndarray, optional
+            The frame parameters, by default None.
+        aparam : np.ndarray, optional
+            The atomic parameters, by default None.
+        mixed_type : bool, optional
+            Whether the atom_types is mixed type, by default False.
+        **kwargs : dict[str, Any]
+            Keyword arguments.
+
+        Returns
+        -------
+        results
+            The denoise predictions defined in `output_def`: the strain components, the updated coordinates, and the type logits.
+ """ + ( + coords, + cells, + atom_types, + fparam, + aparam, + nframes, + natoms, + ) = self._standard_input(coords, cells, atom_types, fparam, aparam, mixed_type) + results = self.deep_eval.eval( + coords, + cells, + atom_types, + atomic, + fparam=fparam, + aparam=aparam, + **kwargs, + ) + + #TODO: + return None + +__all__ = ["DeepDenoise"] \ No newline at end of file diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index d1cc4fb82f..f6fb5bdd8d 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -37,6 +37,9 @@ from deepmd.infer.deep_property import ( DeepProperty, ) +from deepmd.infer.deep_denoise import ( + DeepDenoise, +) from deepmd.infer.deep_wfc import ( DeepWFC, ) @@ -211,6 +214,8 @@ def model_type(self) -> type["DeepEvalWrapper"]: return DeepGlobalPolar elif "wfc" in model_output_type: return DeepWFC + elif "updated_coord" in model_output_type: + return DeepDenoise elif self.get_var_name() in model_output_type: return DeepProperty else: From 0a2f57dbee5c324008b34582ad4bc0a75569eb59 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 28 Mar 2025 09:09:38 +0000 Subject: [PATCH 26/26] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- deepmd/entrypoints/test.py | 6 +++--- deepmd/infer/deep_denoise.py | 14 ++++++-------- deepmd/pt/infer/deep_eval.py | 6 +++--- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/deepmd/entrypoints/test.py b/deepmd/entrypoints/test.py index 40d85725a0..71e048fd34 100644 --- a/deepmd/entrypoints/test.py +++ b/deepmd/entrypoints/test.py @@ -15,6 +15,9 @@ from deepmd.common import ( expand_sys_str, ) +from deepmd.infer.deep_denoise import ( + DeepDenoise, +) from deepmd.infer.deep_dipole import ( DeepDipole, ) @@ -34,9 +37,6 @@ from deepmd.infer.deep_property import ( DeepProperty, ) -from deepmd.infer.deep_denoise import ( - DeepDenoise, -) from deepmd.infer.deep_wfc import ( DeepWFC, ) diff --git a/deepmd/infer/deep_denoise.py b/deepmd/infer/deep_denoise.py index 0f71fec87e..cc6498e070 100644 --- a/deepmd/infer/deep_denoise.py +++ b/deepmd/infer/deep_denoise.py @@ -16,10 +16,7 @@ from .deep_eval import ( DeepEval, ) -from deepmd.pt.utils.region import ( - phys2inter, - inter2phys, -) + class DeepDenoise(DeepEval): """Given structures with noise, denoising them to get relaxed structures. @@ -39,7 +36,7 @@ class DeepDenoise(DeepEval): **kwargs : dict Keyword arguments. """ - + @property def output_def(self) -> ModelOutputDef: """ @@ -131,8 +128,9 @@ def eval( aparam=aparam, **kwargs, ) - - #TODO: + + # TODO: return None -__all__ = ["DeepDenoise"] \ No newline at end of file + +__all__ = ["DeepDenoise"] diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py index f6fb5bdd8d..5b28c8e3b3 100644 --- a/deepmd/pt/infer/deep_eval.py +++ b/deepmd/pt/infer/deep_eval.py @@ -17,6 +17,9 @@ OutputVariableCategory, OutputVariableDef, ) +from deepmd.infer.deep_denoise import ( + DeepDenoise, +) from deepmd.infer.deep_dipole import ( DeepDipole, ) @@ -37,9 +40,6 @@ from deepmd.infer.deep_property import ( DeepProperty, ) -from deepmd.infer.deep_denoise import ( - DeepDenoise, -) from deepmd.infer.deep_wfc import ( DeepWFC, )