From b5756ded4554ee1eaa705d403c507c3b3c6c1093 Mon Sep 17 00:00:00 2001 From: Daniel Zuegner Date: Fri, 27 Feb 2026 16:44:41 +0000 Subject: [PATCH] save detailed metrics --- README.md | 7 +++++++ mattergen/evaluation/evaluate.py | 10 +++++++++- mattergen/scripts/evaluate.py | 2 ++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8696d80..1eb6eaa 100644 --- a/README.md +++ b/README.md @@ -164,6 +164,13 @@ If you want to save the relaxed structures, toghether with their energies, force ```bash mattergen-evaluate --structures_path=$RESULTS_PATH --relax=True --structure_matcher='disordered' --save_as='metrics' --structures_output_path="relaxed_structures.extxyz" ``` + +If you want to obtain per-structure metrics (e.g., `energy_above_hull` for every crystal rather than just the average), add `--save_detailed_as` to save a JSON file with per-structure values: +```bash +mattergen-evaluate --structures_path=$RESULTS_PATH --relax=True --structure_matcher='disordered' --save_as='metrics.json' --save_detailed_as='detailed_metrics.json' +``` +The detailed metrics file contains per-structure values for `energy_above_hull`, `self_consistent_energy_above_hull`, `stability`, `novelty`, `uniqueness`, and other metrics. + ### Benchmark In [`plot_benchmark_results.ipynb`](benchmark/plot_benchmark_results.ipynb) we provide a Jupyter notebook to generate figures like Figs. 2e and 2f in the paper. We further provide the resulting metrics of analyzing samples generated by several baselines under [`benchmark/metrics`](benchmark/metrics). You can add your own model's results by copying the metrics JSON file resulting from `mattergen-evaluate` into the same folder. Note, again, that these results were obtained via MatterSim relaxation and energies, so results will differ from those obtained via DFT (e.g., as those in the paper).

diff --git a/mattergen/evaluation/evaluate.py b/mattergen/evaluation/evaluate.py index a96ae6f..a3263fc 100644 --- a/mattergen/evaluation/evaluate.py +++ b/mattergen/evaluation/evaluate.py @@ -24,6 +24,7 @@ def evaluate( OrderedStructureMatcher | DisorderedStructureMatcher ) = DefaultDisorderedStructureMatcher(), save_as: str | None = None, + save_detailed_as: str | None = None, potential_load_path: str | None = None, device: str = str(get_device()), structures_output_path: str | None = None, @@ -38,6 +39,7 @@ def evaluate( reference: Reference dataset. If this is None, the default reference dataset will be used. structure_matcher: Structure matcher to use for matching the structures. save_as: Save the metrics as a JSON file. + save_detailed_as: Save per-structure metrics (e.g., energy_above_hull_per_atom) as a JSON file. potential_load_path: Path to the Machine Learning potential to use for relaxation. device: Device to use for relaxation. structures_output_path: Path to save the relaxed structures. 
@@ -62,8 +64,14 @@ def evaluate( structure_matcher=structure_matcher, energy_correction_scheme=energy_correction_scheme ) - return evaluator.compute_metrics( + metrics = evaluator.compute_metrics( metrics=evaluator.available_metrics, save_as=save_as, pretty_print=True, ) + if save_detailed_as is not None: + evaluator.as_dataframe( + metrics=evaluator.available_metrics, + save_as=save_detailed_as, + ) + return metrics diff --git a/mattergen/scripts/evaluate.py b/mattergen/scripts/evaluate.py index 3808c6d..8c6ade7 100644 --- a/mattergen/scripts/evaluate.py +++ b/mattergen/scripts/evaluate.py @@ -26,6 +26,7 @@ def main( energies_path: str | None = None, structure_matcher: Literal["ordered", "disordered"] = "disordered", save_as: str | None = None, + save_detailed_as: str | None = None, potential_load_path: ( Literal["MatterSim-v1.0.0-1M.pth", "MatterSim-v1.0.0-5M.pth"] | None ) = None, @@ -57,6 +58,7 @@ def main( energies=energies, structure_matcher=structure_matcher, save_as=save_as, + save_detailed_as=save_detailed_as, potential_load_path=potential_load_path, reference=reference, device=device,