Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
51 commits
Select commit Hold shift + click to select a range
584613b
Add pt2 format for C/C++ inference.
Mar 7, 2026
98fa3fa
fix(dpmodel): fix NoPbc for DPA1 pt2 export and unify test models
Mar 7, 2026
6f7c11f
feat(c++): add DPA2 C++ inference tests and fix mapping bug in all ba…
Mar 8, 2026
f92b408
feat(c++): add DPA3 .pt2 export and C/C++ inference tests
Mar 9, 2026
46908c0
fix(ci): restore pre-committed .pth files and handle missing custom ops
Mar 9, 2026
da5579d
temporarily clears the default device (None) before calling aoti_comp…
Mar 9, 2026
0091315
fix issue of c/c++ ut
Mar 9, 2026
46c56e3
check the size validity
Mar 9, 2026
1ec9b85
fix: zip 64-bit widths
Mar 9, 2026
fbb8a61
rm dead code
Mar 9, 2026
7530a6e
fix: trace on CPU during export to avoid CUDA stream assert in make_fx
Mar 9, 2026
0269ef7
fix: trace on CPU then move to target device for CUDA-compatible export
Mar 9, 2026
f120319
fix(ci): preload LSAN runtime for gen scripts in sanitizer builds
Mar 9, 2026
f60f423
fix(export): use move_to_device_pass for CPU→CUDA device relocation
Mar 10, 2026
f564b1a
fix: harden C++ parser, fix CI gen script failures
Mar 10, 2026
cd90c31
fix(ci): clear LD_PRELOAD before AOTInductor compilation
Mar 10, 2026
914f401
feat(c++): implement has_default_fparam for pt_expt backend
Mar 21, 2026
9f52a35
fix: address coderabbitai review comments on PR #5298
Mar 21, 2026
5b346f6
fix(ci): don't preload LSAN into gen script Python processes
Mar 21, 2026
4b1f897
fix: resolve github-advanced-security alerts
Mar 21, 2026
1aded45
fix(ci): preload LSAN with detect_leaks=0 for gen scripts
Mar 21, 2026
59a3bf7
fix(build): guard pt_expt C++ backend on AOTInductor header availability
Mar 21, 2026
460ccf7
fix: move _load_custom_ops after deepmd.pt import in gen scripts
Mar 22, 2026
dd0b96a
fix(ci): install custom op .so to deepmd/lib before gen scripts
Mar 22, 2026
dd75039
fix(ci): add install prefix to LD_LIBRARY_PATH for gen scripts
Mar 22, 2026
00327cc
fix(ci): use SHARED_LIB_DIR to find correct custom op install path
Mar 22, 2026
7e04ad5
fix(ci): guard gen scripts behind ENABLE_PYTORCH and clean up test_cc…
Mar 22, 2026
f25a800
fix: address CodeQL alerts — integer overflow and duplicate import
Mar 23, 2026
6434c9a
fix(pt): add fallback stub for tabulate_fusion_se_t_tebd custom op
Mar 23, 2026
6d21400
fix: widen all int operands in fold_back to ptrdiff_t
Mar 23, 2026
a2ec6f7
merge upstream/master into feat-pt-expt-c
Mar 23, 2026
f911ee0
refactor: extract common gen script helpers into gen_common.py
Mar 23, 2026
ce9f3dd
refactor: simplify custom op .so install in test_cc_local.sh
Mar 23, 2026
50507c6
test(pt_expt): add .pt2 (AOTInductor) unit tests for freeze and compress
Mar 23, 2026
5a53e20
fix: support default fparam in .pt2 inference and NoPBC negative coords
Mar 24, 2026
00fcba0
merge upstream/master into feat-pt-expt-c
Mar 24, 2026
35e3335
test(c++): add NoPBC atomic and multi-frame tests for DeepPotPTExpt
Mar 24, 2026
0cb00cb
Merge remote-tracking branch 'origin/feat-pt-expt-c' into feat-pt-exp…
Mar 24, 2026
9b52564
test(pt_expt): add .pt2 finetune tests
Mar 24, 2026
1f1d929
test(c++): add parser, ZIP, and metadata accessor tests for DeepPotPT…
Mar 24, 2026
1a45b88
merge upstream/master into feat-pt-expt-pt2-uts
Mar 24, 2026
80aa996
Merge remote-tracking branch 'upstream/master' into feat-pt-expt-pt2-uts
Mar 25, 2026
78d5bde
test(pt_expt): add .pt2 change-bias tests
Mar 25, 2026
b6e7db3
fix: address reviewer comments on PR #5334
Mar 25, 2026
196bcca
fix(c++): raise error when fparam required but not provided and no de…
Mar 25, 2026
808b9c7
fix: address remaining reviewer comments
Mar 25, 2026
ef878f5
fix(tests): fix expected bias shape in pt2 user-defined test
Mar 25, 2026
b7ec3a6
fix(tests): add finiteness checks and set rtol=0 in freeze parity tests
Mar 25, 2026
6a0f145
perf(tests): speed up freeze and change-bias tests
Mar 25, 2026
acc89b3
perf(tests): share pretrained model in TestFinetuneCLI setUpClass
Mar 25, 2026
4c95f98
fix(tests): pop device contexts in TestDPFreeze setUpClass
Mar 25, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions deepmd/pt_expt/infer/deep_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -621,6 +621,22 @@ def _eval_model(
dtype=torch.float64,
device=DEVICE,
)
elif self._is_pt2 and self.get_dim_fparam() > 0:
# .pt2 models are compiled with fparam as a required input.
# When the user omits fparam, fill with default values from metadata.
default_fp = self.metadata.get("default_fparam")
if default_fp is not None:
fparam_t = (
torch.tensor(default_fp, dtype=torch.float64, device=DEVICE)
.unsqueeze(0)
.expand(nframes, -1)
.contiguous()
)
else:
raise ValueError(
f"fparam is required for this model (dim_fparam={self.get_dim_fparam()}) "
"but was not provided, and no default_fparam is stored in the model."
)
else:
fparam_t = None

Expand Down
1 change: 1 addition & 0 deletions deepmd/pt_expt/utils/serialization.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@ def _collect_metadata(model: torch.nn.Module) -> dict:
"mixed_types": model.mixed_types(),
"sel_type": model.get_sel_type(),
"has_default_fparam": model.has_default_fparam(),
"default_fparam": model.get_default_fparam(),
"fitting_output_defs": fitting_output_defs,
}

Expand Down
1 change: 1 addition & 0 deletions source/api_cc/include/DeepPotPTExpt.h
Original file line number Diff line number Diff line change
Expand Up @@ -202,6 +202,7 @@ class DeepPotPTExpt : public DeepPotBackend {
int daparam;
bool aparam_nall;
bool has_default_fparam_;
std::vector<double> default_fparam_;
double rcut;
int gpu_id;
bool gpu_enabled;
Expand Down
40 changes: 40 additions & 0 deletions source/api_cc/src/DeepPotPTExpt.cc
Original file line number Diff line number Diff line change
Expand Up @@ -636,6 +636,13 @@ void DeepPotPTExpt::init(const std::string& model,
} else {
has_default_fparam_ = false;
}
default_fparam_.clear();
if (has_default_fparam_ && metadata.obj_val.count("default_fparam") &&
metadata["default_fparam"].type == JsonValue::Array) {
for (const auto& v : metadata["default_fparam"].as_array()) {
default_fparam_.push_back(v.as_double());
}
}

type_map.clear();
for (const auto& v : metadata["type_map"].as_array()) {
Expand Down Expand Up @@ -818,6 +825,18 @@ void DeepPotPTExpt::compute(ENERGYVTYPE& ener,
valuetype_options)
.to(torch::kFloat64)
.to(device);
} else if (!default_fparam_.empty()) {
fparam_tensor =
torch::from_blob(const_cast<double*>(default_fparam_.data()),
{1, static_cast<std::int64_t>(default_fparam_.size())},
torch::TensorOptions().dtype(torch::kFloat64))
.clone()
.to(device);
} else if (dfparam > 0) {
throw deepmd::deepmd_exception(
"fparam is required for this model (dim_fparam=" +
std::to_string(dfparam) +
") but was not provided, and no default_fparam is stored.");
} else {
fparam_tensor = torch::zeros({0}, options).to(device);
}
Expand Down Expand Up @@ -982,6 +1001,15 @@ void DeepPotPTExpt::compute(ENERGYVTYPE& ener,
min_z = std::min(min_z, coord_d[ii * 3 + 2]);
max_z = std::max(max_z, coord_d[ii * 3 + 2]);
}
// Shift coords so minimum is at rcut (ensures all atoms are in [0, L))
double shift_x = rcut - min_x;
double shift_y = rcut - min_y;
double shift_z = rcut - min_z;
for (int ii = 0; ii < natoms; ++ii) {
coord_d[ii * 3 + 0] += shift_x;
coord_d[ii * 3 + 1] += shift_y;
coord_d[ii * 3 + 2] += shift_z;
}
box_d.resize(9, 0.0);
box_d[0] = (max_x - min_x) + 2.0 * rcut;
box_d[4] = (max_y - min_y) + 2.0 * rcut;
Expand Down Expand Up @@ -1052,6 +1080,18 @@ void DeepPotPTExpt::compute(ENERGYVTYPE& ener,
valuetype_options)
.to(torch::kFloat64)
.to(device);
} else if (!default_fparam_.empty()) {
fparam_tensor =
torch::from_blob(const_cast<double*>(default_fparam_.data()),
{1, static_cast<std::int64_t>(default_fparam_.size())},
torch::TensorOptions().dtype(torch::kFloat64))
.clone()
.to(device);
} else if (dfparam > 0) {
throw deepmd::deepmd_exception(
"fparam is required for this model (dim_fparam=" +
std::to_string(dfparam) +
") but was not provided, and no default_fparam is stored.");
} else {
fparam_tensor = torch::zeros({0}, options).to(device);
}
Expand Down
86 changes: 86 additions & 0 deletions source/api_cc/tests/test_deeppot_a_fparam_aparam_ptexpt.cc
Original file line number Diff line number Diff line change
Expand Up @@ -370,3 +370,89 @@ TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt, has_default_fparam) {
EXPECT_EQ(dp.dim_fparam(), 1);
EXPECT_TRUE(dp.has_default_fparam());
}

// Omitting fparam must give the same result as passing the stored default
// explicitly: the backend should fill in default_fparam from model metadata.
TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt, eval_default_vs_explicit) {
  using VALUETYPE = TypeParam;
  deepmd::DeepPot& dp = this->dp;

  // Six atoms of one type in a 13 A cubic box.
  const std::vector<VALUETYPE> coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74,
                                        00.25, 3.32, 1.68, 3.36,  3.00, 1.81,
                                        3.51,  2.51, 2.60, 4.27,  3.22, 1.56};
  const std::vector<int> atype = {0, 0, 0, 0, 0, 0};
  const std::vector<VALUETYPE> box = {13., 0., 0., 0., 13., 0., 0., 0., 13.};
  // The default fparam value from gen_fparam_aparam.py
  const std::vector<VALUETYPE> explicit_fparam = {0.25852028};
  const std::vector<VALUETYPE> aparam = {0.25852028, 0.25852028, 0.25852028,
                                         0.25852028, 0.25852028, 0.25852028};

  // Reference evaluation: fparam given explicitly.
  double e_explicit;
  std::vector<VALUETYPE> f_explicit, v_explicit;
  dp.compute(e_explicit, f_explicit, v_explicit, coord, atype, box,
             explicit_fparam, aparam);

  // Evaluation under test: fparam omitted, default must be used.
  double e_default;
  std::vector<VALUETYPE> f_default, v_default;
  const std::vector<VALUETYPE> empty_fparam;
  dp.compute(e_default, f_default, v_default, coord, atype, box, empty_fparam,
             aparam);

  // Energies, forces and virials must agree element-wise.
  EXPECT_LT(fabs(e_explicit - e_default), EPSILON);
  auto expect_all_close = [](const std::vector<VALUETYPE>& lhs,
                             const std::vector<VALUETYPE>& rhs) {
    for (size_t ii = 0; ii < lhs.size(); ++ii) {
      EXPECT_LT(fabs(lhs[ii] - rhs[ii]), EPSILON);
    }
  };
  expect_all_close(f_explicit, f_default);
  expect_all_close(v_explicit, v_default);
}

// Same test but with external nlist (LAMMPS path): omitting fparam must give
// the same result as passing the stored default explicitly when the neighbor
// list is built by the caller.
TYPED_TEST(TestInferDeepPotDefaultFParamPtExpt, eval_default_vs_explicit_lmp) {
  using VALUETYPE = TypeParam;
  deepmd::DeepPot& dp = this->dp;

  std::vector<VALUETYPE> coord = {12.83, 2.56, 2.18, 12.09, 2.87, 2.74,
                                  00.25, 3.32, 1.68, 3.36, 3.00, 1.81,
                                  3.51, 2.51, 2.60, 4.27, 3.22, 1.56};
  std::vector<int> atype = {0, 0, 0, 0, 0, 0};
  std::vector<VALUETYPE> box = {13., 0., 0., 0., 13., 0., 0., 0., 13.};
  // The default fparam value from gen_fparam_aparam.py
  std::vector<VALUETYPE> explicit_fparam = {0.25852028};
  std::vector<VALUETYPE> aparam = {0.25852028, 0.25852028, 0.25852028,
                                  0.25852028, 0.25852028, 0.25852028};

  // Keep the cutoff in double: dp.cutoff() returns double, and narrowing it
  // to float before building the neighbor list could drop neighbors sitting
  // exactly at the cutoff radius.
  const double rc = dp.cutoff();
  // Explicit casts: size()/3 is unsigned; the InputNlist API takes int.
  const int nloc = static_cast<int>(coord.size() / 3);
  std::vector<VALUETYPE> coord_cpy;
  std::vector<int> atype_cpy, mapping;
  std::vector<std::vector<int> > nlist_data;
  _build_nlist<VALUETYPE>(nlist_data, coord_cpy, atype_cpy, mapping, coord,
                          atype, box, rc);
  const int nall = static_cast<int>(coord_cpy.size() / 3);
  std::vector<int> ilist(nloc), numneigh(nloc);
  std::vector<int*> firstneigh(nloc);
  deepmd::InputNlist inlist(nloc, &ilist[0], &numneigh[0], &firstneigh[0]);
  convert_nlist(inlist, nlist_data);

  // Eval with explicit fparam (external nlist)
  double e_explicit;
  std::vector<VALUETYPE> f_explicit, v_explicit;
  dp.compute(e_explicit, f_explicit, v_explicit, coord_cpy, atype_cpy, box,
             nall - nloc, inlist, 0, explicit_fparam, aparam);

  // Eval without fparam: the backend must fall back to the default_fparam
  // stored in the model metadata and reproduce the explicit result.
  double e_default;
  std::vector<VALUETYPE> f_default, v_default;
  std::vector<VALUETYPE> empty_fparam;
  dp.compute(e_default, f_default, v_default, coord_cpy, atype_cpy, box,
             nall - nloc, inlist, 0, empty_fparam, aparam);

  // Energies, forces (local + ghost) and virials must agree element-wise.
  EXPECT_LT(fabs(e_explicit - e_default), EPSILON);
  for (size_t ii = 0; ii < f_explicit.size(); ++ii) {
    EXPECT_LT(fabs(f_explicit[ii] - f_default[ii]), EPSILON);
  }
  for (size_t ii = 0; ii < v_explicit.size(); ++ii) {
    EXPECT_LT(fabs(v_explicit[ii] - v_default[ii]), EPSILON);
  }
}
Loading
Loading