
Commit 0844580

fix: issues in collect hashes APIs and add more tests
1 parent: 357ccf6

2 files changed (+384, −65 lines)

src/machine.cpp

Lines changed: 83 additions & 31 deletions
@@ -1967,13 +1967,17 @@ void machine::write_counter(uint64_t val, const char *name, const char *domain)
 
 void machine::collect_mcycle_root_hashes(uint64_t mcycle_phase, uint64_t mcycle_period, uint64_t period_count,
     mcycle_root_hashes &result) {
-    result.hashes.clear();
+    mcycle_root_hashes next_result;
+    std::swap(next_result, result);
     if (mcycle_period == 0) {
         throw std::runtime_error{"mcycle_period cannot be 0"};
     }
     if (mcycle_phase >= mcycle_period) {
         throw std::runtime_error{"mcycle_phase must be in {0, ..., mcycle_period-1}"};
     }
+    if (mcycle_phase > read_reg(reg::mcycle)) {
+        throw std::runtime_error{"mcycle_phase cannot be greater than machine mcycle"};
+    }
     if (read_reg(reg::iunrep) != 0) {
         throw std::runtime_error{"cannot collect hashes from unreproducible machines"};
     }
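
Both functions now follow a build-then-commit pattern: the output is assembled in a local next_result and swapped into the caller's result only on the success paths, so a thrown exception leaves result freshly cleared rather than half-populated. A minimal, generic sketch of the pattern (the report type and build_report name are illustrative, not emulator types):

#include <stdexcept>
#include <utility>
#include <vector>

struct report {
    std::vector<int> items;
};

// Build into a local scratch object and commit with swap, so `out` is never
// observed half-filled: on any throw, `out` keeps the cleared state it was
// swapped into at entry.
void build_report(int count, report &out) {
    report next;
    std::swap(next, out); // reuse the caller's allocated capacity; `out` is now empty
    next.items.clear();
    if (count < 0) {
        throw std::runtime_error{"count cannot be negative"}; // `out` stays empty
    }
    for (int i = 0; i < count; ++i) {
        next.items.push_back(i);
    }
    std::swap(next, out); // commit the finished result
}
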
@@ -1991,20 +1995,32 @@ void machine::collect_mcycle_root_hashes(uint64_t mcycle_phase, uint64_t mcycle_
             throw std::runtime_error{"microarchitecture is not reset"};
         }
     }
+    next_result.hashes.clear();
+    next_result.mcycle_phase = mcycle_phase;
+    // Check halted and yielded break reasons first to behave with same priority as the interpreter
+    if (read_reg(reg::iflags_H) != 0) {
+        next_result.break_reason = interpreter_break_reason::halted;
+        std::swap(next_result, result);
+        return;
+    }
+    if (read_reg(reg::iflags_Y) != 0) {
+        next_result.break_reason = interpreter_break_reason::yielded_manually;
+        std::swap(next_result, result);
+        return;
+    }
+    next_result.hashes.reserve(mcycle_period);
+    next_result.break_reason = interpreter_break_reason::reached_target_mcycle;
     collect_mcycle_hashes_state_access::context context{};
     context.dirty_pages.reserve(std::clamp<uint64_t>(mcycle_period * 4, 16, 4096));
-    result.hashes.reserve(mcycle_period);
     const collect_mcycle_hashes_state_access a(context, *this);
     os_silence_putchar(m_r.htif.no_console_putchar);
-    auto mcycle_start = read_reg(reg::mcycle);
+    auto mcycle_start = read_reg(reg::mcycle) - mcycle_phase;
     for (uint64_t period = 0; period < period_count; ++period) {
         uint64_t mcycle_target{};
-        if (__builtin_add_overflow(mcycle_start, mcycle_period - mcycle_phase, &mcycle_target)) {
+        if (__builtin_add_overflow(mcycle_start, mcycle_period, &mcycle_target)) {
             mcycle_target = UINT64_MAX;
         }
-        mcycle_phase = 0;
-        result.break_reason = interpret(a, mcycle_target);
-        const auto mcycle_end = read_reg(reg::mcycle);
+        next_result.break_reason = interpret(a, mcycle_target);
         for (const uint64_t paddr_page : context.dirty_pages) {
             // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
             auto &ar = const_cast<address_range &>(find_address_range(paddr_page, HASH_TREE_PAGE_SIZE));
@@ -2014,25 +2030,36 @@ void machine::collect_mcycle_root_hashes(uint64_t mcycle_phase, uint64_t mcycle_
             throw std::runtime_error{"update hash tree failed"};
         }
         context.dirty_pages.clear();
+        const auto mcycle_end = read_reg(reg::mcycle);
         // If the machine stopped before we asked, we are done
-        if (mcycle_end != mcycle_target) {
+        if (mcycle_end != mcycle_target || mcycle_target == UINT64_MAX) {
             break;
         }
-        result.hashes.emplace_back(m_ht.get_root_hash());
+        next_result.hashes.emplace_back(m_ht.get_root_hash());
         mcycle_start = mcycle_end;
+        // If the machine stopped before we asked, we are done
+        if (next_result.break_reason != interpreter_break_reason::reached_target_mcycle) {
+            break;
+        }
     }
-    result.mcycle_phase = read_reg(reg::mcycle) - mcycle_start;
+    next_result.mcycle_phase = read_reg(reg::mcycle) - mcycle_start;
+    // Commit new result
+    std::swap(next_result, result);
 }
 
 void machine::collect_uarch_cycle_root_hashes(uint64_t mcycle_count, uarch_cycle_root_hashes &result) {
-    result.hashes.clear();
-    result.reset_indices.clear();
+    uarch_cycle_root_hashes next_result;
+    std::swap(next_result, result);
     if (read_reg(reg::iunrep) != 0) {
         throw std::runtime_error{"cannot collect hashes from unreproducible machines"};
     }
     if (m_r.soft_yield) {
         throw std::runtime_error{"cannot collect hashes when soft yield is enabled"};
     }
+    if (m_c.hash_tree.hash_function != hash_function_type::keccak256) {
+        throw std::runtime_error{
+            "microarchitecture can only be used with hash tree configured with Keccak-256 hash function"};
+    }
     if (!update_hash_tree()) {
         throw std::runtime_error{"update hash tree failed"};
     }
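
Taken together, the two hunks above replace the old "zero the phase after the first period" bookkeeping with a single aligned start (mcycle - mcycle_phase) and a saturating step of one whole period per iteration. A self-contained sketch of that arithmetic, under a hypothetical helper name (mcycle_targets is mine, not the emulator's):

#include <cstdint>
#include <vector>

// List the target mcycles the rewritten loop aims for, starting from `mcycle`
// with the given phase and period. The saturating add mirrors the
// __builtin_add_overflow/UINT64_MAX handling in the diff (GCC/Clang builtin).
std::vector<uint64_t> mcycle_targets(uint64_t mcycle, uint64_t mcycle_phase, uint64_t mcycle_period,
    uint64_t period_count) {
    std::vector<uint64_t> targets;
    uint64_t mcycle_start = mcycle - mcycle_phase; // phase is subtracted once, up front
    for (uint64_t period = 0; period < period_count; ++period) {
        uint64_t mcycle_target{};
        if (__builtin_add_overflow(mcycle_start, mcycle_period, &mcycle_target)) {
            mcycle_target = UINT64_MAX; // saturate instead of wrapping around
        }
        targets.push_back(mcycle_target);
        if (mcycle_target == UINT64_MAX) {
            break; // counter exhausted; the real loop also stops here
        }
        mcycle_start = mcycle_target;
    }
    return targets;
}

// Example: mcycle = 1000, mcycle_phase = 3, mcycle_period = 10
// yields targets 1007, 1017, 1027, ..., all congruent to 997 modulo 10.
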
@@ -2041,18 +2068,48 @@ void machine::collect_uarch_cycle_root_hashes(uint64_t mcycle_count, uarch_cycle
     if (current_uarch_state_hash != get_uarch_pristine_state_hash()) {
         throw std::runtime_error{"microarchitecture is not reset"};
     }
+    next_result.hashes.clear();
+    next_result.reset_indices.clear();
+    // Check halted and yielded break reasons first to behave with same priority as the interpreter
+    if (read_reg(reg::iflags_H) != 0) {
+        next_result.break_reason = interpreter_break_reason::halted;
+        std::swap(next_result, result);
+        return;
+    }
+    if (read_reg(reg::iflags_Y) != 0) {
+        next_result.break_reason = interpreter_break_reason::yielded_manually;
+        std::swap(next_result, result);
+        return;
+    }
+    next_result.hashes.reserve(mcycle_count * 512);
+    next_result.reset_indices.reserve(mcycle_count);
+    next_result.break_reason = interpreter_break_reason::reached_target_mcycle;
     collect_uarch_cycle_hashes_state_access::context context{};
     context.dirty_words.reserve(8);
-    result.hashes.reserve(mcycle_count * 512);
-    result.reset_indices.reserve(mcycle_count);
     const collect_uarch_cycle_hashes_state_access a(context, *this);
     os_silence_putchar(m_r.htif.no_console_putchar);
-    auto mcycle_start = read_reg(reg::mcycle);
     for (uint64_t mcycles = 0; mcycles < mcycle_count; ++mcycles) {
+        // If the machine stopped before we asked, we are done
+        if (read_reg(reg::iflags_H) != 0) {
+            next_result.break_reason = interpreter_break_reason::halted;
+            break;
+        }
+        if (read_reg(reg::iflags_Y) != 0) {
+            next_result.break_reason = interpreter_break_reason::yielded_manually;
+            break;
+        }
+        if (read_reg(reg::iflags_X) != 0 && mcycles > 0) {
+            next_result.break_reason = interpreter_break_reason::yielded_automatically;
+            break;
+        }
+        uint64_t mcycle_target{};
+        if (__builtin_add_overflow(read_reg(reg::mcycle), 1, &mcycle_target)) {
+            break;
+        }
         auto uarch_cycle_start = read_reg(reg::uarch_cycle);
         // Add one hash after each uarch cycle
         for (;;) {
-            uint64_t uarch_cycle_target = 0;
+            uint64_t uarch_cycle_target{};
             if (__builtin_add_overflow(uarch_cycle_start, 1, &uarch_cycle_target)) {
                 break;
             }
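
The checks added at the top of this loop reproduce the interpreter's priority order: a halted machine wins over a manual yield, which wins over an automatic yield, and the X flag is only honored after the first iteration so a yield left pending by the caller does not abort the run before any hash is collected. A standalone sketch of that ordering (the enum is redeclared locally for illustration, and the fallback value matches the default preset before the loop):

#include <cstdint>

// Redeclared for the sketch; the emulator defines its own interpreter_break_reason.
enum class break_reason_sketch { reached_target_mcycle, halted, yielded_manually, yielded_automatically };

// Same priority as the flag checks above: H before Y before X,
// with X ignored on the first iteration (mcycles == 0).
break_reason_sketch classify(bool iflags_H, bool iflags_Y, bool iflags_X, uint64_t mcycles) {
    if (iflags_H) {
        return break_reason_sketch::halted;
    }
    if (iflags_Y) {
        return break_reason_sketch::yielded_manually;
    }
    if (iflags_X && mcycles > 0) {
        return break_reason_sketch::yielded_automatically;
    }
    return break_reason_sketch::reached_target_mcycle;
}
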
@@ -2065,29 +2122,24 @@ void machine::collect_uarch_cycle_root_hashes(uint64_t mcycle_count, uarch_cycle
             if (uarch_cycle_end != uarch_cycle_target) {
                 break;
             }
-            result.hashes.emplace_back(m_ht.get_root_hash());
+            next_result.hashes.emplace_back(m_ht.get_root_hash());
             uarch_cycle_start = uarch_cycle_end;
         }
-        // Add one hash after the uarch reset, and the index where it happened
         //??D maybe optimize this?
         reset_uarch();
         if (!update_hash_tree()) {
             throw std::runtime_error{"update hash tree failed"};
         }
-        result.reset_indices.emplace_back(result.hashes.size());
-        result.hashes.emplace_back(m_ht.get_root_hash());
-    }
-    // Now check why we stopped
-    auto mcycle_end = read_reg(reg::mcycle);
-    if (mcycle_end - mcycle_start == mcycle_count) {
-        result.break_reason = interpreter_break_reason::reached_target_mcycle;
-    } else if (read_reg(reg::iflags_H) != 0) {
-        result.break_reason = interpreter_break_reason::halted;
-    } else if (read_reg(reg::iflags_Y) != 0) {
-        result.break_reason = interpreter_break_reason::yielded_manually;
-    } else if (read_reg(reg::iflags_X) != 0) {
-        result.break_reason = interpreter_break_reason::yielded_automatically;
+        const auto mcycle_end = read_reg(reg::mcycle);
+        if (mcycle_end != mcycle_target) {
+            throw std::runtime_error{"machine did not reach the expected target mcycle"};
+        }
+        // Add one hash after the uarch reset, and the index where it happened
+        next_result.reset_indices.emplace_back(next_result.hashes.size());
+        next_result.hashes.emplace_back(m_ht.get_root_hash());
     }
+    // Commit new result
+    std::swap(next_result, result);
 }
 
 } // namespace cartesi
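
For context, a hypothetical caller of the revised API could look like the sketch below. Only the method signature, the result fields, and the break reasons come from the diff above; the include path and the machine instance m are assumptions.

#include "machine.h" // assumed include path

void collect_example(cartesi::machine &m) {
    cartesi::mcycle_root_hashes result;
    // Collect one root hash per 16-mcycle period, for 8 periods; the machine is
    // assumed to be 3 mcycles past a period boundary.
    m.collect_mcycle_root_hashes(/*mcycle_phase=*/3, /*mcycle_period=*/16, /*period_count=*/8, result);
    switch (result.break_reason) {
        case cartesi::interpreter_break_reason::reached_target_mcycle:
            // result.hashes holds one root hash per completed period
            break;
        case cartesi::interpreter_break_reason::halted:
        case cartesi::interpreter_break_reason::yielded_manually:
            // the machine stopped early; result.hashes may be shorter than period_count,
            // and result.mcycle_phase reports the phase reached within the last period
            break;
        default:
            break;
    }
}
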
