@@ -1967,13 +1967,17 @@ void machine::write_counter(uint64_t val, const char *name, const char *domain)
 
 void machine::collect_mcycle_root_hashes(uint64_t mcycle_phase, uint64_t mcycle_period, uint64_t period_count,
     mcycle_root_hashes &result) {
-    result.hashes.clear();
+    mcycle_root_hashes next_result;
+    std::swap(next_result, result);
     if (mcycle_period == 0) {
         throw std::runtime_error{"mcycle_period cannot be 0"};
     }
     if (mcycle_phase >= mcycle_period) {
         throw std::runtime_error{"mcycle_phase must be in {0, ..., mcycle_period-1}"};
     }
+    if (mcycle_phase > read_reg(reg::mcycle)) {
+        throw std::runtime_error{"mcycle_phase cannot be greater than machine mcycle"};
+    }
     if (read_reg(reg::iunrep) != 0) {
         throw std::runtime_error{"cannot collect hashes from unreproducible machines"};
     }
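For context (not part of the patch): together with the `mcycle_start = read_reg(reg::mcycle) - mcycle_phase` change in the next hunk, these checks anchor hash collection to a fixed grid of mcycle values spaced `mcycle_period` apart. A minimal standalone sketch of the targets the rewritten loop aims for, using the same saturating-overflow convention; `sample_targets` is a hypothetical helper written only for illustration:

```cpp
#include <cstdint>
#include <stdexcept>
#include <vector>

// Hypothetical helper, for illustration only: the mcycle values at which
// collect_mcycle_root_hashes() samples a root hash, given the machine's
// current mcycle and the requested phase, period, and period count.
std::vector<uint64_t> sample_targets(uint64_t mcycle, uint64_t mcycle_phase, uint64_t mcycle_period,
    uint64_t period_count) {
    if (mcycle_period == 0) {
        throw std::runtime_error{"mcycle_period cannot be 0"};
    }
    if (mcycle_phase >= mcycle_period) {
        throw std::runtime_error{"mcycle_phase must be in {0, ..., mcycle_period-1}"};
    }
    if (mcycle_phase > mcycle) {
        throw std::runtime_error{"mcycle_phase cannot be greater than machine mcycle"};
    }
    std::vector<uint64_t> targets;
    // Most recent cycle on the sampling grid; the patched loop starts here
    // instead of at the current mcycle.
    uint64_t start = mcycle - mcycle_phase;
    for (uint64_t period = 0; period < period_count; ++period) {
        uint64_t target{};
        if (__builtin_add_overflow(start, mcycle_period, &target)) {
            target = UINT64_MAX; // saturate; the patched loop stops once the target saturates
        }
        targets.push_back(target);
        if (target == UINT64_MAX) {
            break;
        }
        start = target;
    }
    return targets;
}
```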
@@ -1991,20 +1995,32 @@ void machine::collect_mcycle_root_hashes(uint64_t mcycle_phase, uint64_t mcycle_
             throw std::runtime_error{"microarchitecture is not reset"};
         }
     }
+    next_result.hashes.clear();
+    next_result.mcycle_phase = mcycle_phase;
+    // Check halted and yielded break reasons first to behave with same priority as the interpreter
+    if (read_reg(reg::iflags_H) != 0) {
+        next_result.break_reason = interpreter_break_reason::halted;
+        std::swap(next_result, result);
+        return;
+    }
+    if (read_reg(reg::iflags_Y) != 0) {
+        next_result.break_reason = interpreter_break_reason::yielded_manually;
+        std::swap(next_result, result);
+        return;
+    }
+    next_result.hashes.reserve(mcycle_period);
+    next_result.break_reason = interpreter_break_reason::reached_target_mcycle;
     collect_mcycle_hashes_state_access::context context{};
     context.dirty_pages.reserve(std::clamp<uint64_t>(mcycle_period * 4, 16, 4096));
-    result.hashes.reserve(mcycle_period);
     const collect_mcycle_hashes_state_access a(context, *this);
     os_silence_putchar(m_r.htif.no_console_putchar);
-    auto mcycle_start = read_reg(reg::mcycle);
+    auto mcycle_start = read_reg(reg::mcycle) - mcycle_phase;
     for (uint64_t period = 0; period < period_count; ++period) {
         uint64_t mcycle_target{};
-        if (__builtin_add_overflow(mcycle_start, mcycle_period - mcycle_phase, &mcycle_target)) {
+        if (__builtin_add_overflow(mcycle_start, mcycle_period, &mcycle_target)) {
             mcycle_target = UINT64_MAX;
         }
-        mcycle_phase = 0;
-        result.break_reason = interpret(a, mcycle_target);
-        const auto mcycle_end = read_reg(reg::mcycle);
+        next_result.break_reason = interpret(a, mcycle_target);
         for (const uint64_t paddr_page : context.dirty_pages) {
             // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
             auto &ar = const_cast<address_range &>(find_address_range(paddr_page, HASH_TREE_PAGE_SIZE));
@@ -2014,25 +2030,36 @@ void machine::collect_mcycle_root_hashes(uint64_t mcycle_phase, uint64_t mcycle_
             throw std::runtime_error{"update hash tree failed"};
         }
         context.dirty_pages.clear();
+        const auto mcycle_end = read_reg(reg::mcycle);
         // If the machine stopped before we asked, we are done
-        if (mcycle_end != mcycle_target) {
+        if (mcycle_end != mcycle_target || mcycle_target == UINT64_MAX) {
             break;
         }
-        result.hashes.emplace_back(m_ht.get_root_hash());
+        next_result.hashes.emplace_back(m_ht.get_root_hash());
         mcycle_start = mcycle_end;
+        // If the machine stopped before we asked, we are done
+        if (next_result.break_reason != interpreter_break_reason::reached_target_mcycle) {
+            break;
+        }
     }
-    result.mcycle_phase = read_reg(reg::mcycle) - mcycle_start;
+    next_result.mcycle_phase = read_reg(reg::mcycle) - mcycle_start;
+    // Commit new result
+    std::swap(next_result, result);
 }
 
 void machine::collect_uarch_cycle_root_hashes(uint64_t mcycle_count, uarch_cycle_root_hashes &result) {
-    result.hashes.clear();
-    result.reset_indices.clear();
+    uarch_cycle_root_hashes next_result;
+    std::swap(next_result, result);
     if (read_reg(reg::iunrep) != 0) {
         throw std::runtime_error{"cannot collect hashes from unreproducible machines"};
     }
     if (m_r.soft_yield) {
         throw std::runtime_error{"cannot collect hashes when soft yield is enabled"};
     }
+    if (m_c.hash_tree.hash_function != hash_function_type::keccak256) {
+        throw std::runtime_error{
+            "microarchitecture can only be used with hash tree configured with Keccak-256 hash function"};
+    }
     if (!update_hash_tree()) {
         throw std::runtime_error{"update hash tree failed"};
     }
@@ -2041,18 +2068,48 @@ void machine::collect_uarch_cycle_root_hashes(uint64_t mcycle_count, uarch_cycle
     if (current_uarch_state_hash != get_uarch_pristine_state_hash()) {
         throw std::runtime_error{"microarchitecture is not reset"};
     }
+    next_result.hashes.clear();
+    next_result.reset_indices.clear();
+    // Check halted and yielded break reasons first to behave with same priority as the interpreter
+    if (read_reg(reg::iflags_H) != 0) {
+        next_result.break_reason = interpreter_break_reason::halted;
+        std::swap(next_result, result);
+        return;
+    }
+    if (read_reg(reg::iflags_Y) != 0) {
+        next_result.break_reason = interpreter_break_reason::yielded_manually;
+        std::swap(next_result, result);
+        return;
+    }
+    next_result.hashes.reserve(mcycle_count * 512);
+    next_result.reset_indices.reserve(mcycle_count);
+    next_result.break_reason = interpreter_break_reason::reached_target_mcycle;
     collect_uarch_cycle_hashes_state_access::context context{};
     context.dirty_words.reserve(8);
-    result.hashes.reserve(mcycle_count * 512);
-    result.reset_indices.reserve(mcycle_count);
     const collect_uarch_cycle_hashes_state_access a(context, *this);
     os_silence_putchar(m_r.htif.no_console_putchar);
-    auto mcycle_start = read_reg(reg::mcycle);
     for (uint64_t mcycles = 0; mcycles < mcycle_count; ++mcycles) {
+        // If the machine stopped before we asked, we are done
+        if (read_reg(reg::iflags_H) != 0) {
+            next_result.break_reason = interpreter_break_reason::halted;
+            break;
+        }
+        if (read_reg(reg::iflags_Y) != 0) {
+            next_result.break_reason = interpreter_break_reason::yielded_manually;
+            break;
+        }
+        if (read_reg(reg::iflags_X) != 0 && mcycles > 0) {
+            next_result.break_reason = interpreter_break_reason::yielded_automatically;
+            break;
+        }
+        uint64_t mcycle_target{};
+        if (__builtin_add_overflow(read_reg(reg::mcycle), 1, &mcycle_target)) {
+            break;
+        }
         auto uarch_cycle_start = read_reg(reg::uarch_cycle);
         // Add one hash after each uarch cycle
         for (;;) {
-            uint64_t uarch_cycle_target = 0;
+            uint64_t uarch_cycle_target{};
             if (__builtin_add_overflow(uarch_cycle_start, 1, &uarch_cycle_target)) {
                 break;
             }
@@ -2065,29 +2122,24 @@ void machine::collect_uarch_cycle_root_hashes(uint64_t mcycle_count, uarch_cycle
             if (uarch_cycle_end != uarch_cycle_target) {
                 break;
             }
-            result.hashes.emplace_back(m_ht.get_root_hash());
+            next_result.hashes.emplace_back(m_ht.get_root_hash());
             uarch_cycle_start = uarch_cycle_end;
         }
-        // Add one hash after the uarch reset, and the index where it happened
         // ??D maybe optimize this?
         reset_uarch();
         if (!update_hash_tree()) {
             throw std::runtime_error{"update hash tree failed"};
         }
-        result.reset_indices.emplace_back(result.hashes.size());
-        result.hashes.emplace_back(m_ht.get_root_hash());
-    }
-    // Now check why we stopped
-    auto mcycle_end = read_reg(reg::mcycle);
-    if (mcycle_end - mcycle_start == mcycle_count) {
-        result.break_reason = interpreter_break_reason::reached_target_mcycle;
-    } else if (read_reg(reg::iflags_H) != 0) {
-        result.break_reason = interpreter_break_reason::halted;
-    } else if (read_reg(reg::iflags_Y) != 0) {
-        result.break_reason = interpreter_break_reason::yielded_manually;
-    } else if (read_reg(reg::iflags_X) != 0) {
-        result.break_reason = interpreter_break_reason::yielded_automatically;
+        const auto mcycle_end = read_reg(reg::mcycle);
+        if (mcycle_end != mcycle_target) {
+            throw std::runtime_error{"machine did not reach the expected target mcycle"};
+        }
+        // Add one hash after the uarch reset, and the index where it happened
+        next_result.reset_indices.emplace_back(next_result.hashes.size());
+        next_result.hashes.emplace_back(m_ht.get_root_hash());
     }
+    // Commit new result
+    std::swap(next_result, result);
 }
 
 } // namespace cartesi
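For context (not part of the patch): both collectors now stage their output the same way. The caller's `result` is swapped with a default-constructed `next_result` up front, every hash, index, and break reason is accumulated in `next_result`, and a final `std::swap` commits it back (also before each early return), so an exception thrown mid-collection leaves `result` empty instead of stale or partially updated. A minimal standalone sketch of that build-then-swap pattern, with a hypothetical `do_work()` standing in for the real collection loop:

```cpp
#include <utility>
#include <vector>

struct hashes_result {
    std::vector<int> hashes; // placeholder element type, for illustration only
};

// Hypothetical stand-in for the real collection work; it may throw partway through.
void do_work(hashes_result &out) {
    out.hashes.push_back(1);
    out.hashes.push_back(2);
    // ... a later step could throw here ...
}

void collect(hashes_result &result) {
    hashes_result next_result;
    std::swap(next_result, result); // the caller's result is emptied immediately
    do_work(next_result);           // on throw, result stays empty rather than half-filled
    std::swap(next_result, result); // commit only after everything succeeded
}

int main() {
    hashes_result result;
    collect(result);
    return static_cast<int>(result.hashes.size()); // 2
}
```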