Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,7 @@
[submodule "extern/test-vectors"]
path = extern/test-vectors
url = https://github.com/filecoin-project/test-vectors.git
[submodule "extern/ref-fvm"]
path = extern/ref-fvm
url = [email protected]:filecoin-project/ref-fvm.git
branch = multistage-execution
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@

# UNRELEASED

- feat(consensus): wire tipset gas reservations and reservation-aware mempool pre-pack to activate at network version 28 (UpgradeXxHeight), keeping receipts and gas accounting identical while preventing miner penalties from underfunded intra-tipset messages

## 👌 Improvements
- docs: fix outdated link in documentation ([#13436](https://github.com/filecoin-project/lotus/pull/13436))
- docs: fix dead link in documentation ([#13437](https://github.com/filecoin-project/lotus/pull/13437))
Expand Down Expand Up @@ -97,6 +99,7 @@ For the set of changes since the last stable release:
| asamuj | 1 | +1/-1 | 1 |
| spuradage | 1 | +0/-1 | 1 |

# Node and Miner v1.34.1 / 2025-09-15

This is a non-critical patch release that fixes an issue with the Lotus `v1.34.0` release where the incorrect version of filecoin-ffi was included. Lotus `v1.34.0` used filecoin-ffi `v1.34.0-dev` when it should have used `v1.34.0`. This isn’t critical since it’s the same filecoin-ffi version used during the nv27 Calibration network upgrade, but for consistency with other Node implementations like Forest, we are creating this release. This ensures the inclusion of ref-fvm `v4.7.3` update that was missing in v1.34.0. All users of v1.34.0 are encouraged to upgrade to v1.34.1.
Expand Down
42 changes: 40 additions & 2 deletions chain/consensus/compute_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (
"go.opencensus.io/trace"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
amt4 "github.com/filecoin-project/go-amt-ipld/v4"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
Expand Down Expand Up @@ -196,6 +197,10 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
}
}

// Network version at the execution epoch, used for reservations activation
// and gating.
nv := sm.GetNetworkVersion(ctx, epoch)

vmEarlyDuration := partDone()
earlyCronGas := cronGas
cronGas = 0
Expand All @@ -206,24 +211,46 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err)
}

// Start a tipset reservation session around explicit messages. A deferred
// call ensures the session is closed on all paths, while the explicit call
// before cron keeps the session scope limited to explicit messages.
if err := startReservations(ctx, vmi, bms, nv); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("starting tipset reservations: %w", err)
}
defer func() {
if err := endReservations(ctx, vmi, nv); err != nil {
log.Warnw("ending tipset reservations failed", "error", err)
}
}()

var (
receipts []*types.MessageReceipt
storingEvents = sm.ChainStore().IsStoringEvents()
events [][]types.Event
processedMsgs = make(map[cid.Cid]struct{})
seenSenders = make(map[address.Address]struct{}) // Strict Sender Partitioning
)

var msgGas int64

for _, b := range bms {
penalty := types.NewInt(0)
gasReward := big.Zero()
currentBlockSenders := make(map[address.Address]struct{})

for _, cm := range append(b.BlsMessages, b.SecpkMessages...) {
m := cm.VMMessage()
if _, found := processedMsgs[m.Cid()]; found {
continue
}

// Strict Sender Partitioning:
// If this sender was seen in a previous block (higher precedence),
// ignore this message completely.
if _, found := seenSenders[m.From]; found {
continue
}

r, err := vmi.ApplyMessage(ctx, cm)
if err != nil {
return cid.Undef, cid.Undef, err
Expand All @@ -246,10 +273,15 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
}
}
processedMsgs[m.Cid()] = struct{}{}
currentBlockSenders[m.From] = struct{}{}
}

params := &reward.AwardBlockRewardParams{
Miner: b.Miner,
// After processing the block, mark its senders as seen for future blocks.
for s := range currentBlockSenders {
seenSenders[s] = struct{}{}
}

params := &reward.AwardBlockRewardParams{Miner: b.Miner,
Copy link

Copilot AI Dec 9, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

[nitpick] The struct initialization formatting is inconsistent. The opening brace should be on the same line as the field list, or all fields should be on separate lines with proper indentation.

Suggested change
params := &reward.AwardBlockRewardParams{Miner: b.Miner,
params := &reward.AwardBlockRewardParams{
Miner: b.Miner,

Copilot uses AI. Check for mistakes.
Penalty: penalty,
GasReward: gasReward,
WinCount: b.WinCount,
Expand All @@ -260,6 +292,12 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
}
}

// End the reservation session before running cron so that reservations
// strictly cover explicit messages only.
if err := endReservations(ctx, vmi, nv); err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("ending tipset reservations: %w", err)
}

vmMsgDuration := partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyCron)

Expand Down
53 changes: 53 additions & 0 deletions chain/consensus/features.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
package consensus

import "os"

// ReservationFeatureFlags holds feature toggles for tipset gas reservations.
//
// These flags are evaluated by consensus and the message pool when deciding
// whether to attempt tipset-scope reservations pre-activation, and how to
// interpret Begin/End reservation errors. They are exposed through the
// package-level Feature variable.
type ReservationFeatureFlags struct {
	// MultiStageReservations enables tipset-scope gas reservations
	// pre-activation. When false, ReservationsEnabled returns false for
	// network versions before ReservationsActivationNetworkVersion and Lotus
	// operates in legacy mode (no Begin/End calls).
	//
	// At or after activation, reservations are always enabled regardless of
	// this flag.
	MultiStageReservations bool

	// MultiStageReservationsStrict controls how pre-activation reservation
	// failures are handled when MultiStageReservations is true:
	//
	//   - When false (non-strict), non-NotImplemented Begin/End errors such
	//     as ErrReservationsInsufficientFunds and ErrReservationsPlanTooLarge
	//     are treated as best-effort: Lotus logs and falls back to legacy
	//     mode for that tipset.
	//   - When true (strict), those reservation failures invalidate the
	//     tipset pre-activation. Node-error classes (e.g. overflow or
	//     invariant violations) always surface as errors regardless of this
	//     flag.
	MultiStageReservationsStrict bool
}

// Feature exposes the current reservation feature flags.
//
// Defaults (evaluated once at package initialization):
//   - MultiStageReservations: enabled when LOTUS_ENABLE_TIPSET_RESERVATIONS=1.
//   - MultiStageReservationsStrict: enabled when
//     LOTUS_ENABLE_TIPSET_RESERVATIONS_STRICT=1.
//
// These defaults preserve the existing environment-based gating while making
// the flags explicit and testable.
//
// NOTE(review): Feature is plain mutable package-level state with no
// synchronization; a SetFeatures call concurrent with readers would be a data
// race — confirm SetFeatures only runs during single-threaded startup.
var Feature = ReservationFeatureFlags{
	MultiStageReservations:       os.Getenv("LOTUS_ENABLE_TIPSET_RESERVATIONS") == "1",
	MultiStageReservationsStrict: os.Getenv("LOTUS_ENABLE_TIPSET_RESERVATIONS_STRICT") == "1",
}

// SetFeatures overrides the global reservation feature flags. This is intended
// for wiring from higher-level configuration and for tests; callers should
// treat it as process-wide and set it once during initialization.
//
// The assignment is unsynchronized, so it must complete before any goroutine
// starts reading Feature.
func SetFeatures(flags ReservationFeatureFlags) {
	Feature = flags
}
180 changes: 180 additions & 0 deletions chain/consensus/reservations.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,180 @@
package consensus

import (
"context"
"errors"

"go.opencensus.io/stats"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"

"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/metrics"
)

// rlog is the package logger for tipset gas reservation handling.
var rlog = logging.Logger("reservations")

// ReservationsEnabled reports whether tipset reservations should be attempted
// at the given network version. From the activation network version onward,
// reservations are mandatory and consensus-critical; before that they are
// opt-in via the MultiStageReservations feature flag.
func ReservationsEnabled(nv network.Version) bool {
	if nv < vm.ReservationsActivationNetworkVersion() {
		// Pre-activation: best-effort mode, gated by the feature flag.
		return Feature.MultiStageReservations
	}
	// At or after activation: always on, regardless of feature flags.
	return true
}

// buildReservationPlan aggregates per-sender gas reservations across the full
// tipset. The amount reserved for each message is gas_limit * gas_fee_cap.
//
// Strict Sender Partitioning:
// If a sender has already been seen in a previous block (higher precedence),
// their messages in subsequent blocks are strictly ignored for reservation
// purposes. This aligns with the execution logic in ApplyBlocks and prevents
// valid individual blocks from forming an invalid tipset due to aggregate
// over-reservation.
func buildReservationPlan(bms []FilecoinBlockMessages) map[address.Address]abi.TokenAmount {
	plan := make(map[address.Address]abi.TokenAmount)
	seenCIDs := make(map[cid.Cid]struct{})
	seenSenders := make(map[address.Address]struct{})

	for _, b := range bms {
		currentBlockSenders := make(map[address.Address]struct{})

		// Canonical order is BLS messages first, then secp messages. Iterate
		// the two slices back-to-back instead of append(bls, secpk...): that
		// append can write the secp messages into spare capacity of the BLS
		// slice's backing array, mutating a slice this function does not own.
		for _, msgs := range [][]types.ChainMsg{b.BlsMessages, b.SecpkMessages} {
			for _, cm := range msgs {
				m := cm.VMMessage()
				mcid := m.Cid()

				// 1. Deduplicate by CID (legacy rule).
				if _, ok := seenCIDs[mcid]; ok {
					continue
				}

				// 2. Strict Sender Partitioning.
				// If this sender was seen in a previous block, ignore this message.
				// We do NOT check currentBlockSenders here because multiple messages
				// from the same sender in the *same* block are allowed.
				if _, ok := seenSenders[m.From]; ok {
					continue
				}

				seenCIDs[mcid] = struct{}{}
				currentBlockSenders[m.From] = struct{}{}

				// Only explicit messages are included in blocks; implicit messages
				// are applied separately.
				cost := types.BigMul(types.NewInt(uint64(m.GasLimit)), m.GasFeeCap)
				if prev, ok := plan[m.From]; ok {
					plan[m.From] = types.BigAdd(prev, cost)
				} else {
					plan[m.From] = cost
				}
			}
		}

		// After finishing the block, mark all its senders as seen for
		// subsequent (lower-precedence) blocks.
		for s := range currentBlockSenders {
			seenSenders[s] = struct{}{}
		}
	}
	return plan
}

// startReservations begins a tipset reservation session on the VM when
// reservations are enabled for the given network version. When the computed
// plan is empty (no explicit messages in the tipset), Begin is skipped
// entirely and no session is opened.
func startReservations(ctx context.Context, vmi vm.Interface, bms []FilecoinBlockMessages, nv network.Version) error {
	if !ReservationsEnabled(nv) {
		return nil
	}

	plan := buildReservationPlan(bms)
	if len(plan) == 0 {
		rlog.Debugw("skipping tipset reservations for empty plan")
		return nil
	}

	// Sum per-sender reservations for metrics and logging.
	total := abi.NewTokenAmount(0)
	for _, amt := range plan {
		total = big.Add(total, amt)
	}

	// NOTE(review): total is a big integer (attoFIL scale); Int64() truncates
	// values outside the int64 range. Likely acceptable for a metric, but
	// worth confirming.
	stats.Record(ctx,
		metrics.ReservationPlanSenders.M(int64(len(plan))),
		metrics.ReservationPlanTotal.M(total.Int64()),
	)

	rlog.Infow("starting tipset reservations", "senders", len(plan), "total", total)
	err := vmi.StartTipsetReservations(ctx, plan)
	if err == nil {
		return nil
	}
	return handleReservationError("begin", err, nv)
}

// endReservations closes the active reservation session on the VM, if
// reservations are enabled for the given network version.
func endReservations(ctx context.Context, vmi vm.Interface, nv network.Version) error {
	if !ReservationsEnabled(nv) {
		return nil
	}
	err := vmi.EndTipsetReservations(ctx)
	if err == nil {
		return nil
	}
	return handleReservationError("end", err, nv)
}

// handleReservationError interprets Begin/End reservation errors according to
// network version and feature flags, deciding whether to fall back to legacy
// mode (pre-activation, non-strict) or surface the error to the caller.
func handleReservationError(stage string, err error, nv network.Version) error {
	if err == nil {
		return nil
	}

	// Post-activation, reservations are consensus-critical: every Begin/End
	// error surfaces unchanged. ErrReservationsNotImplemented becomes a node
	// error (engine too old) under active rules.
	if nv >= vm.ReservationsActivationNetworkVersion() {
		return err
	}

	// Pre-activation, a NotImplemented engine is a benign signal that
	// reservations are unsupported; fall back to legacy mode regardless of
	// strictness.
	if errors.Is(err, vm.ErrReservationsNotImplemented) {
		rlog.Debugw("tipset reservations not implemented; continuing in legacy mode",
			"stage", stage, "error", err)
		return nil
	}

	// Node-error classes always surface, even pre-activation.
	nodeErrs := []error{
		vm.ErrReservationsSessionOpen,
		vm.ErrReservationsSessionClosed,
		vm.ErrReservationsNonZeroRemainder,
		vm.ErrReservationsOverflow,
		vm.ErrReservationsInvariantViolation,
	}
	for _, sentinel := range nodeErrs {
		if errors.Is(err, sentinel) {
			return err
		}
	}

	// Reservation failures whose handling is toggled by strict mode: in
	// strict mode they invalidate the tipset; otherwise they are best-effort
	// pre-activation and we fall back to legacy mode.
	if errors.Is(err, vm.ErrReservationsInsufficientFunds) || errors.Is(err, vm.ErrReservationsPlanTooLarge) {
		if Feature.MultiStageReservationsStrict {
			return err
		}
		rlog.Debugw("tipset reservations failed pre-activation; continuing in legacy mode (non-strict)",
			"stage", stage, "error", err)
		return nil
	}

	// Unknown errors pre-activation are treated as node errors.
	return err
}
Loading
Loading