Merged · Changes from 3 commits
8 changes: 4 additions & 4 deletions Makefile
@@ -121,17 +121,17 @@ endif
 .PHONY: test-ci
 test-ci:
 ifdef cover
-	$(GO) test -run "[^FLAKY]$$" -coverprofile=cover.out ./...
+	$(GO) test -run "[^FLAKY]$$" -coverprofile=cover.out ./pkg/storageincentives
Copilot AI commented on Oct 29, 2025:
The test-ci and test-ci-race targets have been changed from testing all packages (./...) to only testing ./pkg/storageincentives. This significantly reduces test coverage in CI and means tests in other packages (e.g., pkg/accounting, pkg/api, pkg/storer, etc.) will not run during CI. If this is intentional for testing the synctest migration, consider adding a comment explaining this is temporary, or restore the original ./... pattern if all migrations are complete.

 else
-	$(GO) test -run "[^FLAKY]$$" ./...
+	$(GO) test -run "[^FLAKY]$$" ./pkg/storageincentives
 endif

 .PHONY: test-ci-race
 test-ci-race:
 ifdef cover
-	$(GO) test -race -run "[^FLAKY]$$" -coverprofile=cover.out ./...
+	$(GO) test -race -run "[^FLAKY]$$" -coverprofile=cover.out ./pkg/storageincentives
Copilot AI commented on Oct 29, 2025:
The test-ci-race target has been changed from testing all packages (./...) to only testing ./pkg/storageincentives. This significantly reduces test coverage in CI and means race detection will not run for tests in other packages. If this is intentional for testing the synctest migration, consider adding a comment explaining this is temporary, or restore the original ./... pattern if all migrations are complete.

 else
-	$(GO) test -race -run "[^FLAKY]$$" ./...
+	$(GO) test -race -run "[^FLAKY]$$" ./pkg/storageincentives
 endif

 .PHONY: test-ci-flaky
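Aside, not part of the diff: in a Makefile, $$ escapes to a single $, so the pattern handed to go test -run is [^FLAKY]$, which matches only test names whose final character is not one of F, L, A, K, Y; FLAKY-suffixed tests are therefore excluded. A minimal standalone Go sketch of the regex behaviour:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The Makefile's "[^FLAKY]$$" reaches go test as `[^FLAKY]$`:
	// match any name whose last character is not F, L, A, K or Y.
	re := regexp.MustCompile(`[^FLAKY]$`)
	fmt.Println(re.MatchString("TestClose"))        // true: ends in 'e', so the test runs
	fmt.Println(re.MatchString("TestSocMineFLAKY")) // false: ends in 'Y', so it is filtered out
}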
145 changes: 73 additions & 72 deletions pkg/storageincentives/events_test.go
@@ -7,87 +7,88 @@ package storageincentives_test
 import (
 	"context"
 	"testing"
+	"testing/synctest"
 	"time"
 
 	"github.com/ethersphere/bee/v2/pkg/storageincentives"
 )

 func TestClose(t *testing.T) {
 	t.Parallel()
 
-	ev := storageincentives.NewEvents()
-
-	done1 := make(chan struct{})
-	done2 := make(chan struct{})
-	done3 := make(chan struct{})
-
-	ev.On(1, func(ctx context.Context) {
-		<-ctx.Done()
-		close(done1)
-	})
-
-	ev.On(1, func(ctx context.Context) {
-		<-ctx.Done()
-		close(done2)
-	})
-
-	ev.On(2, func(ctx context.Context) {
-		<-ctx.Done()
-		close(done3)
-	})
-
-	ev.Publish(1)
-	ev.Publish(2)
-
-	ev.Close()
-
-	for range 3 {
-		select {
-		case <-done1:
-		case <-done2:
-		case <-done3:
-		case <-time.After(time.Second):
-			t.Fatal("timeout")
-		}
-	}
+	synctest.Test(t, func(t *testing.T) {
+		ev := storageincentives.NewEvents()
+
+		done1 := make(chan struct{})
+		done2 := make(chan struct{})
+		done3 := make(chan struct{})
+
+		ev.On(1, func(ctx context.Context) {
+			<-ctx.Done()
+			close(done1)
+		})
+
+		ev.On(1, func(ctx context.Context) {
+			<-ctx.Done()
+			close(done2)
+		})
+
+		ev.On(2, func(ctx context.Context) {
+			<-ctx.Done()
+			close(done3)
+		})
+
+		ev.Publish(1)
+		ev.Publish(2)
+
+		ev.Close()
+
+		for range 3 {
+			select {
+			case <-done1:
+			case <-done2:
+			case <-done3:
+			case <-time.After(time.Second):
+				t.Fatal("timeout")
+			}
+		}
+	})
 }

 func TestPhaseCancel(t *testing.T) {
 	t.Parallel()
 
-	ev := storageincentives.NewEvents()
-
-	done1 := make(chan struct{})
-	done2 := make(chan struct{})
-	defer ev.Close()
-
-	// ensure no panics occur on an empty publish
-	ev.Publish(0)
-
-	ev.On(1, func(ctx context.Context) {
-		<-ctx.Done()
-		close(done1)
-	})
-
-	ev.On(2, func(ctx context.Context) {
-		<-ctx.Done()
-		close(done2)
-	})
-
-	ev.On(3, func(ctx context.Context) {
-		ev.Cancel(1, 2)
-	})
-
-	ev.Publish(1)
-	ev.Publish(2)
-	ev.Publish(3)
-
-	for range 2 {
-		select {
-		case <-done1:
-		case <-done2:
-		case <-time.After(time.Second):
-			t.Fatal("timeout")
-		}
-	}
+	synctest.Test(t, func(t *testing.T) {
+		ev := storageincentives.NewEvents()
+
+		done1 := make(chan struct{})
+		done2 := make(chan struct{})
+		defer ev.Close()
+
+		// ensure no panics occur on an empty publish
+		ev.Publish(0)
+
+		ev.On(1, func(ctx context.Context) {
+			<-ctx.Done()
+			close(done1)
+		})
+
+		ev.On(2, func(ctx context.Context) {
+			<-ctx.Done()
+			close(done2)
+		})
+
+		ev.On(3, func(ctx context.Context) {
+			ev.Cancel(1, 2)
+		})
+
+		ev.Publish(1)
+		ev.Publish(2)
+		ev.Publish(3)
+
+		for range 2 {
+			select {
+			case <-done1:
+			case <-done2:
+			case <-time.After(time.Second):
+				t.Fatal("timeout")
+			}
+		}
+	})
 }
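For context on what the synctest.Test wrapper buys in the two tests above (a sketch assuming Go 1.25, where testing/synctest became stable; not part of this diff): inside the bubble, goroutines run against a fake clock, so the time.After(time.Second) timeout branches fire as soon as every goroutine in the bubble is durably blocked, rather than after a real second of wall time. A minimal illustration:

package example_test

import (
	"testing"
	"testing/synctest"
	"time"
)

func TestFakeClock(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		start := time.Now()
		// Once every goroutine in the bubble is durably blocked (here: on the
		// timer), the fake clock jumps forward instantly; no real time passes.
		<-time.After(time.Second)
		if got := time.Since(start); got != time.Second {
			t.Fatalf("fake clock advanced by %v, want exactly 1s", got)
		}
	})
}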
104 changes: 53 additions & 51 deletions pkg/storageincentives/soc_mine_test.go
@@ -14,6 +14,7 @@ import (
"os"
"sync"
"testing"
"testing/synctest"

"github.com/ethersphere/bee/v2/pkg/bmt"
"github.com/ethersphere/bee/v2/pkg/cac"
@@ -32,58 +33,59 @@
 // to generate uploads using the input
 // cat socs.txt | tail 19 | head 16 | perl -pne 's/([a-f0-9]+)\t([a-f0-9]+)\t([a-f0-9]+)\t([a-f0-9]+)/echo -n $4 | xxd -r -p | curl -X POST \"http:\/\/localhost:1633\/soc\/$1\/$2?sig=$3\" -H \"accept: application\/json, text\/plain, \/\" -H \"content-type: application\/octet-stream\" -H \"swarm-postage-batch-id: 14b26beca257e763609143c6b04c2c487f01a051798c535c2f542ce75a97c05f\" --data-binary \@-/'
 func TestSocMine(t *testing.T) {
 	t.Parallel()
-	// the anchor used in neighbourhood selection and reserve salt for sampling
-	prefix, err := hex.DecodeString("3617319a054d772f909f7c479a2cebe5066e836a939412e32403c99029b92eff")
-	if err != nil {
-		t.Fatal(err)
-	}
-	// the transformed address hasher factory function
-	prefixhasher := func() hash.Hash { return swarm.NewPrefixHasher(prefix) }
-	// Create a pool for efficient hasher reuse
-	trHasherPool := bmt.NewPool(bmt.NewConf(prefixhasher, swarm.BmtBranches, 8))
-	// the bignum cast of the maximum sample value (upper bound on transformed addresses as a 256-bit article)
-	// this constant is for a minimum reserve size of 2 million chunks with sample size of 16
-	// = 1.284401 * 10^71 = 1284401 + 66 0-s
-	mstring := "1284401"
-	for range 66 {
-		mstring = mstring + "0"
-	}
-	n, ok := new(big.Int).SetString(mstring, 10)
-	if !ok {
-		t.Fatalf("SetString: error setting to '%s'", mstring)
-	}
-	// the filter function on the SOC address
-	// meant to make sure we pass check for proof of retrievability for
-	// a node of overlay 0x65xxx with a reserve depth of 1, i.e.,
-	// SOC address must start with zero bit
-	filterSOCAddr := func(a swarm.Address) bool {
-		return a.Bytes()[0]&0x80 != 0x00
-	}
-	// the filter function on the transformed address using the density estimation constant
-	filterTrAddr := func(a swarm.Address) (bool, error) {
-		m := new(big.Int).SetBytes(a.Bytes())
-		return m.Cmp(n) < 0, nil
-	}
-	// setup the signer with a private key from a fixture
-	data, err := hex.DecodeString("634fb5a872396d9693e5c9f9d7233cfa93f395c093371017ff44aa9ae6564cdd")
-	if err != nil {
-		t.Fatal(err)
-	}
-	privKey, err := crypto.DecodeSecp256k1PrivateKey(data)
-	if err != nil {
-		t.Fatal(err)
-	}
-	signer := crypto.NewDefaultSigner(privKey)
-
-	sampleSize := 16
-	// for sanity check: given a filterSOCAddr requiring a 0 leading bit (chance of 1/2)
-	// we expect an overall rough 4 million chunks to be mined to create this sample
-	// for 8 workers that is half a million round on average per worker
-	err = makeChunks(t, signer, sampleSize, filterSOCAddr, filterTrAddr, trHasherPool)
-	if err != nil {
-		t.Fatal(err)
-	}
+	synctest.Test(t, func(t *testing.T) {
+		// the anchor used in neighbourhood selection and reserve salt for sampling
+		prefix, err := hex.DecodeString("3617319a054d772f909f7c479a2cebe5066e836a939412e32403c99029b92eff")
+		if err != nil {
+			t.Fatal(err)
+		}
+		// the transformed address hasher factory function
+		prefixhasher := func() hash.Hash { return swarm.NewPrefixHasher(prefix) }
+		// Create a pool for efficient hasher reuse
+		trHasherPool := bmt.NewPool(bmt.NewConf(prefixhasher, swarm.BmtBranches, 8))
+		// the bignum cast of the maximum sample value (upper bound on transformed addresses as a 256-bit article)
+		// this constant is for a minimum reserve size of 2 million chunks with sample size of 16
+		// = 1.284401 * 10^71 = 1284401 + 66 0-s
+		mstring := "1284401"
+		for range 66 {
+			mstring = mstring + "0"
+		}
+		n, ok := new(big.Int).SetString(mstring, 10)
+		if !ok {
+			t.Fatalf("SetString: error setting to '%s'", mstring)
+		}
+		// the filter function on the SOC address
+		// meant to make sure we pass check for proof of retrievability for
+		// a node of overlay 0x65xxx with a reserve depth of 1, i.e.,
+		// SOC address must start with zero bit
+		filterSOCAddr := func(a swarm.Address) bool {
+			return a.Bytes()[0]&0x80 != 0x00
+		}
+		// the filter function on the transformed address using the density estimation constant
+		filterTrAddr := func(a swarm.Address) (bool, error) {
+			m := new(big.Int).SetBytes(a.Bytes())
+			return m.Cmp(n) < 0, nil
+		}
+		// setup the signer with a private key from a fixture
+		data, err := hex.DecodeString("634fb5a872396d9693e5c9f9d7233cfa93f395c093371017ff44aa9ae6564cdd")
+		if err != nil {
+			t.Fatal(err)
+		}
+		privKey, err := crypto.DecodeSecp256k1PrivateKey(data)
+		if err != nil {
+			t.Fatal(err)
+		}
+		signer := crypto.NewDefaultSigner(privKey)
+
+		sampleSize := 16
+		// for sanity check: given a filterSOCAddr requiring a 0 leading bit (chance of 1/2)
+		// we expect an overall rough 4 million chunks to be mined to create this sample
+		// for 8 workers that is half a million round on average per worker
+		err = makeChunks(t, signer, sampleSize, filterSOCAddr, filterTrAddr, trHasherPool)
+		if err != nil {
+			t.Fatal(err)
+		}
+	})
 }

 func makeChunks(t *testing.T, signer crypto.Signer, sampleSize int, filterSOCAddr func(swarm.Address) bool, filterTrAddr func(swarm.Address) (bool, error), trHasherPool *bmt.Pool) error {
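Two small observations on the constants above, with a standalone sketch (not part of the PR). First, "1284401" followed by 66 zeros is a 73-digit number, roughly 1.284401 * 10^72, so the in-code comment's 10^71 appears to be off by one order of magnitude; building the bound with big.Int arithmetic makes that explicit. Second, the SOC-address filter reads only the most significant bit of the first byte:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Same bound the test builds by string concatenation:
	// n = 1284401 * 10^66, a 73-digit number (~1.284401e72).
	n := new(big.Int).Mul(
		big.NewInt(1284401),
		new(big.Int).Exp(big.NewInt(10), big.NewInt(66), nil),
	)
	fmt.Println(len(n.String())) // 73 digits: "1284401" plus 66 zeros

	// The SOC filter's test, a.Bytes()[0]&0x80 != 0x00, is true exactly when
	// the address begins with a 1 bit; 0x65 (0b0110_0101) has a 0 leading bit.
	fmt.Println(0x65&0x80 != 0) // false
}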