diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8fe4cb29ea..78df4165d0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -70,8 +70,9 @@ jobs: rm -f $OUTNAME/post.h zip -r $OUTNAME.zip $OUTNAME - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v0 + uses: google-github-actions/setup-gcloud@v1 with: + version: "450.0.0" project_id: ${{ secrets.GCP_PROJECT_ID }} service_account_key: ${{ secrets.GCP_SA_KEY }} export_default_credentials: true diff --git a/.github/workflows/systest.yml b/.github/workflows/systest.yml index 1a5c276a90..042b4f81e2 100644 --- a/.github/workflows/systest.yml +++ b/.github/workflows/systest.yml @@ -69,6 +69,8 @@ jobs: - name: Configure gcloud uses: "google-github-actions/setup-gcloud@v1" + with: + version: "450.0.0" - name: Configure gke authentication plugin run: gcloud components install gke-gcloud-auth-plugin --quiet diff --git a/CHANGELOG.md b/CHANGELOG.md index c57dfd1244..dbe6b534ce 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,10 @@ See [RELEASE](./RELEASE.md) for workflow instructions. The change improves initial sync speed and any sync protocol requests required during consensus. +* [#5109](https://github.com/spacemeshos/go-spacemesh/pull/5109) Limit number of layers that tortoise needs to read on startup. + + Bounds the time required to restart a node. + ## v1.2.0 ### Upgrade information diff --git a/activation/activation_test.go b/activation/activation_test.go index 5207296cb1..7e64918d35 100644 --- a/activation/activation_test.go +++ b/activation/activation_test.go @@ -928,10 +928,7 @@ func TestBuilder_SignAtx(t *testing.T) { atx := newAtx(t, tab.sig, challenge, nipost, 100, types.Address{}) require.NoError(t, SignAndFinalizeAtx(tab.signer, atx)) - verifier, err := signing.NewEdVerifier() - require.NoError(t, err) - - ok := verifier.Verify(signing.ATX, tab.nodeID, atx.SignedBytes(), atx.Signature) + ok := signing.NewEdVerifier().Verify(signing.ATX, tab.nodeID, atx.SignedBytes(), atx.Signature) require.True(t, ok) require.Equal(t, tab.nodeID, atx.SmesherID) } diff --git a/activation/handler_test.go b/activation/handler_test.go index bc6a3bc912..7d9efc7539 100644 --- a/activation/handler_test.go +++ b/activation/handler_test.go @@ -91,10 +91,6 @@ type testHandler struct { func newTestHandler(tb testing.TB, goldenATXID types.ATXID) *testHandler { lg := logtest.New(tb) cdb := datastore.NewCachedDB(sql.InMemory(), lg) - - verifier, err := signing.NewEdVerifier() - require.NoError(tb, err) - ctrl := gomock.NewController(tb) mclock := NewMocklayerClock(ctrl) mpub := pubsubmocks.NewMockPublisher(ctrl) @@ -104,7 +100,7 @@ func newTestHandler(tb testing.TB, goldenATXID types.ATXID) *testHandler { mbeacon := NewMockAtxReceiver(ctrl) mtortoise := mocks.NewMockTortoise(ctrl) - atxHdlr := NewHandler(localID, cdb, verifier, mclock, mpub, mockFetch, 1, goldenATXID, mValidator, mbeacon, mtortoise, lg, PoetConfig{}) + atxHdlr := NewHandler(localID, cdb, signing.NewEdVerifier(), mclock, mpub, mockFetch, 1, goldenATXID, mValidator, mbeacon, mtortoise, lg, PoetConfig{}) return &testHandler{ Handler: atxHdlr, diff --git a/beacon/beacon_test.go b/beacon/beacon_test.go index 278d8c85f0..bde3738eb7 100644 --- a/beacon/beacon_test.go +++ b/beacon/beacon_test.go @@ -94,16 +94,13 @@ func newTestDriver(tb testing.TB, cfg Config, p pubsub.Publisher) *testProtocolD } edSgn, err := signing.NewEdSigner() require.NoError(tb, err) - edVerify, err := signing.NewEdVerifier() - require.NoError(tb, 
err) - minerID := edSgn.NodeID() - lg := logtest.New(tb).WithName(minerID.ShortString()) + lg := logtest.New(tb) tpd.mVerifier.EXPECT().Verify(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(true) tpd.mNonceFetcher.EXPECT().VRFNonce(gomock.Any(), gomock.Any()).AnyTimes().Return(types.VRFPostIndex(1), nil) tpd.cdb = datastore.NewCachedDB(sql.InMemory(), lg) - tpd.ProtocolDriver = New(p, edSgn, edVerify, tpd.mVerifier, tpd.cdb, tpd.mClock, + tpd.ProtocolDriver = New(p, edSgn, signing.NewEdVerifier(), tpd.mVerifier, tpd.cdb, tpd.mClock, WithConfig(cfg), WithLogger(lg), withWeakCoin(coinValueMock(tb, true)), diff --git a/blocks/certifier_test.go b/blocks/certifier_test.go index 5740600315..d41483472a 100644 --- a/blocks/certifier_test.go +++ b/blocks/certifier_test.go @@ -44,8 +44,6 @@ func newTestCertifier(t *testing.T) *testCertifier { db := datastore.NewCachedDB(sql.InMemory(), logtest.New(t)) signer, err := signing.NewEdSigner() require.NoError(t, err) - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) nid := signer.NodeID() ctrl := gomock.NewController(t) mo := hmocks.NewMockRolacle(ctrl) @@ -53,7 +51,7 @@ func newTestCertifier(t *testing.T) *testCertifier { mc := mocks.NewMocklayerClock(ctrl) mb := smocks.NewMockBeaconGetter(ctrl) mtortoise := smocks.NewMockTortoise(ctrl) - c := NewCertifier(db, mo, nid, signer, edVerifier, mp, mc, mb, mtortoise, + c := NewCertifier(db, mo, nid, signer, signing.NewEdVerifier(), mp, mc, mb, mtortoise, WithCertifierLogger(logtest.New(t)), ) return &testCertifier{ @@ -607,9 +605,6 @@ func Test_CertifyIfEligible(t *testing.T) { tc.mb.EXPECT().GetBeacon(b.LayerIndex.GetEpoch()).Return(types.RandomBeacon(), nil) proof := types.RandomVrfSignature() - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - tc.mOracle.EXPECT().Proof(gomock.Any(), b.LayerIndex, eligibility.CertifyRound).Return(proof, nil) tc.mOracle.EXPECT().CalcEligibility(gomock.Any(), b.LayerIndex, eligibility.CertifyRound, tc.cfg.CommitteeSize, tc.nodeID, proof).Return(defaultCnt, nil) tc.mPub.EXPECT().Publish(gomock.Any(), pubsub.BlockCertify, gomock.Any()).DoAndReturn( @@ -617,7 +612,7 @@ func Test_CertifyIfEligible(t *testing.T) { var msg types.CertifyMessage require.NoError(t, codec.Decode(got, &msg)) - ok := edVerifier.Verify(signing.HARE, msg.SmesherID, msg.Bytes(), msg.Signature) + ok := signing.NewEdVerifier().Verify(signing.HARE, msg.SmesherID, msg.Bytes(), msg.Signature) require.True(t, ok) require.Equal(t, b.LayerIndex, msg.LayerID) require.Equal(t, b.ID(), msg.BlockID) diff --git a/checkpoint/recovery_test.go b/checkpoint/recovery_test.go index 0ab14a4201..5aaa24b91d 100644 --- a/checkpoint/recovery_test.go +++ b/checkpoint/recovery_test.go @@ -217,8 +217,6 @@ func TestRecover_SameRecoveryInfo(t *testing.T) { func validateAndPreserveData(tb testing.TB, db *sql.Database, deps []*types.VerifiedActivationTx, proofs []*types.PoetProofMessage) { lg := logtest.New(tb) - edVerifier, err := signing.NewEdVerifier() - require.NoError(tb, err) poetDb := activation.NewPoetDb(db, lg) ctrl := gomock.NewController(tb) mclock := activation.NewMocklayerClock(ctrl) @@ -230,7 +228,7 @@ func validateAndPreserveData(tb testing.TB, db *sql.Database, deps []*types.Veri atxHandler := activation.NewHandler( "", cdb, - edVerifier, + signing.NewEdVerifier(), mclock, nil, mfetch, diff --git a/common/types/block_test.go b/common/types/block_test.go index 688e6d033b..389afdad17 100644 --- a/common/types/block_test.go +++ b/common/types/block_test.go @@ -41,9 +41,7 
@@ func Test_CertifyMessage(t *testing.T) { require.NoError(t, codec.Decode(data, &decoded)) require.Equal(t, msg, decoded) - pke, err := signing.NewEdVerifier() - require.NoError(t, err) - ok := pke.Verify(signing.HARE, decoded.SmesherID, msg.Bytes(), decoded.Signature) + ok := signing.NewEdVerifier().Verify(signing.HARE, decoded.SmesherID, msg.Bytes(), decoded.Signature) require.True(t, ok) } diff --git a/config/logging.go b/config/logging.go index 16943313ca..d132d6de3c 100644 --- a/config/logging.go +++ b/config/logging.go @@ -22,6 +22,7 @@ type LoggerConfig struct { PostLoggerLevel string `mapstructure:"post"` StateDbLoggerLevel string `mapstructure:"stateDb"` StateLoggerLevel string `mapstructure:"state"` + AtxHandlerLevel string `mapstructure:"atxHandler"` AtxDbStoreLoggerLevel string `mapstructure:"atxDbStore"` BeaconLoggerLevel string `mapstructure:"beacon"` WeakCoinLoggerLevel string `mapstructure:"weakCoin"` @@ -45,6 +46,7 @@ type LoggerConfig struct { HareBeaconLoggerLevel string `mapstructure:"hare-beacon"` TimeSyncLoggerLevel string `mapstructure:"timesync"` VMLogLevel string `mapstructure:"vm"` + ProposalListenerLevel string `mapstructure:"proposalListener"` } func DefaultLoggingConfig() LoggerConfig { @@ -57,6 +59,7 @@ func DefaultLoggingConfig() LoggerConfig { StateDbLoggerLevel: defaultLoggingLevel.String(), StateLoggerLevel: defaultLoggingLevel.String(), AtxDbStoreLoggerLevel: defaultLoggingLevel.String(), + AtxHandlerLevel: defaultLoggingLevel.String(), BeaconLoggerLevel: defaultLoggingLevel.String(), WeakCoinLoggerLevel: defaultLoggingLevel.String(), PoetDbStoreLoggerLevel: defaultLoggingLevel.String(), @@ -79,5 +82,6 @@ func DefaultLoggingConfig() LoggerConfig { HareBeaconLoggerLevel: defaultLoggingLevel.String(), TimeSyncLoggerLevel: defaultLoggingLevel.String(), VMLogLevel: defaultLoggingLevel.String(), + ProposalListenerLevel: defaultLoggingLevel.String(), } } diff --git a/config/mainnet.go b/config/mainnet.go index 223f9d40f8..18029d0be9 100644 --- a/config/mainnet.go +++ b/config/mainnet.go @@ -53,6 +53,8 @@ func MainnetConfig() Config { } logging := DefaultLoggingConfig() logging.TrtlLoggerLevel = zapcore.WarnLevel.String() + logging.AtxHandlerLevel = zapcore.WarnLevel.String() + logging.ProposalListenerLevel = zapcore.WarnLevel.String() return Config{ BaseConfig: BaseConfig{ DataDirParent: defaultDataDir, diff --git a/fetch/fetch.go b/fetch/fetch.go index de321b333d..40416fdaf2 100644 --- a/fetch/fetch.go +++ b/fetch/fetch.go @@ -103,6 +103,7 @@ type Config struct { MaxRetriesForRequest int EnableServesMetrics bool `mapstructure:"servers-metrics"` ServersConfig map[string]ServerConfig `mapstructure:"servers"` + PeersRateThreshold float64 `mapstructure:"peers-rate-threshold"` } func (c Config) getServerConfig(protocol string) ServerConfig { @@ -142,6 +143,7 @@ func DefaultConfig() Config { // 64 bytes OpnProtocol: ServerConfig{Queue: 10000, Requests: 1000, Interval: time.Second}, }, + PeersRateThreshold: 0.02, } } @@ -222,12 +224,12 @@ func NewFetch( opts ...Option, ) *Fetch { bs := datastore.NewBlobStore(cdb.Database) + f := &Fetch{ cfg: DefaultConfig(), logger: log.NewNop(), bs: bs, host: host, - peers: peers.New(), servers: map[string]requester{}, unprocessed: make(map[types.Hash32]*request), ongoing: make(map[types.Hash32]*request), @@ -237,6 +239,11 @@ func NewFetch( for _, opt := range opts { opt(f) } + popts := []peers.Opt{} + if f.cfg.PeersRateThreshold != 0 { + popts = append(popts, peers.WithRateThreshold(f.cfg.PeersRateThreshold)) + } + f.peers = 
peers.New(popts...) // NOTE(dshulyak) this is to avoid tests refactoring. // there is one test that covers this part. if host != nil { diff --git a/fetch/peers/peers.go b/fetch/peers/peers.go index e57d8ac712..5a6c5f5b4d 100644 --- a/fetch/peers/peers.go +++ b/fetch/peers/peers.go @@ -21,11 +21,10 @@ func (p *data) successRate() float64 { return float64(p.success) / float64(p.success+p.failures) } -func (p *data) cmp(other *data) int { +func (p *data) cmp(other *data, rateThreshold float64) int { if p == nil && other != nil { return -1 } - const rateThreshold = 0.1 switch { case p.rate-other.rate > rateThreshold: return 1 @@ -41,13 +40,30 @@ func (p *data) cmp(other *data) int { return strings.Compare(string(p.id), string(other.id)) } -func New() *Peers { - return &Peers{peers: map[peer.ID]*data{}} +type Opt func(*Peers) + +func WithRateThreshold(rate float64) Opt { + return func(p *Peers) { + p.rateThreshold = rate + } +} + +func New(opts ...Opt) *Peers { + p := &Peers{ + peers: map[peer.ID]*data{}, + rateThreshold: 0.1, + } + for _, opt := range opts { + opt(p) + } + return p } type Peers struct { mu sync.Mutex peers map[peer.ID]*data + + rateThreshold float64 } func (p *Peers) Add(id peer.ID) { @@ -107,7 +123,7 @@ func (p *Peers) SelectBestFrom(peers []peer.ID) peer.ID { if !exist { continue } - if best.cmp(pdata) == -1 { + if best.cmp(pdata, p.rateThreshold) == -1 { best = pdata } } @@ -134,7 +150,7 @@ func (p *Peers) SelectBest(n int) []peer.ID { for _, peer := range p.peers { worst := peer for i := range cache { - if cache[i].cmp(worst) == -1 { + if cache[i].cmp(worst, p.rateThreshold) == -1 { cache[i], worst = worst, cache[i] } } diff --git a/fetch/peers/peers_test.go b/fetch/peers/peers_test.go index 2217e9e55d..356c68b532 100644 --- a/fetch/peers/peers_test.go +++ b/fetch/peers/peers_test.go @@ -19,7 +19,7 @@ type event struct { } func withEvents(events []event) *Peers { - tracker := New() + tracker := New(WithRateThreshold(0.1)) for _, ev := range events { if ev.delete { tracker.Delete(ev.id) diff --git a/go.mod b/go.mod index 57d0c53c43..3ab22bf0e2 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-record v0.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiformats/go-multiaddr v0.11.0 + github.com/multiformats/go-multiaddr v0.12.0 github.com/multiformats/go-varint v0.0.7 github.com/natefinch/atomic v1.0.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a diff --git a/go.sum b/go.sum index dac0de61ce..3edffbeba2 100644 --- a/go.sum +++ b/go.sum @@ -500,8 +500,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= -github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= +github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE= +github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod 
h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= diff --git a/hare/algorithm_test.go b/hare/algorithm_test.go index 9fb5ac7d8d..d7b7cc1ef4 100644 --- a/hare/algorithm_test.go +++ b/hare/algorithm_test.go @@ -152,11 +152,9 @@ func buildBrokerWithLimit(tb testing.TB, testName string, limit int) *testBroker mockStateQ := mocks.NewMockstateQuerier(ctrl) mockSyncS := smocks.NewMockSyncStateProvider(ctrl) mockMesh := mocks.NewMockmesh(ctrl) - edVerifier, err := signing.NewEdVerifier() - require.NoError(tb, err) mpub := pubsubmocks.NewMockPublisher(ctrl) return &testBroker{ - Broker: newBroker(config.DefaultConfig(), mockMesh, edVerifier, &mockEligibilityValidator{valid: 1}, mockStateQ, mockSyncS, + Broker: newBroker(config.DefaultConfig(), mockMesh, signing.NewEdVerifier(), &mockEligibilityValidator{valid: 1}, mockStateQ, mockSyncS, mpub, limit, logtest.New(tb).WithName(testName)), mockMesh: mockMesh, mockSyncS: mockSyncS, @@ -348,8 +346,6 @@ func generateConsensusProcessWithConfig(tb testing.TB, cfg config.Config, inbox oracle := eligibility.New(logger) edSigner, err := signing.NewEdSigner() require.NoError(tb, err) - edVerifier, err := signing.NewEdVerifier() - require.NoError(tb, err) edPubkey := edSigner.PublicKey() nid := types.BytesToNodeID(edPubkey.Bytes()) oracle.Register(true, nid) @@ -372,7 +368,7 @@ func generateConsensusProcessWithConfig(tb testing.TB, cfg config.Config, inbox oracle, sq, edSigner, - edVerifier, + signing.NewEdVerifier(), NewEligibilityTracker(cfg.N), types.BytesToNodeID(edPubkey.Bytes()), noopPubSub(tb), diff --git a/hare/consensus_test.go b/hare/consensus_test.go index fcf015279e..3f959c4598 100644 --- a/hare/consensus_test.go +++ b/hare/consensus_test.go @@ -185,8 +185,6 @@ func createConsensusProcess( output := make(chan report, 1) wc := make(chan wcReport, 1) oracle.Register(isHonest, sig.NodeID()) - edVerifier, err := signing.NewEdVerifier() - require.NoError(tb, err) c, et, err := broker.Register(ctx, layer) require.NoError(tb, err) mch := make(chan *types.MalfeasanceGossip, cfg.N) @@ -204,7 +202,7 @@ func createConsensusProcess( oracle, broker.mockStateQ, sig, - edVerifier, + signing.NewEdVerifier(), et, sig.NodeID(), network, diff --git a/hare/flows_test.go b/hare/flows_test.go index 5d5644dd8f..52c5c24924 100644 --- a/hare/flows_test.go +++ b/hare/flows_test.go @@ -132,8 +132,6 @@ func createTestHare(tb testing.TB, msh mesh, tcfg config.Config, clock *mockCloc tb.Helper() signer, err := signing.NewEdSigner() require.NoError(tb, err) - edVerifier, err := signing.NewEdVerifier() - require.NoError(tb, err) ctrl := gomock.NewController(tb) patrol := mocks.NewMocklayerPatrol(ctrl) @@ -153,7 +151,7 @@ func createTestHare(tb testing.TB, msh mesh, tcfg config.Config, clock *mockCloc tcfg, p2p, signer, - edVerifier, + signing.NewEdVerifier(), signer.NodeID(), make(chan LayerOutput, 100), mockSyncS, diff --git a/hare/hare_test.go b/hare/hare_test.go index 3a56618c22..f15621ea01 100644 --- a/hare/hare_test.go +++ b/hare/hare_test.go @@ -123,9 +123,6 @@ func TestHare_New(t *testing.T) { signer, err := signing.NewEdSigner() require.NoError(t, err) - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - logger := logtest.New(t).WithName(t.Name()) cfg := config.Config{N: 10, RoundDuration: 2 * time.Second, ExpectedLeaders: 5, LimitIterations: 1000, LimitConcurrent: 1000, Hdist: 20} h := New( @@ -133,7 +130,7 @@ func TestHare_New(t *testing.T) { cfg, 
noopPubSub(t), signer, - edVerifier, + signing.NewEdVerifier(), signer.NodeID(), make(chan LayerOutput, 1), smocks.NewMockSyncStateProvider(ctrl), smocks.NewMockBeaconGetter(ctrl), diff --git a/hare/messagevalidation_test.go b/hare/messagevalidation_test.go index c3fe09d503..0fd7df2231 100644 --- a/hare/messagevalidation_test.go +++ b/hare/messagevalidation_test.go @@ -30,10 +30,8 @@ func defaultValidator(tb testing.TB) *syntaxContextValidator { signer, err := signing.NewEdSigner() require.NoError(tb, err) - edVerifier, err := signing.NewEdVerifier() - require.NoError(tb, err) - return newSyntaxContextValidator(signer, edVerifier, lowThresh10, trueValidator, + return newSyntaxContextValidator(signer, signing.NewEdVerifier(), lowThresh10, trueValidator, sq, truer{}, newPubGetter(), NewEligibilityTracker(lowThresh10), logtest.New(tb), ) } @@ -342,12 +340,10 @@ func (pg pubGetter) NodeID(m *Message) types.NodeID { func TestMessageValidator_SyntacticallyValidateMessage(t *testing.T) { signer, err := signing.NewEdSigner() require.NoError(t, err) - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) et := NewEligibilityTracker(100) vfunc := func(m *Message) bool { return true } - sv := newSyntaxContextValidator(signer, edVerifier, 1, vfunc, nil, truer{}, newPubGetter(), et, logtest.New(t)) + sv := newSyntaxContextValidator(signer, signing.NewEdVerifier(), 1, vfunc, nil, truer{}, newPubGetter(), et, logtest.New(t)) m := BuildPreRoundMsg(signer, NewDefaultEmptySet(), types.EmptyVrfSignature) require.True(t, sv.SyntacticallyValidateMessage(context.Background(), m)) m = BuildPreRoundMsg(signer, NewSetFromValues(types.RandomProposalID()), types.EmptyVrfSignature) @@ -387,15 +383,13 @@ func TestMessageValidator_validateSVPTypeB(t *testing.T) { func TestMessageValidator_validateSVP(t *testing.T) { signer, err := signing.NewEdSigner() require.NoError(t, err) - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) ctrl := gomock.NewController(t) mockStateQ := mocks.NewMockstateQuerier(ctrl) mockStateQ.EXPECT().IsIdentityActiveOnConsensusView(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).AnyTimes() et := NewEligibilityTracker(100) vfunc := func(m *Message) bool { return true } - sv := newSyntaxContextValidator(signer, edVerifier, 1, vfunc, mockStateQ, truer{}, newPubGetter(), et, logtest.New(t)) + sv := newSyntaxContextValidator(signer, signing.NewEdVerifier(), 1, vfunc, mockStateQ, truer{}, newPubGetter(), et, logtest.New(t)) m := buildProposalMsg(signer, NewSetFromValues(types.ProposalID{1}, types.ProposalID{2}, types.ProposalID{3}), types.EmptyVrfSignature) s1 := NewSetFromValues(types.ProposalID{1}) m.Svp = buildSVP(preRound, s1) diff --git a/hare/statustracker_test.go b/hare/statustracker_test.go index 4f6ab1ca2a..37fd514a44 100644 --- a/hare/statustracker_test.go +++ b/hare/statustracker_test.go @@ -333,12 +333,10 @@ func createIdentity(t *testing.T, db *sql.Database, sig *signing.EdSigner) { } func verifyMalfeasanceProof(t *testing.T, sig *signing.EdSigner, gossip *types.MalfeasanceGossip) { - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) lg := logtest.New(t) cdb := datastore.NewCachedDB(sql.InMemory(), lg) createIdentity(t, cdb.Database, sig) - nodeID, err := malfeasance.Validate(context.Background(), lg, cdb, edVerifier, nil, gossip) + nodeID, err := malfeasance.Validate(context.Background(), lg, cdb, signing.NewEdVerifier(), nil, gossip) require.NoError(t, err) require.Equal(t, sig.NodeID(), nodeID) } diff --git 
a/hare3/hare_test.go b/hare3/hare_test.go index 8fe49b1d48..d35337ed9e 100644 --- a/hare3/hare_test.go +++ b/hare3/hare_test.go @@ -200,8 +200,6 @@ func (n *node) withPublisher() *node { func (n *node) withHare() *node { logger := logtest.New(n.t).Named(fmt.Sprintf("hare=%d", n.i)) - verifier, err := signing.NewEdVerifier() - require.NoError(n.t, err) n.nclock = &testNodeClock{ genesis: n.t.start, @@ -210,7 +208,7 @@ func (n *node) withHare() *node { tracer := newTestTracer(n.t) n.tracer = tracer n.patrol = layerpatrol.New() - n.hare = New(n.nclock, n.mpublisher, n.db, verifier, n.oracle, n.msyncer, n.patrol, + n.hare = New(n.nclock, n.mpublisher, n.db, signing.NewEdVerifier(), n.oracle, n.msyncer, n.patrol, WithConfig(n.t.cfg), WithLogger(logger.Zap()), WithWallclock(n.clock), diff --git a/malfeasance/handler_test.go b/malfeasance/handler_test.go index 5c86a04fd6..b83181be2b 100644 --- a/malfeasance/handler_test.go +++ b/malfeasance/handler_test.go @@ -50,10 +50,8 @@ func TestHandler_HandleMalfeasanceProof_multipleATXs(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) lid := types.LayerID(11) @@ -258,10 +256,8 @@ func TestHandler_HandleMalfeasanceProof_multipleBallots(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) lid := types.LayerID(11) @@ -473,10 +469,8 @@ func TestHandler_HandleMalfeasanceProof_hareEquivocation(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) lid := types.LayerID(11) @@ -703,10 +697,8 @@ func TestHandler_HandleMalfeasanceProof_validateHare(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) createIdentity(t, db, sig) @@ -784,10 +776,8 @@ func TestHandler_CrossDomain(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp 
:= malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) createIdentity(t, db, sig) @@ -842,10 +832,8 @@ func TestHandler_HandleSyncedMalfeasanceProof_multipleATXs(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) createIdentity(t, db, sig) @@ -899,10 +887,8 @@ func TestHandler_HandleSyncedMalfeasanceProof_multipleBallots(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) createIdentity(t, db, sig) @@ -955,10 +941,8 @@ func TestHandler_HandleSyncedMalfeasanceProof_hareEquivocation(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) createIdentity(t, db, sig) @@ -1014,10 +998,8 @@ func TestHandler_HandleSyncedMalfeasanceProof_wrongHash(t *testing.T) { ctrl := gomock.NewController(t) trt := malfeasance.NewMocktortoise(ctrl) mcp := malfeasance.NewMockconsensusProtocol(ctrl) - sigVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, sigVerifier, trt) + h := malfeasance.NewHandler(datastore.NewCachedDB(db, lg), lg, "self", types.EmptyNodeID, mcp, signing.NewEdVerifier(), trt) sig, err := signing.NewEdSigner() require.NoError(t, err) createIdentity(t, db, sig) diff --git a/mesh/executor.go b/mesh/executor.go index a98f92f04d..cb7e22e183 100644 --- a/mesh/executor.go +++ b/mesh/executor.go @@ -46,7 +46,6 @@ func (e *Executor) Revert(ctx context.Context, revertTo types.LayerID) error { e.mu.Lock() defer e.mu.Unlock() - logger := e.logger.WithContext(ctx).WithFields(log.Stringer("revert_to", revertTo)) if err := e.vm.Revert(revertTo); err != nil { return fmt.Errorf("revert state: %w", err) } @@ -57,7 +56,11 @@ func (e *Executor) Revert(ctx context.Context, revertTo types.LayerID) error { if err != nil { return fmt.Errorf("get state hash: %w", err) } - logger.Event().Info("reverted 
state", log.Stringer("state_hash", root)) + e.logger.With().Info("reverted state", + log.Context(ctx), + log.Stringer("state_hash", root), + log.Uint32("revert_to", revertTo.Uint32()), + ) return nil } @@ -75,7 +78,6 @@ func (e *Executor) ExecuteOptimistic( start := time.Now() - logger := e.logger.WithContext(ctx).WithFields(lid) if err := e.checkOrder(lid); err != nil { return nil, err } @@ -110,7 +112,9 @@ func (e *Executor) ExecuteOptimistic( if err != nil { return nil, fmt.Errorf("get state hash: %w", err) } - logger.Event().Info("optimistically executed block", + e.logger.With().Info("optimistically executed block", + log.Context(ctx), + log.Uint32("lid", lid.Uint32()), log.Stringer("block", b.ID()), log.Stringer("state_hash", state), log.Duration("duration", time.Since(start)), @@ -133,7 +137,6 @@ func (e *Executor) Execute(ctx context.Context, lid types.LayerID, block *types. return e.executeEmpty(ctx, lid) } - logger := e.logger.WithContext(ctx).WithFields(lid, block.ID()) executable, err := e.getExecutableTxs(block.TxIDs) if err != nil { return err @@ -142,7 +145,11 @@ func (e *Executor) Execute(ctx context.Context, lid types.LayerID, block *types. if err != nil { return err } - ineffective, executed, err := e.vm.Apply(vm.ApplyContext{Layer: block.LayerIndex}, executable, rewards) + ineffective, executed, err := e.vm.Apply( + vm.ApplyContext{Layer: block.LayerIndex}, + executable, + rewards, + ) if err != nil { return fmt.Errorf("apply block: %w", err) } @@ -154,11 +161,14 @@ func (e *Executor) Execute(ctx context.Context, lid types.LayerID, block *types. if err != nil { return fmt.Errorf("get state hash: %w", err) } - logger.Event().Info("executed block", + e.logger.With().Info("executed block", + log.Context(ctx), + log.Uint32("lid", lid.Uint32()), log.Stringer("block", block.ID()), log.Stringer("state_hash", state), log.Duration("duration", time.Since(start)), log.Int("count", len(executed)), + log.Int("rewards", len(rewards)), ) return nil } @@ -183,7 +193,6 @@ func (e *Executor) convertRewards(rewards []types.AnyReward) ([]types.CoinbaseRe func (e *Executor) executeEmpty(ctx context.Context, lid types.LayerID) error { start := time.Now() - logger := e.logger.WithContext(ctx).WithFields(lid) if _, _, err := e.vm.Apply(vm.ApplyContext{Layer: lid}, nil, nil); err != nil { return fmt.Errorf("apply empty layer: %w", err) } @@ -194,7 +203,9 @@ func (e *Executor) executeEmpty(ctx context.Context, lid types.LayerID) error { if err != nil { return fmt.Errorf("get state hash: %w", err) } - logger.Event().Info("executed empty layer", + e.logger.With().Info("executed empty layer", + log.Context(ctx), + log.Uint32("lid", lid.Uint32()), log.Stringer("state_hash", state), log.Duration("duration", time.Since(start)), ) diff --git a/mesh/mesh.go b/mesh/mesh.go index 368f6770e6..b461457740 100644 --- a/mesh/mesh.go +++ b/mesh/mesh.go @@ -53,14 +53,17 @@ type Mesh struct { processedLayer atomic.Value nextProcessedLayers map[types.LayerID]struct{} maxProcessedLayer types.LayerID - - pendingUpdates struct { - min, max types.LayerID - } } // NewMesh creates a new instant of a mesh. 
-func NewMesh(cdb *datastore.CachedDB, c layerClock, trtl system.Tortoise, exec *Executor, state conservativeState, logger log.Log) (*Mesh, error) { +func NewMesh( + cdb *datastore.CachedDB, + c layerClock, + trtl system.Tortoise, + exec *Executor, + state conservativeState, + logger log.Log, +) (*Mesh, error) { msh := &Mesh{ logger: logger, cdb: cdb, @@ -70,10 +73,6 @@ func NewMesh(cdb *datastore.CachedDB, c layerClock, trtl system.Tortoise, exec * conState: state, nextProcessedLayers: make(map[types.LayerID]struct{}), missingBlocks: make(chan []types.BlockID, 32), - - pendingUpdates: struct { - min, max types.LayerID - }{min: math.MaxUint32}, } msh.latestLayer.Store(types.LayerID(0)) msh.latestLayerInState.Store(types.LayerID(0)) @@ -105,14 +104,14 @@ func NewMesh(cdb *datastore.CachedDB, c layerClock, trtl system.Tortoise, exec * msh.logger.With().Panic("error initialize genesis data", log.Err(err)) } - msh.setLatestLayer(msh.logger, genesis) + msh.setLatestLayer(genesis) msh.processedLayer.Store(genesis) msh.setLatestLayerInState(genesis) return msh, nil } func (msh *Mesh) recoverFromDB(latest types.LayerID) { - msh.setLatestLayer(msh.logger, latest) + msh.setLatestLayer(latest) lyr, err := layers.GetProcessed(msh.cdb) if err != nil { @@ -128,7 +127,8 @@ func (msh *Mesh) recoverFromDB(latest types.LayerID) { if applied.After(types.GetEffectiveGenesis()) { if err = msh.executor.Revert(context.Background(), applied); err != nil { - msh.logger.With().Fatal("failed to load state for layer", msh.LatestLayerInState(), log.Err(err)) + msh.logger.With(). + Fatal("failed to load state for layer", msh.LatestLayerInState(), log.Err(err)) } } msh.logger.With().Info("recovered mesh from disk", @@ -158,7 +158,7 @@ func (msh *Mesh) MeshHash(lid types.LayerID) (types.Hash32, error) { } // setLatestLayer sets the latest layer we saw from the network. 
-func (msh *Mesh) setLatestLayer(logger log.Log, lid types.LayerID) { +func (msh *Mesh) setLatestLayer(lid types.LayerID) { events.ReportLayerUpdate(events.LayerUpdate{ LayerID: lid, Status: events.LayerStatusTypeUnknown, @@ -170,7 +170,6 @@ func (msh *Mesh) setLatestLayer(logger log.Log, lid types.LayerID) { } if msh.latestLayer.CompareAndSwap(current, lid) { events.ReportNodeStatusUpdate() - logger.With().Debug("set latest known layer", lid) } } } @@ -196,9 +195,6 @@ func (msh *Mesh) ProcessedLayer() types.LayerID { func (msh *Mesh) setProcessedLayer(layerID types.LayerID) error { processed := msh.ProcessedLayer() if !layerID.After(processed) { - msh.logger.With().Debug("trying to set processed layer to an older layer", - log.Uint32("processed", processed.Uint32()), - layerID) return nil } @@ -207,9 +203,6 @@ func (msh *Mesh) setProcessedLayer(layerID types.LayerID) error { } if layerID != processed.Add(1) { - msh.logger.With().Debug("trying to set processed layer out of order", - log.Uint32("processed", processed.Uint32()), - layerID) msh.nextProcessedLayers[layerID] = struct{}{} return nil } @@ -229,7 +222,6 @@ func (msh *Mesh) setProcessedLayer(layerID types.LayerID) error { } msh.processedLayer.Store(processed) events.ReportNodeStatusUpdate() - msh.logger.Event().Debug("processed layer set", processed) return nil } @@ -261,10 +253,6 @@ func (msh *Mesh) ensureStateConsistent(ctx context.Context, results []result.Lay return nil } revert := changed.Sub(1) - msh.logger.With().Info("reverting state", - log.Context(ctx), - log.Uint32("revert_to", revert.Uint32()), - ) if err := msh.executor.Revert(ctx, revert); err != nil { return fmt.Errorf("revert state to layer %v: %w", revert, err) } @@ -282,37 +270,16 @@ func (msh *Mesh) ProcessLayer(ctx context.Context, lid types.LayerID) error { msh.mu.Lock() defer msh.mu.Unlock() - msh.logger.With().Debug("processing layer", - log.Context(ctx), - log.Uint32("layer_id", lid.Uint32()), - ) - msh.trtl.TallyVotes(ctx, lid) if err := msh.setProcessedLayer(lid); err != nil { return err } results := msh.trtl.Updates() - pending := msh.pendingUpdates.min != math.MaxUint32 - if len(results) > 0 { - msh.pendingUpdates.min = min(msh.pendingUpdates.min, results[0].Layer) - msh.pendingUpdates.max = max(msh.pendingUpdates.max, results[len(results)-1].Layer) - } next := msh.LatestLayerInState() + 1 - if msh.pendingUpdates.min != math.MaxUint32 && next < msh.pendingUpdates.min { - msh.pendingUpdates.min = next - pending = true - } - if pending { - var err error - results, err = msh.trtl.Results(msh.pendingUpdates.min, msh.pendingUpdates.max) - if err != nil { - return err - } - } // TODO(dshulyak) https://github.com/spacemeshos/go-spacemesh/issues/4425 if len(results) > 0 { - msh.logger.With().Info("consensus results", + msh.logger.With().Debug("consensus results", log.Context(ctx), log.Uint32("layer_id", lid.Uint32()), log.Array("results", log.ArrayMarshalerFunc(func(encoder log.ArrayEncoder) error { @@ -340,13 +307,6 @@ func (msh *Mesh) ProcessLayer(ctx context.Context, lid types.LayerID) error { if err := msh.applyResults(ctx, applicable); err != nil { return err } - if len(missing) > 0 { - msh.pendingUpdates.min = applicable[len(applicable)-1].Layer - msh.pendingUpdates.max = max(msh.pendingUpdates.min, msh.pendingUpdates.max) - } else { - msh.pendingUpdates.min = math.MaxUint32 - msh.pendingUpdates.max = 0 - } return nil } @@ -376,7 +336,6 @@ func filterMissing(results []result.Layer, next types.LayerID) ([]result.Layer, } func (msh *Mesh) applyResults(ctx 
context.Context, results []result.Layer) error { - msh.logger.With().Debug("applying results", log.Context(ctx)) for _, layer := range results { target := layer.FirstValid() if !layer.Verified && target.IsEmpty() { @@ -398,12 +357,6 @@ func (msh *Mesh) applyResults(ctx context.Context, results []result.Layer) error if err := msh.executor.Execute(ctx, layer.Layer, block); err != nil { return fmt.Errorf("execute block %v/%v: %w", layer.Layer, target, err) } - } else { - msh.logger.With().Debug("correct block already applied", - log.Context(ctx), - log.Uint32("layer", layer.Layer.Uint32()), - log.Stringer("block", current), - ) } if err := msh.cdb.WithTx(ctx, func(dbtx *sql.Tx) error { if err := layers.SetApplied(dbtx, layer.Layer, target); err != nil { @@ -427,18 +380,13 @@ func (msh *Mesh) applyResults(ctx context.Context, results []result.Layer) error }); err != nil { return err } + msh.trtl.OnApplied(layer.Layer, layer.Opinion) if layer.Verified { events.ReportLayerUpdate(events.LayerUpdate{ LayerID: layer.Layer, Status: events.LayerStatusTypeApplied, }) } - - msh.logger.With().Debug("state persisted", - log.Context(ctx), - log.Stringer("layer", layer.Layer), - log.Stringer("applied", target), - ) if layer.Layer > msh.LatestLayerInState() { msh.setLatestLayerInState(layer.Layer) } @@ -447,10 +395,10 @@ func (msh *Mesh) applyResults(ctx context.Context, results []result.Layer) error } func (msh *Mesh) saveHareOutput(ctx context.Context, lid types.LayerID, bid types.BlockID) error { - msh.logger.With().Debug("saving hare output for layer", + msh.logger.With().Debug("saving hare output", log.Context(ctx), - log.Uint32("layer_id", lid.Uint32()), - log.Stringer("block_id", bid), + log.Uint32("lid", lid.Uint32()), + log.Stringer("block", bid), ) var ( certs []certificates.CertValidity @@ -495,7 +443,7 @@ func (msh *Mesh) saveHareOutput(ctx context.Context, lid types.LayerID, bid type case 0: msh.trtl.OnHareOutput(lid, bid) case 1: - msh.logger.With().Info("already synced certificate", + msh.logger.With().Debug("already synced certificate", log.Context(ctx), log.Stringer("cert_block_id", certs[0].Block), log.Bool("cert_valid", certs[0].Valid)) @@ -508,20 +456,19 @@ func (msh *Mesh) saveHareOutput(ctx context.Context, lid types.LayerID, bid type encoder.AddBool("valid", cert.Valid) } return nil - }))) + })), + ) } return nil } // ProcessLayerPerHareOutput receives hare output once it finishes running for a given layer. -func (msh *Mesh) ProcessLayerPerHareOutput(ctx context.Context, layerID types.LayerID, blockID types.BlockID, executed bool) error { - if blockID == types.EmptyBlockID { - msh.logger.With().Info("received empty set from hare", - log.Context(ctx), - log.Uint32("layer_id", layerID.Uint32()), - log.Stringer("block_id", blockID), - ) - } +func (msh *Mesh) ProcessLayerPerHareOutput( + ctx context.Context, + layerID types.LayerID, + blockID types.BlockID, + executed bool, +) error { events.ReportLayerUpdate(events.LayerUpdate{ LayerID: layerID, Status: events.LayerStatusTypeApproved, @@ -544,22 +491,28 @@ func (msh *Mesh) setLatestLayerInState(lyr types.LayerID) { // SetZeroBlockLayer advances the latest layer in the network with a layer // that has no data. func (msh *Mesh) SetZeroBlockLayer(ctx context.Context, lid types.LayerID) { - msh.setLatestLayer(msh.logger.WithContext(ctx), lid) + msh.setLatestLayer(lid) } // AddTXsFromProposal adds the TXs in a Proposal into the database. 
-func (msh *Mesh) AddTXsFromProposal(ctx context.Context, layerID types.LayerID, proposalID types.ProposalID, txIDs []types.TransactionID) error { - logger := msh.logger.WithContext(ctx).WithFields(layerID, proposalID, log.Int("num_txs", len(txIDs))) +func (msh *Mesh) AddTXsFromProposal( + ctx context.Context, + layerID types.LayerID, + proposalID types.ProposalID, + txIDs []types.TransactionID, +) error { if err := msh.conState.LinkTXsWithProposal(layerID, proposalID, txIDs); err != nil { return fmt.Errorf("link proposal txs: %v/%v: %w", layerID, proposalID, err) } - msh.setLatestLayer(logger, layerID) - logger.Debug("associated txs to proposal") + msh.setLatestLayer(layerID) return nil } // AddBallot to the mesh. -func (msh *Mesh) AddBallot(ctx context.Context, ballot *types.Ballot) (*types.MalfeasanceProof, error) { +func (msh *Mesh) AddBallot( + ctx context.Context, + ballot *types.Ballot, +) (*types.MalfeasanceProof, error) { malicious, err := msh.cdb.IsMalicious(ballot.SmesherID) if err != nil { return nil, err @@ -626,12 +579,10 @@ func (msh *Mesh) AddBallot(ctx context.Context, ballot *types.Ballot) (*types.Ma // AddBlockWithTXs adds the block and its TXs in into the database. func (msh *Mesh) AddBlockWithTXs(ctx context.Context, block *types.Block) error { - logger := msh.logger.WithContext(ctx).WithFields(block.LayerIndex, block.ID(), log.Int("num_txs", len(block.TxIDs))) if err := msh.conState.LinkTXsWithBlock(block.LayerIndex, block.ID(), block.TxIDs); err != nil { return fmt.Errorf("link block txs: %v/%v: %w", block.LayerIndex, block.ID(), err) } - msh.setLatestLayer(logger, block.LayerIndex) - logger.Debug("associated txs to block") + msh.setLatestLayer(block.LayerIndex) // add block to the tortoise before storing it // otherwise fetcher will not wait until data is stored in the tortoise @@ -643,13 +594,18 @@ func (msh *Mesh) AddBlockWithTXs(ctx context.Context, block *types.Block) error } // GetATXs uses GetFullAtx to return a list of atxs corresponding to atxIds requested. -func (msh *Mesh) GetATXs(ctx context.Context, atxIds []types.ATXID) (map[types.ATXID]*types.VerifiedActivationTx, []types.ATXID) { +func (msh *Mesh) GetATXs( + ctx context.Context, + atxIds []types.ATXID, +) (map[types.ATXID]*types.VerifiedActivationTx, []types.ATXID) { var mIds []types.ATXID atxs := make(map[types.ATXID]*types.VerifiedActivationTx, len(atxIds)) for _, id := range atxIds { t, err := msh.cdb.GetFullAtx(id) if err != nil { - msh.logger.WithContext(ctx).With().Warning("could not get atx from database", id, log.Err(err)) + msh.logger.WithContext(ctx). + With(). 
+ Warning("could not get atx from database", id, log.Err(err)) mIds = append(mIds, id) } else { atxs[t.ID()] = t diff --git a/mesh/mesh_test.go b/mesh/mesh_test.go index 8426f97906..4d3626e4df 100644 --- a/mesh/mesh_test.go +++ b/mesh/mesh_test.go @@ -68,7 +68,12 @@ func createTestMesh(t *testing.T) *testMesh { return tm } -func genTx(t testing.TB, signer *signing.EdSigner, dest types.Address, amount, nonce, price uint64) types.Transaction { +func genTx( + t testing.TB, + signer *signing.EdSigner, + dest types.Address, + amount, nonce, price uint64, +) types.Transaction { t.Helper() raw := wallet.Spend(signer.PrivateKey(), dest, amount, nonce) tx := types.Transaction{ @@ -97,7 +102,13 @@ func CreateAndSaveTxs(t testing.TB, db sql.Executor, numOfTxs int) []types.Trans return txIDs } -func createBlock(t testing.TB, db sql.Executor, mesh *Mesh, layerID types.LayerID, nodeID types.NodeID) *types.Block { +func createBlock( + t testing.TB, + db sql.Executor, + mesh *Mesh, + layerID types.LayerID, + nodeID types.NodeID, +) *types.Block { t.Helper() txIDs := CreateAndSaveTxs(t, db, numTXs) b := &types.Block{ @@ -124,7 +135,12 @@ func createIdentity(t *testing.T, db sql.Executor, sig *signing.EdSigner) { require.NoError(t, atxs.Add(db, vAtx)) } -func createLayerBlocks(t *testing.T, db sql.Executor, mesh *Mesh, lyrID types.LayerID) []*types.Block { +func createLayerBlocks( + t *testing.T, + db sql.Executor, + mesh *Mesh, + lyrID types.LayerID, +) []*types.Block { t.Helper() blks := make([]*types.Block, 0, numBlocks) for i := 0; i < numBlocks; i++ { @@ -190,7 +206,14 @@ func TestMesh_FromGenesis(t *testing.T) { func TestMesh_WakeUpWhileGenesis(t *testing.T) { tm := createTestMesh(t) - msh, err := NewMesh(tm.cdb, tm.mockClock, tm.mockTortoise, tm.executor, tm.mockState, logtest.New(t)) + msh, err := NewMesh( + tm.cdb, + tm.mockClock, + tm.mockTortoise, + tm.executor, + tm.mockState, + logtest.New(t), + ) require.NoError(t, err) gLid := types.GetEffectiveGenesis() checkProcessedInDB(t, msh, gLid) @@ -206,7 +229,12 @@ func TestMesh_WakeUpWhileGenesis(t *testing.T) { func TestMesh_WakeUp(t *testing.T) { tm := createTestMesh(t) latest := types.LayerID(11) - b := types.NewExistingBallot(types.BallotID{1, 2, 3}, types.EmptyEdSignature, types.EmptyNodeID, latest) + b := types.NewExistingBallot( + types.BallotID{1, 2, 3}, + types.EmptyEdSignature, + types.EmptyNodeID, + latest, + ) require.NoError(t, ballots.Add(tm.cdb, &b)) require.NoError(t, layers.SetProcessed(tm.cdb, latest)) latestState := latest.Sub(1) @@ -215,7 +243,14 @@ func TestMesh_WakeUp(t *testing.T) { tm.mockVM.EXPECT().Revert(latestState) tm.mockState.EXPECT().RevertCache(latestState) tm.mockVM.EXPECT().GetStateRoot() - msh, err := NewMesh(tm.cdb, tm.mockClock, tm.mockTortoise, tm.executor, tm.mockState, logtest.New(t)) + msh, err := NewMesh( + tm.cdb, + tm.mockClock, + tm.mockTortoise, + tm.executor, + tm.mockState, + logtest.New(t), + ) require.NoError(t, err) gotL := msh.LatestLayer() require.Equal(t, latest, gotL) @@ -245,12 +280,11 @@ func TestMesh_GetLayer(t *testing.T) { func TestMesh_LatestKnownLayer(t *testing.T) { tm := createTestMesh(t) - lg := logtest.New(t) - tm.setLatestLayer(lg, types.LayerID(3)) - tm.setLatestLayer(lg, types.LayerID(7)) - tm.setLatestLayer(lg, types.LayerID(10)) - tm.setLatestLayer(lg, types.LayerID(1)) - tm.setLatestLayer(lg, types.LayerID(2)) + tm.setLatestLayer(types.LayerID(3)) + tm.setLatestLayer(types.LayerID(7)) + tm.setLatestLayer(types.LayerID(10)) + tm.setLatestLayer(types.LayerID(1)) + 
tm.setLatestLayer(types.LayerID(2)) require.Equal(t, types.LayerID(10), tm.LatestLayer(), "wrong layer") } @@ -336,9 +370,14 @@ func TestMesh_MaliciousBallots(t *testing.T) { require.NoError(t, err) require.NotNil(t, malProof) require.True(t, blts[1].IsMalicious()) - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) - nodeID, err := malfeasance.Validate(context.Background(), tm.logger, tm.cdb, edVerifier, nil, &types.MalfeasanceGossip{MalfeasanceProof: *malProof}) + nodeID, err := malfeasance.Validate( + context.Background(), + tm.logger, + tm.cdb, + signing.NewEdVerifier(), + nil, + &types.MalfeasanceGossip{MalfeasanceProof: *malProof}, + ) require.NoError(t, err) require.Equal(t, sig.NodeID(), nodeID) mal, err = identities.IsMalicious(tm.cdb, sig.NodeID()) @@ -371,7 +410,6 @@ func TestProcessLayer(t *testing.T) { type call struct { // inputs updates []result.Layer - results []result.Layer // outputs err string @@ -418,7 +456,7 @@ func TestProcessLayer(t *testing.T) { applied: map[types.LayerID]types.BlockID{start: idg("1"), start + 1: idg("2")}, }, { - results: rlayers( + updates: rlayers( rlayer(start+2, rblock(idg("3"), fixture.Valid(), fixture.Data())), rlayer(start+3, rblock(idg("4"), fixture.Valid(), fixture.Data())), ), @@ -437,7 +475,7 @@ func TestProcessLayer(t *testing.T) { err: "missing", }, { - results: rlayers( + updates: rlayers( rlayer(start, rblock(idg("1"), fixture.Data(), fixture.Valid())), ), executed: map[types.LayerID]types.BlockID{start: idg("1")}, @@ -458,7 +496,7 @@ func TestProcessLayer(t *testing.T) { err: "missing", }, { - results: rlayers( + updates: rlayers( rlayer(start, rblock(idg("1"), fixture.Invalid())), ), executed: map[types.LayerID]types.BlockID{start: {}}, @@ -585,7 +623,7 @@ func TestProcessLayer(t *testing.T) { err: "missing", }, { - results: rlayers( + updates: rlayers( rlayer(start, rblock(idg("1"), fixture.Invalid(), fixture.Data()), rblock(idg("3"), fixture.Valid(), fixture.Data()), @@ -609,9 +647,6 @@ func TestProcessLayer(t *testing.T) { }, { updates: rlayers( - rlayer(start+1, rblock(idg("2"), fixture.Valid(), fixture.Data())), - ), - results: rlayers( rlayer(start, rblock(idg("1"), fixture.Valid(), fixture.Data())), rlayer(start+1, rblock(idg("2"), fixture.Valid(), fixture.Data())), ), @@ -642,11 +677,6 @@ func TestProcessLayer(t *testing.T) { []call{ { updates: rlayers( - fixture.RLayerNonFinal(start.Add(1), - fixture.RBlock(fixture.IDGen("2"), fixture.Valid(), fixture.Data()), - ), - ), - results: rlayers( rlayer(start, fixture.RBlock(fixture.IDGen("1"), fixture.Valid(), fixture.Data()), ), @@ -699,6 +729,7 @@ func TestProcessLayer(t *testing.T) { tm := createTestMesh(t) tm.mockTortoise.EXPECT().TallyVotes(gomock.Any(), gomock.Any()).AnyTimes() + tm.mockTortoise.EXPECT().OnApplied(gomock.Any(), gomock.Any()).AnyTimes() tm.mockVM.EXPECT().GetStateRoot().AnyTimes() tm.mockVM.EXPECT().Revert(gomock.Any()).AnyTimes() tm.mockState.EXPECT().RevertCache(gomock.Any()).AnyTimes() @@ -707,14 +738,12 @@ func TestProcessLayer(t *testing.T) { for _, c := range tc.calls { for _, executed := range c.executed { tm.mockVM.EXPECT().Apply(gomock.Any(), gomock.Any(), gomock.Any()) - tm.mockState.EXPECT().UpdateCache(gomock.Any(), gomock.Any(), executed, gomock.Any(), gomock.Any()).Return(nil) + tm.mockState.EXPECT(). + UpdateCache(gomock.Any(), gomock.Any(), executed, gomock.Any(), gomock.Any()). 
+ Return(nil) } tm.mockTortoise.EXPECT().Updates().Return(c.updates) - if c.results != nil { - tm.mockTortoise.EXPECT().Results(gomock.Any(), gomock.Any()).Return(c.results, nil) - } ensuresDatabaseConsistent(t, tm.cdb, c.updates) - ensuresDatabaseConsistent(t, tm.cdb, c.results) err := tm.ProcessLayer(context.TODO(), lid) if len(c.err) > 0 { require.ErrorContains(t, err, c.err) @@ -898,7 +927,11 @@ func TestProcessLayerPerHareOutput(t *testing.T) { t.Parallel() tm := createTestMesh(t) tm.mockTortoise.EXPECT().TallyVotes(gomock.Any(), gomock.Any()).AnyTimes() - tm.mockTortoise.EXPECT().Updates().Return(nil).AnyTimes() // this makes ProcessLayer noop + tm.mockTortoise.EXPECT(). + Updates(). + Return(nil). + AnyTimes() + // this makes ProcessLayer noop for _, c := range tc.certs { if c.cert.Cert != nil { require.NoError(t, certificates.Add(tm.cdb, c.layer, c.cert.Cert)) diff --git a/miner/metrics.go b/miner/metrics.go index 2361af1a56..40eb79d4a5 100644 --- a/miner/metrics.go +++ b/miner/metrics.go @@ -33,8 +33,8 @@ func (lt *latencyTracker) total() time.Duration { func (lt *latencyTracker) MarshalLogObject(encoder log.ObjectEncoder) error { encoder.AddDuration("data", lt.data.Sub(lt.start)) encoder.AddDuration("tortoise", lt.tortoise.Sub(lt.data)) - encoder.AddDuration("txs", lt.txs.Sub(lt.tortoise)) - encoder.AddDuration("hash", lt.hash.Sub(lt.txs)) + encoder.AddDuration("hash", lt.hash.Sub(lt.tortoise)) + encoder.AddDuration("txs", lt.txs.Sub(lt.hash)) encoder.AddDuration("publish", lt.publish.Sub(lt.hash)) total := lt.total() encoder.AddDuration("total", total) diff --git a/miner/proposal_builder.go b/miner/proposal_builder.go index 984536f8aa..6a1d42d826 100644 --- a/miner/proposal_builder.go +++ b/miner/proposal_builder.go @@ -6,11 +6,14 @@ import ( "context" "errors" "fmt" + "runtime" + "slices" "sort" + "sync" "time" "golang.org/x/exp/maps" - "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" "github.com/spacemeshos/go-spacemesh/codec" "github.com/spacemeshos/go-spacemesh/common/types" @@ -68,23 +71,37 @@ type ProposalBuilder struct { tortoise votesEncoder syncer system.SyncStateProvider + mu sync.Mutex + signers map[types.NodeID]*signerSession + shared sharedSession +} + +type signerSession struct { signer *signing.EdSigner - session *session + log log.Log + session session + latency latencyTracker } -// session per every signing key for the whole epoch. -type session struct { - epoch types.EpochID - beacon types.Beacon - atx types.ATXID - atxWeight uint64 - ref types.BallotID - prev types.LayerID - nonce types.VRFPostIndex - active struct { +// shared data for all signers in the epoch. +type sharedSession struct { + epoch types.EpochID + beacon types.Beacon + active struct { set types.ATXIDList weight uint64 } +} + +// session per every signing key for the whole epoch. 
+type session struct { + epoch types.EpochID + atx types.ATXID + atxWeight uint64 + ref types.BallotID + beacon types.Beacon + prev types.LayerID + nonce types.VRFPostIndex eligibilities struct { proofs map[types.LayerID][]types.VotingEligibility slots uint32 @@ -132,9 +149,9 @@ type config struct { hdist uint32 minActiveSetWeight uint64 networkDelay time.Duration - + workersLimit int // used to determine whether a node has enough information on the active set this epoch - GoodAtxPercent int + goodAtxPercent int } func (c *config) MarshalLogObject(encoder log.ObjectEncoder) error { @@ -143,7 +160,7 @@ func (c *config) MarshalLogObject(encoder log.ObjectEncoder) error { encoder.AddUint32("hdist", c.hdist) encoder.AddUint64("min active weight", c.minActiveSetWeight) encoder.AddDuration("network delay", c.networkDelay) - encoder.AddInt("good atx percent", c.GoodAtxPercent) + encoder.AddInt("good atx percent", c.goodAtxPercent) return nil } @@ -157,6 +174,14 @@ func WithLayerSize(size uint32) Opt { } } +// WithWorkersLimit configures paralelization factor for builder operation when working with +// more than one signer. +func WithWorkersLimit(limit int) Opt { + return func(pb *ProposalBuilder) { + pb.cfg.workersLimit = limit + } +} + // WithLayerPerEpoch defines the number of layers per epoch. func WithLayerPerEpoch(layers uint32) Opt { return func(pb *ProposalBuilder) { @@ -191,14 +216,23 @@ func WithNetworkDelay(delay time.Duration) Opt { func WithMinGoodAtxPercent(percent int) Opt { return func(pb *ProposalBuilder) { - pb.cfg.GoodAtxPercent = percent + pb.cfg.goodAtxPercent = percent + } +} + +// WithSigners guarantees that builder will start execution with provided list of signers. +// Should be after logging. +func WithSigners(signers ...*signing.EdSigner) Opt { + return func(pb *ProposalBuilder) { + for _, signer := range signers { + pb.Register(signer) + } } } // New creates a struct of block builder type. func New( clock layerClock, - signer *signing.EdSigner, cdb *datastore.CachedDB, publisher pubsub.Publisher, trtl votesEncoder, @@ -207,14 +241,17 @@ func New( opts ...Opt, ) *ProposalBuilder { pb := &ProposalBuilder{ + cfg: config{ + workersLimit: runtime.NumCPU(), + }, logger: log.NewNop(), - signer: signer, clock: clock, cdb: cdb, publisher: publisher, tortoise: trtl, syncer: syncer, conState: conState, + signers: map[types.NodeID]*signerSession{}, } for _, opt := range opts { opt(pb) @@ -222,6 +259,18 @@ func New( return pb } +func (pb *ProposalBuilder) Register(signer *signing.EdSigner) { + pb.mu.Lock() + defer pb.mu.Unlock() + _, exist := pb.signers[signer.NodeID()] + if !exist { + pb.signers[signer.NodeID()] = &signerSession{ + signer: signer, + log: pb.logger.WithFields(log.String("signer", signer.NodeID().ShortString())), + } + } +} + // Start the loop that listens to layers and build proposals. func (pb *ProposalBuilder) Run(ctx context.Context) error { next := pb.clock.CurrentLayer().Add(1) @@ -240,16 +289,16 @@ func (pb *ProposalBuilder) Run(ctx context.Context) error { continue } next = current.Add(1) - sctx := log.WithNewSessionID(ctx) - if current <= types.GetEffectiveGenesis() || !pb.syncer.IsSynced(sctx) { + ctx := log.WithNewSessionID(ctx) + if current <= types.GetEffectiveGenesis() || !pb.syncer.IsSynced(ctx) { continue } if err := pb.build(ctx, current); err != nil { if errors.Is(err, errAtxNotAvailable) { pb.logger.With(). 
- Debug("signer is not active in epoch", log.Context(sctx), log.Uint32("lid", current.Uint32()), log.Err(err)) + Debug("signer is not active in epoch", log.Context(ctx), log.Uint32("lid", current.Uint32()), log.Err(err)) } else { - pb.logger.With().Warning("failed to build proposal", log.Context(sctx), log.Uint32("lid", current.Uint32()), log.Err(err)) + pb.logger.With().Warning("failed to build proposal", log.Context(ctx), log.Uint32("lid", current.Uint32()), log.Err(err)) } } } @@ -317,141 +366,174 @@ func (pb *ProposalBuilder) decideMeshHash(ctx context.Context, current types.Lay return mesh } -func (pb *ProposalBuilder) initSessionData(ctx context.Context, lid types.LayerID) error { - if pb.session == nil || pb.session.epoch != lid.GetEpoch() { - pb.session = &session{epoch: lid.GetEpoch()} +func (pb *ProposalBuilder) initSharedData(ctx context.Context, lid types.LayerID) error { + if pb.shared.epoch != lid.GetEpoch() { + pb.shared = sharedSession{epoch: lid.GetEpoch()} } - if pb.session.atx == types.EmptyATXID { - atx, err := atxs.GetByEpochAndNodeID(pb.cdb, pb.session.epoch-1, pb.signer.NodeID()) + if pb.shared.beacon == types.EmptyBeacon { + beacon, err := beacons.Get(pb.cdb, pb.shared.epoch) + if err != nil || beacon == types.EmptyBeacon { + return fmt.Errorf("missing beacon for epoch %d", pb.shared.epoch) + } + pb.shared.beacon = beacon + } + if pb.shared.active.set == nil { + weight, set, err := generateActiveSet( + pb.logger, + pb.cdb, + pb.shared.epoch, + pb.clock.LayerToTime(pb.shared.epoch.FirstLayer()), + pb.cfg.goodAtxPercent, + pb.cfg.networkDelay, + ) + if err != nil { + return err + } + pb.shared.active.set = set + pb.shared.active.weight = weight + } + return nil +} + +func (pb *ProposalBuilder) initSignerData( + ctx context.Context, + ss *signerSession, + lid types.LayerID, +) error { + if ss.session.epoch != lid.GetEpoch() { + ss.session = session{epoch: lid.GetEpoch()} + } + if ss.session.atx == types.EmptyATXID { + atx, err := atxs.GetByEpochAndNodeID(pb.cdb, ss.session.epoch-1, ss.signer.NodeID()) if err != nil { if errors.Is(err, sql.ErrNotFound) { err = errAtxNotAvailable } - return fmt.Errorf("get atx in epoch %v: %w", pb.session.epoch-1, err) + return fmt.Errorf("get atx in epoch %v: %w", ss.session.epoch-1, err) } - pb.session.atx = atx.ID() - pb.session.atxWeight = atx.GetWeight() + ss.session.atx = atx.ID() + ss.session.atxWeight = atx.GetWeight() } - if pb.session.nonce == 0 { - nonce, err := pb.cdb.VRFNonce(pb.signer.NodeID(), pb.session.epoch) + if ss.session.nonce == 0 { + nonce, err := pb.cdb.VRFNonce(ss.signer.NodeID(), ss.session.epoch) if err != nil { return fmt.Errorf("missing nonce: %w", err) } - pb.session.nonce = nonce + ss.session.nonce = nonce } - if pb.session.beacon == types.EmptyBeacon { - beacon, err := beacons.Get(pb.cdb, pb.session.epoch) - if err != nil || beacon == types.EmptyBeacon { - return fmt.Errorf("missing beacon for epoch %d", pb.session.epoch) - } - pb.session.beacon = beacon - } - if pb.session.prev == 0 { - prev, err := ballots.LastInEpoch(pb.cdb, pb.session.atx, pb.session.epoch) - switch { - case err == nil: - pb.session.prev = prev.Layer - case errors.Is(err, sql.ErrNotFound): - default: + if ss.session.prev == 0 { + prev, err := ballots.LastInEpoch(pb.cdb, ss.session.atx, ss.session.epoch) + if err != nil && !errors.Is(err, sql.ErrNotFound) { return err } + if err == nil { + ss.session.prev = prev.Layer + } } - if pb.session.ref == types.EmptyBallotID { - ballot, err := ballots.FirstInEpoch(pb.cdb, pb.session.atx, 
pb.session.epoch) + if ss.session.ref == types.EmptyBallotID { + ballot, err := ballots.FirstInEpoch(pb.cdb, ss.session.atx, ss.session.epoch) if err != nil && !errors.Is(err, sql.ErrNotFound) { return fmt.Errorf("get refballot %w", err) } if errors.Is(err, sql.ErrNotFound) { - weight, set, err := generateActiveSet( - pb.logger, - pb.cdb, - pb.signer.VRFSigner(), - pb.session.epoch, - pb.clock.LayerToTime(pb.session.epoch.FirstLayer()), - pb.cfg.GoodAtxPercent, - pb.cfg.networkDelay, - pb.session.atx, - pb.session.atxWeight, - ) - if err != nil { - return err - } - pb.session.active.set = set - pb.session.active.weight = weight - pb.session.eligibilities.slots = proposals.MustGetNumEligibleSlots( - pb.session.atxWeight, + ss.session.beacon = pb.shared.beacon + ss.session.eligibilities.slots = proposals.MustGetNumEligibleSlots( + ss.session.atxWeight, pb.cfg.minActiveSetWeight, - weight, + pb.shared.active.weight, pb.cfg.layerSize, pb.cfg.layersPerEpoch, ) } else { if ballot.EpochData == nil { - return fmt.Errorf("atx %d created invalid first ballot", pb.session.atx) - } - hash := ballot.EpochData.ActiveSetHash - set, err := activesets.Get(pb.cdb, hash) - if err != nil { - return fmt.Errorf("get activeset %s: %w", hash.String(), err) - } - var weight uint64 - for _, id := range set.Set { - atx, err := pb.cdb.GetAtxHeader(id) - if err != nil { - return err - } - weight += atx.GetWeight() + return fmt.Errorf("atx %d created invalid first ballot", ss.session.atx) } - pb.session.ref = ballot.ID() - pb.session.active.set = set.Set - pb.session.active.weight = weight - pb.session.eligibilities.slots = ballot.EpochData.EligibilityCount + ss.session.ref = ballot.ID() + ss.session.beacon = ballot.EpochData.Beacon + ss.session.eligibilities.slots = ballot.EpochData.EligibilityCount } } - if pb.session.eligibilities.proofs == nil { - pb.session.eligibilities.proofs = calcEligibilityProofs( - pb.signer.VRFSigner(), - pb.session.epoch, - pb.session.beacon, - pb.session.nonce, - pb.session.eligibilities.slots, + if ss.session.eligibilities.proofs == nil { + ss.session.eligibilities.proofs = calcEligibilityProofs( + ss.signer.VRFSigner(), + ss.session.epoch, + ss.session.beacon, + ss.session.nonce, + ss.session.eligibilities.slots, pb.cfg.layersPerEpoch, ) - pb.logger.With().Info("proposal eligibilities for an epoch", log.Inline(pb.session)) + ss.log.With().Info("proposal eligibilities for an epoch", log.Inline(&ss.session)) events.EmitEligibilities( - pb.session.epoch, - pb.session.beacon, - pb.session.atx, - uint32(len(pb.session.active.set)), - pb.session.eligibilities.proofs, + ss.session.epoch, + ss.session.beacon, + ss.session.atx, + uint32(len(pb.shared.active.set)), + ss.session.eligibilities.proofs, ) } return nil } func (pb *ProposalBuilder) build(ctx context.Context, lid types.LayerID) error { - latency := latencyTracker{start: time.Now()} - if err := pb.initSessionData(ctx, lid); err != nil { + start := time.Now() + if err := pb.initSharedData(ctx, lid); err != nil { return err } - latency.data = time.Now() - if lid <= pb.session.prev { - return fmt.Errorf("layer %d was already built", lid) + pb.mu.Lock() + // don't accept registration in the middle of computing proposals + signers := maps.Values(pb.signers) + pb.mu.Unlock() + + var eg errgroup.Group + eg.SetLimit(pb.cfg.workersLimit) + for _, ss := range signers { + ss := ss + ss.latency.start = start + eg.Go(func() error { + if err := pb.initSignerData(ctx, ss, lid); err != nil { + if errors.Is(err, errAtxNotAvailable) { + 
ss.log.With().Info("smesher doesn't have atx that targets this epoch", + log.Context(ctx), ss.session.epoch.Field(), + ) + } else { + return err + } + } + if lid <= ss.session.prev { + return fmt.Errorf( + "layer %d was already built by signer %s", + lid, + ss.signer.NodeID().ShortString(), + ) + } + ss.session.prev = lid + ss.latency.data = time.Now() + return nil + }) + } + if err := eg.Wait(); err != nil { + return err } - pb.session.prev = lid - proofs := pb.session.eligibilities.proofs[lid] - if len(proofs) == 0 { - pb.logger.With().Debug("not eligible for proposal in layer", - log.Context(ctx), log.Uint32("lid", lid.Uint32()), - log.Uint32("epoch", lid.GetEpoch().Uint32()), - ) + any := false + for _, ss := range signers { + if n := len(ss.session.eligibilities.proofs[lid]); n == 0 { + ss.log.With().Debug("not eligible for proposal in layer", + log.Context(ctx), + lid.Field(), lid.GetEpoch().Field()) + continue + } else { + ss.log.With().Debug("eligible for proposals in layer", + log.Context(ctx), + lid.Field(), log.Int("num proposals", n), + ) + any = true + } + } + if !any { return nil } - pb.logger.With().Debug("eligible for proposals in layer", - log.Context(ctx), log.Uint32("lid", lid.Uint32()), log.Int("num proposals", len(proofs)), - ) pb.tortoise.TallyVotes(ctx, lid) // TODO(dshulyak) get rid from the EncodeVotesWithCurrent option in a followup @@ -460,45 +542,78 @@ func (pb *ProposalBuilder) build(ctx context.Context, lid types.LayerID) error { if err != nil { return fmt.Errorf("encode votes: %w", err) } - latency.tortoise = time.Now() - - txs := pb.conState.SelectProposalTXs(lid, len(proofs)) - latency.txs = time.Now() + for _, ss := range signers { + ss.latency.tortoise = time.Now() + } meshHash := pb.decideMeshHash(ctx, lid) - latency.hash = time.Now() - - proposal := createProposal(pb.session, pb.signer, lid, txs, opinion, proofs, meshHash) + for _, ss := range signers { + ss.latency.hash = time.Now() + } - // needs to be saved before publishing, as we will query it in handler - if pb.session.ref == types.EmptyBallotID { - if err := activesets.Add(pb.cdb, proposal.EpochData.ActiveSetHash, &types.EpochActiveSet{ - Epoch: pb.session.epoch, - Set: pb.session.active.set, - }); err != nil && !errors.Is(err, sql.ErrObjectExists) { - return err + for _, ss := range signers { + proofs := ss.session.eligibilities.proofs[lid] + if len(proofs) == 0 { + ss.log.With().Debug("not eligible for proposal in layer", + log.Context(ctx), + lid.Field(), lid.GetEpoch().Field()) + continue } - } - if err = pb.publisher.Publish(ctx, pubsub.ProposalProtocol, codec.MustEncode(proposal)); err != nil { - return fmt.Errorf( - "failed to publish proposal %d/%s: %w", - proposal.Layer, - proposal.ID(), - err, + ss.log.With().Debug("eligible for proposals in layer", + log.Context(ctx), + lid.Field(), log.Int("num proposals", len(proofs)), ) - } - latency.publish = time.Now() - pb.logger.With(). 
- Info("proposal created", log.Context(ctx), log.Inline(proposal), log.Object("latency", &latency)) - proposalBuild.Observe(latency.total().Seconds()) - events.EmitProposal(lid, proposal.ID()) - events.ReportProposal(events.ProposalCreated, proposal) - return nil + txs := pb.conState.SelectProposalTXs(lid, len(proofs)) + ss.latency.txs = time.Now() + + // needs to be saved before publishing, as we will query it in handler + if ss.session.ref == types.EmptyBallotID { + if err := activesets.Add(pb.cdb, pb.shared.active.set.Hash(), &types.EpochActiveSet{ + Epoch: ss.session.epoch, + Set: pb.shared.active.set, + }); err != nil && !errors.Is(err, sql.ErrObjectExists) { + return err + } + } + + ss := ss + eg.Go(func() error { + proposal := createProposal( + &ss.session, + pb.shared.beacon, + pb.shared.active.set, + ss.signer, + lid, + txs, + opinion, + proofs, + meshHash, + ) + if err := pb.publisher.Publish(ctx, pubsub.ProposalProtocol, codec.MustEncode(proposal)); err != nil { + ss.log.Error("failed to publish proposal", + log.Context(ctx), + log.Uint32("lid", proposal.Layer.Uint32()), + log.Stringer("id", proposal.ID()), + log.Err(err), + ) + } else { + ss.latency.publish = time.Now() + ss.log.With().Info("proposal created", log.Context(ctx), log.Inline(proposal), log.Object("latency", &ss.latency)) + proposalBuild.Observe(ss.latency.total().Seconds()) + events.EmitProposal(lid, proposal.ID()) + events.ReportProposal(events.ProposalCreated, proposal) + } + return nil + }) + } + return eg.Wait() } func createProposal( session *session, + beacon types.Beacon, + activeset types.ATXIDList, signer *signing.EdSigner, lid types.LayerID, txs []types.TransactionID, @@ -524,8 +639,8 @@ func createProposal( if session.ref == types.EmptyBallotID { p.Ballot.RefBallot = types.EmptyBallotID p.Ballot.EpochData = &types.EpochData{ - ActiveSetHash: session.active.set.Hash(), - Beacon: session.beacon, + ActiveSetHash: activeset.Hash(), + Beacon: beacon, EligibilityCount: session.eligibilities.slots, } } else { @@ -576,45 +691,30 @@ func activeSetFromBlock(db sql.Executor, bid types.BlockID) ([]types.ATXID, erro func activesFromFirstBlock( cdb *datastore.CachedDB, - signer *signing.VRFSigner, target types.EpochID, - ownAtx types.ATXID, - ownWeight uint64, ) (uint64, []types.ATXID, error) { set, err := ActiveSetFromEpochFirstBlock(cdb, target) if err != nil { return 0, nil, err } - var ( - totalWeight uint64 - ownIncluded bool - ) + var totalWeight uint64 for _, id := range set { - ownIncluded = ownIncluded || id == ownAtx atx, err := cdb.GetAtxHeader(id) if err != nil { return 0, nil, err } totalWeight += atx.GetWeight() } - if !ownIncluded { - // miner is not included in the active set derived from the epoch's first block - set = append(set, ownAtx) - totalWeight += ownWeight - } return totalWeight, set, nil } func generateActiveSet( logger log.Log, cdb *datastore.CachedDB, - signer *signing.VRFSigner, target types.EpochID, epochStart time.Time, goodAtxPercent int, networkDelay time.Duration, - ownAtx types.ATXID, - ownWeight uint64, ) (uint64, []types.ATXID, error) { var ( totalWeight uint64 @@ -626,12 +726,11 @@ func generateActiveSet( if err != nil { return err } - if grade != good && header.NodeID != signer.NodeID() { + if grade != good { logger.With().Info("atx omitted from active set", header.ID, log.Int("grade", int(grade)), log.Stringer("smesher", header.NodeID), - log.Bool("own", header.NodeID == signer.NodeID()), log.Time("received", header.Received), log.Time("epoch_start", epochStart), ) @@ -652,7 
+751,7 @@ func generateActiveSet( // for all the atx and malfeasance proof. this active set is not usable. // TODO: change after timing info of ATXs and malfeasance proofs is sync'ed from peers as well var err error - totalWeight, set, err = activesFromFirstBlock(cdb, signer, target, ownAtx, ownWeight) + totalWeight, set, err = activesFromFirstBlock(cdb, target) if err != nil { return 0, nil, err } diff --git a/miner/proposal_builder_test.go b/miner/proposal_builder_test.go index 7e42ca624d..9b9b4e829e 100644 --- a/miner/proposal_builder_test.go +++ b/miner/proposal_builder_test.go @@ -1,10 +1,12 @@ package miner import ( + "bytes" "context" "errors" "math/rand" "os" + "sort" "testing" "time" @@ -218,13 +220,20 @@ type step struct { encodeVotesErr, publishErr error - expectProposal *types.Proposal - expectErr string + expectProposal *types.Proposal + expectProposals []*types.Proposal + expectErr string } func TestBuild(t *testing.T) { - signer, err := signing.NewEdSigner(signing.WithKeyFromRand(rand.New(rand.NewSource(10101)))) - require.NoError(t, err) + signers := make([]*signing.EdSigner, 4) + rng := rand.New(rand.NewSource(10101)) + for i := range signers { + signer, err := signing.NewEdSigner(signing.WithKeyFromRand(rng)) + require.NoError(t, err) + signers[i] = signer + } + signer := signers[0] defaults := []Opt{ WithLayerPerEpoch(types.GetLayersPerEpoch()), WithLayerSize(10), @@ -301,6 +310,7 @@ func TestBuild(t *testing.T) { gballot(types.BallotID{1}, types.ATXID{1}, signer.NodeID(), 15, &types.EpochData{ ActiveSetHash: types.ATXIDList{{1}, {2}}.Hash(), EligibilityCount: 5, + Beacon: types.Beacon{1}, }), }, activeset: types.ATXIDList{{1}, {2}}, @@ -320,46 +330,41 @@ func TestBuild(t *testing.T) { steps: []step{ { lid: 15, - expectErr: "atx not available", + expectErr: "missing beacon", + }, + { + lid: 15, + beacon: types.Beacon{1}, + expectErr: "empty active set", }, { lid: 15, atxs: []*types.VerifiedActivationTx{ - gatx(types.ATXID{10}, 2, signer.NodeID(), 1), + gatx(types.ATXID{20}, 2, types.NodeID{20}, 1), }, - expectErr: "missing nonce", }, { lid: 15, atxs: []*types.VerifiedActivationTx{ - gatx(types.ATXID{1}, 2, signer.NodeID(), 1, genAtxWithNonce(777)), + gatx(types.ATXID{10}, 2, signer.NodeID(), 1), }, - expectErr: "missing beacon", + expectErr: "missing nonce", }, { - lid: 16, - beacon: types.Beacon{1}, + lid: 16, + atxs: []*types.VerifiedActivationTx{ + gatx(types.ATXID{1}, 2, signer.NodeID(), 1, genAtxWithNonce(777)), + }, ballots: []*types.Ballot{ gballot(types.BallotID{1}, types.ATXID{10}, signer.NodeID(), 15, &types.EpochData{ ActiveSetHash: types.ATXIDList{{10}, {2}}.Hash(), EligibilityCount: 5, + Beacon: types.Beacon{1}, }), }, - expectErr: "get activeset", - }, - { - lid: 16, - activeset: types.ATXIDList{{10}, {2}}, - expectErr: "get ATXs from DB", - }, - { - lid: 16, - atxs: []*types.VerifiedActivationTx{ - gatx(types.ATXID{2}, 2, types.NodeID{1}, 1), - }, opinion: &types.Opinion{Hash: types.Hash32{1}}, txs: []types.TransactionID{{1}}, - latestComplete: 10, + latestComplete: 14, expectProposal: expectProposal( signer, 16, types.ATXID{10}, types.Opinion{Hash: types.Hash32{1}}, expectRef(types.BallotID{1}), @@ -406,6 +411,7 @@ func TestBuild(t *testing.T) { gballot(types.BallotID{1}, types.ATXID{1}, signer.NodeID(), 15, &types.EpochData{ ActiveSetHash: types.ATXIDList{{1}}.Hash(), EligibilityCount: 10, + Beacon: types.Beacon{1}, }), }, activeset: types.ATXIDList{{1}}, @@ -428,6 +434,7 @@ func TestBuild(t *testing.T) { gballot(types.BallotID{1}, types.ATXID{1}, 
signer.NodeID(), 15, &types.EpochData{ ActiveSetHash: types.ATXIDList{{1}}.Hash(), EligibilityCount: 10, + Beacon: types.Beacon{1}, }), }, activeset: types.ATXIDList{{1}}, @@ -435,7 +442,11 @@ func TestBuild(t *testing.T) { opinion: &types.Opinion{}, txs: []types.TransactionID{}, publishErr: errors.New("test publish"), - expectErr: "test publish", + expectProposal: expectProposal( + signer, 16, types.ATXID{1}, types.Opinion{}, + expectRef(types.BallotID{1}), + expectCounters(signer, 3, types.Beacon{1}, 777, 2, 5), + ), }, }, }, @@ -560,6 +571,7 @@ func TestBuild(t *testing.T) { gballot(types.BallotID{1}, types.ATXID{1}, signer.NodeID(), 15, &types.EpochData{ ActiveSetHash: types.ATXIDList{{1}}.Hash(), EligibilityCount: 10, + Beacon: types.Beacon{1}, }), }, activeset: types.ATXIDList{{1}}, @@ -611,6 +623,7 @@ func TestBuild(t *testing.T) { gballot(types.BallotID{1}, types.ATXID{1}, signer.NodeID(), 15, &types.EpochData{ ActiveSetHash: types.ATXIDList{{1}}.Hash(), EligibilityCount: 10, + Beacon: types.Beacon{1}, }), }, activeset: types.ATXIDList{{1}}, @@ -639,6 +652,45 @@ func TestBuild(t *testing.T) { }, }, }, + { + desc: "multi signers", + opts: []Opt{WithSigners(signers...), WithWorkersLimit(len(signers) / 2)}, + steps: []step{ + { + lid: 15, + beacon: types.Beacon{1}, + atxs: []*types.VerifiedActivationTx{ + gatx(types.ATXID{1}, 2, signers[0].NodeID(), 1, genAtxWithNonce(777)), + gatx(types.ATXID{2}, 2, signers[1].NodeID(), 1, genAtxWithNonce(999)), + }, + opinion: &types.Opinion{Hash: types.Hash32{1}}, + txs: []types.TransactionID{{1}, {2}}, + latestComplete: 14, + expectProposals: []*types.Proposal{ + expectProposal( + signers[0], 15, types.ATXID{1}, types.Opinion{Hash: types.Hash32{1}}, + expectEpochData( + gactiveset(types.ATXID{1}, types.ATXID{2}), + 25, + types.Beacon{1}, + ), + expectTxs([]types.TransactionID{{1}, {2}}), + expectCounters(signers[0], 3, types.Beacon{1}, 777, 0, 6, 9, 12, 16, 18, 20, 23), + ), + expectProposal( + signers[1], 15, types.ATXID{2}, types.Opinion{Hash: types.Hash32{1}}, + expectEpochData( + gactiveset(types.ATXID{1}, types.ATXID{2}), + 25, + types.Beacon{1}, + ), + expectTxs([]types.TransactionID{{1}, {2}}), + expectCounters(signers[1], 3, types.Beacon{1}, 999, 0, 4, 6, 8, 9, 17), + ), + }, + }, + }, + }, } { tc := tc t.Run(tc.desc, func(t *testing.T) { @@ -655,9 +707,10 @@ func TestBuild(t *testing.T) { clock.EXPECT().LayerToTime(gomock.Any()).Return(time.Unix(0, 0)).AnyTimes() - full := append(defaults, tc.opts...) - full = append(full, WithLogger(logtest.New(t))) - builder := New(clock, signer, cdb, publisher, tortoise, syncer, conState, full...) + full := append(defaults, WithLogger(logtest.New(t)), WithSigners(signer)) + full = append(full, tc.opts...) + builder := New(clock, cdb, publisher, tortoise, syncer, conState, full...) + var decoded chan *types.Proposal for _, step := range tc.steps { { if step.beacon != types.EmptyBeacon { @@ -710,33 +763,59 @@ func TestBuild(t *testing.T) { Return(step.opinion, step.encodeVotesErr) } if step.txs != nil { - conState.EXPECT().SelectProposalTXs(step.lid, gomock.Any()).Return(step.txs) + conState.EXPECT(). + SelectProposalTXs(step.lid, gomock.Any()). + Return(step.txs). 
+ AnyTimes() } if step.latestComplete != 0 { tortoise.EXPECT().LatestComplete().Return(step.latestComplete) } } - var decoded *types.Proposal - if step.expectProposal != nil || step.publishErr != nil { + decoded = make( + chan *types.Proposal, + len(signers), + ) // set the maximum possible capacity + if step.expectProposal != nil || len(step.expectProposals) > 0 || + step.publishErr != nil { publisher.EXPECT(). Publish(ctx, pubsub.ProposalProtocol, gomock.Any()). DoAndReturn(func(_ context.Context, _ string, msg []byte) error { var proposal types.Proposal codec.MustDecode(msg, &proposal) proposal.MustInitialize() - decoded = &proposal + select { + case decoded <- &proposal: + default: + require.FailNow( + t, + "blocking in Publish. check decoded channel capacity", + ) + } return step.publishErr - }) + }). + AnyTimes() } err := builder.build(ctx, step.lid) + close(decoded) if len(step.expectErr) > 0 { require.ErrorContains(t, err, step.expectErr) } else { require.NoError(t, err) + expect := step.expectProposals if step.expectProposal != nil { - require.Equal(t, *step.expectProposal, *decoded) - } else { - require.Nil(t, decoded) + expect = []*types.Proposal{step.expectProposal} + } + received := []*types.Proposal{} + for proposal := range decoded { + received = append(received, proposal) + } + sort.Slice(received, func(i, j int) bool { + return bytes.Compare(received[i].SmesherID[:], received[j].SmesherID[:]) == -1 + }) + require.Len(t, received, len(expect)) + for i := range expect { + require.Equal(t, expect[i], received[i], "i=%d", i) } } } @@ -809,7 +888,6 @@ func TestStartStop(t *testing.T) { builder := New( clock, - signer, cdb, publisher, tortoise, @@ -817,6 +895,7 @@ func TestStartStop(t *testing.T) { conState, WithLogger(logtest.New(t)), ) + builder.Register(signer) var ( ctx, cancel = context.WithCancel(context.Background()) eg errgroup.Group diff --git a/node/node.go b/node/node.go index c0728083c1..9a5bf0f060 100644 --- a/node/node.go +++ b/node/node.go @@ -170,7 +170,10 @@ func GetCommand() *cobra.Command { // This blocks until the context is finished or until an error is produced err = app.Start(ctx) - cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 30*time.Second) + cleanupCtx, cleanupCancel := context.WithTimeout( + context.Background(), + 30*time.Second, + ) defer cleanupCancel() done := make(chan struct{}, 1) // FIXME: per https://github.com/spacemeshos/go-spacemesh/issues/3830 @@ -452,7 +455,9 @@ func (app *App) Initialize() error { // vote against all blocks in that layer. 
so it's important to make sure zdist takes longer than // hare's max time duration to run consensus for a layer maxHareRoundsPerLayer := 1 + app.Config.HARE.LimitIterations*hare.RoundsPerIteration // pre-round + 4 rounds per iteration - maxHareLayerDuration := app.Config.HARE.WakeupDelta + time.Duration(maxHareRoundsPerLayer)*app.Config.HARE.RoundDuration + maxHareLayerDuration := app.Config.HARE.WakeupDelta + time.Duration( + maxHareRoundsPerLayer, + )*app.Config.HARE.RoundDuration if app.Config.LayerDuration*time.Duration(app.Config.Tortoise.Zdist) <= maxHareLayerDuration { app.log.With().Error("incompatible params", log.Uint32("tortoise_zdist", app.Config.Tortoise.Zdist), @@ -553,7 +558,11 @@ func (app *App) initServices(ctx context.Context) error { nipostValidatorLogger := app.addLogger(NipostValidatorLogger, lg) postVerifiers := make([]activation.PostVerifier, 0, app.Config.SMESHING.VerifyingOpts.Workers) lg.Debug("creating post verifier") - verifier, err := activation.NewPostVerifier(app.Config.POST, nipostValidatorLogger.Zap(), verifying.WithPowFlags(app.Config.SMESHING.VerifyingOpts.Flags.Value())) + verifier, err := activation.NewPostVerifier( + app.Config.POST, + nipostValidatorLogger.Zap(), + verifying.WithPowFlags(app.Config.SMESHING.VerifyingOpts.Flags.Value()), + ) lg.With().Debug("created post verifier", log.Err(err)) if err != nil { return err @@ -564,7 +573,13 @@ func (app *App) initServices(ctx context.Context) error { app.postVerifier = activation.NewOffloadingPostVerifier(postVerifiers, nipostValidatorLogger) if app.Config.SMESHING.Start { - app.postSupervisor, err = activation.NewPostSupervisor(app.log.Zap(), app.Config.POSTService, app.Config.POST, app.Config.SMESHING.Opts, app.Config.SMESHING.ProvingOpts) + app.postSupervisor, err = activation.NewPostSupervisor( + app.log.Zap(), + app.Config.POSTService, + app.Config.POST, + app.Config.SMESHING.Opts, + app.Config.SMESHING.ProvingOpts, + ) if err != nil { return fmt.Errorf("start post service: %w", err) } @@ -573,7 +588,12 @@ func (app *App) initServices(ctx context.Context) error { } } - validator := activation.NewValidator(poetDb, app.Config.POST, app.Config.SMESHING.Opts.Scrypt, app.postVerifier) + validator := activation.NewValidator( + poetDb, + app.Config.POST, + app.Config.SMESHING.Opts.Scrypt, + app.postVerifier, + ) app.validator = validator cfg := vm.DefaultConfig() @@ -593,7 +613,11 @@ func (app *App) initServices(ctx context.Context) error { if len(genesisAccts) > 0 { exists, err := state.AccountExists(genesisAccts[0].Address) if err != nil { - return fmt.Errorf("failed to check genesis account %v: %w", genesisAccts[0].Address, err) + return fmt.Errorf( + "failed to check genesis account %v: %w", + genesisAccts[0].Address, + err, + ) } if !exists { if err = state.ApplyGenesis(genesisAccts); err != nil { @@ -607,13 +631,18 @@ func (app *App) initServices(ctx context.Context) error { return errors.New("invalid golden atx id") } - app.edVerifier, err = signing.NewEdVerifier(signing.WithVerifierPrefix(app.Config.Genesis.GenesisID().Bytes())) - if err != nil { - return fmt.Errorf("failed to create signature verifier: %w", err) - } + app.edVerifier = signing.NewEdVerifier( + signing.WithVerifierPrefix(app.Config.Genesis.GenesisID().Bytes()), + ) vrfVerifier := signing.NewVRFVerifier() - beaconProtocol := beacon.New(app.host, app.edSgn, app.edVerifier, vrfVerifier, app.cachedDB, app.clock, + beaconProtocol := beacon.New( + app.host, + app.edSgn, + app.edVerifier, + vrfVerifier, + app.cachedDB, + app.clock, 
beacon.WithContext(ctx), beacon.WithConfig(app.Config.Beacon), beacon.WithLogger(app.addLogger(BeaconLogger, lg)), @@ -651,7 +680,12 @@ func (app *App) initServices(ctx context.Context) error { return nil }) - executor := mesh.NewExecutor(app.cachedDB, state, app.conState, app.addLogger(ExecutorLogger, lg)) + executor := mesh.NewExecutor( + app.cachedDB, + state, + app.conState, + app.addLogger(ExecutorLogger, lg), + ) mlog := app.addLogger(MeshLogger, lg) msh, err := mesh.NewMesh(app.cachedDB, app.clock, trtl, executor, app.conState, mlog) if err != nil { @@ -659,7 +693,14 @@ func (app *App) initServices(ctx context.Context) error { } app.eg.Go(func() error { - prune.Prune(ctx, mlog.Zap(), app.db, app.clock, app.Config.Tortoise.Hdist, app.Config.DatabasePruneInterval) + prune.Prune( + ctx, + mlog.Zap(), + app.db, + app.clock, + app.Config.Tortoise.Hdist, + app.Config.DatabasePruneInterval, + ) return nil }) @@ -683,11 +724,23 @@ func (app *App) initServices(ctx context.Context) error { // we can't have an epoch offset which is greater/equal than the number of layers in an epoch if app.Config.HareEligibility.ConfidenceParam >= app.Config.BaseConfig.LayersPerEpoch { - return fmt.Errorf("confidence param should be smaller than layers per epoch. eligibility-confidence-param: %d. layers-per-epoch: %d", - app.Config.HareEligibility.ConfidenceParam, app.Config.BaseConfig.LayersPerEpoch) + return fmt.Errorf( + "confidence param should be smaller than layers per epoch. eligibility-confidence-param: %d. layers-per-epoch: %d", + app.Config.HareEligibility.ConfidenceParam, + app.Config.BaseConfig.LayersPerEpoch, + ) } - proposalListener := proposals.NewHandler(app.cachedDB, app.edVerifier, app.host, fetcherWrapped, beaconProtocol, msh, trtl, vrfVerifier, app.clock, + proposalListener := proposals.NewHandler( + app.cachedDB, + app.edVerifier, + app.host, + fetcherWrapped, + beaconProtocol, + msh, + trtl, + vrfVerifier, + app.clock, proposals.WithLogger(app.addLogger(ProposalListenerLogger, lg)), proposals.WithConfig(proposals.Config{ LayerSize: layerSize, @@ -708,7 +761,15 @@ func (app *App) initServices(ctx context.Context) error { app.addLogger(TxHandlerLogger, lg), ) - app.hOracle = eligibility.New(beaconProtocol, app.cachedDB, vrfVerifier, vrfSigner, app.Config.LayersPerEpoch, app.Config.HareEligibility, app.addLogger(HareOracleLogger, lg)) + app.hOracle = eligibility.New( + beaconProtocol, + app.cachedDB, + vrfVerifier, + vrfSigner, + app.Config.LayersPerEpoch, + app.Config.HareEligibility, + app.addLogger(HareOracleLogger, lg), + ) // TODO: genesisMinerWeight is set to app.Config.SpaceToCommit, because PoET ticks are currently hardcoded to 1 bscfg := app.Config.Bootstrap @@ -720,7 +781,16 @@ func (app *App) initServices(ctx context.Context) error { bootstrap.WithLogger(app.addLogger(BootstrapLogger, lg)), ) - app.certifier = blocks.NewCertifier(app.cachedDB, app.hOracle, app.edSgn.NodeID(), app.edSgn, app.edVerifier, app.host, app.clock, beaconProtocol, trtl, + app.certifier = blocks.NewCertifier( + app.cachedDB, + app.hOracle, + app.edSgn.NodeID(), + app.edSgn, + app.edVerifier, + app.host, + app.clock, + beaconProtocol, + trtl, blocks.WithCertContext(ctx), blocks.WithCertConfig(blocks.CertConfig{ CommitteeSize: app.Config.HARE.N, @@ -747,7 +817,15 @@ func (app *App) initServices(ctx context.Context) error { syncerConf.HareDelayLayers = app.Config.Tortoise.Zdist syncerConf.SyncCertDistance = app.Config.Tortoise.Hdist syncerConf.Standalone = app.Config.Standalone - newSyncer := 
syncer.NewSyncer(app.cachedDB, app.clock, beaconProtocol, msh, trtl, fetcher, patrol, app.certifier, + newSyncer := syncer.NewSyncer( + app.cachedDB, + app.clock, + beaconProtocol, + msh, + trtl, + fetcher, + patrol, + app.certifier, syncer.WithConfig(syncerConf), syncer.WithLogger(app.addLogger(SyncLogger, lg)), ) @@ -756,7 +834,13 @@ func (app *App) initServices(ctx context.Context) error { app.hOracle.SetSync(newSyncer) hareOutputCh := make(chan hare.LayerOutput, app.Config.HARE.LimitConcurrent) - app.blockGen = blocks.NewGenerator(app.cachedDB, executor, msh, fetcherWrapped, app.certifier, patrol, + app.blockGen = blocks.NewGenerator( + app.cachedDB, + executor, + msh, + fetcherWrapped, + app.certifier, + patrol, blocks.WithContext(ctx), blocks.WithConfig(blocks.Config{ BlockGasLimit: app.Config.BlockGasLimit, @@ -764,7 +848,8 @@ func (app *App) initServices(ctx context.Context) error { GenBlockInterval: 500 * time.Millisecond, }), blocks.WithHareOutputChan(hareOutputCh), - blocks.WithGeneratorLogger(app.addLogger(BlockGenLogger, lg))) + blocks.WithGeneratorLogger(app.addLogger(BlockGenLogger, lg)), + ) hareCfg := app.Config.HARE hareCfg.Hdist = app.Config.Tortoise.Hdist @@ -798,7 +883,12 @@ func (app *App) initServices(ctx context.Context) error { app.hare3.Register(app.edSgn) app.hare3.Start() app.eg.Go(func() error { - compat.ReportWeakcoin(ctx, logger, app.hare3.Coins(), tortoiseWeakCoin{db: app.cachedDB, tortoise: trtl}) + compat.ReportWeakcoin( + ctx, + logger, + app.hare3.Coins(), + tortoiseWeakCoin{db: app.cachedDB, tortoise: trtl}, + ) return nil }) app.eg.Go(func() error { @@ -813,7 +903,6 @@ func (app *App) initServices(ctx context.Context) error { } proposalBuilder := miner.New( app.clock, - app.edSgn, app.cachedDB, app.host, trtl, @@ -827,6 +916,7 @@ func (app *App) initServices(ctx context.Context) error { miner.WithMinGoodAtxPercent(minerGoodAtxPct), miner.WithLogger(app.addLogger(ProposalBuilderLogger, lg)), ) + proposalBuilder.Register(app.edSgn) postSetupMgr, err := activation.NewPostSetupManager( app.edSgn.NodeID(), @@ -859,7 +949,11 @@ func (app *App) initServices(ctx context.Context) error { if app.Config.SMESHING.Start { coinbaseAddr, err = types.StringToAddress(app.Config.SMESHING.CoinbaseAccount) if err != nil { - app.log.Panic("failed to parse CoinbaseAccount address `%s`: %v", app.Config.SMESHING.CoinbaseAccount, err) + app.log.Panic( + "failed to parse CoinbaseAccount address `%s`: %v", + app.Config.SMESHING.CoinbaseAccount, + err, + ) } if coinbaseAddr.IsEmpty() { app.log.Panic("invalid coinbase account") @@ -900,15 +994,53 @@ func (app *App) initServices(ctx context.Context) error { trtl, ) fetcher.SetValidators( - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(atxHandler.HandleSyncedAtx, app.host, lg)), - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(poetDb.ValidateAndStoreMsg, app.host, lg)), - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(proposalListener.HandleSyncedBallot, app.host, lg)), - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(proposalListener.HandleActiveSet, app.host, lg)), - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(blockHandler.HandleSyncedBlock, app.host, lg)), - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(proposalListener.HandleSyncedProposal, app.host, lg)), - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(app.txHandler.HandleBlockTransaction, app.host, lg)), - 
fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(app.txHandler.HandleProposalTransaction, app.host, lg)), - fetch.ValidatorFunc(pubsub.DropPeerOnSyncValidationReject(malfeasanceHandler.HandleSyncedMalfeasanceProof, app.host, lg)), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject(atxHandler.HandleSyncedAtx, app.host, lg), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject(poetDb.ValidateAndStoreMsg, app.host, lg), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject( + proposalListener.HandleSyncedBallot, + app.host, + lg, + ), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject(proposalListener.HandleActiveSet, app.host, lg), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject(blockHandler.HandleSyncedBlock, app.host, lg), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject( + proposalListener.HandleSyncedProposal, + app.host, + lg, + ), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject( + app.txHandler.HandleBlockTransaction, + app.host, + lg, + ), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject( + app.txHandler.HandleProposalTransaction, + app.host, + lg, + ), + ), + fetch.ValidatorFunc( + pubsub.DropPeerOnSyncValidationReject( + malfeasanceHandler.HandleSyncedMalfeasanceProof, + app.host, + lg, + ), + ), ) syncHandler := func(_ context.Context, _ p2p.Peer, _ []byte) error { @@ -925,17 +1057,51 @@ func (app *App) initServices(ctx context.Context) error { } if app.Config.Beacon.RoundsNumber > 0 { - app.host.Register(pubsub.BeaconWeakCoinProtocol, pubsub.ChainGossipHandler(syncHandler, beaconProtocol.HandleWeakCoinProposal), pubsub.WithValidatorInline(true)) - app.host.Register(pubsub.BeaconProposalProtocol, pubsub.ChainGossipHandler(syncHandler, beaconProtocol.HandleProposal), pubsub.WithValidatorInline(true)) - app.host.Register(pubsub.BeaconFirstVotesProtocol, pubsub.ChainGossipHandler(syncHandler, beaconProtocol.HandleFirstVotes), pubsub.WithValidatorInline(true)) - app.host.Register(pubsub.BeaconFollowingVotesProtocol, pubsub.ChainGossipHandler(syncHandler, beaconProtocol.HandleFollowingVotes), pubsub.WithValidatorInline(true)) - } - app.host.Register(pubsub.ProposalProtocol, pubsub.ChainGossipHandler(syncHandler, proposalListener.HandleProposal)) - app.host.Register(pubsub.AtxProtocol, pubsub.ChainGossipHandler(atxSyncHandler, atxHandler.HandleGossipAtx)) - app.host.Register(pubsub.TxProtocol, pubsub.ChainGossipHandler(syncHandler, app.txHandler.HandleGossipTransaction)) - app.host.Register(pubsub.HareProtocol, pubsub.ChainGossipHandler(syncHandler, app.hare.GetHareMsgHandler())) - app.host.Register(pubsub.BlockCertify, pubsub.ChainGossipHandler(syncHandler, app.certifier.HandleCertifyMessage)) - app.host.Register(pubsub.MalfeasanceProof, pubsub.ChainGossipHandler(atxSyncHandler, malfeasanceHandler.HandleMalfeasanceProof)) + app.host.Register( + pubsub.BeaconWeakCoinProtocol, + pubsub.ChainGossipHandler(syncHandler, beaconProtocol.HandleWeakCoinProposal), + pubsub.WithValidatorInline(true), + ) + app.host.Register( + pubsub.BeaconProposalProtocol, + pubsub.ChainGossipHandler(syncHandler, beaconProtocol.HandleProposal), + pubsub.WithValidatorInline(true), + ) + app.host.Register( + pubsub.BeaconFirstVotesProtocol, + pubsub.ChainGossipHandler(syncHandler, beaconProtocol.HandleFirstVotes), + pubsub.WithValidatorInline(true), + ) + app.host.Register( + pubsub.BeaconFollowingVotesProtocol, + pubsub.ChainGossipHandler(syncHandler, 
beaconProtocol.HandleFollowingVotes), + pubsub.WithValidatorInline(true), + ) + } + app.host.Register( + pubsub.ProposalProtocol, + pubsub.ChainGossipHandler(syncHandler, proposalListener.HandleProposal), + ) + app.host.Register( + pubsub.AtxProtocol, + pubsub.ChainGossipHandler(atxSyncHandler, atxHandler.HandleGossipAtx), + ) + app.host.Register( + pubsub.TxProtocol, + pubsub.ChainGossipHandler(syncHandler, app.txHandler.HandleGossipTransaction), + ) + app.host.Register( + pubsub.HareProtocol, + pubsub.ChainGossipHandler(syncHandler, app.hare.GetHareMsgHandler()), + ) + app.host.Register( + pubsub.BlockCertify, + pubsub.ChainGossipHandler(syncHandler, app.certifier.HandleCertifyMessage), + ) + app.host.Register( + pubsub.MalfeasanceProof, + pubsub.ChainGossipHandler(atxSyncHandler, malfeasanceHandler.HandleMalfeasanceProof), + ) app.proposalBuilder = proposalBuilder app.proposalListener = proposalListener @@ -969,7 +1135,10 @@ func (app *App) launchStandalone(ctx context.Context) error { return nil } if len(app.Config.PoETServers) != 1 { - return fmt.Errorf("to launch in a standalone mode provide single local address for poet: %v", app.Config.PoETServers) + return fmt.Errorf( + "to launch in a standalone mode provide single local address for poet: %v", + app.Config.PoETServers, + ) } value := types.Beacon{} genesis := app.Config.Genesis.GenesisID() @@ -1061,7 +1230,11 @@ func (app *App) startServices(ctx context.Context) error { if app.Config.SMESHING.Start { coinbaseAddr, err := types.StringToAddress(app.Config.SMESHING.CoinbaseAccount) if err != nil { - app.log.Panic("failed to parse CoinbaseAccount address on start `%s`: %v", app.Config.SMESHING.CoinbaseAccount, err) + app.log.Panic( + "failed to parse CoinbaseAccount address on start `%s`: %v", + app.Config.SMESHING.CoinbaseAccount, + err, + ) } if err := app.atxBuilder.StartSmeshing(coinbaseAddr, app.Config.SMESHING.Opts); err != nil { app.log.Panic("failed to start smeshing: %v", err) @@ -1080,44 +1253,100 @@ func (app *App) startServices(ctx context.Context) error { return nil } -func (app *App) initService(ctx context.Context, svc grpcserver.Service) (grpcserver.ServiceAPI, error) { +func (app *App) initService( + ctx context.Context, + svc grpcserver.Service, +) (grpcserver.ServiceAPI, error) { switch svc { case grpcserver.Debug: return grpcserver.NewDebugService(app.db, app.conState, app.host, app.hOracle), nil case grpcserver.GlobalState: return grpcserver.NewGlobalStateService(app.mesh, app.conState), nil case grpcserver.Mesh: - return grpcserver.NewMeshService(app.cachedDB, app.mesh, app.conState, app.clock, app.Config.LayersPerEpoch, app.Config.Genesis.GenesisID(), app.Config.LayerDuration, app.Config.LayerAvgSize, uint32(app.Config.TxsPerProposal)), nil + return grpcserver.NewMeshService( + app.cachedDB, + app.mesh, + app.conState, + app.clock, + app.Config.LayersPerEpoch, + app.Config.Genesis.GenesisID(), + app.Config.LayerDuration, + app.Config.LayerAvgSize, + uint32(app.Config.TxsPerProposal), + ), nil case grpcserver.Node: - return grpcserver.NewNodeService(app.host, app.mesh, app.clock, app.syncer, cmd.Version, cmd.Commit), nil + return grpcserver.NewNodeService( + app.host, + app.mesh, + app.clock, + app.syncer, + cmd.Version, + cmd.Commit, + ), nil case grpcserver.Admin: return grpcserver.NewAdminService(app.db, app.Config.DataDir(), app.host), nil case grpcserver.Smesher: - return grpcserver.NewSmesherService(app.postSetupMgr, app.atxBuilder, app.postSupervisor, app.Config.API.SmesherStreamInterval, 
app.Config.SMESHING.Opts), nil + return grpcserver.NewSmesherService( + app.postSetupMgr, + app.atxBuilder, + app.postSupervisor, + app.Config.API.SmesherStreamInterval, + app.Config.SMESHING.Opts, + ), nil case grpcserver.Post: return app.grpcPostService, nil case grpcserver.Transaction: - return grpcserver.NewTransactionService(app.db, app.host, app.mesh, app.conState, app.syncer, app.txHandler), nil + return grpcserver.NewTransactionService( + app.db, + app.host, + app.mesh, + app.conState, + app.syncer, + app.txHandler, + ), nil case grpcserver.Activation: - return grpcserver.NewActivationService(app.cachedDB, types.ATXID(app.Config.Genesis.GoldenATX())), nil + return grpcserver.NewActivationService( + app.cachedDB, + types.ATXID(app.Config.Genesis.GoldenATX()), + ), nil } return nil, fmt.Errorf("unknown service %s", svc) } -func unaryGrpcLogStart(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { +func unaryGrpcLogStart( + ctx context.Context, + req any, + _ *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, +) (any, error) { ctxzap.Info(ctx, "started unary call") return handler(ctx, req) } -func streamingGrpcLogStart(srv any, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func streamingGrpcLogStart( + srv any, + stream grpc.ServerStream, + _ *grpc.StreamServerInfo, + handler grpc.StreamHandler, +) error { ctxzap.Info(stream.Context(), "started streaming call") return handler(srv, stream) } func (app *App) newGrpc(logger log.Log, endpoint string) *grpcserver.Server { - return grpcserver.New(endpoint, logger, - grpc.ChainStreamInterceptor(grpctags.StreamServerInterceptor(), grpczap.StreamServerInterceptor(logger.Zap()), streamingGrpcLogStart), - grpc.ChainUnaryInterceptor(grpctags.UnaryServerInterceptor(), grpczap.UnaryServerInterceptor(logger.Zap()), unaryGrpcLogStart), + return grpcserver.New( + endpoint, + logger, + grpc.ChainStreamInterceptor( + grpctags.StreamServerInterceptor(), + grpczap.StreamServerInterceptor(logger.Zap()), + streamingGrpcLogStart, + ), + grpc.ChainUnaryInterceptor( + grpctags.UnaryServerInterceptor(), + grpczap.UnaryServerInterceptor(logger.Zap()), + unaryGrpcLogStart, + ), grpc.MaxSendMsgSize(app.Config.API.GrpcSendMsgSize), grpc.MaxRecvMsgSize(app.Config.API.GrpcRecvMsgSize), ) @@ -1165,7 +1394,10 @@ func (app *App) startAPIServices(ctx context.Context) error { if len(public) == 0 { return fmt.Errorf("can't start json server without public services") } - app.jsonAPIService = grpcserver.NewJSONHTTPServer(app.Config.API.JSONListener, logger.WithName("JSON")) + app.jsonAPIService = grpcserver.NewJSONHTTPServer( + app.Config.API.JSONListener, + logger.WithName("JSON"), + ) app.jsonAPIService.StartService(ctx, public...) 
} if app.grpcPublicService != nil { @@ -1359,9 +1591,18 @@ func (app *App) setupDBs(ctx context.Context, lg log.Log) error { } app.db = sqlDB if app.Config.CollectMetrics && app.Config.DatabaseSizeMeteringInterval != 0 { - app.dbMetrics = dbmetrics.NewDBMetricsCollector(ctx, sqlDB, app.addLogger(StateDbLogger, lg), app.Config.DatabaseSizeMeteringInterval) + app.dbMetrics = dbmetrics.NewDBMetricsCollector( + ctx, + sqlDB, + app.addLogger(StateDbLogger, lg), + app.Config.DatabaseSizeMeteringInterval, + ) } - app.cachedDB = datastore.NewCachedDB(sqlDB, app.addLogger(CachedDBLogger, lg), datastore.WithConfig(app.Config.Cache)) + app.cachedDB = datastore.NewCachedDB( + sqlDB, + app.addLogger(CachedDBLogger, lg), + datastore.WithConfig(app.Config.Cache), + ) return nil } @@ -1414,7 +1655,11 @@ func (app *App) startSynchronous(ctx context.Context) (err error) { ) if err := os.MkdirAll(app.Config.DataDir(), 0o700); err != nil { - return fmt.Errorf("data-dir %s not found or could not be created: %w", app.Config.DataDir(), err) + return fmt.Errorf( + "data-dir %s not found or could not be created: %w", + app.Config.DataDir(), + err, + ) } /* Setup monitoring */ diff --git a/proposals/handler_test.go b/proposals/handler_test.go index 172696848c..eac784da87 100644 --- a/proposals/handler_test.go +++ b/proposals/handler_test.go @@ -87,8 +87,6 @@ func fullMockSet(tb testing.TB) *mockSet { func createTestHandler(t *testing.T) *testHandler { types.SetLayersPerEpoch(layersPerEpoch) ms := fullMockSet(t) - edVerifier, err := signing.NewEdVerifier() - require.NoError(t, err) db := datastore.NewCachedDB(sql.InMemory(), logtest.New(t)) ms.md.EXPECT().GetBallot(gomock.Any()).AnyTimes().DoAndReturn(func(id types.BallotID) *tortoise.BallotData { ballot, err := ballots.Get(db, id) @@ -109,7 +107,7 @@ func createTestHandler(t *testing.T) *testHandler { return data }) return &testHandler{ - Handler: NewHandler(db, edVerifier, ms.mpub, ms.mf, ms.mbc, ms.mm, ms.md, ms.mvrf, ms.mclock, + Handler: NewHandler(db, signing.NewEdVerifier(), ms.mpub, ms.mf, ms.mbc, ms.mm, ms.md, ms.mvrf, ms.mclock, WithLogger(logtest.New(t)), WithConfig(Config{ LayerSize: layerAvgSize, diff --git a/proposals/util.go b/proposals/util.go index a5ca4afcf7..1d765c62cd 100644 --- a/proposals/util.go +++ b/proposals/util.go @@ -11,7 +11,10 @@ var ( GetNumEligibleSlots = util.GetNumEligibleSlots ) -func MustGetNumEligibleSlots(weight, minWeight, totalWeight uint64, committeeSize, layersPerEpoch uint32) uint32 { +func MustGetNumEligibleSlots( + weight, minWeight, totalWeight uint64, + committeeSize, layersPerEpoch uint32, +) uint32 { slots, err := GetNumEligibleSlots(weight, minWeight, totalWeight, committeeSize, layersPerEpoch) if err != nil { panic(err) @@ -31,7 +34,12 @@ type VrfMessage struct { } // MustSerializeVRFMessage serializes a message for generating/verifying a VRF signature. 
-func MustSerializeVRFMessage(beacon types.Beacon, epoch types.EpochID, nonce types.VRFPostIndex, counter uint32) []byte { +func MustSerializeVRFMessage( + beacon types.Beacon, + epoch types.EpochID, + nonce types.VRFPostIndex, + counter uint32, +) []byte { m := VrfMessage{ Type: types.EligibilityVoting, Beacon: beacon, diff --git a/prune/prune.go b/prune/prune.go index b5722be7c1..a36d3abe68 100644 --- a/prune/prune.go +++ b/prune/prune.go @@ -30,31 +30,41 @@ func Prune( case <-ctx.Done(): return case <-time.After(interval): - oldest := lc.CurrentLayer() - types.LayerID(safeDist) - t0 := time.Now() - if err := proposals.DeleteBefore(db, oldest); err != nil { - logger.Error("failed to delete proposals", - zap.Stringer("lid", oldest), - zap.Error(err), - ) - } - proposalLatency.Observe(time.Since(t0).Seconds()) - t1 := time.Now() - if err := certificates.DeleteCertBefore(db, oldest); err != nil { - logger.Error("failed to delete certificates", - zap.Stringer("lid", oldest), - zap.Error(err), - ) - } - certLatency.Observe(time.Since(t1).Seconds()) - t2 := time.Now() - if err := transactions.DeleteProposalTxsBefore(db, oldest); err != nil { - logger.Error("failed to delete proposal tx mapping", - zap.Stringer("lid", oldest), - zap.Error(err), - ) - } - propTxLatency.Observe(time.Since(t2).Seconds()) + prune(logger, db, lc, safeDist) } } } + +func prune( + logger *zap.Logger, + db sql.Executor, + lc layerClock, + safeDist uint32, +) { + oldest := lc.CurrentLayer() - types.LayerID(safeDist) + time.Sleep(100 * time.Millisecond) + t0 := time.Now() + if err := proposals.DeleteBefore(db, oldest); err != nil { + logger.Error("failed to delete proposals", + zap.Stringer("lid", oldest), + zap.Error(err), + ) + } + proposalLatency.Observe(time.Since(t0).Seconds()) + t1 := time.Now() + if err := certificates.DeleteCertBefore(db, oldest); err != nil { + logger.Error("failed to delete certificates", + zap.Stringer("lid", oldest), + zap.Error(err), + ) + } + certLatency.Observe(time.Since(t1).Seconds()) + t2 := time.Now() + if err := transactions.DeleteProposalTxsBefore(db, oldest); err != nil { + logger.Error("failed to delete proposal tx mapping", + zap.Stringer("lid", oldest), + zap.Error(err), + ) + } + propTxLatency.Observe(time.Since(t2).Seconds()) +} diff --git a/prune/prune_test.go b/prune/prune_test.go index 286725459e..8acb325b56 100644 --- a/prune/prune_test.go +++ b/prune/prune_test.go @@ -1,13 +1,10 @@ package prune import ( - "context" "testing" - "time" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" - "golang.org/x/sync/errgroup" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/log/logtest" @@ -22,15 +19,7 @@ func TestPrune(t *testing.T) { db := sql.InMemory() current := types.LayerID(10) mc := NewMocklayerClock(gomock.NewController(t)) - done := make(chan struct{}) - count := 0 - mc.EXPECT().CurrentLayer().DoAndReturn(func() types.LayerID { - if count == 0 { - close(done) - } - count++ - return current - }).AnyTimes() + mc.EXPECT().CurrentLayer().Return(current).AnyTimes() lyrProps := make([]*types.Proposal, 0, current) for lid := types.LayerID(0); lid < current; lid++ { blt := types.NewExistingBallot(types.RandomBallotID(), types.RandomEdSignature(), types.NodeID{1}, lid) @@ -51,45 +40,34 @@ func TestPrune(t *testing.T) { lyrProps = append(lyrProps, p) } confidenceDist := uint32(3) - ctx, cancel := context.WithCancel(context.Background()) - var eg errgroup.Group - eg.Go(func() error { - Prune(ctx, logtest.New(t).Zap(), db, mc, 
confidenceDist, time.Millisecond) - return nil - }) - require.Eventually(t, func() bool { - select { - case <-done: - oldest := current - types.LayerID(confidenceDist) - for lid := types.LayerID(0); lid < oldest; lid++ { - _, err := certificates.CertifiedBlock(db, lid) - require.ErrorIs(t, err, sql.ErrNotFound) - _, err = proposals.GetByLayer(db, lid) - require.ErrorIs(t, err, sql.ErrNotFound) - for _, tid := range lyrProps[lid].TxIDs { - exists, err := transactions.HasProposalTX(db, lyrProps[lid].ID(), tid) - require.NoError(t, err) - require.False(t, exists) - } - } - for lid := oldest; lid < current; lid++ { - got, err := certificates.CertifiedBlock(db, lid) - require.NoError(t, err) - require.NotEqual(t, types.EmptyBlockID, got) - pps, err := proposals.GetByLayer(db, lid) - require.NoError(t, err) - require.NotEmpty(t, pps) - for _, tid := range lyrProps[lid].TxIDs { - exists, err := transactions.HasProposalTX(db, lyrProps[lid].ID(), tid) - require.NoError(t, err) - require.True(t, exists) - } - } - return true - default: - return false + + // Act + prune(logtest.New(t).Zap(), db, mc, confidenceDist) + + // Verify + oldest := current - types.LayerID(confidenceDist) + for lid := types.LayerID(0); lid < oldest; lid++ { + _, err := certificates.CertifiedBlock(db, lid) + require.ErrorIs(t, err, sql.ErrNotFound) + _, err = proposals.GetByLayer(db, lid) + require.ErrorIs(t, err, sql.ErrNotFound) + for _, tid := range lyrProps[lid].TxIDs { + exists, err := transactions.HasProposalTX(db, lyrProps[lid].ID(), tid) + require.NoError(t, err) + require.False(t, exists) } - }, time.Second, 10*time.Millisecond) - cancel() - require.NoError(t, eg.Wait()) + } + for lid := oldest; lid < current; lid++ { + got, err := certificates.CertifiedBlock(db, lid) + require.NoError(t, err) + require.NotEqual(t, types.EmptyBlockID, got) + pps, err := proposals.GetByLayer(db, lid) + require.NoError(t, err) + require.NotEmpty(t, pps) + for _, tid := range lyrProps[lid].TxIDs { + exists, err := transactions.HasProposalTX(db, lyrProps[lid].ID(), tid) + require.NoError(t, err) + require.True(t, exists) + } + } } diff --git a/signing/verifier.go b/signing/verifier.go index 42c5a1a868..fb175d70ba 100644 --- a/signing/verifier.go +++ b/signing/verifier.go @@ -11,13 +11,12 @@ type edVerifierOption struct { } // VerifierOptionFunc to modify verifier. -type VerifierOptionFunc func(*edVerifierOption) error +type VerifierOptionFunc func(*edVerifierOption) // WithVerifierPrefix sets the prefix used by PubKeyVerifier. This usually is the Network ID. func WithVerifierPrefix(prefix []byte) VerifierOptionFunc { - return func(opts *edVerifierOption) error { + return func(opts *edVerifierOption) { opts.prefix = prefix - return nil } } @@ -26,17 +25,14 @@ type EdVerifier struct { prefix []byte } -func NewEdVerifier(opts ...VerifierOptionFunc) (*EdVerifier, error) { +func NewEdVerifier(opts ...VerifierOptionFunc) *EdVerifier { cfg := &edVerifierOption{} for _, opt := range opts { - if err := opt(cfg); err != nil { - return nil, err - } + opt(cfg) } - Verifier := &EdVerifier{ + return &EdVerifier{ prefix: cfg.prefix, } - return Verifier, nil } // Verify verifies that a signature matches public key and message. 
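Editor's note: the signing/verifier.go change above makes construction infallible: NewEdVerifier now returns *EdVerifier directly, and verifier options are plain setters that cannot fail. Below is a minimal sketch of the updated call-site pattern, assuming only the import paths and identifiers already present in this diff; verifyAtxSig is a hypothetical helper added purely for illustration, not part of the changeset.

package example

import (
	"github.com/spacemeshos/go-spacemesh/common/types"
	"github.com/spacemeshos/go-spacemesh/signing"
)

// verifyAtxSig shows the post-change pattern: there is no error to check on
// construction, and the prefix option (typically the genesis/network ID) is a
// plain setter, so the verifier can be built inline at the call site.
func verifyAtxSig(prefix []byte, nodeID types.NodeID, msg []byte, sig types.EdSignature) bool {
	verifier := signing.NewEdVerifier(signing.WithVerifierPrefix(prefix))
	return verifier.Verify(signing.ATX, nodeID, msg, sig)
}

Call sites that previously handled a construction error can simply drop the check, which is the pattern the updated tests below follow.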
diff --git a/signing/verifier_test.go b/signing/verifier_test.go index c024a4c1a1..b5cefff398 100644 --- a/signing/verifier_test.go +++ b/signing/verifier_test.go @@ -24,10 +24,7 @@ func TestEdVerifier_Verify(t *testing.T) { var sig types.EdSignature copy(sig[:], ed25519.Sign(priv, signed)) - ed, err := signing.NewEdVerifier() - require.NoError(t, err) - - ok := ed.Verify(signing.ATX, types.BytesToNodeID(pub), m, sig) + ok := signing.NewEdVerifier().Verify(signing.ATX, types.BytesToNodeID(pub), m, sig) require.Truef(t, ok, "failed to verify message %x with sig %x", m, sig) } @@ -36,8 +33,7 @@ func TestVerifier_WithPrefix(t *testing.T) { signer, err := signing.NewEdSigner(signing.WithPrefix([]byte("one"))) require.NoError(t, err) - verifier, err := signing.NewEdVerifier(signing.WithVerifierPrefix([]byte("one"))) - require.NoError(t, err) + verifier := signing.NewEdVerifier(signing.WithVerifierPrefix([]byte("one"))) msg := []byte("test") sig := signer.Sign(signing.ATX, msg) @@ -49,8 +45,7 @@ func TestVerifier_WithPrefix(t *testing.T) { signer, err := signing.NewEdSigner(signing.WithPrefix([]byte("one"))) require.NoError(t, err) - verifier, err := signing.NewEdVerifier(signing.WithVerifierPrefix([]byte("two"))) - require.NoError(t, err) + verifier := signing.NewEdVerifier(signing.WithVerifierPrefix([]byte("two"))) msg := []byte("test") sig := signer.Sign(signing.ATX, msg) @@ -62,8 +57,7 @@ func TestVerifier_WithPrefix(t *testing.T) { signer, err := signing.NewEdSigner(signing.WithPrefix([]byte("one"))) require.NoError(t, err) - verifier, err := signing.NewEdVerifier(signing.WithVerifierPrefix([]byte("one"))) - require.NoError(t, err) + verifier := signing.NewEdVerifier(signing.WithVerifierPrefix([]byte("one"))) msg := []byte("test") sig := signer.Sign(signing.ATX, msg) @@ -77,8 +71,7 @@ func Fuzz_EdVerifier(f *testing.F) { signer, err := signing.NewEdSigner(signing.WithPrefix(prefix)) require.NoError(t, err) - verifier, err := signing.NewEdVerifier(signing.WithVerifierPrefix(prefix)) - require.NoError(t, err) + verifier := signing.NewEdVerifier(signing.WithVerifierPrefix(prefix)) sig := signer.Sign(signing.ATX, msg) diff --git a/syncer/state_syncer_test.go b/syncer/state_syncer_test.go index 04edcbbd74..cc47dfc9a2 100644 --- a/syncer/state_syncer_test.go +++ b/syncer/state_syncer_test.go @@ -45,7 +45,9 @@ func TestProcessLayers_MultiLayers(t *testing.T) { peers := test.GeneratePeerIDs(3) ts.mDataFetcher.EXPECT().SelectBest(gomock.Any()).Return(peers).AnyTimes() - ts.mForkFinder.EXPECT().UpdateAgreement(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + ts.mForkFinder.EXPECT(). + UpdateAgreement(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ AnyTimes() adopted := make(map[types.LayerID]types.BlockID) for lid := gLid.Add(1); lid.Before(current); lid = lid.Add(1) { lid := lid @@ -63,7 +65,13 @@ func TestProcessLayers_MultiLayers(t *testing.T) { func(_ context.Context, got []types.BlockID) error { require.Equal(t, []types.BlockID{adopted[lid]}, got) for _, bid := range got { - require.NoError(t, blocks.Add(ts.cdb, types.NewExistingBlock(bid, types.InnerBlock{LayerIndex: lid}))) + require.NoError( + t, + blocks.Add( + ts.cdb, + types.NewExistingBlock(bid, types.InnerBlock{LayerIndex: lid}), + ), + ) } return nil }) @@ -75,8 +83,11 @@ func TestProcessLayers_MultiLayers(t *testing.T) { }) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lid) ts.mTortoise.EXPECT().Updates().DoAndReturn(func() []result.Layer { - return fixture.RLayers(fixture.RLayer(lid, fixture.RBlock(adopted[lid], fixture.Good()))) + return fixture.RLayers( + fixture.RLayer(lid, fixture.RBlock(adopted[lid], fixture.Good())), + ) }) + ts.mTortoise.EXPECT().OnApplied(lid, gomock.Any()) ts.mVm.EXPECT().Apply(gomock.Any(), gomock.Any(), gomock.Any()) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lid, gomock.Any(), nil, nil).DoAndReturn( func(_ context.Context, _ types.LayerID, got types.BlockID, _ []types.TransactionWithResult, _ []types.Transaction) error { @@ -156,8 +167,17 @@ func TestProcessLayers_OpinionsNotAdopted(t *testing.T) { // saves opinions if tc.localCert != types.EmptyBlockID { - require.NoError(t, blocks.Add(ts.cdb, types.NewExistingBlock(tc.localCert, types.InnerBlock{LayerIndex: lid}))) - require.NoError(t, certificates.Add(ts.cdb, lid, &types.Certificate{BlockID: tc.localCert})) + require.NoError( + t, + blocks.Add( + ts.cdb, + types.NewExistingBlock(tc.localCert, types.InnerBlock{LayerIndex: lid}), + ), + ) + require.NoError( + t, + certificates.Add(ts.cdb, lid, &types.Certificate{BlockID: tc.localCert}), + ) require.NoError(t, blocks.SetValid(ts.cdb, tc.localCert)) ts.mVm.EXPECT().Apply(vm.ApplyContext{Layer: lid}, gomock.Any(), gomock.Any()) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lid, tc.localCert, nil, nil) @@ -168,10 +188,14 @@ func TestProcessLayers_OpinionsNotAdopted(t *testing.T) { ts.mVm.EXPECT().GetStateRoot() } ts.mLyrPatrol.EXPECT().IsHareInCharge(lid).Return(false) - ts.mDataFetcher.EXPECT().PollLayerOpinions(gomock.Any(), lid, tc.localCert == types.EmptyBlockID, peers).Return(tc.opns, tc.certs, nil) + ts.mDataFetcher.EXPECT(). + PollLayerOpinions(gomock.Any(), lid, tc.localCert == types.EmptyBlockID, peers). + Return(tc.opns, tc.certs, nil) ts.mDataFetcher.EXPECT().RegisterPeerHashes(gomock.Any(), gomock.Any()).MaxTimes(1) if tc.localCert == types.EmptyBlockID && hasCert { - ts.mCertHdr.EXPECT().HandleSyncedCertificate(gomock.Any(), lid, tc.certs[0]).Return(tc.certErr) + ts.mCertHdr.EXPECT(). + HandleSyncedCertificate(gomock.Any(), lid, tc.certs[0]). 
+ Return(tc.certErr) ts.mDataFetcher.EXPECT().GetBlocks(gomock.Any(), gomock.Any()).DoAndReturn( func(_ context.Context, got []types.BlockID) error { require.Equal(t, []types.BlockID{*tc.opns[1].Certified}, got) @@ -181,9 +205,12 @@ func TestProcessLayers_OpinionsNotAdopted(t *testing.T) { ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lid) results := fixture.RLayers(fixture.RLayer(lid)) if tc.localCert != types.EmptyBlockID { - results = fixture.RLayers(fixture.RLayer(lid, fixture.RBlock(tc.localCert, fixture.Good()))) + results = fixture.RLayers( + fixture.RLayer(lid, fixture.RBlock(tc.localCert, fixture.Good())), + ) } ts.mTortoise.EXPECT().Updates().Return(results) + ts.mTortoise.EXPECT().OnApplied(lid, gomock.Any()) require.False(t, ts.syncer.stateSynced()) require.NoError(t, ts.syncer.processLayers(context.Background())) @@ -232,9 +259,12 @@ func TestProcessLayers_HareIsStillWorking(t *testing.T) { ts.mLyrPatrol.EXPECT().IsHareInCharge(lastSynced).Return(false) peers := test.GeneratePeerIDs(3) ts.mDataFetcher.EXPECT().SelectBest(gomock.Any()).Return(peers) - ts.mDataFetcher.EXPECT().PollLayerOpinions(gomock.Any(), lastSynced, true, peers).Return(nil, nil, nil) + ts.mDataFetcher.EXPECT(). + PollLayerOpinions(gomock.Any(), lastSynced, true, peers). + Return(nil, nil, nil) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lastSynced) ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.RLayer(lastSynced))) + ts.mTortoise.EXPECT().OnApplied(lastSynced, gomock.Any()) ts.mVm.EXPECT().Apply(gomock.Any(), nil, nil) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lastSynced, types.EmptyBlockID, nil, nil) ts.mVm.EXPECT().GetStateRoot() @@ -258,9 +288,12 @@ func TestProcessLayers_HareTakesTooLong(t *testing.T) { } peers := test.GeneratePeerIDs(3) ts.mDataFetcher.EXPECT().SelectBest(gomock.Any()).Return(peers) - ts.mDataFetcher.EXPECT().PollLayerOpinions(gomock.Any(), lid, gomock.Any(), peers).Return(nil, nil, nil) + ts.mDataFetcher.EXPECT(). + PollLayerOpinions(gomock.Any(), lid, gomock.Any(), peers). + Return(nil, nil, nil) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lid) ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.RLayer(lid))) + ts.mTortoise.EXPECT().OnApplied(lid, gomock.Any()) ts.mVm.EXPECT().Apply(vm.ApplyContext{Layer: lid}, nil, nil) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lid, types.EmptyBlockID, nil, nil) ts.mVm.EXPECT().GetStateRoot() @@ -278,9 +311,12 @@ func TestProcessLayers_OpinionsOptional(t *testing.T) { ts.mLyrPatrol.EXPECT().IsHareInCharge(lastSynced).Return(false) peers := test.GeneratePeerIDs(5) ts.mDataFetcher.EXPECT().SelectBest(gomock.Any()).Return(peers) - ts.mDataFetcher.EXPECT().PollLayerOpinions(gomock.Any(), lastSynced, true, peers).Return(nil, nil, errors.New("meh")) + ts.mDataFetcher.EXPECT(). + PollLayerOpinions(gomock.Any(), lastSynced, true, peers). 
+ Return(nil, nil, errors.New("meh")) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lastSynced) ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.RLayer(lastSynced))) + ts.mTortoise.EXPECT().OnApplied(lastSynced, gomock.Any()) require.False(t, ts.syncer.stateSynced()) ts.mVm.EXPECT().Apply(vm.ApplyContext{Layer: lastSynced}, nil, nil) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lastSynced, types.EmptyBlockID, nil, nil) @@ -299,11 +335,17 @@ func TestProcessLayers_MeshHashDiverged(t *testing.T) { ts.msh.SetZeroBlockLayer(context.Background(), lid) ts.mTortoise.EXPECT().OnHareOutput(lid, types.EmptyBlockID) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lid) - ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.ROpinion(lid, types.RandomHash()))) + ts.mTortoise.EXPECT(). + Updates(). + Return(fixture.RLayers(fixture.ROpinion(lid, types.RandomHash()))) + ts.mTortoise.EXPECT().OnApplied(lid, gomock.Any()) ts.mVm.EXPECT().Apply(gomock.Any(), nil, nil) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lid, types.EmptyBlockID, nil, nil) ts.mVm.EXPECT().GetStateRoot() - require.NoError(t, ts.msh.ProcessLayerPerHareOutput(context.Background(), lid, types.EmptyBlockID, false)) + require.NoError( + t, + ts.msh.ProcessLayerPerHareOutput(context.Background(), lid, types.EmptyBlockID, false), + ) } instate := ts.syncer.mesh.LatestLayerInState() require.Equal(t, current.Sub(1), instate) @@ -335,7 +377,9 @@ func TestProcessLayers_MeshHashDiverged(t *testing.T) { ts.mLyrPatrol.EXPECT().IsHareInCharge(instate).Return(false) peers := test.GeneratePeerIDs(3) ts.mDataFetcher.EXPECT().SelectBest(gomock.Any()).Return(peers) - ts.mDataFetcher.EXPECT().PollLayerOpinions(gomock.Any(), instate, false, peers).Return(opns, nil, nil) + ts.mDataFetcher.EXPECT(). + PollLayerOpinions(gomock.Any(), instate, false, peers). + Return(opns, nil, nil) ts.mForkFinder.EXPECT().UpdateAgreement(opns[1].Peer(), instate.Sub(1), prevHash, gomock.Any()) for i := 0; i < numPeers; i++ { if i == 1 { @@ -351,11 +395,21 @@ func TestProcessLayers_MeshHashDiverged(t *testing.T) { } } - ts.mDataFetcher.EXPECT().PeerEpochInfo(gomock.Any(), opns[0].Peer(), epoch-1).Return(eds[0], nil) - ts.mDataFetcher.EXPECT().PeerEpochInfo(gomock.Any(), opns[2].Peer(), epoch-1).Return(eds[2], nil) - ts.mDataFetcher.EXPECT().PeerEpochInfo(gomock.Any(), opns[3].Peer(), epoch-1).Return(eds[3], nil) - ts.mDataFetcher.EXPECT().PeerEpochInfo(gomock.Any(), opns[4].Peer(), epoch-1).Return(nil, errUnknown) - ts.mDataFetcher.EXPECT().PeerEpochInfo(gomock.Any(), opns[5].Peer(), epoch-1).Return(eds[5], nil) + ts.mDataFetcher.EXPECT(). + PeerEpochInfo(gomock.Any(), opns[0].Peer(), epoch-1). + Return(eds[0], nil) + ts.mDataFetcher.EXPECT(). + PeerEpochInfo(gomock.Any(), opns[2].Peer(), epoch-1). + Return(eds[2], nil) + ts.mDataFetcher.EXPECT(). + PeerEpochInfo(gomock.Any(), opns[3].Peer(), epoch-1). + Return(eds[3], nil) + ts.mDataFetcher.EXPECT(). + PeerEpochInfo(gomock.Any(), opns[4].Peer(), epoch-1). + Return(nil, errUnknown) + ts.mDataFetcher.EXPECT(). + PeerEpochInfo(gomock.Any(), opns[5].Peer(), epoch-1). 
+ Return(eds[5], nil) ts.mDataFetcher.EXPECT().GetAtxs(gomock.Any(), gomock.Any()).DoAndReturn( func(_ context.Context, got []types.ATXID) error { require.ElementsMatch(t, eds[0].AtxIDs, got) @@ -382,9 +436,15 @@ func TestProcessLayers_MeshHashDiverged(t *testing.T) { ) fork0 := types.LayerID(101) fork2 := types.LayerID(121) - ts.mForkFinder.EXPECT().FindFork(gomock.Any(), opns[0].Peer(), instate.Sub(1), opns[0].PrevAggHash).Return(fork0, nil) - ts.mForkFinder.EXPECT().FindFork(gomock.Any(), opns[2].Peer(), instate.Sub(1), opns[2].PrevAggHash).Return(fork2, nil) - ts.mForkFinder.EXPECT().FindFork(gomock.Any(), opns[5].Peer(), instate.Sub(1), opns[5].PrevAggHash).Return(types.LayerID(0), errUnknown) + ts.mForkFinder.EXPECT(). + FindFork(gomock.Any(), opns[0].Peer(), instate.Sub(1), opns[0].PrevAggHash). + Return(fork0, nil) + ts.mForkFinder.EXPECT(). + FindFork(gomock.Any(), opns[2].Peer(), instate.Sub(1), opns[2].PrevAggHash). + Return(fork2, nil) + ts.mForkFinder.EXPECT(). + FindFork(gomock.Any(), opns[5].Peer(), instate.Sub(1), opns[5].PrevAggHash). + Return(types.LayerID(0), errUnknown) for lid := fork0.Add(1); lid.Before(current); lid = lid.Add(1) { ts.mDataFetcher.EXPECT().PollLayerData(gomock.Any(), lid, opns[0].Peer()) } @@ -397,7 +457,10 @@ func TestProcessLayers_MeshHashDiverged(t *testing.T) { ts.mForkFinder.EXPECT().Purge(true) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), instate) - ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.ROpinion(instate.Sub(1), opns[2].PrevAggHash))) + ts.mTortoise.EXPECT(). + Updates(). + Return(fixture.RLayers(fixture.ROpinion(instate.Sub(1), opns[2].PrevAggHash))) + ts.mTortoise.EXPECT().OnApplied(instate.Sub(1), gomock.Any()) require.NoError(t, ts.syncer.processLayers(context.Background())) } @@ -410,11 +473,17 @@ func TestProcessLayers_NoHashResolutionForNewlySyncedNode(t *testing.T) { ts.msh.SetZeroBlockLayer(context.Background(), lid) ts.mTortoise.EXPECT().OnHareOutput(lid, types.EmptyBlockID) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lid) - ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.ROpinion(lid, types.RandomHash()))) + ts.mTortoise.EXPECT(). + Updates(). + Return(fixture.RLayers(fixture.ROpinion(lid, types.RandomHash()))) + ts.mTortoise.EXPECT().OnApplied(lid, gomock.Any()) ts.mVm.EXPECT().Apply(gomock.Any(), nil, nil) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lid, types.EmptyBlockID, nil, nil) ts.mVm.EXPECT().GetStateRoot() - require.NoError(t, ts.msh.ProcessLayerPerHareOutput(context.Background(), lid, types.EmptyBlockID, false)) + require.NoError( + t, + ts.msh.ProcessLayerPerHareOutput(context.Background(), lid, types.EmptyBlockID, false), + ) } instate := ts.syncer.mesh.LatestLayerInState() require.Equal(t, current.Sub(1), instate) @@ -435,9 +504,14 @@ func TestProcessLayers_NoHashResolutionForNewlySyncedNode(t *testing.T) { ts.mLyrPatrol.EXPECT().IsHareInCharge(lid) peers := test.GeneratePeerIDs(3) ts.mDataFetcher.EXPECT().SelectBest(gomock.Any()).Return(peers) - ts.mDataFetcher.EXPECT().PollLayerOpinions(gomock.Any(), lid, gomock.Any(), peers).Return(opns, nil, nil) + ts.mDataFetcher.EXPECT(). + PollLayerOpinions(gomock.Any(), lid, gomock.Any(), peers). + Return(opns, nil, nil) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lid) - ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.ROpinion(lid.Sub(1), opns[2].PrevAggHash))) + ts.mTortoise.EXPECT(). + Updates(). 
+ Return(fixture.RLayers(fixture.ROpinion(lid.Sub(1), opns[2].PrevAggHash))) + ts.mTortoise.EXPECT().OnApplied(lid.Sub(1), gomock.Any()) if lid != instate && lid != current { ts.mVm.EXPECT().Apply(gomock.Any(), nil, nil) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lid, types.EmptyBlockID, nil, nil) diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index a80f8b40de..9811e38689 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -104,11 +104,20 @@ func newTestSyncer(t *testing.T, interval time.Duration) *testSyncer { HareDelayLayers: 5, OutOfSyncThresholdLayers: outOfSyncThreshold, } - ts.syncer = NewSyncer(ts.cdb, ts.mTicker, ts.mBeacon, ts.msh, ts.mAtxCache, nil, ts.mLyrPatrol, ts.mCertHdr, + ts.syncer = NewSyncer( + ts.cdb, + ts.mTicker, + ts.mBeacon, + ts.msh, + ts.mAtxCache, + nil, + ts.mLyrPatrol, + ts.mCertHdr, WithConfig(cfg), WithLogger(lg), withDataFetcher(ts.mDataFetcher), - withForkFinder(ts.mForkFinder)) + withForkFinder(ts.mForkFinder), + ) return ts } @@ -138,7 +147,8 @@ func TestStartAndShutdown(t *testing.T) { ts.mForkFinder.EXPECT().Purge(false).AnyTimes() ts.mDataFetcher.EXPECT().SelectBest(gomock.Any()).Return(nil).AnyTimes() require.Eventually(t, func() bool { - return ts.syncer.ListenToATXGossip() && ts.syncer.ListenToGossip() && ts.syncer.IsSynced(ctx) + return ts.syncer.ListenToATXGossip() && ts.syncer.ListenToGossip() && + ts.syncer.IsSynced(ctx) }, time.Second, 10*time.Millisecond) cancel() @@ -189,10 +199,14 @@ func TestSynchronize_OnlyOneSynchronize(t *testing.T) { func advanceState(t testing.TB, ts *testSyncer, from, to types.LayerID) { t.Helper() for lid := from; lid <= to; lid++ { - require.NoError(t, certificates.Add(ts.cdb, lid, &types.Certificate{BlockID: types.EmptyBlockID})) + require.NoError( + t, + certificates.Add(ts.cdb, lid, &types.Certificate{BlockID: types.EmptyBlockID}), + ) ts.mLyrPatrol.EXPECT().IsHareInCharge(lid) ts.mDataFetcher.EXPECT().PollLayerOpinions(gomock.Any(), lid, false, gomock.Any()) ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lid) + ts.mTortoise.EXPECT().OnApplied(lid, gomock.Any()) ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.RLayer(lid))) ts.mVm.EXPECT().Apply(gomock.Any(), gomock.Any(), gomock.Any()) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lid, gomock.Any(), nil, nil) @@ -297,7 +311,9 @@ func TestSynchronize_FailedInitialATXsSync(t *testing.T) { for epoch := types.GetEffectiveGenesis().GetEpoch(); epoch < failedEpoch; epoch++ { ts.mDataFetcher.EXPECT().GetEpochATXs(gomock.Any(), epoch) } - ts.mDataFetcher.EXPECT().GetEpochATXs(gomock.Any(), failedEpoch).Return(errors.New("no ATXs. should fail sync")) + ts.mDataFetcher.EXPECT(). + GetEpochATXs(gomock.Any(), failedEpoch). + Return(errors.New("no ATXs. 
should fail sync")) var wg sync.WaitGroup wg.Add(1) @@ -540,7 +556,11 @@ func TestNetworkHasNoData(t *testing.T) { require.True(t, ts.syncer.IsSynced(context.Background())) } // the network hasn't received any data - require.Greater(t, int(ts.syncer.ticker.CurrentLayer()-ts.msh.LatestLayer()), outOfSyncThreshold) + require.Greater( + t, + int(ts.syncer.ticker.CurrentLayer()-ts.msh.LatestLayer()), + outOfSyncThreshold, + ) } // test the case where the node was originally synced, and somehow gets out of sync, but @@ -607,11 +627,15 @@ func TestSync_AlsoSyncProcessedLayer(t *testing.T) { // simulate hare advancing the mesh forward ts.mTortoise.EXPECT().TallyVotes(gomock.Any(), lyr) ts.mTortoise.EXPECT().Updates().Return(fixture.RLayers(fixture.RLayer(lyr))) + ts.mTortoise.EXPECT().OnApplied(lyr, gomock.Any()) ts.mVm.EXPECT().Apply(gomock.Any(), nil, nil) ts.mConState.EXPECT().UpdateCache(gomock.Any(), lyr, types.EmptyBlockID, nil, nil) ts.mVm.EXPECT().GetStateRoot() ts.mTortoise.EXPECT().OnHareOutput(lyr, types.EmptyBlockID) - require.NoError(t, ts.msh.ProcessLayerPerHareOutput(context.Background(), lyr, types.EmptyBlockID, false)) + require.NoError( + t, + ts.msh.ProcessLayerPerHareOutput(context.Background(), lyr, types.EmptyBlockID, false), + ) require.Equal(t, lyr, ts.msh.ProcessedLayer()) // no data sync should happen @@ -658,11 +682,20 @@ func TestSynchronize_RecoverFromCheckpoint(t *testing.T) { // recover from a checkpoint types.SetEffectiveGenesis(current.Uint32()) ts.mTicker.advanceToLayer(current) - ts.syncer = NewSyncer(ts.cdb, ts.mTicker, ts.mBeacon, ts.msh, nil, nil, ts.mLyrPatrol, ts.mCertHdr, + ts.syncer = NewSyncer( + ts.cdb, + ts.mTicker, + ts.mBeacon, + ts.msh, + nil, + nil, + ts.mLyrPatrol, + ts.mCertHdr, WithConfig(ts.syncer.cfg), WithLogger(ts.syncer.logger), withDataFetcher(ts.mDataFetcher), - withForkFinder(ts.mForkFinder)) + withForkFinder(ts.mForkFinder), + ) // should not sync any atxs before current epoch ts.mDataFetcher.EXPECT().GetEpochATXs(gomock.Any(), current.GetEpoch()) ts.mDataFetcher.EXPECT().PollMaliciousProofs(gomock.Any()) diff --git a/system/mocks/tortoise.go b/system/mocks/tortoise.go index e03c2e5c60..8c96a6a2f5 100644 --- a/system/mocks/tortoise.go +++ b/system/mocks/tortoise.go @@ -78,6 +78,44 @@ func (c *TortoiseLatestCompleteCall) DoAndReturn(f func() types.LayerID) *Tortoi return c } +// OnApplied mocks base method. +func (m *MockTortoise) OnApplied(arg0 types.LayerID, arg1 types.Hash32) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnApplied", arg0, arg1) + ret0, _ := ret[0].(bool) + return ret0 +} + +// OnApplied indicates an expected call of OnApplied. 
+func (mr *MockTortoiseMockRecorder) OnApplied(arg0, arg1 any) *TortoiseOnAppliedCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnApplied", reflect.TypeOf((*MockTortoise)(nil).OnApplied), arg0, arg1) + return &TortoiseOnAppliedCall{Call: call} +} + +// TortoiseOnAppliedCall wrap *gomock.Call +type TortoiseOnAppliedCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *TortoiseOnAppliedCall) Return(arg0 bool) *TortoiseOnAppliedCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *TortoiseOnAppliedCall) Do(f func(types.LayerID, types.Hash32) bool) *TortoiseOnAppliedCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *TortoiseOnAppliedCall) DoAndReturn(f func(types.LayerID, types.Hash32) bool) *TortoiseOnAppliedCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // OnAtx mocks base method. func (m *MockTortoise) OnAtx(arg0 *types.AtxTortoiseData) { m.ctrl.T.Helper() @@ -258,45 +296,6 @@ func (c *TortoiseOnWeakCoinCall) DoAndReturn(f func(types.LayerID, bool)) *Torto return c } -// Results mocks base method. -func (m *MockTortoise) Results(from, to types.LayerID) ([]result.Layer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Results", from, to) - ret0, _ := ret[0].([]result.Layer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Results indicates an expected call of Results. -func (mr *MockTortoiseMockRecorder) Results(from, to any) *TortoiseResultsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Results", reflect.TypeOf((*MockTortoise)(nil).Results), from, to) - return &TortoiseResultsCall{Call: call} -} - -// TortoiseResultsCall wrap *gomock.Call -type TortoiseResultsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *TortoiseResultsCall) Return(arg0 []result.Layer, arg1 error) *TortoiseResultsCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *TortoiseResultsCall) Do(f func(types.LayerID, types.LayerID) ([]result.Layer, error)) *TortoiseResultsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *TortoiseResultsCall) DoAndReturn(f func(types.LayerID, types.LayerID) ([]result.Layer, error)) *TortoiseResultsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - // TallyVotes mocks base method. 
func (m *MockTortoise) TallyVotes(arg0 context.Context, arg1 types.LayerID) { m.ctrl.T.Helper() diff --git a/system/tortoise.go b/system/tortoise.go index 0e7b34b14f..bb36896674 100644 --- a/system/tortoise.go +++ b/system/tortoise.go @@ -17,7 +17,7 @@ type Tortoise interface { TallyVotes(context.Context, types.LayerID) LatestComplete() types.LayerID Updates() []result.Layer - Results(from, to types.LayerID) ([]result.Layer, error) + OnApplied(types.LayerID, types.Hash32) bool OnMalfeasance(types.NodeID) OnAtx(*types.AtxTortoiseData) } diff --git a/systest/tests/transactions_test.go b/systest/tests/transactions_test.go index c305bed039..f94f38e811 100644 --- a/systest/tests/transactions_test.go +++ b/systest/tests/transactions_test.go @@ -13,7 +13,12 @@ import ( "github.com/spacemeshos/go-spacemesh/systest/testcontext" ) -func testTransactions(t *testing.T, tctx *testcontext.Context, cl *cluster.Cluster, sendFor uint32) { +func testTransactions( + t *testing.T, + tctx *testcontext.Context, + cl *cluster.Cluster, + sendFor uint32, +) { var ( // start sending transactions after two layers or after genesis first = max(currentLayer(tctx, t, cl.Client(0))+2, 8) @@ -32,36 +37,47 @@ func testTransactions(t *testing.T, tctx *testcontext.Context, cl *cluster.Clust ) receiver := types.GenerateAddress([]byte{11, 1, 1}) state := pb.NewGlobalStateServiceClient(cl.Client(0)) - response, err := state.Account(tctx, &pb.AccountRequest{AccountId: &pb.AccountId{Address: receiver.String()}}) + response, err := state.Account( + tctx, + &pb.AccountRequest{AccountId: &pb.AccountId{Address: receiver.String()}}, + ) require.NoError(t, err) before := response.AccountWrapper.StateCurrent.Balance eg, ctx := errgroup.WithContext(tctx) - require.NoError(t, sendTransactions(ctx, eg, tctx.Log, cl, first, stopSending, receiver, batch, amount)) + require.NoError( + t, + sendTransactions(ctx, eg, tctx.Log, cl, first, stopSending, receiver, batch, amount), + ) txs := make([][]*pb.Transaction, cl.Total()) for i := 0; i < cl.Total(); i++ { i := i client := cl.Client(i) - watchTransactionResults(tctx.Context, eg, client, func(rst *pb.TransactionResult) (bool, error) { - txs[i] = append(txs[i], rst.Tx) - count := len(txs[i]) - tctx.Log.Debugw("received transaction client", - "layer", rst.Layer, - "client", client.Name, - "tx", "0x"+hex.EncodeToString(rst.Tx.Id), - "count", count, - ) - return len(txs[i]) < expectedCount, nil - }) + watchTransactionResults( + tctx.Context, + eg, + client, + func(rst *pb.TransactionResult) (bool, error) { + txs[i] = append(txs[i], rst.Tx) + count := len(txs[i]) + tctx.Log.Debugw("received transaction client", + "layer", rst.Layer, + "client", client.Name, + "tx", "0x"+hex.EncodeToString(rst.Tx.Id), + "count", count, + ) + return len(txs[i]) < expectedCount, nil + }, + ) } require.NoError(t, eg.Wait()) reference := txs[0] - for _, tested := range txs[1:] { + for i, tested := range txs[1:] { require.Len(t, tested, len(reference)) - for i := range reference { - require.Equal(t, reference[i], tested[i]) + for j := range reference { + require.Equal(t, reference[j], tested[j], "%s", cl.Client(i+1).Name) } } @@ -69,7 +85,10 @@ func testTransactions(t *testing.T, tctx *testcontext.Context, cl *cluster.Clust for i := 0; i < cl.Total(); i++ { client := cl.Client(i) state := pb.NewGlobalStateServiceClient(client) - response, err := state.Account(tctx, &pb.AccountRequest{AccountId: &pb.AccountId{Address: receiver.String()}}) + response, err := state.Account( + tctx, + &pb.AccountRequest{AccountId: 
&pb.AccountId{Address: receiver.String()}},
+			)
 			require.NoError(t, err)
 			after := response.AccountWrapper.StateCurrent.Balance
 			tctx.Log.Debugw("receiver state",
diff --git a/tortoise/algorithm.go b/tortoise/algorithm.go
index fe486e5f72..c45c44ac43 100644
--- a/tortoise/algorithm.go
+++ b/tortoise/algorithm.go
@@ -219,7 +219,10 @@ func EncodeVotesWithCurrent(current types.LayerID) EncodeVotesOpts {
 }
 
 // EncodeVotes chooses a base ballot and creates a differences list. needs the hare results for latest layers.
-func (t *Tortoise) EncodeVotes(ctx context.Context, opts ...EncodeVotesOpts) (*types.Opinion, error) {
+func (t *Tortoise) EncodeVotes(
+	ctx context.Context,
+	opts ...EncodeVotesOpts,
+) (*types.Opinion, error) {
 	start := time.Now()
 	t.mu.Lock()
 	defer t.mu.Unlock()
@@ -405,7 +408,11 @@ func (t *Tortoise) decodeBallot(ballot *types.BallotTortoiseData) (*DecodedBallo
 		errorsCounter.Inc()
 		return nil, fmt.Errorf(
 			"computed opinion hash %s doesn't match signed %s for ballot %d / %s",
-			info.opinion().ShortString(), ballot.Opinion.Hash.ShortString(), ballot.Layer, ballot.ID,
+			info.opinion().
+				ShortString(),
+			ballot.Opinion.Hash.ShortString(),
+			ballot.Layer,
+			ballot.ID,
 		)
 	}
 	return &DecodedBallot{BallotTortoiseData: ballot, info: info, minHint: min}, nil
@@ -467,6 +474,29 @@ func (t *Tortoise) GetMissingActiveSet(epoch types.EpochID, atxs []types.ATXID)
 	return missing
 }
 
+// OnApplied compares the stored opinion with the computed opinion and, when they
+// match, advances the pending layer to the layer after the matching one.
+// It is called after a layer is applied to state and in the recovery-from-disk codepath.
+func (t *Tortoise) OnApplied(lid types.LayerID, opinion types.Hash32) bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	layer := t.trtl.layer(lid)
+	t.logger.Debug("on applied",
+		zap.Uint32("lid", lid.Uint32()),
+		log.ZShortStringer("computed", layer.opinion),
+		log.ZShortStringer("stored", opinion),
+	)
+	rst := false
+	if layer.opinion == opinion {
+		t.trtl.pending = min(lid+1, t.trtl.processed)
+		rst = true
+	}
+	if t.tracer != nil {
+		t.tracer.On(&AppliedTrace{Layer: lid, Opinion: opinion, Result: rst})
+	}
+	return rst
+}
+
 // Updates returns list of layers where opinion was changed since previous call.
 func (t *Tortoise) Updates() []result.Layer {
 	t.mu.Lock()
@@ -482,34 +512,15 @@ func (t *Tortoise) Updates() []result.Layer {
 			zap.Error(err),
 		)
 	}
-	t.trtl.pending = 0
 	if t.tracer != nil {
-		t.tracer.On(&UpdatesTrace{ResultsTrace{
+		t.tracer.On(&UpdatesTrace{
 			From: t.trtl.pending, To: t.trtl.processed,
 			Results: rst,
-		}})
+		})
 	}
 	return rst
 }
 
-// Results returns layers that crossed threshold in range [from, to].
-func (t *Tortoise) Results(from, to types.LayerID) ([]result.Layer, error) {
-	t.mu.Lock()
-	defer t.mu.Unlock()
-	rst, err := t.results(from, to)
-	if t.tracer != nil {
-		ev := &ResultsTrace{
-			From: from, To: to,
-			Results: rst,
-		}
-		if err != nil {
-			ev.Error = err.Error()
-		}
-		t.tracer.On(ev)
-	}
-	return rst, err
-}
-
 func (t *Tortoise) results(from, to types.LayerID) ([]result.Layer, error) {
 	if from <= t.trtl.evicted {
 		return nil, fmt.Errorf("requested layer %d is before evicted %d", from, t.trtl.evicted)
@@ -564,19 +575,3 @@ func (t *Tortoise) Mode() Mode {
 	}
 	return Verifying
 }
-
-// resetPending compares stored opinion with computed opinion and sets
-// pending layer to the layer above equal layer.
-// this method is meant to be used only in recovery from disk codepath.
-func (t *Tortoise) resetPending(lid types.LayerID, opinion types.Hash32) bool {
-	t.logger.Debug("reset pending",
-		zap.Uint32("lid", lid.Uint32()),
-		log.ZShortStringer("computed", t.trtl.layer(lid).opinion),
-		log.ZShortStringer("stored", opinion),
-	)
-	if t.trtl.layer(lid).opinion == opinion {
-		t.trtl.pending = lid + 1
-		return true
-	}
-	return false
-}
diff --git a/tortoise/fixture_test.go b/tortoise/fixture_test.go
index 9a485e2c23..5593ef75f3 100644
--- a/tortoise/fixture_test.go
+++ b/tortoise/fixture_test.go
@@ -574,6 +574,7 @@ func (u *updateActions) execute(trt *Tortoise) {
 	u.tb.Helper()
 	updates := trt.Updates()
 	for i := range updates {
+		trt.OnApplied(updates[i].Layer, updates[i].Opinion)
 		// TODO(dshulyak) don't know yet how to implement
 		updates[i].Opinion = types.Hash32{}
 	}
diff --git a/tortoise/recover.go b/tortoise/recover.go
index 64b50339af..3e7f8b6777 100644
--- a/tortoise/recover.go
+++ b/tortoise/recover.go
@@ -18,7 +18,12 @@ import (
 )
 
 // Recover tortoise state from database.
-func Recover(ctx context.Context, db *datastore.CachedDB, current types.LayerID, opts ...Opt) (*Tortoise, error) {
+func Recover(
+	ctx context.Context,
+	db *datastore.CachedDB,
+	current types.LayerID,
+	opts ...Opt,
+) (*Tortoise, error) {
 	trtl, err := New(opts...)
 	if err != nil {
 		return nil, err
@@ -36,7 +41,9 @@ func Recover(ctx context.Context, db *datastore.CachedDB, current types.LayerID,
 	start := types.GetEffectiveGenesis() + 1
 	if applied > types.LayerID(trtl.cfg.WindowSize) {
 		window := applied - types.LayerID(trtl.cfg.WindowSize)
-		window = window.GetEpoch().FirstLayer() // windback to the start of the epoch to load ref ballots
+		window = window.GetEpoch().
+			FirstLayer()
+		// windback to the start of the epoch to load ref ballots
 		if window > start {
 			prev, err1 := layers.GetAggregatedHash(db, window-1)
 			opinion, err2 := layers.GetAggregatedHash(db, window)
@@ -99,7 +106,7 @@ func Recover(ctx context.Context, db *datastore.CachedDB, current types.LayerID,
 	for prev := last - 1; prev >= start; prev-- {
 		opinion, err := layers.GetAggregatedHash(db, prev)
 		if err == nil && opinion != types.EmptyLayerHash {
-			if trtl.resetPending(prev, opinion) {
+			if trtl.OnApplied(prev, opinion) {
 				break
 			}
 		}
@@ -126,7 +133,13 @@ func recoverEpoch(epoch types.EpochID, trtl *Tortoise, db *datastore.CachedDB) e
 
 type ballotFunc func(*types.BallotTortoiseData)
 
-func RecoverLayer(ctx context.Context, trtl *Tortoise, db *datastore.CachedDB, lid types.LayerID, onBallot ballotFunc) error {
+func RecoverLayer(
+	ctx context.Context,
+	trtl *Tortoise,
+	db *datastore.CachedDB,
+	lid types.LayerID,
+	onBallot ballotFunc,
+) error {
 	if lid.FirstInEpoch() {
 		if err := recoverEpoch(lid.GetEpoch(), trtl, db); err != nil {
 			return err
diff --git a/tortoise/tortoise_test.go b/tortoise/tortoise_test.go
index 85cfda3f9d..bc2b86629b 100644
--- a/tortoise/tortoise_test.go
+++ b/tortoise/tortoise_test.go
@@ -59,7 +59,12 @@ func TestLayerPatterns(t *testing.T) {
 		ctx := context.Background()
 		cfg := defaultTestConfig()
 		cfg.LayerSize = size
-		tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t)))
+		tortoise := tortoiseFromSimState(
+			t,
+			s.GetState(0),
+			WithConfig(cfg),
+			WithLogger(logtest.New(t)),
+		)
 
 		var (
 			last types.LayerID
@@ -84,7 +89,12 @@ func TestLayerPatterns(t *testing.T) {
 		cfg.LayerSize = size
 		cfg.Hdist = 4
 		cfg.Zdist = cfg.Hdist
-		tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t)))
+		tortoise := tortoiseFromSimState(
+			t,
+			s.GetState(0),
WithConfig(cfg), + WithLogger(logtest.New(t)), + ) var ( last types.LayerID @@ -115,7 +125,12 @@ func TestLayerPatterns(t *testing.T) { ctx := context.Background() cfg := defaultTestConfig() cfg.LayerSize = size - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) var ( last types.LayerID @@ -194,7 +209,11 @@ func TestAbstainLateBlock(t *testing.T) { s.Next(sim.WithNumBlocks(1)) s.Next(sim.WithNumBlocks(0)) - last := s.Next(sim.WithNumBlocks(1), sim.WithoutHareOutput(), sim.WithVoteGenerator(abstainVoting)) + last := s.Next( + sim.WithNumBlocks(1), + sim.WithoutHareOutput(), + sim.WithVoteGenerator(abstainVoting), + ) tortoise.TallyVotes(ctx, last) events := tortoise.Updates() @@ -203,13 +222,14 @@ func TestAbstainLateBlock(t *testing.T) { for _, v := range events[1].Blocks { require.True(t, v.Valid) } + require.True(t, tortoise.OnApplied(events[3].Layer, events[3].Opinion)) block := types.BlockHeader{ID: types.BlockID{1}, LayerID: last.Sub(1)} tortoise.OnBlock(block) tortoise.TallyVotes(ctx, last) events = tortoise.Updates() - require.Empty(t, events) + require.Len(t, events, 1) } func TestEncodeAbstainVotesForZdist(t *testing.T) { @@ -395,7 +415,12 @@ func TestCalculateOpinionWithThreshold(t *testing.T) { func TestComputeExpectedWeight(t *testing.T) { genesis := types.GetEffectiveGenesis() - require.EqualValues(t, 4, types.GetLayersPerEpoch(), "expecting layers per epoch to be 4. adjust test if it will change") + require.EqualValues( + t, + 4, + types.GetLayersPerEpoch(), + "expecting layers per epoch to be 4. adjust test if it will change", + ) for _, tc := range []struct { desc string target, last types.LayerID @@ -550,7 +575,12 @@ func TestLongTermination(t *testing.T) { cfg.LayerSize = size cfg.Zdist = zdist cfg.Hdist = hdist - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) var ( last types.LayerID @@ -593,7 +623,12 @@ func TestLongTermination(t *testing.T) { cfg.LayerSize = size cfg.Zdist = zdist cfg.Hdist = hdist - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) var ( last types.LayerID @@ -645,7 +680,12 @@ func TestLongTermination(t *testing.T) { cfg.LayerSize = size cfg.Zdist = zdist cfg.Hdist = hdist - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) var ( last types.LayerID @@ -814,7 +854,12 @@ func TestVotesDecodingWithoutBaseBallot(t *testing.T) { s := sim.New() s.Setup() cfg := defaultTestConfig() - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) var verified types.LayerID for _, last := range sim.GenLayers(s, sim.WithSequence(2, @@ -833,7 +878,12 @@ func TestVotesDecodingWithoutBaseBallot(t *testing.T) { s.Setup() cfg := defaultTestConfig() cfg.LayerSize = size - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, 
+ s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) var last, verified types.LayerID for _, last = range sim.GenLayers(s, sim.WithSequence(2, sim.WithVoteGenerator(func(rng *rand.Rand, layers []*types.Layer, i int) sim.Voting { @@ -854,12 +904,22 @@ func TestDecodeVotes(t *testing.T) { s := sim.New() s.Setup() cfg := defaultTestConfig() - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) last := s.Next() tortoise.TallyVotes(context.TODO(), last) ballots, err := ballots.Layer(s.GetState(0).DB, last) require.NoError(t, err) - ballot := types.NewExistingBallot(types.BallotID{3, 3, 3}, types.EmptyEdSignature, types.EmptyNodeID, ballots[0].Layer) + ballot := types.NewExistingBallot( + types.BallotID{3, 3, 3}, + types.EmptyEdSignature, + types.EmptyNodeID, + ballots[0].Layer, + ) ballot.InnerBallot = ballots[0].InnerBallot hasher := opinionhash.New() supported := types.BlockID{2, 2, 2} @@ -1023,7 +1083,12 @@ func TestBaseBallotGenesis(t *testing.T) { require.Empty(t, votes.Base) } -func ensureBaseAndExceptionsFromLayer(tb testing.TB, lid types.LayerID, votes *types.Opinion, cdb *datastore.CachedDB) { +func ensureBaseAndExceptionsFromLayer( + tb testing.TB, + lid types.LayerID, + votes *types.Opinion, + cdb *datastore.CachedDB, +) { tb.Helper() blts, err := ballots.Get(cdb, votes.Base) @@ -1033,7 +1098,15 @@ func ensureBaseAndExceptionsFromLayer(tb testing.TB, lid types.LayerID, votes *t for _, vote := range votes.Support { block, err := blocks.Get(cdb, vote.ID) require.NoError(tb, err) - require.Equal(tb, lid, block.LayerIndex, "block=%s block layer=%s last=%s", block.ID(), block.LayerIndex, lid) + require.Equal( + tb, + lid, + block.LayerIndex, + "block=%s block layer=%s last=%s", + block.ID(), + block.LayerIndex, + lid, + ) } } @@ -1061,7 +1134,8 @@ func TestBaseBallotEvictedBlock(t *testing.T) { sim.WithSequence(2), ) { last = lid - tortoise.Updates() // drain pending + updates := tortoise.Updates() // drain pending + tortoise.OnApplied(updates[0].Layer, updates[0].Opinion) tortoise.TallyVotes(ctx, lid) verified = tortoise.LatestComplete() } @@ -1133,7 +1207,12 @@ func TestBaseBallotPrioritization(t *testing.T) { cfg := defaultTestConfig() cfg.LayerSize = size cfg.WindowSize = tc.window - tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise := tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) for _, lid := range sim.GenLayers(s, tc.seqs...) 
{ tortoise.TallyVotes(ctx, lid) @@ -1181,7 +1260,12 @@ func splitVoting(n int) sim.VotesGenerator { } } -func ensureBallotLayerWithin(tb testing.TB, cdb *datastore.CachedDB, ballotID types.BallotID, from, to types.LayerID) { +func ensureBallotLayerWithin( + tb testing.TB, + cdb *datastore.CachedDB, + ballotID types.BallotID, + from, to types.LayerID, +) { tb.Helper() ballot, err := ballots.Get(cdb, ballotID) @@ -1191,7 +1275,12 @@ func ensureBallotLayerWithin(tb testing.TB, cdb *datastore.CachedDB, ballotID ty ) } -func ensureBlockLayerWithin(tb testing.TB, cdb *datastore.CachedDB, bid types.BlockID, from, to types.LayerID) { +func ensureBlockLayerWithin( + tb testing.TB, + cdb *datastore.CachedDB, + bid types.BlockID, + from, to types.LayerID, +) { tb.Helper() block, err := blocks.Get(cdb, bid) @@ -1221,9 +1310,14 @@ func TestWeakCoinVoting(t *testing.T) { cfg.Zdist = hdist var ( - tortoise = tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) - last types.LayerID - genesis = types.GetEffectiveGenesis() + tortoise = tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) + last types.LayerID + genesis = types.GetEffectiveGenesis() ) for _, lid := range sim.GenLayers(s, @@ -1269,7 +1363,12 @@ func TestVoteAgainstSupportedByBaseBallot(t *testing.T) { cfg.Zdist = 1 var ( - tortoise = tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t))) + tortoise = tortoiseFromSimState( + t, + s.GetState(0), + WithConfig(cfg), + WithLogger(logtest.New(t)), + ) last, verified types.LayerID genesis = types.GetEffectiveGenesis() ) @@ -1645,12 +1744,15 @@ func TestNetworkRecoversFromFullPartition(t *testing.T) { // make enough progress to cross global threshold with new votes for i := 0; i < int(types.GetLayersPerEpoch())*4; i++ { - last = s1.Next(sim.WithNumBlocks(1), sim.WithVoteGenerator(func(rng *rand.Rand, layers []*types.Layer, i int) sim.Voting { - if i < size/2 { - return tortoiseVoting(tortoise1)(rng, layers, i) - } - return tortoiseVoting(tortoise2)(rng, layers, i) - })) + last = s1.Next( + sim.WithNumBlocks(1), + sim.WithVoteGenerator(func(rng *rand.Rand, layers []*types.Layer, i int) sim.Voting { + if i < size/2 { + return tortoiseVoting(tortoise1)(rng, layers, i) + } + return tortoiseVoting(tortoise2)(rng, layers, i) + }), + ) tortoise1.TallyVotes(ctx, last) tortoise2.TallyVotes(ctx, last) processBlockUpdates(t, tortoise1, s1.GetState(0).DB) @@ -1927,7 +2029,8 @@ func TestStateManagement(t *testing.T) { "should not be evicted unless pending is drained", ) - tortoise.Updates() + updates := tortoise.Updates() + tortoise.OnApplied(updates[len(updates)-1].Layer, updates[len(updates)-1].Opinion) tortoise.TallyVotes(ctx, last) evicted := tortoise.trtl.evicted @@ -1938,7 +2041,14 @@ func TestStateManagement(t *testing.T) { for _, ballot := range tortoise.trtl.ballots[lid] { require.Contains(t, tortoise.trtl.ballotRefs, ballot.id, "layer %s", lid) for current := ballot.votes.tail; current != nil; current = current.prev { - require.True(t, !current.lid.Before(evicted), "no votes for layers before evicted (evicted %s, in state %s, ballot %s)", evicted, current.lid, ballot.layer) + require.True( + t, + !current.lid.Before(evicted), + "no votes for layers before evicted (evicted %s, in state %s, ballot %s)", + evicted, + current.lid, + ballot.layer, + ) if current.prev == nil { require.Equal(t, current.lid, evicted, "last vote is exactly evicted") } @@ -1996,7 +2106,10 @@ func TestFutureHeight(t *testing.T) { 
tortoise := tortoiseFromSimState(t, s.GetState(0), WithConfig(cfg), WithLogger(logtest.New(t)), ) - tortoise.TallyVotes(context.Background(), s.Next(sim.WithNumBlocks(1), sim.WithBlockTickHeights(slow+1))) + tortoise.TallyVotes( + context.Background(), + s.Next(sim.WithNumBlocks(1), sim.WithBlockTickHeights(slow+1)), + ) tortoise.TallyVotes(context.Background(), s.Next(sim.WithEmptyHareOutput(), sim.WithNumBlocks(0))) // 3 is handpicked so that threshold will be crossed if bug wasn't fixed @@ -2027,7 +2140,11 @@ func TestFutureHeight(t *testing.T) { ) var last types.LayerID for i := 0; i < int(cfg.Hdist); i++ { - last = s.Next(sim.WithNumBlocks(1), sim.WithBlockTickHeights(slow+1), sim.WithVoteGenerator(sim.ConsistentVoting)) + last = s.Next( + sim.WithNumBlocks(1), + sim.WithBlockTickHeights(slow+1), + sim.WithVoteGenerator(sim.ConsistentVoting), + ) tortoise.TallyVotes(context.Background(), last) } require.Equal(t, last.Sub(2), tortoise.LatestComplete()) @@ -2264,6 +2381,7 @@ func TestSwitchMode(t *testing.T) { require.False(t, v.Valid) require.True(t, v.Hare) } + tortoise.OnApplied(events[len(events)-1].Layer, events[len(events)-1].Opinion) templates, err := ballots.Layer(s.GetState(0).DB, nohare.Add(1)) require.NoError(t, err) @@ -2272,13 +2390,22 @@ func TestSwitchMode(t *testing.T) { template.Votes.Support = nil // add an atx to increase optimistic threshold in verifying tortoise to trigger a switch - header := &types.ActivationTxHeader{ID: types.ATXID{1}, EffectiveNumUnits: 1, TickCount: 200} + header := &types.ActivationTxHeader{ + ID: types.ATXID{1}, + EffectiveNumUnits: 1, + TickCount: 200, + } header.PublishEpoch = types.EpochID(1) tortoise.OnAtx(header.ToData()) // feed ballots that vote against previously validated layer // without the fix they would be ignored for i := 1; i <= 16; i++ { - ballot := types.NewExistingBallot(types.BallotID{byte(i)}, types.EmptyEdSignature, types.EmptyNodeID, template.Layer) + ballot := types.NewExistingBallot( + types.BallotID{byte(i)}, + types.EmptyEdSignature, + types.EmptyNodeID, + template.Layer, + ) ballot.InnerBallot = template.InnerBallot ballot.EligibilityProofs = template.EligibilityProofs tortoise.OnBallot(ballot.ToTortoiseData()) @@ -2325,7 +2452,12 @@ func TestOnBallotComputeOpinion(t *testing.T) { require.NotEmpty(t, rst) id := types.BallotID{1} - ballot := types.NewExistingBallot(id, types.EmptyEdSignature, types.EmptyNodeID, rst[0].Layer) + ballot := types.NewExistingBallot( + id, + types.EmptyEdSignature, + types.EmptyNodeID, + rst[0].Layer, + ) ballot.InnerBallot = rst[0].InnerBallot ballot.EligibilityProofs = rst[0].EligibilityProofs ballot.Votes.Base = types.EmptyBallotID @@ -2499,13 +2631,28 @@ func TestDecodeExceptions(t *testing.T) { ballots3 := tortoise.trtl.ballots[last] for _, ballot := range ballots1 { - require.Equal(t, against, findVote(ballot.votes, layer.lid, block.id), "base ballot votes against") + require.Equal( + t, + against, + findVote(ballot.votes, layer.lid, block.id), + "base ballot votes against", + ) } for _, ballot := range ballots2 { - require.Equal(t, support, findVote(ballot.votes, layer.lid, block.id), "new ballot overwrites vote") + require.Equal( + t, + support, + findVote(ballot.votes, layer.lid, block.id), + "new ballot overwrites vote", + ) } for _, ballot := range ballots3 { - require.Equal(t, against, findVote(ballot.votes, layer.lid, block.id), "latest ballot overwrites back to against") + require.Equal( + t, + against, + findVote(ballot.votes, layer.lid, block.id), + "latest ballot 
overwrites back to against", + ) } } @@ -2551,7 +2698,12 @@ func TestCountOnBallot(t *testing.T) { for i := 1; i <= size*2; i++ { id := types.BallotID{} binary.BigEndian.PutUint64(id[:], uint64(i)) - ballot := types.NewExistingBallot(id, types.EmptyEdSignature, types.EmptyNodeID, blts[0].Layer) + ballot := types.NewExistingBallot( + id, + types.EmptyEdSignature, + types.EmptyNodeID, + blts[0].Layer, + ) ballot.InnerBallot = blts[0].InnerBallot ballot.EligibilityProofs = blts[0].EligibilityProofs // unset support to be consistent with local opinion @@ -2845,7 +2997,12 @@ func TestBaseBallotBeforeCurrentLayer(t *testing.T) { tortoise.TallyVotes(ctx, last) ballots, err := ballots.Layer(s.GetState(0).DB, last) require.NoError(t, err) - ballot := types.NewExistingBallot(types.BallotID{1}, types.EmptyEdSignature, types.EmptyNodeID, ballots[0].Layer) + ballot := types.NewExistingBallot( + types.BallotID{1}, + types.EmptyEdSignature, + types.EmptyNodeID, + ballots[0].Layer, + ) ballot.InnerBallot = ballots[0].InnerBallot ballot.EligibilityProofs = ballots[0].EligibilityProofs ballot.Votes.Base = ballots[1].ID() @@ -2919,7 +3076,12 @@ func BenchmarkOnBallot(b *testing.B) { for i := 0; i < b.N; i++ { id := types.BallotID{} binary.BigEndian.PutUint64(id[:], uint64(i)+1) - ballot := types.NewExistingBallot(id, types.EmptyEdSignature, types.EmptyNodeID, modified.Layer) + ballot := types.NewExistingBallot( + id, + types.EmptyEdSignature, + types.EmptyNodeID, + modified.Layer, + ) ballot.InnerBallot = modified.InnerBallot ballot.EligibilityProofs = modified.EligibilityProofs tortoise.OnBallot(ballot.ToTortoiseData()) @@ -2982,10 +3144,9 @@ func TestMultipleTargets(t *testing.T) { last := s.Next(sim.WithNumBlocks(0), sim.WithVoteGenerator(upvote)) tortoise.TallyVotes(ctx, last) - rst, err := tortoise.Results(types.GetEffectiveGenesis().Add(1), last.Sub(1)) - require.NoError(t, err) - require.Len(t, rst, 2) - block := rst[0].Blocks[0] + rst := tortoise.Updates() + require.Len(t, rst, 4) + block := rst[1].Blocks[0] require.Equal(t, block.Header.Height, heights[0]) require.True(t, block.Valid) require.False(t, block.Data) @@ -3035,6 +3196,7 @@ func TestUpdates(t *testing.T) { ID: id, LayerID: lid, }) + require.True(t, trt.OnApplied(updates[0].Layer, updates[0].Opinion)) trt.OnHareOutput(lid, id) updates = trt.Updates() require.Len(t, updates, 1) @@ -3145,7 +3307,9 @@ func TestOnMalfeasance(t *testing.T) { totalEligibilities(s.epochEligibilities()). eligibilities(elig)) } - s.smesher(0).malfeasant() // without this call threshold will be very large, and s.updates fail + s.smesher(0). 
+		malfeasant()
+	// without this call threshold will be very large, and s.updates fail
 	for i := 0; i < 10; i++ {
 		s.smesher(0).rawatx(types.ATXID{byte(i)}, 1, new(aopt).weight(100).height(10))
 	}
diff --git a/tortoise/tracer.go b/tortoise/tracer.go
index 8c54f3541c..260cbaac8e 100644
--- a/tortoise/tracer.go
+++ b/tortoise/tracer.go
@@ -122,6 +122,7 @@ const (
 	traceActiveset
 	traceResults
 	traceUpdates
+	traceApplied
 	traceMalfeasence
 )
 
@@ -362,52 +363,47 @@ func (h *HareTrace) Run(r *traceRunner) error {
 	return nil
 }
 
-type ResultsTrace struct {
+type UpdatesTrace struct {
 	From    types.LayerID  `json:"from"`
 	To      types.LayerID  `json:"to"`
 	Error   string         `json:"e"`
 	Results []result.Layer `json:"results"`
 }
 
-func (r *ResultsTrace) Type() eventType {
-	return traceResults
+func (u *UpdatesTrace) Type() eventType {
+	return traceUpdates
 }
 
-func (r *ResultsTrace) New() traceEvent {
-	return &ResultsTrace{}
+func (u *UpdatesTrace) New() traceEvent {
+	return &UpdatesTrace{}
 }
 
-func (r *ResultsTrace) Run(rt *traceRunner) error {
-	rst, err := rt.trt.Results(r.From, r.To)
-	if rt.assertErrors {
-		if err := assertErrors(err, r.Error); err != nil {
-			return err
-		}
-	}
-	if err == nil {
-		if diff := cmp.Diff(rst, r.Results, cmpopts.EquateEmpty()); len(diff) > 0 && rt.assertOutputs {
-			return errors.New(diff)
-		}
+func (u *UpdatesTrace) Run(r *traceRunner) error {
+	rst := r.trt.Updates()
+	if diff := cmp.Diff(rst, u.Results, cmpopts.EquateEmpty()); len(diff) > 0 && r.assertOutputs {
+		return errors.New(diff)
 	}
 	return nil
 }
 
-type UpdatesTrace struct {
-	ResultsTrace `json:",inline"`
+type AppliedTrace struct {
+	Layer   types.LayerID `json:"layer"`
+	Opinion types.Hash32  `json:"opinion"`
+	Result  bool          `json:"rst"`
 }
 
-func (u *UpdatesTrace) Type() eventType {
-	return traceUpdates
+func (a *AppliedTrace) Type() eventType {
+	return traceApplied
 }
 
-func (u *UpdatesTrace) New() traceEvent {
-	return &UpdatesTrace{}
+func (a *AppliedTrace) New() traceEvent {
+	return &AppliedTrace{}
 }
 
-func (u *UpdatesTrace) Run(r *traceRunner) error {
-	rst := r.trt.Updates()
-	if diff := cmp.Diff(rst, u.Results, cmpopts.EquateEmpty()); len(diff) > 0 && r.assertOutputs {
-		return errors.New(diff)
+func (a *AppliedTrace) Run(r *traceRunner) error {
+	rst := r.trt.OnApplied(a.Layer, a.Opinion)
+	if rst != a.Result {
+		return fmt.Errorf("on applied: expected %v got %v", a.Result, rst)
 	}
 	return nil
 }
@@ -475,8 +471,8 @@ func newEventEnum() eventEnum {
 	enum.Register(&TallyTrace{})
 	enum.Register(&BlockTrace{})
 	enum.Register(&HareTrace{})
-	enum.Register(&ResultsTrace{})
 	enum.Register(&UpdatesTrace{})
+	enum.Register(&AppliedTrace{})
 	enum.Register(&MalfeasanceTrace{})
 	return enum
 }
diff --git a/tortoise/tracer_test.go b/tortoise/tracer_test.go
index 3f822ef0d9..a6803f6a59 100644
--- a/tortoise/tracer_test.go
+++ b/tortoise/tracer_test.go
@@ -43,10 +43,14 @@ func TestTracer(t *testing.T) {
 	t.Run("recover", func(t *testing.T) {
 		t.Parallel()
 		path := filepath.Join(t.TempDir(), "tortoise.trace")
-		trt, err := Recover(context.Background(), s.GetState(0).DB, last, WithTracer(WithOutput(path)))
+		trt, err := Recover(
+			context.Background(),
+			s.GetState(0).DB,
+			last,
+			WithTracer(WithOutput(path)),
+		)
 		require.NoError(t, err)
 		trt.Updates()
-		trt.Results(types.GetEffectiveGenesis(), trt.LatestComplete())
 		require.NoError(t, RunTrace(path, nil, WithLogger(logtest.New(t))))
 	})
 	t.Run("errors", func(t *testing.T) {
@@ -78,7 +82,10 @@ func TestData(t *testing.T) {
 		}
 		t.Run(entry.Name(), func(t *testing.T) {
 			t.Parallel()
-			require.NoError(t, RunTrace(filepath.Join(data, entry.Name()), nil, WithLogger(logtest.New(t))))
+			require.NoError(
+				t,
+				RunTrace(filepath.Join(data, entry.Name()), nil, WithLogger(logtest.New(t))),
+			)
 		})
 	}
 }
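
To make the new control flow concrete, here is a rough sketch of how a consumer of the tortoise is expected to drive the Updates/OnApplied pair introduced above: tally votes, drain Updates(), apply each returned layer to state, and then acknowledge the applied opinion with OnApplied so the tortoise can advance its pending pointer and later evict old layers. This is an illustrative sketch only, not code from this change: the example package, the applyUpdates helper, and the applyLayer callback are hypothetical, while TallyVotes, Updates, and OnApplied are the methods shown in the diff, with import paths as used elsewhere in the repository.

package example

import (
	"context"
	"fmt"

	"github.com/spacemeshos/go-spacemesh/common/types"
	"github.com/spacemeshos/go-spacemesh/common/types/result"
	"github.com/spacemeshos/go-spacemesh/tortoise"
)

// applyUpdates tallies votes up to current, applies every layer whose opinion
// changed, and reports the applied opinion back to the tortoise. applyLayer is
// a hypothetical hook standing in for the mesh/VM apply-and-persist step; it
// returns the aggregated hash that was written for the layer.
func applyUpdates(
	ctx context.Context,
	trt *tortoise.Tortoise,
	current types.LayerID,
	applyLayer func(result.Layer) (types.Hash32, error),
) error {
	trt.TallyVotes(ctx, current)
	for _, layer := range trt.Updates() {
		opinion, err := applyLayer(layer)
		if err != nil {
			return fmt.Errorf("apply layer %d: %w", layer.Layer, err)
		}
		// OnApplied returns true only when the stored opinion matches the one
		// computed by the tortoise; in that case the pending pointer moves past
		// this layer, which makes it eligible for eviction later.
		if !trt.OnApplied(layer.Layer, opinion) {
			return fmt.Errorf("opinion mismatch at layer %d", layer.Layer)
		}
	}
	return nil
}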