Skip to content

Commit

Permalink
remove hare1 code (#5256)
Browse files Browse the repository at this point in the history
On mainnet we are running the certifier with a committee size of 200, while hare3 runs with 400.
To keep the config compatible, I added a separate configuration variable. It defaults to the hare3 committee size on every network except mainnet; on mainnet, increasing the committee size will require a scheduled upgrade.

The rest of the change is the removal of hare1 code and some of its dependencies that were spread around the codebase.
  • Loading branch information
dshulyak committed Nov 18, 2023
1 parent 90a6ff2 commit 785cf12
Show file tree
Hide file tree
Showing 59 changed files with 340 additions and 12,620 deletions.
1 change: 0 additions & 1 deletion activation/handler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -983,7 +983,6 @@ func TestHandler_ProcessAtx(t *testing.T) {
atxHdlr.log,
atxHdlr.cdb,
atxHdlr.edVerifier,
nil,
&got,
)
require.NoError(t, err)
Expand Down
7 changes: 3 additions & 4 deletions blocks/certifier.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ import (
"github.com/spacemeshos/go-spacemesh/codec"
"github.com/spacemeshos/go-spacemesh/common/types"
"github.com/spacemeshos/go-spacemesh/datastore"
"github.com/spacemeshos/go-spacemesh/hare"
"github.com/spacemeshos/go-spacemesh/hare/eligibility"
"github.com/spacemeshos/go-spacemesh/log"
"github.com/spacemeshos/go-spacemesh/p2p"
Expand All @@ -34,7 +33,7 @@ var (

// CertConfig is the config for Certifier.
type CertConfig struct {
CommitteeSize int
CommitteeSize int `mapstructure:"committee-size"`
CertifyThreshold int
LayerBuffer uint32
NumLayersToKeep uint32
Expand Down Expand Up @@ -83,7 +82,7 @@ type Certifier struct {
stopped atomic.Bool

db *datastore.CachedDB
oracle hare.Rolacle
oracle eligibility.Rolacle
signers map[types.NodeID]*signing.EdSigner
edVerifier *signing.EdVerifier
publisher pubsub.Publisher
Expand All @@ -101,7 +100,7 @@ type Certifier struct {
// NewCertifier creates new block certifier.
func NewCertifier(
db *datastore.CachedDB,
o hare.Rolacle,
o eligibility.Rolacle,

v *signing.EdVerifier,
p pubsub.Publisher,
Expand Down
5 changes: 2 additions & 3 deletions blocks/certifier_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ import (
"github.com/spacemeshos/go-spacemesh/common/types"
"github.com/spacemeshos/go-spacemesh/datastore"
"github.com/spacemeshos/go-spacemesh/hare/eligibility"
hmocks "github.com/spacemeshos/go-spacemesh/hare/mocks"
"github.com/spacemeshos/go-spacemesh/log/logtest"
"github.com/spacemeshos/go-spacemesh/p2p/pubsub"
pubsubmock "github.com/spacemeshos/go-spacemesh/p2p/pubsub/mocks"
Expand All @@ -30,7 +29,7 @@ const defaultCnt = uint16(2)
type testCertifier struct {
*Certifier
db *datastore.CachedDB
mOracle *hmocks.MockRolacle
mOracle *eligibility.MockRolacle
mPub *pubsubmock.MockPublisher
mClk *mocks.MocklayerClock
mb *smocks.MockBeaconGetter
Expand All @@ -42,7 +41,7 @@ func newTestCertifier(t *testing.T, signers int) *testCertifier {
types.SetLayersPerEpoch(3)
db := datastore.NewCachedDB(sql.InMemory(), logtest.New(t))
ctrl := gomock.NewController(t)
mo := hmocks.NewMockRolacle(ctrl)
mo := eligibility.NewMockRolacle(ctrl)
mp := pubsubmock.NewMockPublisher(ctrl)
mc := mocks.NewMocklayerClock(ctrl)
mb := smocks.NewMockBeaconGetter(ctrl)
Expand Down
30 changes: 16 additions & 14 deletions blocks/generator.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,8 @@ import (

"github.com/spacemeshos/go-spacemesh/atxsdata"
"github.com/spacemeshos/go-spacemesh/common/types"
"github.com/spacemeshos/go-spacemesh/hare"
"github.com/spacemeshos/go-spacemesh/hare/eligibility"
"github.com/spacemeshos/go-spacemesh/hare3"
"github.com/spacemeshos/go-spacemesh/log"
"github.com/spacemeshos/go-spacemesh/sql"
"github.com/spacemeshos/go-spacemesh/sql/layers"
Expand All @@ -37,7 +37,7 @@ type Generator struct {
cert certifier
patrol layerPatrol

hareCh chan hare.LayerOutput
hareCh <-chan hare3.ConsensusOutput
optimisticOutput map[types.LayerID]*proposalMetadata
}

Expand Down Expand Up @@ -74,7 +74,7 @@ func WithGeneratorLogger(logger log.Log) GeneratorOpt {
}

// WithHareOutputChan sets the chan to listen to hare output.
func WithHareOutputChan(ch chan hare.LayerOutput) GeneratorOpt {
func WithHareOutputChan(ch <-chan hare3.ConsensusOutput) GeneratorOpt {
return func(g *Generator) {
g.hareCh = ch
}
Expand Down Expand Up @@ -106,7 +106,6 @@ func NewGenerator(
for _, opt := range opts {
opt(g)
}

return g
}

Expand Down Expand Up @@ -135,24 +134,27 @@ func (g *Generator) run(ctx context.Context) error {
select {
case <-ctx.Done():
return fmt.Errorf("context done: %w", ctx.Err())
case out := <-g.hareCh:
case out, open := <-g.hareCh:
if !open {
return nil
}
g.logger.With().Debug("received hare output",
log.Context(out.Ctx),
log.Context(ctx),
out.Layer,
log.Int("num_proposals", len(out.Proposals)),
)
maxLayer = max(maxLayer, out.Layer)
_, err := g.processHareOutput(out)
_, err := g.processHareOutput(ctx, out)
if err != nil {
if errors.Is(err, errNodeHasBadMeshHash) {
g.logger.With().Info("node has different mesh hash from majority, will download block instead",
log.Context(out.Ctx),
log.Context(ctx),
out.Layer,
log.Err(err),
)
} else {
g.logger.With().Error("failed to process hare output",
log.Context(out.Ctx),
log.Context(ctx),
out.Layer,
log.Err(err),
)
Expand Down Expand Up @@ -184,12 +186,12 @@ func (g *Generator) getProposals(pids []types.ProposalID) ([]*types.Proposal, er
return result, nil
}

func (g *Generator) processHareOutput(out hare.LayerOutput) (*types.Block, error) {
func (g *Generator) processHareOutput(ctx context.Context, out hare3.ConsensusOutput) (*types.Block, error) {
var md *proposalMetadata
if len(out.Proposals) > 0 {
getMetadata := func() error {
// fetch proposals from peers if not locally available
if err := g.fetcher.GetProposals(out.Ctx, out.Proposals); err != nil {
if err := g.fetcher.GetProposals(ctx, out.Proposals); err != nil {
failFetchCnt.Inc()
return fmt.Errorf("preprocess fetch layer %d proposals: %w", out.Layer, err)
}
Expand All @@ -199,7 +201,7 @@ func (g *Generator) processHareOutput(out hare.LayerOutput) (*types.Block, error
failErrCnt.Inc()
return fmt.Errorf("preprocess get layer %d proposals: %w", out.Layer, err)
}
md, err = getProposalMetadata(out.Ctx, g.logger, g.db, g.atxs, g.cfg, out.Layer, props)
md, err = getProposalMetadata(ctx, g.logger, g.db, g.atxs, g.cfg, out.Layer, props)
if err != nil {
return err
}
Expand Down Expand Up @@ -234,10 +236,10 @@ func (g *Generator) processHareOutput(out hare.LayerOutput) (*types.Block, error
hareOutput = block.ID()
g.logger.With().Info("generated block", out.Layer, block.ID())
}
if err := g.saveAndCertify(out.Ctx, out.Layer, block); err != nil {
if err := g.saveAndCertify(ctx, out.Layer, block); err != nil {
return block, err
}
if err := g.msh.ProcessLayerPerHareOutput(out.Ctx, out.Layer, hareOutput, false); err != nil {
if err := g.msh.ProcessLayerPerHareOutput(ctx, out.Layer, hareOutput, false); err != nil {
return block, err
}
return block, nil
Expand Down
Loading

0 comments on commit 785cf12

Please sign in to comment.