diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3fa833c173..1081b18522 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,10 +8,14 @@ See [RELEASE](./RELEASE.md) for workflow instructions.
 * [#5793](https://github.com/spacemeshos/go-spacemesh/pull/5793) Reduced hare committee 8x from 400 to 50 to decrease network traffic caused by Hare.
+
 * [#6099](https://github.com/spacemeshos/go-spacemesh/pull/6099) Adds new metrics to the API to provide insights into the performance and behavior of the node's APIs.
+* [#6115](https://github.com/spacemeshos/go-spacemesh/pull/6115) Increase the number of supported ATXs to 8.0 Mio.
+
 ### Features
+
 * [#6112](https://github.com/spacemeshos/go-spacemesh/pull/6112) Adds vesting, vault, and drain vault contents to the v2alpha2 Transaction API. Fixes the 'unspecified' transaction type.
diff --git a/common/types/activation.go b/common/types/activation.go
index ca99b151c4..c956d2833b 100644
--- a/common/types/activation.go
+++ b/common/types/activation.go
@@ -396,7 +396,7 @@ func ATXIDsToHashes(ids []ATXID) []Hash32 {
 
 type EpochActiveSet struct {
     Epoch EpochID
-    Set   []ATXID `scale:"max=7000000"` // to be in line with `EpochData` in fetch/wire_types.go
+    Set   []ATXID `scale:"max=8000000"` // to be in line with `EpochData` in fetch/wire_types.go
 }
 
 var MaxEpochActiveSetSize = scale.MustGetMaxElements[EpochActiveSet]("Set")
diff --git a/common/types/activation_scale.go b/common/types/activation_scale.go
index ff2be4cdff..aba52fb122 100644
--- a/common/types/activation_scale.go
+++ b/common/types/activation_scale.go
@@ -91,7 +91,7 @@ func (t *EpochActiveSet) EncodeScale(enc *scale.Encoder) (total int, err error)
         total += n
     }
     {
-        n, err := scale.EncodeStructSliceWithLimit(enc, t.Set, 7000000)
+        n, err := scale.EncodeStructSliceWithLimit(enc, t.Set, 8000000)
         if err != nil {
             return total, err
         }
@@ -110,7 +110,7 @@ func (t *EpochActiveSet) DecodeScale(dec *scale.Decoder) (total int, err error)
         t.Epoch = EpochID(field)
     }
     {
-        field, n, err := scale.DecodeStructSliceWithLimit[ATXID](dec, 7000000)
+        field, n, err := scale.DecodeStructSliceWithLimit[ATXID](dec, 8000000)
         if err != nil {
             return total, err
         }
diff --git a/common/types/block.go b/common/types/block.go
index ae878ac198..28668fc131 100644
--- a/common/types/block.go
+++ b/common/types/block.go
@@ -77,14 +77,14 @@ type InnerBlock struct {
     // In this case they will get all 50 available slots in all 4032 layers of the epoch.
     // Additionally every other identity on the network that successfully published an ATX will get 1 slot.
     //
-    // If we expect 7.0 Mio ATXs that would be a total of 7.0 Mio + 50 * 4032 = 7 201 600 slots.
+    // If we expect 8.0 Mio ATXs that would be a total of 8.0 Mio + 50 * 4032 = 8 201 600 slots.
     // Since these are randomly distributed across the epoch, we can expect an average of n * p =
-    // 7 201 600 / 4032 = 1786.1 rewards in a block with a standard deviation of sqrt(n * p * (1 - p)) =
-    // sqrt(7 201 600 * 1/4032 * 4031/4032) = 42.3
+    // 8 201 600 / 4032 = 2034.1 rewards in a block with a standard deviation of sqrt(n * p * (1 - p)) =
+    // sqrt(8 201 600 * 1/4032 * 4031/4032) = 45.1
     //
-    // This means that we can expect a maximum of 1786.1 + 6*42.3 = 2039.7 rewards per block with
+    // This means that we can expect a maximum of 2034.1 + 6*45.1 = 2304.7 rewards per block with
     // > 99.9997% probability.
-    Rewards []AnyReward     `scale:"max=2050"`
+    Rewards []AnyReward     `scale:"max=2350"`
     TxIDs   []TransactionID `scale:"max=100000"`
 }
 
diff --git a/common/types/block_scale.go b/common/types/block_scale.go
index 9a7a8ad46c..ed925ea778 100644
--- a/common/types/block_scale.go
+++ b/common/types/block_scale.go
@@ -45,7 +45,7 @@ func (t *InnerBlock) EncodeScale(enc *scale.Encoder) (total int, err error) {
         total += n
     }
     {
-        n, err := scale.EncodeStructSliceWithLimit(enc, t.Rewards, 2050)
+        n, err := scale.EncodeStructSliceWithLimit(enc, t.Rewards, 2350)
         if err != nil {
             return total, err
         }
@@ -79,7 +79,7 @@ func (t *InnerBlock) DecodeScale(dec *scale.Decoder) (total int, err error) {
         t.TickHeight = uint64(field)
     }
     {
-        field, n, err := scale.DecodeStructSliceWithLimit[AnyReward](dec, 2050)
+        field, n, err := scale.DecodeStructSliceWithLimit[AnyReward](dec, 2350)
         if err != nil {
             return total, err
         }
diff --git a/fetch/wire_types.go b/fetch/wire_types.go
index 4d1b497e61..725b6af613 100644
--- a/fetch/wire_types.go
+++ b/fetch/wire_types.go
@@ -37,7 +37,7 @@ type RequestMessage struct {
 type ResponseMessage struct {
     Hash types.Hash32
     // keep in line with limit of Response.Data in `p2p/server/server.go`
-    Data []byte `scale:"max=235929600"` // 225 MiB > 7.0 mio ATX * 32 bytes per ID
+    Data []byte `scale:"max=272629760"` // 260 MiB > 8.0 mio ATX * 32 bytes per ID
 }
 
 // RequestBatch is a batch of requests and a hash of all requests as ID.
@@ -116,7 +116,7 @@ type MeshHashes struct {
 }
 
 type MaliciousIDs struct {
-    NodeIDs []types.NodeID `scale:"max=7000000"` // to be in line with `EpochData.AtxIDs` below
+    NodeIDs []types.NodeID `scale:"max=8000000"` // to be in line with `EpochData.AtxIDs` below
 }
 
 type EpochData struct {
@@ -128,7 +128,7 @@ type EpochData struct {
     // - the size of `Rewards` in the type `InnerBlock` in common/types/block.go
     // - the size of `Ballots` in the type `LayerData` below
     // - the size of `Proposals` in the type `Value` in hare3/types.go
-    AtxIDs []types.ATXID `scale:"max=7000000"`
+    AtxIDs []types.ATXID `scale:"max=8000000"`
 }
 
 // LayerData is the data response for a given layer ID.
@@ -139,14 +139,14 @@ type LayerData struct {
     // In this case they will get all 50 available slots in all 4032 layers of the epoch.
     // Additionally every other identity on the network that successfully published an ATX will get 1 slot.
     //
-    // If we expect 7.0 Mio ATXs that would be a total of 7.0 Mio + 50 * 4032 = 7 201 600 slots.
+    // If we expect 8.0 Mio ATXs that would be a total of 8.0 Mio + 50 * 4032 = 8 201 600 slots.
     // Since these are randomly distributed across the epoch, we can expect an average of n * p =
-    // 7 201 600 / 4032 = 1786.1 ballots in a layer with a standard deviation of sqrt(n * p * (1 - p)) =
-    // sqrt(7 201 600 * 1/4032 * 4031/4032) = 42.3
+    // 8 201 600 / 4032 = 2034.1 ballots in a layer with a standard deviation of sqrt(n * p * (1 - p)) =
+    // sqrt(8 201 600 * 1/4032 * 4031/4032) = 45.1
     //
-    // This means that we can expect a maximum of 1786.1 + 6*42.3 = 2039.7 ballots per layer with
+    // This means that we can expect a maximum of 2034.1 + 6*45.1 = 2304.7 ballots per layer with
     // > 99.9997% probability.
-    Ballots []types.BallotID `scale:"max=2050"`
+    Ballots []types.BallotID `scale:"max=2350"`
 }
 
 type OpinionRequest struct {
diff --git a/fetch/wire_types_scale.go b/fetch/wire_types_scale.go
index 0219c4d7bd..baf4d3f082 100644
--- a/fetch/wire_types_scale.go
+++ b/fetch/wire_types_scale.go
@@ -55,7 +55,7 @@ func (t *ResponseMessage) EncodeScale(enc *scale.Encoder) (total int, err error)
         total += n
     }
     {
-        n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 235929600)
+        n, err := scale.EncodeByteSliceWithLimit(enc, t.Data, 272629760)
         if err != nil {
             return total, err
         }
@@ -73,7 +73,7 @@ func (t *ResponseMessage) DecodeScale(dec *scale.Decoder) (total int, err error)
         total += n
     }
     {
-        field, n, err := scale.DecodeByteSliceWithLimit(dec, 235929600)
+        field, n, err := scale.DecodeByteSliceWithLimit(dec, 272629760)
         if err != nil {
             return total, err
         }
@@ -235,7 +235,7 @@ func (t *MeshHashes) DecodeScale(dec *scale.Decoder) (total int, err error) {
 
 func (t *MaliciousIDs) EncodeScale(enc *scale.Encoder) (total int, err error) {
     {
-        n, err := scale.EncodeStructSliceWithLimit(enc, t.NodeIDs, 7000000)
+        n, err := scale.EncodeStructSliceWithLimit(enc, t.NodeIDs, 8000000)
         if err != nil {
             return total, err
         }
@@ -246,7 +246,7 @@ func (t *MaliciousIDs) EncodeScale(enc *scale.Encoder) (total int, err error) {
 
 func (t *MaliciousIDs) DecodeScale(dec *scale.Decoder) (total int, err error) {
     {
-        field, n, err := scale.DecodeStructSliceWithLimit[types.NodeID](dec, 7000000)
+        field, n, err := scale.DecodeStructSliceWithLimit[types.NodeID](dec, 8000000)
         if err != nil {
             return total, err
         }
@@ -258,7 +258,7 @@ func (t *MaliciousIDs) DecodeScale(dec *scale.Decoder) (total int, err error) {
 
 func (t *EpochData) EncodeScale(enc *scale.Encoder) (total int, err error) {
     {
-        n, err := scale.EncodeStructSliceWithLimit(enc, t.AtxIDs, 7000000)
+        n, err := scale.EncodeStructSliceWithLimit(enc, t.AtxIDs, 8000000)
         if err != nil {
             return total, err
         }
@@ -269,7 +269,7 @@ func (t *EpochData) EncodeScale(enc *scale.Encoder) (total int, err error) {
 
 func (t *EpochData) DecodeScale(dec *scale.Decoder) (total int, err error) {
     {
-        field, n, err := scale.DecodeStructSliceWithLimit[types.ATXID](dec, 7000000)
+        field, n, err := scale.DecodeStructSliceWithLimit[types.ATXID](dec, 8000000)
         if err != nil {
             return total, err
         }
@@ -281,7 +281,7 @@ func (t *EpochData) DecodeScale(dec *scale.Decoder) (total int, err error) {
 
 func (t *LayerData) EncodeScale(enc *scale.Encoder) (total int, err error) {
     {
-        n, err := scale.EncodeStructSliceWithLimit(enc, t.Ballots, 2050)
+        n, err := scale.EncodeStructSliceWithLimit(enc, t.Ballots, 2350)
         if err != nil {
             return total, err
         }
@@ -292,7 +292,7 @@ func (t *LayerData) EncodeScale(enc *scale.Encoder) (total int, err error) {
     {
-        field, n, err := scale.DecodeStructSliceWithLimit[types.BallotID](dec, 2050)
+        field, n, err := scale.DecodeStructSliceWithLimit[types.BallotID](dec, 2350)
         if err != nil {
             return total, err
         }
diff --git a/hare3/types.go b/hare3/types.go
index a7f9c9fb40..b94800c207 100644
--- a/hare3/types.go
+++ b/hare3/types.go
@@ -82,14 +82,14 @@ type Value struct {
     // In this case they will get all 50 available slots in all 4032 layers of the epoch.
     // Additionally every other identity on the network that successfully published an ATX will get 1 slot.
     //
-    // If we expect 7.0 Mio ATXs that would be a total of 7.0 Mio + 50 * 4032 = 7 201 600 slots.
+    // If we expect 8.0 Mio ATXs that would be a total of 8.0 Mio + 50 * 4032 = 8 201 600 slots.
     // Since these are randomly distributed across the epoch, we can expect an average of n * p =
-    // 7 201 600 / 4032 = 1786.1 eligibilities in a layer with a standard deviation of sqrt(n * p * (1 - p)) =
-    // sqrt(7 201 600 * 1/4032 * 4031/4032) = 42.3
+    // 8 201 600 / 4032 = 2034.1 eligibilities in a layer with a standard deviation of sqrt(n * p * (1 - p)) =
+    // sqrt(8 201 600 * 1/4032 * 4031/4032) = 45.1
     //
-    // This means that we can expect a maximum of 1786.1 + 6*42.3 = 2039.7 eligibilities in a layer with
+    // This means that we can expect a maximum of 2034.1 + 6*45.1 = 2304.7 eligibilities in a layer with
     // > 99.9997% probability.
-    Proposals []types.ProposalID `scale:"max=2050"`
+    Proposals []types.ProposalID `scale:"max=2350"`
     // Reference is set in messages for commit and notify rounds.
     Reference *types.Hash32
 }
diff --git a/hare3/types_scale.go b/hare3/types_scale.go
index 7e0db6f5a6..28e8b64035 100644
--- a/hare3/types_scale.go
+++ b/hare3/types_scale.go
@@ -48,7 +48,7 @@ func (t *IterRound) DecodeScale(dec *scale.Decoder) (total int, err error) {
 
 func (t *Value) EncodeScale(enc *scale.Encoder) (total int, err error) {
     {
-        n, err := scale.EncodeStructSliceWithLimit(enc, t.Proposals, 2050)
+        n, err := scale.EncodeStructSliceWithLimit(enc, t.Proposals, 2350)
         if err != nil {
             return total, err
         }
@@ -66,7 +66,7 @@ func (t *Value) EncodeScale(enc *scale.Encoder) (total int, err error) {
     {
-        field, n, err := scale.DecodeStructSliceWithLimit[types.ProposalID](dec, 2050)
+        field, n, err := scale.DecodeStructSliceWithLimit[types.ProposalID](dec, 2350)
         if err != nil {
             return total, err
         }
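
The per-layer bounds quoted in the comments above come from modelling the random assignment of eligibility slots to layers as a binomial distribution. The following is a minimal, self-contained Go sketch of that arithmetic; it is illustrative only (the constant and variable names are not taken from the repository), with the figures mirroring the comments in this patch:

package main

import (
    "fmt"
    "math"
)

func main() {
    const (
        expectedATXs    = 8_000_000.0 // assumed upper bound on ATXs per epoch ("8.0 Mio" above)
        slotsBigSmesher = 50.0        // eligibility slots claimed by the largest identity per layer
        layersPerEpoch  = 4032.0
    )

    // Every published ATX yields at least one slot; the largest identity
    // additionally fills all 50 slots in all 4032 layers of the epoch.
    n := expectedATXs + slotsBigSmesher*layersPerEpoch // 8 201 600 slots

    // Slots are distributed uniformly at random over the layers, so the count
    // per layer is approximately Binomial(n, p) with p = 1/layersPerEpoch.
    p := 1.0 / layersPerEpoch
    mean := n * p                       // ≈ 2034.1
    sigma := math.Sqrt(n * p * (1 - p)) // ≈ 45.1

    // Six standard deviations above the mean is exceeded with well under
    // 0.0003% probability, matching the "> 99.9997%" claim in the comments.
    fmt.Printf("mean=%.1f sigma=%.1f bound=%.1f\n", mean, sigma, mean+6*sigma)
}

Running the sketch reproduces the 2034.1 / 45.1 / 2304.7 figures used in the comments, which is why the `max=2050` limits on Rewards, Ballots, and Proposals are raised to `max=2350` alongside the 7 000 000 → 8 000 000 ATX limits.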