Merge pull request #2023 from ssvlabs/stage
Main <- Stage
y0sher authored Feb 4, 2025
2 parents 34929cb + 3c4c92d commit 7fcd336
Showing 3 changed files with 57 additions and 28 deletions.
message/validation/common_checks.go (4 changes: 2 additions & 2 deletions)
@@ -38,9 +38,9 @@ func (mv *messageValidator) messageLateness(slot phase0.Slot, role spectypes.Run
 	var ttl phase0.Slot
 	switch role {
 	case spectypes.RoleProposer, spectypes.RoleSyncCommitteeContribution:
-		ttl = 1 + lateSlotAllowance
+		ttl = 1 + LateSlotAllowance
 	case spectypes.RoleCommittee, spectypes.RoleAggregator:
-		ttl = phase0.Slot(mv.netCfg.Beacon.SlotsPerEpoch()) + lateSlotAllowance
+		ttl = phase0.Slot(mv.netCfg.Beacon.SlotsPerEpoch()) + LateSlotAllowance
 	case spectypes.RoleValidatorRegistration, spectypes.RoleVoluntaryExit:
 		return 0
 	}
message/validation/const.go (2 changes: 1 addition & 1 deletion)
@@ -13,7 +13,7 @@ const (
 	clockErrorTolerance = time.Millisecond * 50
 	allowedRoundsInFuture = 1
 	allowedRoundsInPast = 2
-	lateSlotAllowance = 2
+	LateSlotAllowance = 2
 	syncCommitteeSize = 512
 	rsaSignatureSize = 256
 	operatorIDSize = 8 // uint64
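The allowance constant is now exported (lateSlotAllowance becomes LateSlotAllowance) so packages outside message/validation, such as the committee observer changed below, can reference the same value. As a rough, hypothetical illustration of the TTL arithmetic above, assuming mainnet's 32 slots per epoch (the slotsPerEpoch constant here stands in for mv.netCfg.Beacon.SlotsPerEpoch()):

package main

import "fmt"

const (
	LateSlotAllowance = 2  // mirrors message/validation/const.go after this change
	slotsPerEpoch     = 32 // assumed mainnet value; the real code reads it from the beacon network config
)

func main() {
	proposerTTL := 1 + LateSlotAllowance              // RoleProposer / RoleSyncCommitteeContribution: 3 slots
	committeeTTL := slotsPerEpoch + LateSlotAllowance // RoleCommittee / RoleAggregator: 34 slots
	fmt.Println("proposer TTL:", proposerTTL, "committee TTL:", committeeTTL)
}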
protocol/v2/ssv/validator/non_committee_validator.go (79 changes: 54 additions & 25 deletions)
@@ -16,6 +16,7 @@ import (
 
 	"github.com/ssvlabs/ssv/ibft/storage"
 	"github.com/ssvlabs/ssv/logging/fields"
+	"github.com/ssvlabs/ssv/message/validation"
 	"github.com/ssvlabs/ssv/networkconfig"
 	"github.com/ssvlabs/ssv/protocol/v2/blockchain/beacon"
 	qbftcontroller "github.com/ssvlabs/ssv/protocol/v2/qbft/controller"
@@ -28,17 +29,18 @@ import (
 )
 
 type CommitteeObserver struct {
-	msgID spectypes.MessageID
-	logger *zap.Logger
-	Storage *storage.ParticipantStores
-	beaconNetwork beacon.BeaconNetwork
-	networkConfig networkconfig.NetworkConfig
-	ValidatorStore registrystorage.ValidatorStore
-	newDecidedHandler qbftcontroller.NewDecidedHandler
-	attesterRoots *ttlcache.Cache[phase0.Root, struct{}]
-	syncCommRoots *ttlcache.Cache[phase0.Root, struct{}]
-	domainCache *DomainCache
-	postConsensusContainer map[phase0.ValidatorIndex]*ssv.PartialSigContainer
+	msgID spectypes.MessageID
+	logger *zap.Logger
+	Storage *storage.ParticipantStores
+	beaconNetwork beacon.BeaconNetwork
+	networkConfig networkconfig.NetworkConfig
+	ValidatorStore registrystorage.ValidatorStore
+	newDecidedHandler qbftcontroller.NewDecidedHandler
+	attesterRoots *ttlcache.Cache[phase0.Root, struct{}]
+	syncCommRoots *ttlcache.Cache[phase0.Root, struct{}]
+	domainCache *DomainCache
+	// TODO: consider using round-robin container as []map[phase0.ValidatorIndex]*ssv.PartialSigContainer similar to what is used in OperatorState
+	postConsensusContainer map[phase0.Slot]map[phase0.ValidatorIndex]*ssv.PartialSigContainer
 }
 
 type CommitteeObserverOptions struct {
@@ -59,19 +61,22 @@ type CommitteeObserverOptions struct {
 func NewCommitteeObserver(msgID spectypes.MessageID, opts CommitteeObserverOptions) *CommitteeObserver {
 	// TODO: does the specific operator matters?
 
-	return &CommitteeObserver{
-		msgID: msgID,
-		logger: opts.Logger,
-		Storage: opts.Storage,
-		beaconNetwork: opts.NetworkConfig.Beacon,
-		networkConfig: opts.NetworkConfig,
-		ValidatorStore: opts.ValidatorStore,
-		newDecidedHandler: opts.NewDecidedHandler,
-		attesterRoots: opts.AttesterRoots,
-		syncCommRoots: opts.SyncCommRoots,
-		domainCache: opts.DomainCache,
-		postConsensusContainer: make(map[phase0.ValidatorIndex]*ssv.PartialSigContainer),
+	co := &CommitteeObserver{
+		msgID: msgID,
+		logger: opts.Logger,
+		Storage: opts.Storage,
+		beaconNetwork: opts.NetworkConfig.Beacon,
+		networkConfig: opts.NetworkConfig,
+		ValidatorStore: opts.ValidatorStore,
+		newDecidedHandler: opts.NewDecidedHandler,
+		attesterRoots: opts.AttesterRoots,
+		syncCommRoots: opts.SyncCommRoots,
+		domainCache: opts.DomainCache,
 	}
+
+	co.postConsensusContainer = make(map[phase0.Slot]map[phase0.ValidatorIndex]*ssv.PartialSigContainer, co.postConsensusContainerCapacity())
+
+	return co
 }
 
 func (ncv *CommitteeObserver) ProcessMessage(msg *queue.SSVMessage) error {
@@ -214,15 +219,22 @@ func (ncv *CommitteeObserver) processMessage(
 ) (map[validatorIndexAndRoot][]spectypes.OperatorID, error) {
 	quorums := make(map[validatorIndexAndRoot][]spectypes.OperatorID)
 
+	currentSlot := signedMsg.Slot
+	slotValidators, exist := ncv.postConsensusContainer[currentSlot]
+	if !exist {
+		slotValidators = make(map[phase0.ValidatorIndex]*ssv.PartialSigContainer)
+		ncv.postConsensusContainer[signedMsg.Slot] = slotValidators
+	}
+
 	for _, msg := range signedMsg.Messages {
 		validator, exists := ncv.ValidatorStore.ValidatorByIndex(msg.ValidatorIndex)
 		if !exists {
 			return nil, fmt.Errorf("could not find share for validator with index %d", msg.ValidatorIndex)
 		}
-		container, ok := ncv.postConsensusContainer[msg.ValidatorIndex]
+		container, ok := slotValidators[msg.ValidatorIndex]
 		if !ok {
 			container = ssv.NewPartialSigContainer(validator.Quorum())
-			ncv.postConsensusContainer[msg.ValidatorIndex] = container
+			slotValidators[msg.ValidatorIndex] = container
 		}
 		if container.HasSignature(msg.ValidatorIndex, msg.Signer, msg.SigningRoot) {
 			ncv.resolveDuplicateSignature(container, msg, validator)
@@ -244,6 +256,18 @@ }
 			}
 		}
 	}
+
+	// Remove older slots container
+	if len(ncv.postConsensusContainer) >= ncv.postConsensusContainerCapacity() {
+		// #nosec G115 -- capacity must be low epoch not to cause overflow
+		thresholdSlot := currentSlot - phase0.Slot(ncv.postConsensusContainerCapacity())
+		for slot := range ncv.postConsensusContainer {
+			if slot < thresholdSlot {
+				delete(ncv.postConsensusContainer, slot)
+			}
+		}
+	}
+
 	return quorums, nil
 }
 
@@ -354,6 +378,11 @@ func (ncv *CommitteeObserver) saveSyncCommRoots(epoch phase0.Epoch, beaconVote *
 	return nil
 }
 
+func (ncv *CommitteeObserver) postConsensusContainerCapacity() int {
+	// #nosec G115 -- slots per epoch must be low epoch not to cause overflow
+	return int(ncv.networkConfig.SlotsPerEpoch()) + validation.LateSlotAllowance
+}
+
 func constructAttestationData(vote *spectypes.BeaconVote, slot phase0.Slot, committeeIndex phase0.CommitteeIndex) *phase0.AttestationData {
 	return &phase0.AttestationData{
 		Slot: slot,
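For illustration, here is a self-contained sketch of the retention scheme introduced above, using simplified stand-in types (observer, sigContainer, record and capacitySlots are hypothetical names, not the repository's API): partial-signature containers are keyed first by slot and then by validator index, and once at least SlotsPerEpoch + LateSlotAllowance slots are held, any slot older than that window behind the current slot is dropped in one pass.

package main

import "fmt"

type (
	Slot           uint64
	ValidatorIndex uint64
	sigContainer   struct{ sigs int } // stand-in for ssv.PartialSigContainer
)

const capacitySlots = 32 + 2 // SlotsPerEpoch + LateSlotAllowance, assuming mainnet

// observer mimics CommitteeObserver.postConsensusContainer and its pruning.
type observer struct {
	postConsensus map[Slot]map[ValidatorIndex]*sigContainer
}

func (o *observer) record(slot Slot, idx ValidatorIndex) {
	perSlot, ok := o.postConsensus[slot]
	if !ok {
		perSlot = make(map[ValidatorIndex]*sigContainer)
		o.postConsensus[slot] = perSlot
	}
	container, ok := perSlot[idx]
	if !ok {
		container = &sigContainer{}
		perSlot[idx] = container
	}
	container.sigs++

	// Drop whole slots that fell out of the retention window
	// (the extra slot >= capacitySlots guard just avoids uint underflow in this sketch).
	if len(o.postConsensus) >= capacitySlots && slot >= capacitySlots {
		threshold := slot - capacitySlots
		for s := range o.postConsensus {
			if s < threshold {
				delete(o.postConsensus, s)
			}
		}
	}
}

func main() {
	o := &observer{postConsensus: make(map[Slot]map[ValidatorIndex]*sigContainer)}
	for s := Slot(0); s < 100; s++ {
		o.record(s, 1)
	}
	fmt.Println("slots retained:", len(o.postConsensus)) // bounded by the window, not by total slots seen
}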
