From afa3e68b6dc379823f8589e4bd713941a35f5636 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 11 Nov 2024 16:38:45 -0400 Subject: [PATCH 01/31] feat: starting the fragment chain implementation - Added: Candidate Entry - (inclusion_emulator): Added ProspectiveCandidate, Modification error and Fragment Validity error --- .../fragment-chain/errors.go | 72 ++++++ .../fragment-chain/fragment_chain.go | 121 +++++++++ .../inclusion-emulator/inclusion_emulator.go | 240 ++++++++++++++++++ 3 files changed, 433 insertions(+) create mode 100644 dot/parachain/prospective-parachains/fragment-chain/errors.go create mode 100644 dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go create mode 100644 dot/parachain/util/inclusion-emulator/inclusion_emulator.go diff --git a/dot/parachain/prospective-parachains/fragment-chain/errors.go b/dot/parachain/prospective-parachains/fragment-chain/errors.go new file mode 100644 index 0000000000..a0013004fa --- /dev/null +++ b/dot/parachain/prospective-parachains/fragment-chain/errors.go @@ -0,0 +1,72 @@ +package fragmentchain + +import ( + "errors" + "fmt" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" + "github.com/ChainSafe/gossamer/lib/common" +) + +var ( + ErrCandidateAlradyKnown = errors.New("candidate already known") + ErrZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") + ErrCycle = errors.New("candidate would introduce a cycle") + ErrMultiplePaths = errors.New("candidate would introduce two paths to the same output state") + ErrIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. 
It should first be introduced as Seconded") + ErrParentCandidateNotFound = errors.New("could not find parent of the candidate") + ErrRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") + ErrPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") + ErrCandidateEntryZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") +) + +type ErrRelayParentPrecedesCandidatePendingAvailability struct { + relayParentA, relayParentB common.Hash +} + +func (e ErrRelayParentPrecedesCandidatePendingAvailability) Error() string { + return fmt.Sprintf("relay parent %x of the candidate precedes the relay parent %x of a pending availability candidate", + e.relayParentA, e.relayParentB) +} + +type ErrForkWithCandidatePendingAvailability struct { + candidateHash parachaintypes.CandidateHash +} + +func (e ErrForkWithCandidatePendingAvailability) Error() string { + return fmt.Sprintf("candidate would introduce a fork with a pending availability candidate: %x", e.candidateHash.Value) +} + +type ErrForkChoiceRule struct { + candidateHash parachaintypes.CandidateHash +} + +func (e ErrForkChoiceRule) Error() string { + return fmt.Sprintf("fork selection rule favours another candidate: %x", e.candidateHash.Value) +} + +type ErrComputeConstraints struct { + modificationErr inclusionemulator.ModificationError +} + +func (e ErrComputeConstraints) Error() string { + return fmt.Sprintf("could not compute candidate constraints: %s", e.modificationErr) +} + +type ErrCheckAgainstConstraints struct { + fragmentValidityErr inclusionemulator.FragmentValidityError +} + +func (e ErrCheckAgainstConstraints) Error() string { + return fmt.Sprintf("candidate violates constraints: %s", e.fragmentValidityErr) +} + +type ErrRelayParentNotInScope struct { + relayParentA, relayParentB common.Hash +} + +func (e ErrRelayParentNotInScope) 
Error() string { + return fmt.Sprintf("relay parent %x not in scope, earliest relay parent allowed %x", + e.relayParentA, e.relayParentB) +} diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go new file mode 100644 index 0000000000..3371b4e284 --- /dev/null +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -0,0 +1,121 @@ +package fragmentchain + +import ( + "fmt" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" + "github.com/ChainSafe/gossamer/lib/common" +) + +type CandidateState int + +const ( + Seconded CandidateState = iota + Backed +) + +type CandidateEntry struct { + candidateHash parachaintypes.CandidateHash + parentHeadDataHash common.Hash + outputHeadDataHash common.Hash + relayParent common.Hash + // TODO: this is under a Arc smart pointer, should we + // have that here? maybe some specialized struct that protects the underlying data? 
+ candidate inclusionemulator.ProspectiveCandidate + state CandidateState +} + +func (c *CandidateEntry) Hash() parachaintypes.CandidateHash { + return c.candidateHash +} + +func NewCandidateEntry( + candidateHash parachaintypes.CandidateHash, + candidate parachaintypes.CommittedCandidateReceipt, + persistedValidationData parachaintypes.PersistedValidationData, + state CandidateState, +) (*CandidateEntry, error) { + pvdHash, err := persistedValidationData.Hash() + if err != nil { + return nil, fmt.Errorf("while hashing persisted validation data: %w", err) + } + + if pvdHash != candidate.Descriptor.PersistedValidationDataHash { + return nil, ErrPersistedValidationDataMismatch + } + + parendHeadDataHash, err := persistedValidationData.ParentHead.Hash() + if err != nil { + return nil, fmt.Errorf("while hashing parent head data: %w", err) + } + + outputHeadDataHash, err := candidate.Commitments.HeadData.Hash() + if err != nil { + return nil, fmt.Errorf("while hashing output head data: %w", err) + } + + if parendHeadDataHash == outputHeadDataHash { + return nil, ErrCandidateEntryZeroLengthCycle + } + + return &CandidateEntry{ + candidateHash: candidateHash, + parentHeadDataHash: parendHeadDataHash, + outputHeadDataHash: outputHeadDataHash, + relayParent: candidate.Descriptor.RelayParent, + state: state, + candidate: inclusionemulator.ProspectiveCandidate{ + Commitments: candidate.Commitments, + PersistedValidationData: persistedValidationData, + PoVHash: candidate.Descriptor.PovHash, + ValidationCodeHash: candidate.Descriptor.ValidationCodeHash, + }, + }, nil +} + +// CandidateStorage is a utility for storing candidates and information about them such as +// their relay-parents and their backing states. This does not assume any restriction on whether +// or not candidates form a chain. Useful for storing all kinds of candidates. 
+type CandidateStorage struct { + byParentHead map[common.Hash]map[parachaintypes.CandidateHash]any + byOutputHead map[common.Hash]map[parachaintypes.CandidateHash]any + byCandidateHash map[parachaintypes.CandidateHash]CandidateEntry +} + +func (c *CandidateStorage) AddPendingAvailabilityCandidate( + candidateHash parachaintypes.CandidateHash, + candidate parachaintypes.CommittedCandidateReceipt, + persistedValidationData parachaintypes.PersistedValidationData, +) error { + entry, err := NewCandidateEntry(candidateHash, candidate, persistedValidationData, Backed) + if err != nil { + return err + } + + return c.addCandidateEntry(entry) +} + +func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { + _, ok := c.byCandidateHash[candidate.candidateHash] + if ok { + return ErrCandidateAlradyKnown + } + + // updates the reference parent hash -> candidate + setOfCandidates := c.byParentHead[candidate.parentHeadDataHash] + if setOfCandidates == nil { + setOfCandidates = make(map[parachaintypes.CandidateHash]any) + } + setOfCandidates[candidate.candidateHash] = struct{}{} + c.byParentHead[candidate.parentHeadDataHash] = setOfCandidates + + // updates the reference output hash -> candidate + setOfCandidates = c.byOutputHead[candidate.outputHeadDataHash] + if setOfCandidates == nil { + setOfCandidates = make(map[parachaintypes.CandidateHash]any) + } + setOfCandidates[candidate.candidateHash] = struct{}{} + c.byOutputHead[candidate.outputHeadDataHash] = setOfCandidates + +} diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go new file mode 100644 index 0000000000..0412a0b412 --- /dev/null +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -0,0 +1,240 @@ +package inclusionemulator + +import ( + "fmt" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +// ProspectiveCandidate includes key 
information that represents a candidate +// without pinning it to a particular session. For example, commitments are +// represented here, but the erasure-root is not. This means that prospective +// candidates are not correlated to any session in particular. +type ProspectiveCandidate struct { + Commitments parachaintypes.CandidateCommitments + PersistedValidationData parachaintypes.PersistedValidationData + PoVHash common.Hash + ValidationCodeHash parachaintypes.ValidationCodeHash +} + +// ModificationError describes the kinds of errors that can happen when modifying constraints +type ModificationError interface { + isModificationError() +} + +var ( + _ ModificationError = (*DisallowedHrmpWatermark)(nil) + _ ModificationError = (*NoSuchHrmpChannel)(nil) + _ ModificationError = (*HrmpMessagesOverflow)(nil) + _ ModificationError = (*HrmpBytesOverflow)(nil) + _ ModificationError = (*UmpMessagesOverflow)(nil) + _ ModificationError = (*UmpBytesOverflow)(nil) + _ ModificationError = (*DmpMessagesUnderflow)(nil) + _ ModificationError = (*AppliedNonexistentCodeUpgrade)(nil) +) + +type DisallowedHrmpWatermark struct { + blockNumber uint +} + +func (*DisallowedHrmpWatermark) isModificationError() {} + +func (e *DisallowedHrmpWatermark) String() string { + return fmt.Sprintf("DisallowedHrmpWatermark(BlockNumber: %d)", e.blockNumber) +} + +type NoSuchHrmpChannel struct { + paraId parachaintypes.ParaID +} + +func (*NoSuchHrmpChannel) isModificationError() {} + +func (e *NoSuchHrmpChannel) String() string { + return fmt.Sprintf("NoSuchHrmpChannel(ParaId: %d)", e.paraId) +} + +type HrmpMessagesOverflow struct { + paraId parachaintypes.ParaID + messagesRemaining uint + messagesSubmitted uint +} + +func (*HrmpMessagesOverflow) isModificationError() {} + +func (e *HrmpMessagesOverflow) String() string { + return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", e.paraId, e.messagesRemaining, e.messagesSubmitted) +} + +type HrmpBytesOverflow 
struct { + paraId parachaintypes.ParaID + bytesRemaining uint + bytesSubmitted uint +} + +func (*HrmpBytesOverflow) isModificationError() {} + +func (e *HrmpBytesOverflow) String() string { + return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", e.paraId, e.bytesRemaining, e.bytesSubmitted) +} + +type UmpMessagesOverflow struct { + messagesRemaining uint + messagesSubmitted uint +} + +func (*UmpMessagesOverflow) isModificationError() {} + +func (e *UmpMessagesOverflow) String() string { + return fmt.Sprintf("UmpMessagesOverflow(MessagesRemaining: %d, MessagesSubmitted: %d)", e.messagesRemaining, e.messagesSubmitted) +} + +type UmpBytesOverflow struct { + bytesRemaining uint + bytesSubmitted uint +} + +func (*UmpBytesOverflow) isModificationError() {} + +func (e *UmpBytesOverflow) String() string { + return fmt.Sprintf("UmpBytesOverflow(BytesRemaining: %d, BytesSubmitted: %d)", e.bytesRemaining, e.bytesSubmitted) +} + +type DmpMessagesUnderflow struct { + messagesRemaining uint + messagesProcessed uint +} + +func (*DmpMessagesUnderflow) isModificationError() {} + +func (e *DmpMessagesUnderflow) String() string { + return fmt.Sprintf("DmpMessagesUnderflow(MessagesRemaining: %d, MessagesProcessed: %d)", e.messagesRemaining, e.messagesProcessed) +} + +type AppliedNonexistentCodeUpgrade struct{} + +func (*AppliedNonexistentCodeUpgrade) isModificationError() {} + +func (e *AppliedNonexistentCodeUpgrade) String() string { + return "AppliedNonexistentCodeUpgrade()" +} + +// FragmentValidityError kinds of errors with the validity of a fragment. 
+type FragmentValidityError interface { + isFragmentValidityError() +} + +var ( + _ FragmentValidityError = (*ValidationCodeMismatch)(nil) + _ FragmentValidityError = (*PersistedValidationDataMismatch)(nil) + _ FragmentValidityError = (*OutputsInvalid)(nil) + _ FragmentValidityError = (*CodeSizeTooLarge)(nil) + _ FragmentValidityError = (*RelayParentTooOld)(nil) + _ FragmentValidityError = (*DmpAdvancementRule)(nil) + _ FragmentValidityError = (*UmpMessagesPerCandidateOverflow)(nil) + _ FragmentValidityError = (*HrmpMessagesPerCandidateOverflow)(nil) + _ FragmentValidityError = (*CodeUpgradeRestricted)(nil) + _ FragmentValidityError = (*HrmpMessagesDescendingOrDuplicate)(nil) +) + +type ValidationCodeMismatch struct { + expected parachaintypes.ValidationCodeHash + got parachaintypes.ValidationCodeHash +} + +func (*ValidationCodeMismatch) isFragmentValidityError() {} + +func (e *ValidationCodeMismatch) String() string { + return fmt.Sprintf("ValidationCodeMismatch(Expected: %s, Got: %s)", e.expected, e.got) +} + +type PersistedValidationDataMismatch struct { + expected parachaintypes.PersistedValidationData + got parachaintypes.PersistedValidationData +} + +func (*PersistedValidationDataMismatch) isFragmentValidityError() {} + +func (e *PersistedValidationDataMismatch) String() string { + return fmt.Sprintf("PersistedValidationDataMismatch(Expected: %v, Got: %v)", e.expected, e.got) +} + +type OutputsInvalid struct { + modificationError ModificationError +} + +func (*OutputsInvalid) isFragmentValidityError() {} + +func (e *OutputsInvalid) String() string { + return fmt.Sprintf("OutputsInvalid(ModificationError: %v)", e.modificationError) +} + +type CodeSizeTooLarge struct { + maxAllowed uint + newSize uint +} + +func (*CodeSizeTooLarge) isFragmentValidityError() {} + +func (e *CodeSizeTooLarge) String() string { + return fmt.Sprintf("CodeSizeTooLarge(MaxAllowed: %d, NewSize: %d)", e.maxAllowed, e.newSize) +} + +type RelayParentTooOld struct { + minAllowed uint + 
current uint +} + +func (*RelayParentTooOld) isFragmentValidityError() {} + +func (e *RelayParentTooOld) String() string { + return fmt.Sprintf("RelayParentTooOld(MinAllowed: %d, Current: %d)", e.minAllowed, e.current) +} + +type DmpAdvancementRule struct{} + +func (*DmpAdvancementRule) isFragmentValidityError() {} + +func (e *DmpAdvancementRule) String() string { + return "DmpAdvancementRule()" +} + +type UmpMessagesPerCandidateOverflow struct { + messagesAllowed uint + messagesSubmitted uint +} + +func (*UmpMessagesPerCandidateOverflow) isFragmentValidityError() {} + +func (e *UmpMessagesPerCandidateOverflow) String() string { + return fmt.Sprintf("UmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", e.messagesAllowed, e.messagesSubmitted) +} + +type HrmpMessagesPerCandidateOverflow struct { + messagesAllowed uint + messagesSubmitted uint +} + +func (*HrmpMessagesPerCandidateOverflow) isFragmentValidityError() {} + +func (e *HrmpMessagesPerCandidateOverflow) String() string { + return fmt.Sprintf("HrmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", e.messagesAllowed, e.messagesSubmitted) +} + +type CodeUpgradeRestricted struct{} + +func (*CodeUpgradeRestricted) isFragmentValidityError() {} + +func (e *CodeUpgradeRestricted) String() string { + return "CodeUpgradeRestricted()" +} + +type HrmpMessagesDescendingOrDuplicate struct { + index uint +} + +func (*HrmpMessagesDescendingOrDuplicate) isFragmentValidityError() {} + +func (e *HrmpMessagesDescendingOrDuplicate) String() string { + return fmt.Sprintf("HrmpMessagesDescendingOrDuplicate(Index: %d)", e.index) +} From 404f5fe021b8fe8784fd96edbf3f973ed53d9fd7 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 12 Nov 2024 18:02:20 -0400 Subject: [PATCH 02/31] feat: implemented `CandidateStorage` --- .../fragment-chain/fragment_chain.go | 97 ++++++- .../fragment-chain/fragment_chain_test.go | 251 ++++++++++++++++++ go.mod | 3 +- go.sum | 3 - 4 files changed, 
348 insertions(+), 6 deletions(-) create mode 100644 dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 3371b4e284..6707f3e907 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -2,6 +2,7 @@ package fragmentchain import ( "fmt" + "iter" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" @@ -80,7 +81,7 @@ func NewCandidateEntry( type CandidateStorage struct { byParentHead map[common.Hash]map[parachaintypes.CandidateHash]any byOutputHead map[common.Hash]map[parachaintypes.CandidateHash]any - byCandidateHash map[parachaintypes.CandidateHash]CandidateEntry + byCandidateHash map[parachaintypes.CandidateHash]*CandidateEntry } func (c *CandidateStorage) AddPendingAvailabilityCandidate( @@ -103,6 +104,8 @@ func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { } // updates the reference parent hash -> candidate + // we don't check the `ok` value since the key can + // exists in the map but pointing to a nil hashset setOfCandidates := c.byParentHead[candidate.parentHeadDataHash] if setOfCandidates == nil { setOfCandidates = make(map[parachaintypes.CandidateHash]any) @@ -118,4 +121,96 @@ func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { setOfCandidates[candidate.candidateHash] = struct{}{} c.byOutputHead[candidate.outputHeadDataHash] = setOfCandidates + return nil +} + +func (c *CandidateStorage) removeCandidate(candidateHash parachaintypes.CandidateHash) { + entry, ok := c.byCandidateHash[candidateHash] + if !ok { + return + } + + delete(c.byCandidateHash, candidateHash) + + if setOfCandidates, ok := 
c.byParentHead[entry.parentHeadDataHash]; ok { + delete(setOfCandidates, candidateHash) + if len(setOfCandidates) == 0 { + delete(c.byParentHead, entry.parentHeadDataHash) + } + } + + if setOfCandidates, ok := c.byOutputHead[entry.outputHeadDataHash]; ok { + delete(setOfCandidates, candidateHash) + if len(setOfCandidates) == 0 { + delete(c.byOutputHead, entry.outputHeadDataHash) + } + } +} + +func (c *CandidateStorage) MarkBacked(candidateHash parachaintypes.CandidateHash) { + entry, ok := c.byCandidateHash[candidateHash] + if !ok { + fmt.Println("candidate not found while marking as backed") + } + + entry.state = Backed + fmt.Println("candidate marked as backed") +} + +func (c *CandidateStorage) Contains(candidateHash parachaintypes.CandidateHash) bool { + _, ok := c.byCandidateHash[candidateHash] + return ok +} + +// Candidates returns an iterator over references to the stored candidates, in arbitrary order. +func (c *CandidateStorage) Candidates() iter.Seq[*CandidateEntry] { + return func(yield func(*CandidateEntry) bool) { + for _, entry := range c.byCandidateHash { + if !yield(entry) { + return + } + } + } +} + +func (c *CandidateStorage) HeadDataByHash(hash common.Hash) *parachaintypes.HeadData { + // first, search for candidates outputting this head data and extract the head data + // from their commitments if they exist. + // otherwise, search for candidates building upon this head data and extract the + // head data from their persisted validation data if they exist. 
+ + if setOfCandidateHashes, ok := c.byOutputHead[hash]; ok { + for candidateHash := range setOfCandidateHashes { + if candidate, ok := c.byCandidateHash[candidateHash]; ok { + return &candidate.candidate.Commitments.HeadData + } + } + } + + if setOfCandidateHashes, ok := c.byParentHead[hash]; ok { + for candidateHash := range setOfCandidateHashes { + if candidate, ok := c.byCandidateHash[candidateHash]; ok { + return &candidate.candidate.PersistedValidationData.ParentHead + } + } + } + + return nil +} + +func (c *CandidateStorage) PossibleBackedParaChildren(parentHeadHash common.Hash) iter.Seq[*CandidateEntry] { + return func(yield func(*CandidateEntry) bool) { + seqOfCandidateHashes, ok := c.byParentHead[parentHeadHash] + if !ok { + return + } + + for candidateHash := range seqOfCandidateHashes { + if entry, ok := c.byCandidateHash[candidateHash]; ok && entry.state == Backed { + if !yield(entry) { + return + } + } + } + } } diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go new file mode 100644 index 0000000000..bae24167c8 --- /dev/null +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -0,0 +1,251 @@ +package fragmentchain + +import ( + "testing" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/stretchr/testify/assert" +) + +func TestCandidateStorage_RemoveCandidate(t *testing.T) { + storage := &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + + candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} + parentHeadHash := 
common.Hash{4, 5, 6} + outputHeadHash := common.Hash{7, 8, 9} + + entry := &CandidateEntry{ + candidateHash: candidateHash, + parentHeadDataHash: parentHeadHash, + outputHeadDataHash: outputHeadHash, + state: Backed, + } + + storage.byCandidateHash[candidateHash] = entry + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + + storage.removeCandidate(candidateHash) + + _, exists := storage.byCandidateHash[candidateHash] + assert.False(t, exists, "candidate should be removed from byCandidateHash") + + _, exists = storage.byParentHead[parentHeadHash] + assert.False(t, exists, "candidate should be removed from byParentHead") + + _, exists = storage.byOutputHead[outputHeadHash] + assert.False(t, exists, "candidate should be removed from byOutputHead") +} + +func TestCandidateStorage_MarkBacked(t *testing.T) { + storage := &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + + candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} + parentHeadHash := common.Hash{4, 5, 6} + outputHeadHash := common.Hash{7, 8, 9} + + entry := &CandidateEntry{ + candidateHash: candidateHash, + parentHeadDataHash: parentHeadHash, + outputHeadDataHash: outputHeadHash, + state: Seconded, + } + + storage.byCandidateHash[candidateHash] = entry + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + + storage.MarkBacked(candidateHash) + + assert.Equal(t, Backed, entry.state, "candidate state should be marked as backed") +} + +func TestCandidateStorage_HeadDataByHash(t 
*testing.T) { + tests := map[string]struct { + setup func() *CandidateStorage + hash common.Hash + expected *parachaintypes.HeadData + }{ + "find_head_data_of_first_candidate_using_output_head_data_hash": { + setup: func() *CandidateStorage { + storage := &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + + candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} + parentHeadHash := common.Hash{4, 5, 6} + outputHeadHash := common.Hash{7, 8, 9} + headData := parachaintypes.HeadData{Data: []byte{10, 11, 12}} + + entry := &CandidateEntry{ + candidateHash: candidateHash, + parentHeadDataHash: parentHeadHash, + outputHeadDataHash: outputHeadHash, + candidate: inclusionemulator.ProspectiveCandidate{ + Commitments: parachaintypes.CandidateCommitments{ + HeadData: headData, + }, + }, + } + + storage.byCandidateHash[candidateHash] = entry + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + + return storage + }, + hash: common.Hash{7, 8, 9}, + expected: ¶chaintypes.HeadData{Data: []byte{10, 11, 12}}, + }, + "find_head_data_using_parent_head_data_hash_from_second_candidate": { + setup: func() *CandidateStorage { + storage := &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + + candidateHash := parachaintypes.CandidateHash{Value: common.Hash{13, 14, 15}} + parentHeadHash := common.Hash{16, 17, 18} + outputHeadHash := common.Hash{19, 20, 21} + headData := parachaintypes.HeadData{Data: []byte{22, 23, 24}} + + 
entry := &CandidateEntry{ + candidateHash: candidateHash, + parentHeadDataHash: parentHeadHash, + outputHeadDataHash: outputHeadHash, + candidate: inclusionemulator.ProspectiveCandidate{ + PersistedValidationData: parachaintypes.PersistedValidationData{ + ParentHead: headData, + }, + }, + } + + storage.byCandidateHash[candidateHash] = entry + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + + return storage + }, + hash: common.Hash{16, 17, 18}, + expected: ¶chaintypes.HeadData{Data: []byte{22, 23, 24}}, + }, + "use_nonexistent_hash_and_should_get_nil": { + setup: func() *CandidateStorage { + storage := &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + return storage + }, + hash: common.Hash{99, 99, 99}, + expected: nil, + }, + "insert_0_candidates_and_try_to_find_but_should_get_nil": { + setup: func() *CandidateStorage { + return &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + }, + hash: common.Hash{7, 8, 9}, + expected: nil, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + storage := tt.setup() + result := storage.HeadDataByHash(tt.hash) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { + tests := map[string]struct { + setup func() *CandidateStorage + hash common.Hash + expected []*CandidateEntry + }{ + "insert_2_candidates_for_same_parent_one_seconded_one_backed": { + setup: func() 
*CandidateStorage { + storage := &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + + candidateHash1 := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} + parentHeadHash := common.Hash{4, 5, 6} + outputHeadHash1 := common.Hash{7, 8, 9} + + candidateHash2 := parachaintypes.CandidateHash{Value: common.Hash{10, 11, 12}} + outputHeadHash2 := common.Hash{13, 14, 15} + + entry1 := &CandidateEntry{ + candidateHash: candidateHash1, + parentHeadDataHash: parentHeadHash, + outputHeadDataHash: outputHeadHash1, + state: Seconded, + } + + entry2 := &CandidateEntry{ + candidateHash: candidateHash2, + parentHeadDataHash: parentHeadHash, + outputHeadDataHash: outputHeadHash2, + state: Backed, + } + + storage.byCandidateHash[candidateHash1] = entry1 + storage.byCandidateHash[candidateHash2] = entry2 + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{ + candidateHash1: struct{}{}, + candidateHash2: struct{}{}, + } + + return storage + }, + hash: common.Hash{4, 5, 6}, + expected: []*CandidateEntry{{candidateHash: parachaintypes.CandidateHash{Value: common.Hash{10, 11, 12}}, parentHeadDataHash: common.Hash{4, 5, 6}, outputHeadDataHash: common.Hash{13, 14, 15}, state: Backed}}, + }, + "insert_nothing_and_call_function_should_return_nothing": { + setup: func() *CandidateStorage { + return &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } + }, + hash: common.Hash{4, 5, 6}, + expected: nil, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + storage := tt.setup() + var result []*CandidateEntry + for entry := range 
storage.PossibleBackedParaChildren(tt.hash) { + result = append(result, entry) + } + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/go.mod b/go.mod index f585b2c749..b624115e9a 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,6 @@ require ( github.com/fatih/color v1.17.0 github.com/gammazero/deque v0.2.1 github.com/go-playground/validator/v10 v10.21.0 - github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 @@ -200,7 +199,7 @@ require ( lukechampine.com/blake3 v1.2.1 // indirect ) -go 1.21 +go 1.23.1 replace github.com/tetratelabs/wazero => github.com/ChainSafe/wazero v0.0.0-20240319130522-78b21a59bd5f diff --git a/go.sum b/go.sum index 990d74d772..03ec8f0ac3 100644 --- a/go.sum +++ b/go.sum @@ -206,8 +206,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -799,7 +797,6 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= From 6648464158d870bcd27125107218fcf69d0c2918 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 13 Nov 2024 17:01:29 -0400 Subject: [PATCH 03/31] feat: create `Scope`, working on inclusionemulator.Fragment --- .../fragment-chain/errors.go | 11 + .../fragment-chain/fragment_chain.go | 133 ++++++++++- .../fragment-chain/fragment_chain_test.go | 70 +++++- dot/parachain/types/async_backing.go | 58 +++++ dot/parachain/types/types.go | 13 + .../inclusion-emulator/inclusion_emulator.go | 226 ++++++++++++++++++ 6 files changed, 502 insertions(+), 9 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/errors.go b/dot/parachain/prospective-parachains/fragment-chain/errors.go index a0013004fa..3f13f8ba0a 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/errors.go +++ b/dot/parachain/prospective-parachains/fragment-chain/errors.go @@ -70,3 +70,14 @@ func (e ErrRelayParentNotInScope) Error() string { return fmt.Sprintf("relay parent %x not in scope, earliest relay parent allowed %x", e.relayParentA, e.relayParentB) } + +type ErrUnexpectedAncestor struct { + // The block number that this error occurred at + Number uint + // The previous seen block number, which did not match `number`. 
+ Prev uint +} + +func (e ErrUnexpectedAncestor) Error() string { + return fmt.Sprintf("unexpected ancestor %d, expected %d", e.Number, e.Prev) +} diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 6707f3e907..18e7a62ff7 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -7,6 +7,7 @@ import ( parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" "github.com/ChainSafe/gossamer/lib/common" + "github.com/tidwall/btree" ) type CandidateState int @@ -16,6 +17,8 @@ const ( Backed ) +// CandidateEntry represents a candidate into the CandidateStorage +// TODO: Should CandidateEntry implements `HypotheticalOrConcreteCandidate` type CandidateEntry struct { candidateHash parachaintypes.CandidateHash parentHeadDataHash common.Hash @@ -97,6 +100,11 @@ func (c *CandidateStorage) AddPendingAvailabilityCandidate( return c.addCandidateEntry(entry) } +// Len return the number of stored candidate +func (c *CandidateStorage) Len() uint { + return uint(len(c.byCandidateHash)) +} + func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { _, ok := c.byCandidateHash[candidate.candidateHash] if ok { @@ -147,7 +155,7 @@ func (c *CandidateStorage) removeCandidate(candidateHash parachaintypes.Candidat } } -func (c *CandidateStorage) MarkBacked(candidateHash parachaintypes.CandidateHash) { +func (c *CandidateStorage) markBacked(candidateHash parachaintypes.CandidateHash) { entry, ok := c.byCandidateHash[candidateHash] if !ok { fmt.Println("candidate not found while marking as backed") @@ -157,13 +165,13 @@ func (c *CandidateStorage) MarkBacked(candidateHash parachaintypes.CandidateHash fmt.Println("candidate marked as backed") } -func (c 
*CandidateStorage) Contains(candidateHash parachaintypes.CandidateHash) bool { +func (c *CandidateStorage) contains(candidateHash parachaintypes.CandidateHash) bool { _, ok := c.byCandidateHash[candidateHash] return ok } -// Candidates returns an iterator over references to the stored candidates, in arbitrary order. -func (c *CandidateStorage) Candidates() iter.Seq[*CandidateEntry] { +// candidates returns an iterator over references to the stored candidates, in arbitrary order. +func (c *CandidateStorage) candidates() iter.Seq[*CandidateEntry] { return func(yield func(*CandidateEntry) bool) { for _, entry := range c.byCandidateHash { if !yield(entry) { @@ -173,7 +181,7 @@ func (c *CandidateStorage) Candidates() iter.Seq[*CandidateEntry] { } } -func (c *CandidateStorage) HeadDataByHash(hash common.Hash) *parachaintypes.HeadData { +func (c *CandidateStorage) headDataByHash(hash common.Hash) *parachaintypes.HeadData { // first, search for candidates outputting this head data and extract the head data // from their commitments if they exist. 
// otherwise, search for candidates building upon this head data and extract the @@ -198,7 +206,7 @@ func (c *CandidateStorage) HeadDataByHash(hash common.Hash) *parachaintypes.Head return nil } -func (c *CandidateStorage) PossibleBackedParaChildren(parentHeadHash common.Hash) iter.Seq[*CandidateEntry] { +func (c *CandidateStorage) possibleBackedParaChildren(parentHeadHash common.Hash) iter.Seq[*CandidateEntry] { return func(yield func(*CandidateEntry) bool) { seqOfCandidateHashes, ok := c.byParentHead[parentHeadHash] if !ok { @@ -214,3 +222,116 @@ func (c *CandidateStorage) PossibleBackedParaChildren(parentHeadHash common.Hash } } } + +// PendindAvailability is a candidate on-chain but pending availability, for special +// treatment in the `Scope` +type PendindAvailability struct { + CandidateHash parachaintypes.CandidateHash + RelayParent inclusionemulator.RelayChainBlockInfo +} + +// The scope of a fragment chain +type Scope struct { + // the relay parent we're currently building on top of + relayParent inclusionemulator.RelayChainBlockInfo + // the other relay parents candidates are allowed to build upon, + // mapped by the block number + ancestors *btree.Map[uint, inclusionemulator.RelayChainBlockInfo] + // the other relay parents candidates are allowed to build upon, + // mapped by hash + ancestorsByHash map[common.Hash]inclusionemulator.RelayChainBlockInfo + // candidates pending availability at this block + pendindAvailability []*PendindAvailability + // the base constraints derived from the latest included candidate + baseConstraints parachaintypes.Constraints + // equal to `max_candidate_depth` + maxDepth uint +} + +// NewScopeWithAncestors defines a new scope, all arguments are straightforward +// expect ancestors. Ancestor should be in reverse order, starting with the parent +// of the relayParent, and proceeding backwards in block number decrements of 1. +// Ancestors not following these conditions will be rejected. 
+// +// This function will only consume ancestors up to the `MinRelayParentNumber` of the +// `baseConstraints`. +// +// Only ancestor whose children have the same session id as the relay parent's children +// should be provided. It is allowed to provide 0 ancestors. +func NewScopeWithAncestors( + relayParent inclusionemulator.RelayChainBlockInfo, + baseConstraints parachaintypes.Constraints, + pendingAvailability []*PendindAvailability, + maxDepth uint, + ancestors iter.Seq[inclusionemulator.RelayChainBlockInfo], +) (*Scope, error) { + ancestorsMap := btree.NewMap[uint, inclusionemulator.RelayChainBlockInfo](100) + ancestorsByHash := make(map[common.Hash]inclusionemulator.RelayChainBlockInfo) + + prev := relayParent.Number + for ancestor := range ancestors { + if prev == 0 { + return nil, ErrUnexpectedAncestor{Number: ancestor.Number, Prev: prev} + } + + if ancestor.Number != prev-1 { + return nil, ErrUnexpectedAncestor{Number: ancestor.Number, Prev: prev} + } + + if prev == baseConstraints.MinRelayParentNumber { + break + } + + prev = ancestor.Number + ancestorsByHash[ancestor.Hash] = ancestor + ancestorsMap.Set(ancestor.Number, ancestor) + } + + return &Scope{ + relayParent: relayParent, + baseConstraints: baseConstraints, + pendindAvailability: pendingAvailability, + maxDepth: maxDepth, + ancestors: ancestorsMap, + ancestorsByHash: ancestorsByHash, + }, nil +} + +// EarliestRelayParent gets the earliest relay-parent allowed in the scope of the fragment chain. +func (s *Scope) EarliestRelayParent() inclusionemulator.RelayChainBlockInfo { + if iter := s.ancestors.Iter(); iter.Next() { + return iter.Value() + } + return s.relayParent +} + +// Ancestor gets the relay ancestor of the fragment chain by hash. 
+func (s *Scope) Ancestor(hash common.Hash) *inclusionemulator.RelayChainBlockInfo { + if hash == s.relayParent.Hash { + return &s.relayParent + } + + if blockInfo, ok := s.ancestorsByHash[hash]; ok { + return &blockInfo + } + + return nil +} + +// Whether the candidate in question is one pending availability in this scope. +func (s *Scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHash) *PendindAvailability { + for _, c := range s.pendindAvailability { + if c.CandidateHash == candidateHash { + return c + } + } + return nil +} + +type FragmentNode struct { + fragment inclusionemulator.Fragment + candidateHash parachaintypes.CandidateHash + cumulativeModifications inclusionemulator.ConstraintModifications + parentHeadDataHash common.Hash + outputHeadDataHash common.Hash +} diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index bae24167c8..327e9f0692 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -7,6 +7,7 @@ import ( inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" "github.com/ChainSafe/gossamer/lib/common" "github.com/stretchr/testify/assert" + "github.com/tidwall/btree" ) func TestCandidateStorage_RemoveCandidate(t *testing.T) { @@ -65,7 +66,7 @@ func TestCandidateStorage_MarkBacked(t *testing.T) { storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} - storage.MarkBacked(candidateHash) + storage.markBacked(candidateHash) assert.Equal(t, Backed, entry.state, "candidate state should be marked as backed") } @@ -171,7 +172,7 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { tt := tt t.Run(name, func(t *testing.T) { 
storage := tt.setup() - result := storage.HeadDataByHash(tt.hash) + result := storage.headDataByHash(tt.hash) assert.Equal(t, tt.expected, result) }) } @@ -242,10 +243,73 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { t.Run(name, func(t *testing.T) { storage := tt.setup() var result []*CandidateEntry - for entry := range storage.PossibleBackedParaChildren(tt.hash) { + for entry := range storage.possibleBackedParaChildren(tt.hash) { result = append(result, entry) } assert.Equal(t, tt.expected, result) }) } } + +func TestEarliestRelayParent(t *testing.T) { + tests := map[string]struct { + setup func() *Scope + expect inclusionemulator.RelayChainBlockInfo + }{ + "returns from ancestors": { + setup: func() *Scope { + relayParent := inclusionemulator.RelayChainBlockInfo{ + Hash: common.Hash{0x01}, + Number: 10, + } + baseConstraints := parachaintypes.Constraints{ + MinRelayParentNumber: 5, + } + ancestor := inclusionemulator.RelayChainBlockInfo{ + Hash: common.Hash{0x02}, + Number: 9, + } + ancestorsMap := btree.NewMap[uint, inclusionemulator.RelayChainBlockInfo](100) + ancestorsMap.Set(ancestor.Number, ancestor) + return &Scope{ + relayParent: relayParent, + baseConstraints: baseConstraints, + ancestors: ancestorsMap, + } + }, + expect: inclusionemulator.RelayChainBlockInfo{ + Hash: common.Hash{0x02}, + Number: 9, + }, + }, + "returns relayParent": { + setup: func() *Scope { + relayParent := inclusionemulator.RelayChainBlockInfo{ + Hash: common.Hash{0x01}, + Number: 10, + } + baseConstraints := parachaintypes.Constraints{ + MinRelayParentNumber: 5, + } + return &Scope{ + relayParent: relayParent, + baseConstraints: baseConstraints, + ancestors: btree.NewMap[uint, inclusionemulator.RelayChainBlockInfo](100), + } + }, + expect: inclusionemulator.RelayChainBlockInfo{ + Hash: common.Hash{0x01}, + Number: 10, + }, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + scope := tt.setup() + result := 
scope.EarliestRelayParent() + assert.Equal(t, tt.expect, result) + }) + } +} diff --git a/dot/parachain/types/async_backing.go b/dot/parachain/types/async_backing.go index 2463f8d645..91b0044927 100644 --- a/dot/parachain/types/async_backing.go +++ b/dot/parachain/types/async_backing.go @@ -17,3 +17,61 @@ type AsyncBackingParams struct { // When async backing is disabled, the only valid value is 0. AllowedAncestryLen uint32 `scale:"2"` } + +// InboundHrmpLimitations constraints on inbound HRMP channels. +type InboundHrmpLimitations struct { + // An exhaustive set of all valid watermarks, sorted ascending. + // + // It's only expected to contain block numbers at which messages were + // previously sent to a para, excluding most recent head. + ValidWatermarks []uint +} + +// OutboundHrmpChannelLimitations constraints on outbound HRMP channels. +type OutboundHrmpChannelLimitations struct { + // The maximum bytes that can be written to the channel. + BytesRemaining uint32 + // The maximum messages that can be written to the channel. + MessagesRemaining uint32 +} + +// Constraints on the actions that can be taken by a new parachain block. These +// limitations are implicitly associated with some particular parachain, which should +// be apparent from usage. +type Constraints struct { + // The minimum relay-parent number accepted under these constraints. + MinRelayParentNumber uint + // The maximum Proof-of-Validity size allowed, in bytes. + MaxPoVSize uint32 + // The maximum new validation code size allowed, in bytes. + MaxCodeSize uint32 + // The amount of UMP messages remaining. + UmpRemaining uint32 + // The amount of UMP bytes remaining. + UmpRemainingBytes uint32 + // The maximum number of UMP messages allowed per candidate. + MaxUmpNumPerCandidate uint32 + // Remaining DMP queue. Only includes sent-at block numbers. + DmpRemainingMessages []uint + // The limitations of all registered inbound HRMP channels. 
+ HrmpInbound InboundHrmpLimitations + // The limitations of all registered outbound HRMP channels. + HrmpChannelsOut map[ParaID]OutboundHrmpChannelLimitations + // The maximum number of HRMP messages allowed per candidate. + MaxHrmpNumPerCandidate uint32 + // The required parent head-data of the parachain. + RequiredParent HeadData + // The expected validation-code-hash of this parachain. + ValidationCodeHash ValidationCodeHash + // The code upgrade restriction signal as-of this parachain. + UpgradeRestriction UpgradeRestriction + // The future validation code hash, if any, and at what relay-parent + // number the upgrade would be minimally applied. + FutureValidationCode *FutureValidationCode +} + +// FutureValidationCode represents a tuple of BlockNumber an ValidationCodeHash +type FutureValidationCode struct { + BlockNumber uint + ValidationCodeHash ValidationCodeHash +} diff --git a/dot/parachain/types/types.go b/dot/parachain/types/types.go index f9ddd446a5..7ee33dd6a7 100644 --- a/dot/parachain/types/types.go +++ b/dot/parachain/types/types.go @@ -739,3 +739,16 @@ type Subsystem interface { ProcessBlockFinalizedSignal(BlockFinalizedSignal) error Stop() } + +// UpgradeRestriction a possible restriction that prevents a parachain +// from performing an upgrade +// TODO: should be scale encoded/decoded +type UpgradeRestriction interface { + isUpgradeRestriction() +} + +var _ UpgradeRestriction = (*Present)(nil) + +type Present struct{} + +func (*Present) isUpgradeRestriction() {} diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index 0412a0b412..64453b62e7 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -11,6 +11,7 @@ import ( // without pinning it to a particular session. For example, commitments are // represented here, but the erasure-root is not. 
This means that, prospective // candidates are not correlated to any session in particular. +// TODO: should we have a specialized struct to simulate an Arc? type ProspectiveCandidate struct { Commitments parachaintypes.CandidateCommitments PersistedValidationData parachaintypes.PersistedValidationData @@ -238,3 +239,228 @@ func (*HrmpMessagesDescendingOrDuplicate) isFragmentValidityError() {} func (e *HrmpMessagesDescendingOrDuplicate) String() string { return fmt.Sprintf("HrmpMessagesDescendingOrDuplicate(Index: %d)", e.index) } + +// RelayChainBlockInfo contains minimum information about a relay-chain block. +type RelayChainBlockInfo struct { + Hash common.Hash + StorageRoot common.Hash + Number uint +} + +// Constraints on the actions that can be taken by a new parachain block. These +// limitations are implicitly associated with some particular parachain, which should +// be apparent from usage. +type Constraints struct { + // The minimum relay-parent number accepted under these constraints. + MinRelayParentNumber uint + // The maximum Proof-of-Validity size allowed, in bytes. + MaxPoVSize uint + // The maximum new validation code size allowed, in bytes. + MaxCodeSize uint + // The amount of UMP messages remaining. + UmpRemaining uint + // The amount of UMP bytes remaining. + UmpRemainingBytes uint + // The maximum number of UMP messages allowed per candidate. + MaxUmpNumPerCandidate uint + // Remaining DMP queue. Only includes sent-at block numbers. + DmpRemainingMessages []uint + // The limitations of all registered inbound HRMP channels. + HrmpInbound InboundHrmpLimitations + // The limitations of all registered outbound HRMP channels. + HrmpChannelsOut map[parachaintypes.ParaID]OutboundHrmpChannelLimitations + // The maximum number of HRMP messages allowed per candidate. + MaxHrmpNumPerCandidate uint + // The required parent head-data of the parachain. + RequiredParent parachaintypes.HeadData + // The expected validation-code-hash of this parachain. 
+ ValidationCodeHash parachaintypes.ValidationCodeHash + // The code upgrade restriction signal as-of this parachain. + UpgradeRestriction parachaintypes.UpgradeRestriction + // The future validation code hash, if any, and at what relay-parent + // number the upgrade would be minimally applied. + FutureValidationCode *FutureValidationCode +} + +func FromPrimitiveConstraints(pc parachaintypes.Constraints) *Constraints { + hrmpChannelsOut := make(map[parachaintypes.ParaID]OutboundHrmpChannelLimitations) + for k, v := range pc.HrmpChannelsOut { + hrmpChannelsOut[k] = OutboundHrmpChannelLimitations{ + BytesRemaining: uint(v.BytesRemaining), + MessagesRemaining: uint(v.MessagesRemaining), + } + } + + var futureValidationCode *FutureValidationCode + if pc.FutureValidationCode != nil { + futureValidationCode = &FutureValidationCode{ + BlockNumber: pc.FutureValidationCode.BlockNumber, + ValidationCodeHash: pc.FutureValidationCode.ValidationCodeHash, + } + } + + return &Constraints{ + MinRelayParentNumber: pc.MinRelayParentNumber, + MaxPoVSize: uint(pc.MaxPoVSize), + MaxCodeSize: uint(pc.MaxCodeSize), + UmpRemaining: uint(pc.UmpRemaining), + UmpRemainingBytes: uint(pc.UmpRemainingBytes), + MaxUmpNumPerCandidate: uint(pc.MaxUmpNumPerCandidate), + DmpRemainingMessages: pc.DmpRemainingMessages, + HrmpInbound: InboundHrmpLimitations{ + ValidWatermarks: pc.HrmpInbound.ValidWatermarks, + }, + HrmpChannelsOut: hrmpChannelsOut, + MaxHrmpNumPerCandidate: uint(pc.MaxHrmpNumPerCandidate), + RequiredParent: pc.RequiredParent, + ValidationCodeHash: pc.ValidationCodeHash, + UpgradeRestriction: pc.UpgradeRestriction, + FutureValidationCode: futureValidationCode, + } +} + +// InboundHrmpLimitations constraints on inbound HRMP channels +type InboundHrmpLimitations struct { + ValidWatermarks []uint +} + +// OutboundHrmpChannelLimitations constraints on outbound HRMP channels. 
+type OutboundHrmpChannelLimitations struct { + BytesRemaining uint + MessagesRemaining uint +} + +// FutureValidationCode represents the future validation code hash, if any, and at what relay-parent +// number the upgrade would be minimally applied. +type FutureValidationCode struct { + BlockNumber uint + ValidationCodeHash parachaintypes.ValidationCodeHash +} + +// OutboundHrmpChannelModification represents modifications to outbound HRMP channels. +type OutboundHrmpChannelModification struct { + BytesSubmitted uint + MessagesSubmitted uint +} + +// HrmpWatermarkUpdate represents an update to the HRMP Watermark. +type HrmpWatermarkUpdate struct { + Type HrmpWatermarkUpdateType + Block uint +} + +// HrmpWatermarkUpdateType defines the type of HrmpWatermarkUpdate. +type HrmpWatermarkUpdateType int + +const ( + Head HrmpWatermarkUpdateType = iota + Trunk +) + +// Watermark returns the block number of the HRMP Watermark update. +func (h HrmpWatermarkUpdate) Watermark() uint { + return h.Block +} + +// ConstraintModifications represents modifications to constraints as a result of prospective candidates. +type ConstraintModifications struct { + // The required parent head to build upon. + RequiredParent *parachaintypes.HeadData + // The new HRMP watermark. + HrmpWatermark *HrmpWatermarkUpdate + // Outbound HRMP channel modifications. + OutboundHrmp map[parachaintypes.ParaID]OutboundHrmpChannelModification + // The amount of UMP XCM messages sent. `UMPSignal` and separator are excluded. + UmpMessagesSent uint + // The amount of UMP XCM bytes sent. `UMPSignal` and separator are excluded. + UmpBytesSent uint + // The amount of DMP messages processed. + DmpMessagesProcessed uint + // Whether a pending code upgrade has been applied. + CodeUpgradeApplied bool +} + +// Identity returns the 'identity' modifications: these can be applied to +// any constraints and yield the exact same result. 
+func NewConstraintModificationsIdentity() ConstraintModifications { + return ConstraintModifications{ + RequiredParent: nil, + HrmpWatermark: nil, + OutboundHrmp: make(map[parachaintypes.ParaID]OutboundHrmpChannelModification), + UmpMessagesSent: 0, + UmpBytesSent: 0, + DmpMessagesProcessed: 0, + CodeUpgradeApplied: false, + } +} + +// Stack stacks other modifications on top of these. This does no sanity-checking, so if +// `other` is garbage relative to `self`, then the new value will be garbage as well. +// This is an addition which is not commutative. +func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { + if other.RequiredParent != nil { + cm.RequiredParent = other.RequiredParent + } + + if other.HrmpWatermark != nil { + cm.HrmpWatermark = other.HrmpWatermark + } + + for id, mods := range other.OutboundHrmp { + record, ok := cm.OutboundHrmp[id] + if !ok { + record = OutboundHrmpChannelModification{} + } + + record.BytesSubmitted += mods.BytesSubmitted + record.MessagesSubmitted += mods.MessagesSubmitted + cm.OutboundHrmp[id] = record + } + + cm.UmpMessagesSent += other.UmpMessagesSent + cm.UmpBytesSent += other.UmpBytesSent + cm.DmpMessagesProcessed += other.DmpMessagesProcessed + cm.CodeUpgradeApplied = cm.CodeUpgradeApplied || other.CodeUpgradeApplied +} + +// Fragment represents another prospective parachain block +// This is a type which guarantees that the candidate is valid under the operating constraints +type Fragment struct { + relayParent RelayChainBlockInfo + operatingConstraints Constraints + candidate ProspectiveCandidate + modifications ConstraintModifications +} + +func NewFragment( + relayParent RelayChainBlockInfo, + operatingConstraints Constraints, + candidate ProspectiveCandidate) (*Fragment, error) { + modifications, err := checkAgainstConstraints( + relayParent, + operatingConstraints, + candidate.Commitments, + candidate.ValidationCodeHash, + candidate.PersistedValidationData, + ) + if err != nil { + return nil, err 
+ } + + return &Fragment{ + relayParent: relayParent, + operatingConstraints: operatingConstraints, + candidate: candidate, + modifications: modifications, + }, nil +} + +func checkAgainstConstraints( + relayParent RelayChainBlockInfo, + operatingConstraints Constraints, + commitments parachaintypes.CandidateCommitments, + validationCodeHash parachaintypes.ValidationCodeHash, + persistedValidationData parachaintypes.PersistedValidationData, +) (ConstraintModifications, error) { + return ConstraintModifications{}, nil +} From 51196497e4c23847ad13f5be843d714234408cd6 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 13 Nov 2024 20:42:45 -0400 Subject: [PATCH 04/31] wip: implementing `validateAgainstConstraints` --- .../fragment-chain/errors.go | 5 +- .../inclusion-emulator/inclusion_emulator.go | 249 +++++++++--------- 2 files changed, 132 insertions(+), 122 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/errors.go b/dot/parachain/prospective-parachains/fragment-chain/errors.go index 3f13f8ba0a..ed3b8bc62a 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/errors.go +++ b/dot/parachain/prospective-parachains/fragment-chain/errors.go @@ -5,7 +5,6 @@ import ( "fmt" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" "github.com/ChainSafe/gossamer/lib/common" ) @@ -47,7 +46,7 @@ func (e ErrForkChoiceRule) Error() string { } type ErrComputeConstraints struct { - modificationErr inclusionemulator.ModificationError + modificationErr error } func (e ErrComputeConstraints) Error() string { @@ -55,7 +54,7 @@ func (e ErrComputeConstraints) Error() string { } type ErrCheckAgainstConstraints struct { - fragmentValidityErr inclusionemulator.FragmentValidityError + fragmentValidityErr error } func (e ErrCheckAgainstConstraints) Error() string { diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go 
b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index 64453b62e7..fcb910b2bc 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -1,7 +1,10 @@ package inclusionemulator import ( + "bytes" + "errors" "fmt" + "iter" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" "github.com/ChainSafe/gossamer/lib/common" @@ -19,224 +22,142 @@ type ProspectiveCandidate struct { ValidationCodeHash parachaintypes.ValidationCodeHash } -// ModificationError is kinds of errors that can happen when modifying constraints -type ModificationError interface { - isModificationError() -} - -var ( - _ ModificationError = (*DisallowedHrmpWatermark)(nil) - _ ModificationError = (*NoSuchHrmpChannel)(nil) - _ ModificationError = (*HrmpMessagesOverflow)(nil) - _ ModificationError = (*HrmpBytesOverflow)(nil) - _ ModificationError = (*UmpMessagesOverflow)(nil) - _ ModificationError = (*UmpBytesOverflow)(nil) - _ ModificationError = (*DmpMessagesUnderflow)(nil) - _ ModificationError = (*AppliedNonexistentCodeUpgrade)(nil) -) - -type DisallowedHrmpWatermark struct { +type ErrDisallowedHrmpWatermark struct { blockNumber uint } -func (*DisallowedHrmpWatermark) isModificationError() {} - -func (e *DisallowedHrmpWatermark) String() string { +func (e *ErrDisallowedHrmpWatermark) Error() string { return fmt.Sprintf("DisallowedHrmpWatermark(BlockNumber: %d)", e.blockNumber) } -type NoSuchHrmpChannel struct { +type ErrNoSuchHrmpChannel struct { paraId parachaintypes.ParaID } -func (*NoSuchHrmpChannel) isModificationError() {} - -func (e *NoSuchHrmpChannel) String() string { +func (e *ErrNoSuchHrmpChannel) Error() string { return fmt.Sprintf("NoSuchHrmpChannel(ParaId: %d)", e.paraId) } -type HrmpMessagesOverflow struct { +type ErrHrmpMessagesOverflow struct { paraId parachaintypes.ParaID messagesRemaining uint messagesSubmitted uint } -func (*HrmpMessagesOverflow) isModificationError() {} - 
-func (e *HrmpMessagesOverflow) String() string { +func (e *ErrHrmpMessagesOverflow) Error() string { return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", e.paraId, e.messagesRemaining, e.messagesSubmitted) } -type HrmpBytesOverflow struct { +type ErrHrmpBytesOverflow struct { paraId parachaintypes.ParaID bytesRemaining uint bytesSubmitted uint } -func (*HrmpBytesOverflow) isModificationError() {} - -func (e *HrmpBytesOverflow) String() string { +func (e *ErrHrmpBytesOverflow) Error() string { return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", e.paraId, e.bytesRemaining, e.bytesSubmitted) } -type UmpMessagesOverflow struct { +type ErrUmpMessagesOverflow struct { messagesRemaining uint messagesSubmitted uint } -func (*UmpMessagesOverflow) isModificationError() {} - -func (e *UmpMessagesOverflow) String() string { +func (e *ErrUmpMessagesOverflow) Error() string { return fmt.Sprintf("UmpMessagesOverflow(MessagesRemaining: %d, MessagesSubmitted: %d)", e.messagesRemaining, e.messagesSubmitted) } -type UmpBytesOverflow struct { +type ErrUmpBytesOverflow struct { bytesRemaining uint bytesSubmitted uint } -func (*UmpBytesOverflow) isModificationError() {} - -func (e *UmpBytesOverflow) String() string { +func (e *ErrUmpBytesOverflow) Error() string { return fmt.Sprintf("UmpBytesOverflow(BytesRemaining: %d, BytesSubmitted: %d)", e.bytesRemaining, e.bytesSubmitted) } -type DmpMessagesUnderflow struct { +type ErrDmpMessagesUnderflow struct { messagesRemaining uint messagesProcessed uint } -func (*DmpMessagesUnderflow) isModificationError() {} - -func (e *DmpMessagesUnderflow) String() string { +func (e *ErrDmpMessagesUnderflow) Error() string { return fmt.Sprintf("DmpMessagesUnderflow(MessagesRemaining: %d, MessagesProcessed: %d)", e.messagesRemaining, e.messagesProcessed) } -type AppliedNonexistentCodeUpgrade struct{} - -func (*AppliedNonexistentCodeUpgrade) isModificationError() {} - 
-func (e *AppliedNonexistentCodeUpgrade) String() string { - return "AppliedNonexistentCodeUpgrade()" -} - -// FragmentValidityError kinds of errors with the validity of a fragment. -type FragmentValidityError interface { - isFragmentValidityError() -} - var ( - _ FragmentValidityError = (*ValidationCodeMismatch)(nil) - _ FragmentValidityError = (*PersistedValidationDataMismatch)(nil) - _ FragmentValidityError = (*OutputsInvalid)(nil) - _ FragmentValidityError = (*CodeSizeTooLarge)(nil) - _ FragmentValidityError = (*RelayParentTooOld)(nil) - _ FragmentValidityError = (*DmpAdvancementRule)(nil) - _ FragmentValidityError = (*UmpMessagesPerCandidateOverflow)(nil) - _ FragmentValidityError = (*HrmpMessagesPerCandidateOverflow)(nil) - _ FragmentValidityError = (*CodeUpgradeRestricted)(nil) - _ FragmentValidityError = (*HrmpMessagesDescendingOrDuplicate)(nil) + ErrAppliedNonexistentCodeUpgrade = errors.New("AppliedNonexistentCodeUpgrade()") + ErrDmpAdvancementRule = errors.New("DmpAdvancementRule()") + ErrCodeUpgradeRestricted = errors.New("CodeUpgradeRestricted()") ) -type ValidationCodeMismatch struct { +type ErrValidationCodeMismatch struct { expected parachaintypes.ValidationCodeHash got parachaintypes.ValidationCodeHash } -func (*ValidationCodeMismatch) isFragmentValidityError() {} - -func (e *ValidationCodeMismatch) String() string { +func (e *ErrValidationCodeMismatch) Error() string { return fmt.Sprintf("ValidationCodeMismatch(Expected: %s, Got: %s)", e.expected, e.got) } -type PersistedValidationDataMismatch struct { +type ErrPersistedValidationDataMismatch struct { expected parachaintypes.PersistedValidationData got parachaintypes.PersistedValidationData } -func (*PersistedValidationDataMismatch) isFragmentValidityError() {} - -func (e *PersistedValidationDataMismatch) String() string { +func (e *ErrPersistedValidationDataMismatch) Error() string { return fmt.Sprintf("PersistedValidationDataMismatch(Expected: %v, Got: %v)", e.expected, e.got) } -type 
OutputsInvalid struct { - modificationError ModificationError +type ErrOutputsInvalid struct { + modificationError error } -func (*OutputsInvalid) isFragmentValidityError() {} - -func (e *OutputsInvalid) String() string { +func (e *ErrOutputsInvalid) Error() string { return fmt.Sprintf("OutputsInvalid(ModificationError: %v)", e.modificationError) } -type CodeSizeTooLarge struct { +type ErrCodeSizeTooLarge struct { maxAllowed uint newSize uint } -func (*CodeSizeTooLarge) isFragmentValidityError() {} - -func (e *CodeSizeTooLarge) String() string { +func (e *ErrCodeSizeTooLarge) Error() string { return fmt.Sprintf("CodeSizeTooLarge(MaxAllowed: %d, NewSize: %d)", e.maxAllowed, e.newSize) } -type RelayParentTooOld struct { +type ErrRelayParentTooOld struct { minAllowed uint current uint } -func (*RelayParentTooOld) isFragmentValidityError() {} - -func (e *RelayParentTooOld) String() string { +func (e *ErrRelayParentTooOld) Error() string { return fmt.Sprintf("RelayParentTooOld(MinAllowed: %d, Current: %d)", e.minAllowed, e.current) } -type DmpAdvancementRule struct{} - -func (*DmpAdvancementRule) isFragmentValidityError() {} - -func (e *DmpAdvancementRule) String() string { - return "DmpAdvancementRule()" -} - -type UmpMessagesPerCandidateOverflow struct { +type ErrUmpMessagesPerCandidateOverflow struct { messagesAllowed uint messagesSubmitted uint } -func (*UmpMessagesPerCandidateOverflow) isFragmentValidityError() {} - -func (e *UmpMessagesPerCandidateOverflow) String() string { +func (e *ErrUmpMessagesPerCandidateOverflow) Error() string { return fmt.Sprintf("UmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", e.messagesAllowed, e.messagesSubmitted) } -type HrmpMessagesPerCandidateOverflow struct { +type ErrHrmpMessagesPerCandidateOverflow struct { messagesAllowed uint messagesSubmitted uint } -func (*HrmpMessagesPerCandidateOverflow) isFragmentValidityError() {} - -func (e *HrmpMessagesPerCandidateOverflow) String() string { +func (e 
*ErrHrmpMessagesPerCandidateOverflow) Error() string { return fmt.Sprintf("HrmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", e.messagesAllowed, e.messagesSubmitted) } -type CodeUpgradeRestricted struct{} - -func (*CodeUpgradeRestricted) isFragmentValidityError() {} - -func (e *CodeUpgradeRestricted) String() string { - return "CodeUpgradeRestricted()" -} - -type HrmpMessagesDescendingOrDuplicate struct { +type ErrHrmpMessagesDescendingOrDuplicate struct { index uint } -func (*HrmpMessagesDescendingOrDuplicate) isFragmentValidityError() {} - -func (e *HrmpMessagesDescendingOrDuplicate) String() string { +func (e *ErrHrmpMessagesDescendingOrDuplicate) Error() string { return fmt.Sprintf("HrmpMessagesDescendingOrDuplicate(Index: %d)", e.index) } @@ -429,9 +350,14 @@ type Fragment struct { relayParent RelayChainBlockInfo operatingConstraints Constraints candidate ProspectiveCandidate - modifications ConstraintModifications + modifications *ConstraintModifications } +// NewFragment creates a new Fragment. This fails if the fragment isnt in line +// with the operating constraints. That is, either its inputs or outputs fail +// checks against the constraints. +// This does not check that the collator signature is valid or wheter the PoV is +// small enough. 
func NewFragment( relayParent RelayChainBlockInfo, operatingConstraints Constraints, @@ -461,6 +387,91 @@ func checkAgainstConstraints( commitments parachaintypes.CandidateCommitments, validationCodeHash parachaintypes.ValidationCodeHash, persistedValidationData parachaintypes.PersistedValidationData, -) (ConstraintModifications, error) { - return ConstraintModifications{}, nil +) (*ConstraintModifications, error) { + upwardMessages := make([]parachaintypes.UpwardMessage, 0) + // filter UMP signals + for upwardMessage := range skipUmpSignals(commitments.UpwardMessages) { + upwardMessages = append(upwardMessages, upwardMessage) + } + + umpMessagesSent := len(upwardMessages) + umpBytesSent := 0 + for _, message := range upwardMessages { + umpBytesSent += len(message) + } + + hrmpWatermark := HrmpWatermarkUpdate{ + Type: Trunk, + Block: uint(commitments.HrmpWatermark), + } + + if uint(commitments.HrmpWatermark) == relayParent.Number { + hrmpWatermark.Type = Head + } + + outboundHrmp := make(map[parachaintypes.ParaID]OutboundHrmpChannelModification) + var lastRecipient *parachaintypes.ParaID + + for i, message := range commitments.HorizontalMessages { + if lastRecipient != nil && *lastRecipient >= parachaintypes.ParaID(message.Recipient) { + return nil, &ErrHrmpMessagesDescendingOrDuplicate{index: uint(i)} + } + + recipientParaID := parachaintypes.ParaID(message.Recipient) + lastRecipient = &recipientParaID + record, ok := outboundHrmp[recipientParaID] + if !ok { + record = OutboundHrmpChannelModification{} + } + + record.BytesSubmitted += uint(len(message.Data)) + record.MessagesSubmitted++ + outboundHrmp[recipientParaID] = record + } + + codeUpgradeApplied := false + if operatingConstraints.FutureValidationCode != nil { + codeUpgradeApplied = relayParent.Number >= operatingConstraints.FutureValidationCode.BlockNumber + } + + modifications := &ConstraintModifications{ + RequiredParent: &commitments.HeadData, + HrmpWatermark: &hrmpWatermark, + OutboundHrmp: 
outboundHrmp, + UmpMessagesSent: uint(umpMessagesSent), + UmpBytesSent: uint(umpBytesSent), + DmpMessagesProcessed: uint(commitments.ProcessedDownwardMessages), + CodeUpgradeApplied: codeUpgradeApplied, + } + + err := validateAgainstConstraints( + operatingConstraints, + relayParent, + commitments, + persistedValidationData, + validationCodeHash, + modifications, + ) + if err != nil { + return nil, err + } + + return modifications, nil +} + +// UmpSeparator is a constant used to separate UMP signals. +var UmpSeparator = []byte{} + +// skipUmpSignals is a utility function for skipping the UMP signals. +func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[parachaintypes.UpwardMessage] { + return func(yield func(parachaintypes.UpwardMessage) bool) { + for _, message := range upwardMessages { + if !bytes.Equal([]byte(message), UmpSeparator) { + if !yield([]byte(message)) { + return + } + } + return + } + } } From e072160d877b2d7fa439b02647af099435636f63 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 14 Nov 2024 17:25:38 -0400 Subject: [PATCH 05/31] feat: implement `BackedChain` --- .../fragment-chain/fragment_chain.go | 90 ++++++++++ .../fragment-chain/fragment_chain_test.go | 106 ++++++++++++ .../statement_distribution.go | 1 - dot/parachain/types/types.go | 7 + .../inclusion-emulator/inclusion_emulator.go | 161 +++++++++++++++++- 5 files changed, 361 insertions(+), 4 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 18e7a62ff7..aa55877e9c 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -3,6 +3,7 @@ package fragmentchain import ( "fmt" "iter" + "slices" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" inclusionemulator 
"github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" @@ -328,6 +329,8 @@ func (s *Scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHas return nil } +// Fragment node is a node that belongs to a `BackedChain`. It holds constraints based on +// the ancestors in the chain type FragmentNode struct { fragment inclusionemulator.Fragment candidateHash parachaintypes.CandidateHash @@ -335,3 +338,90 @@ type FragmentNode struct { parentHeadDataHash common.Hash outputHeadDataHash common.Hash } + +func (f *FragmentNode) relayParent() common.Hash { + return f.fragment.RelayParent().Hash +} + +// NewCandidateEntryFromFragment creates a candidate entry from a fragment, we dont need +// to perform the checks done in `NewCandidateEntry` since a `FragmentNode` always comes +// from a `CandidateEntry` +func NewCandidateEntryFromFragment(node *FragmentNode) *CandidateEntry { + return &CandidateEntry{ + candidateHash: node.candidateHash, + parentHeadDataHash: node.parentHeadDataHash, + outputHeadDataHash: node.outputHeadDataHash, + candidate: node.fragment.Candidate(), + relayParent: node.relayParent(), + // a fragment node is always backed + state: Backed, + } +} + +// BackedChain is a chain of backed/backable candidates +// Includes candidates pending availability and candidates which may be backed on-chain +type BackedChain struct { + // holds the candidate chain + chain []*FragmentNode + + // index from parent head data to the candidate that has that head data as parent + // only contains the candidates present in the `chain` + byParentHead map[common.Hash]parachaintypes.CandidateHash + + // index from head data hash to the candidate hash outputting that head data + // only contains the candidates present in the `chain` + byOutputHead map[common.Hash]parachaintypes.CandidateHash + + // a set of candidate hashes in the `chain` + candidates map[parachaintypes.CandidateHash]struct{} +} + +func (bc *BackedChain) Push(candidate FragmentNode) { + 
bc.candidates[candidate.candidateHash] = struct{}{} + bc.byParentHead[candidate.parentHeadDataHash] = candidate.candidateHash + bc.byOutputHead[candidate.outputHeadDataHash] = candidate.candidateHash + bc.chain = append(bc.chain, &candidate) +} + +func (bc *BackedChain) Clear() []*FragmentNode { + bc.byParentHead = make(map[common.Hash]parachaintypes.CandidateHash) + bc.byOutputHead = make(map[common.Hash]parachaintypes.CandidateHash) + bc.candidates = make(map[parachaintypes.CandidateHash]struct{}) + + oldChain := bc.chain + bc.chain = nil + return oldChain +} + +func (bc *BackedChain) RevertToParentHash(parentHeadDataHash common.Hash) []*FragmentNode { + foundIndex := -1 + + for i := 0; i < len(bc.chain); i++ { + node := bc.chain[i] + + if foundIndex != -1 { + delete(bc.byParentHead, node.parentHeadDataHash) + delete(bc.byOutputHead, node.outputHeadDataHash) + delete(bc.candidates, node.candidateHash) + } else if node.outputHeadDataHash == parentHeadDataHash { + foundIndex = i + } + } + + if foundIndex != -1 { + // drain the elements from the found index until + // the end of the slice and return them + removed := make([]*FragmentNode, len(bc.chain)-(foundIndex+1)) + copy(removed, bc.chain[foundIndex+1:]) + bc.chain = slices.Delete(bc.chain, foundIndex+1, len(bc.chain)) + + return removed + } + + return nil +} + +func (bc *BackedChain) Contains(hash parachaintypes.CandidateHash) bool { + _, ok := bc.candidates[hash] + return ok +} diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 327e9f0692..2690206698 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -313,3 +313,109 @@ func TestEarliestRelayParent(t *testing.T) { }) } } + +func TestBackedChain_RevertToParentHash(t *testing.T) { + tests := map[string]struct { + setup func() 
*BackedChain + hash common.Hash + expectedChainSize int + expectedRemovedFragments int + }{ + "revert_to_parent_at_pos_2": { + setup: func() *BackedChain { + chain := &BackedChain{ + chain: make([]*FragmentNode, 0), + byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), + byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), + candidates: make(map[parachaintypes.CandidateHash]struct{}), + } + + for i := 0; i < 5; i++ { + node := &FragmentNode{ + candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, + parentHeadDataHash: common.Hash{byte(i)}, + outputHeadDataHash: common.Hash{byte(i + 1)}, + cumulativeModifications: inclusionemulator.ConstraintModifications{}, + } + chain.Push(*node) + } + return chain + }, + hash: common.Hash{3}, + expectedChainSize: 3, + expectedRemovedFragments: 2, + }, + "revert_to_parent_at_pos_0": { + setup: func() *BackedChain { + chain := &BackedChain{ + chain: make([]*FragmentNode, 0), + byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), + byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), + candidates: make(map[parachaintypes.CandidateHash]struct{}), + } + + for i := 0; i < 2; i++ { + node := &FragmentNode{ + candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, + parentHeadDataHash: common.Hash{byte(i)}, + outputHeadDataHash: common.Hash{byte(i + 1)}, + cumulativeModifications: inclusionemulator.ConstraintModifications{}, + } + chain.Push(*node) + } + return chain + }, + hash: common.Hash{1}, + expectedChainSize: 1, + expectedRemovedFragments: 1, + }, + "no_node_removed": { + setup: func() *BackedChain { + chain := &BackedChain{ + chain: make([]*FragmentNode, 0), + byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), + byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), + candidates: make(map[parachaintypes.CandidateHash]struct{}), + } + + for i := 0; i < 3; i++ { + node := &FragmentNode{ + candidateHash: 
parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, + parentHeadDataHash: common.Hash{byte(i)}, + outputHeadDataHash: common.Hash{byte(i + 1)}, + cumulativeModifications: inclusionemulator.ConstraintModifications{}, + } + chain.Push(*node) + } + return chain + }, + hash: common.Hash{99}, // Non-existent hash + expectedChainSize: 3, + expectedRemovedFragments: 0, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + backedChain := tt.setup() + removedNodes := backedChain.RevertToParentHash(tt.hash) + + // Check the number of removed nodes + assert.Equal(t, tt.expectedRemovedFragments, len(removedNodes)) + + // Check the properties of the chain + assert.Equal(t, tt.expectedChainSize, len(backedChain.chain)) + assert.Equal(t, tt.expectedChainSize, len(backedChain.byParentHead)) + assert.Equal(t, tt.expectedChainSize, len(backedChain.byOutputHead)) + assert.Equal(t, tt.expectedChainSize, len(backedChain.candidates)) + + // Check that the remaining nodes are correct + for i := 0; i < len(backedChain.chain); i++ { + assert.Contains(t, backedChain.byParentHead, common.Hash{byte(i)}) + assert.Contains(t, backedChain.byOutputHead, common.Hash{byte(i + 1)}) + assert.Contains(t, backedChain.candidates, parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}) + } + }) + } +} diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index e9d01812db..ff4ae40d8c 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -34,7 +34,6 @@ func (s StatementDistribution) Run(ctx context.Context, overseerToSubSystem <-ch } func (s StatementDistribution) processMessage(msg any) error { - switch msg := msg.(type) { case statementedistributionmessages.Backed: // TODO #4171 diff --git a/dot/parachain/types/types.go b/dot/parachain/types/types.go index 7ee33dd6a7..c2e1eeb005 
100644 --- a/dot/parachain/types/types.go +++ b/dot/parachain/types/types.go @@ -534,6 +534,13 @@ type PersistedValidationData struct { MaxPovSize uint32 `scale:"4"` } +func (pvd PersistedValidationData) Equal(other PersistedValidationData) bool { + return bytes.Equal(pvd.ParentHead.Data, other.ParentHead.Data) && + pvd.RelayParentNumber == other.RelayParentNumber && + pvd.RelayParentStorageRoot == other.RelayParentStorageRoot && + pvd.MaxPovSize == other.MaxPovSize +} + func (pvd PersistedValidationData) Hash() (common.Hash, error) { bytes, err := scale.Marshal(pvd) if err != nil { diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index fcb910b2bc..9459b00306 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -5,9 +5,11 @@ import ( "errors" "fmt" "iter" + "slices" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ethereum/go-ethereum/common/math" ) // ProspectiveCandidate includes key informations that represents a candidate @@ -203,6 +205,69 @@ type Constraints struct { FutureValidationCode *FutureValidationCode } +func (c *Constraints) CheckModifications(modifications *ConstraintModifications) error { + if modifications.HrmpWatermark != nil && modifications.HrmpWatermark.Type == Trunk { + if !slices.Contains(c.HrmpInbound.ValidWatermarks, modifications.HrmpWatermark.Watermark()) { + return &ErrDisallowedHrmpWatermark{blockNumber: modifications.HrmpWatermark.Watermark()} + } + } + + for id, outboundHrmpMod := range modifications.OutboundHrmp { + outbound, ok := c.HrmpChannelsOut[id] + if !ok { + return &ErrNoSuchHrmpChannel{paraId: id} + } + + _, overflow := math.SafeSub(uint64(outbound.BytesRemaining), uint64(outboundHrmpMod.BytesSubmitted)) + if overflow { + return &ErrHrmpBytesOverflow{ + paraId: id, + 
bytesRemaining: outbound.BytesRemaining, + bytesSubmitted: outboundHrmpMod.BytesSubmitted, + } + } + + _, overflow = math.SafeSub(uint64(outbound.MessagesRemaining), uint64(outboundHrmpMod.MessagesSubmitted)) + if overflow { + return &ErrHrmpMessagesOverflow{ + paraId: id, + messagesRemaining: outbound.MessagesRemaining, + messagesSubmitted: outboundHrmpMod.MessagesSubmitted, + } + } + } + + _, overflow := math.SafeSub(uint64(c.UmpRemaining), uint64(modifications.UmpMessagesSent)) + if overflow { + return &ErrUmpMessagesOverflow{ + messagesRemaining: c.UmpRemaining, + messagesSubmitted: modifications.UmpMessagesSent, + } + } + + _, overflow = math.SafeSub(uint64(c.UmpRemainingBytes), uint64(modifications.UmpBytesSent)) + if overflow { + return &ErrUmpBytesOverflow{ + bytesRemaining: c.UmpRemainingBytes, + bytesSubmitted: modifications.UmpBytesSent, + } + } + + _, overflow = math.SafeSub(uint64(len(c.DmpRemainingMessages)), uint64(modifications.DmpMessagesProcessed)) + if overflow { + return &ErrDmpMessagesUnderflow{ + messagesRemaining: uint(len(c.DmpRemainingMessages)), + messagesProcessed: modifications.DmpMessagesProcessed, + } + } + + if c.FutureValidationCode == nil && modifications.CodeUpgradeApplied { + return ErrAppliedNonexistentCodeUpgrade + } + + return nil +} + func FromPrimitiveConstraints(pc parachaintypes.Constraints) *Constraints { hrmpChannelsOut := make(map[parachaintypes.ParaID]OutboundHrmpChannelLimitations) for k, v := range pc.HrmpChannelsOut { @@ -348,11 +413,19 @@ func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { // This is a type which guarantees that the candidate is valid under the operating constraints type Fragment struct { relayParent RelayChainBlockInfo - operatingConstraints Constraints + operatingConstraints *Constraints candidate ProspectiveCandidate modifications *ConstraintModifications } +func (f *Fragment) RelayParent() RelayChainBlockInfo { + return f.relayParent +} + +func (f *Fragment) Candidate() 
ProspectiveCandidate { + return f.candidate +} + // NewFragment creates a new Fragment. This fails if the fragment isnt in line // with the operating constraints. That is, either its inputs or outputs fail // checks against the constraints. @@ -360,7 +433,7 @@ type Fragment struct { // small enough. func NewFragment( relayParent RelayChainBlockInfo, - operatingConstraints Constraints, + operatingConstraints *Constraints, candidate ProspectiveCandidate) (*Fragment, error) { modifications, err := checkAgainstConstraints( relayParent, @@ -383,7 +456,7 @@ func NewFragment( func checkAgainstConstraints( relayParent RelayChainBlockInfo, - operatingConstraints Constraints, + operatingConstraints *Constraints, commitments parachaintypes.CandidateCommitments, validationCodeHash parachaintypes.ValidationCodeHash, persistedValidationData parachaintypes.PersistedValidationData, @@ -475,3 +548,85 @@ func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[para } } } + +func validateAgainstConstraints( + constraints *Constraints, + relayParent RelayChainBlockInfo, + commitments parachaintypes.CandidateCommitments, + persistedValidationData parachaintypes.PersistedValidationData, + validationCodeHash parachaintypes.ValidationCodeHash, + modifications *ConstraintModifications, +) error { + expectedPVD := parachaintypes.PersistedValidationData{ + ParentHead: constraints.RequiredParent, + RelayParentNumber: uint32(relayParent.Number), + RelayParentStorageRoot: relayParent.StorageRoot, + MaxPovSize: uint32(constraints.MaxPoVSize), + } + + if !expectedPVD.Equal(persistedValidationData) { + return &ErrPersistedValidationDataMismatch{ + expected: expectedPVD, + got: persistedValidationData, + } + } + + if constraints.ValidationCodeHash != validationCodeHash { + return &ErrValidationCodeMismatch{ + expected: constraints.ValidationCodeHash, + got: validationCodeHash, + } + } + + if relayParent.Number < constraints.MinRelayParentNumber { + return &ErrRelayParentTooOld{ + 
minAllowed: constraints.MinRelayParentNumber, + current: relayParent.Number, + } + } + + if commitments.NewValidationCode != nil { + switch constraints.UpgradeRestriction.(type) { + case *parachaintypes.Present: + return ErrCodeUpgradeRestricted + } + } + + announcedCodeSize := 0 + if commitments.NewValidationCode != nil { + announcedCodeSize = len(*commitments.NewValidationCode) + } + + if uint(announcedCodeSize) > constraints.MaxCodeSize { + return &ErrCodeSizeTooLarge{ + maxAllowed: constraints.MaxCodeSize, + newSize: uint(announcedCodeSize), + } + } + + if modifications.DmpMessagesProcessed == 0 { + if len(constraints.DmpRemainingMessages) > 0 && constraints.DmpRemainingMessages[0] <= relayParent.Number { + return ErrDmpAdvancementRule + } + } + + if len(commitments.HorizontalMessages) > int(constraints.MaxHrmpNumPerCandidate) { + return &ErrHrmpMessagesPerCandidateOverflow{ + messagesAllowed: constraints.MaxHrmpNumPerCandidate, + messagesSubmitted: uint(len(commitments.HorizontalMessages)), + } + } + + if modifications.UmpMessagesSent > constraints.MaxUmpNumPerCandidate { + return &ErrUmpMessagesPerCandidateOverflow{ + messagesAllowed: constraints.MaxUmpNumPerCandidate, + messagesSubmitted: modifications.UmpMessagesSent, + } + } + + if err := constraints.CheckModifications(modifications); err != nil { + return &ErrOutputsInvalid{modificationError: err} + } + + return nil +} From 0de059a7a46c5f2aaba0835065cf3e6d2ec42192 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 15 Nov 2024 09:51:30 -0400 Subject: [PATCH 06/31] wip: `FragmentChain` struct implementation --- .../fragment-chain/fragment_chain.go | 274 +++++++++++++++++- .../fragment-chain/fragment_chain_test.go | 16 +- dot/parachain/util/.DS_Store | Bin 0 -> 6148 bytes .../inclusion-emulator/inclusion_emulator.go | 134 ++++++++- 4 files changed, 408 insertions(+), 16 deletions(-) create mode 100644 dot/parachain/util/.DS_Store diff --git 
a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index aa55877e9c..dd75953f78 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -1,6 +1,7 @@ package fragmentchain import ( + "bytes" "fmt" "iter" "slices" @@ -18,6 +19,10 @@ const ( Backed ) +func forkSelectionRule(hash1, hash2 parachaintypes.CandidateHash) int { + return bytes.Compare(hash1.Value[:], hash2.Value[:]) +} + // CandidateEntry represents a candidate into the CandidateStorage // TODO: Should CandidateEntry implements `HypotheticalOrConcreteCandidate` type CandidateEntry struct { @@ -88,6 +93,14 @@ type CandidateStorage struct { byCandidateHash map[parachaintypes.CandidateHash]*CandidateEntry } +func NewCandidateStorage() *CandidateStorage { + return &CandidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + } +} + func (c *CandidateStorage) AddPendingAvailabilityCandidate( candidateHash parachaintypes.CandidateHash, candidate parachaintypes.CommittedCandidateReceipt, @@ -244,7 +257,7 @@ type Scope struct { // candidates pending availability at this block pendindAvailability []*PendindAvailability // the base constraints derived from the latest included candidate - baseConstraints parachaintypes.Constraints + baseConstraints *inclusionemulator.Constraints // equal to `max_candidate_depth` maxDepth uint } @@ -261,7 +274,7 @@ type Scope struct { // should be provided. It is allowed to provide 0 ancestors. 
func NewScopeWithAncestors( relayParent inclusionemulator.RelayChainBlockInfo, - baseConstraints parachaintypes.Constraints, + baseConstraints *inclusionemulator.Constraints, pendingAvailability []*PendindAvailability, maxDepth uint, ancestors iter.Seq[inclusionemulator.RelayChainBlockInfo], @@ -332,9 +345,9 @@ func (s *Scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHas // Fragment node is a node that belongs to a `BackedChain`. It holds constraints based on // the ancestors in the chain type FragmentNode struct { - fragment inclusionemulator.Fragment + fragment *inclusionemulator.Fragment candidateHash parachaintypes.CandidateHash - cumulativeModifications inclusionemulator.ConstraintModifications + cumulativeModifications *inclusionemulator.ConstraintModifications parentHeadDataHash common.Hash outputHeadDataHash common.Hash } @@ -376,11 +389,20 @@ type BackedChain struct { candidates map[parachaintypes.CandidateHash]struct{} } -func (bc *BackedChain) Push(candidate FragmentNode) { +func NewBackedChain() *BackedChain { + return &BackedChain{ + chain: make([]*FragmentNode, 0), + byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), + byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), + candidates: make(map[parachaintypes.CandidateHash]struct{}), + } +} + +func (bc *BackedChain) Push(candidate *FragmentNode) { bc.candidates[candidate.candidateHash] = struct{}{} bc.byParentHead[candidate.parentHeadDataHash] = candidate.candidateHash bc.byOutputHead[candidate.outputHeadDataHash] = candidate.candidateHash - bc.chain = append(bc.chain, &candidate) + bc.chain = append(bc.chain, candidate) } func (bc *BackedChain) Clear() []*FragmentNode { @@ -425,3 +447,243 @@ func (bc *BackedChain) Contains(hash parachaintypes.CandidateHash) bool { _, ok := bc.candidates[hash] return ok } + +// this is a fragment chain specific to an active leaf. 
It holds the current +// best backable candidate chain, as well as potential candidates which could +// become connected to the chain in the future or which could even overwrite +// the existing chain +type FragmentChain struct { + // the current scope, which dictates the on-chain operating constraints that + // all future candidates must ad-here to. + scope *Scope + + // the current best chain of backable candidates. It only contains candidates + // which build on top of each other and which have reached the backing quorum. + // In the presence of potential forks, this chain will pick a fork according to + // the `forkSelectionRule` + bestChain *BackedChain + + // the potential candidate storage. Contains candidates which are not yet part of + // the `chain` but may become in the future. These can form any tree shape as well + // as contain unconnected candidates for which we don't know the parent. + unconnected *CandidateStorage +} + +// NewFragmentChain createa a new fragment chain with the given scope and populates it with +// the candidates pending availability +func NewFragmentChain(scope *Scope, candidatesPendingAvailability *CandidateStorage) *FragmentChain { + fragmentChain := &FragmentChain{ + scope: scope, + bestChain: NewBackedChain(), + unconnected: NewCandidateStorage(), + } + + // we only need to populate the best backable chain. Candidates pending availability + // must form a chain with the latest included head. + fragmentChain.populateChain(candidatesPendingAvailability) + return fragmentChain +} + +// earliestRelayParent returns the earliest relay parent a new candidate can have in order +// to be added to the chain right now. This is the relay parent of the latest candidate in +// the chain. The value returned may not be valid if we want to add a candidate pending +// availability, which may have a relay parent which is out of scope, special handling +// is needed in that case. 
+func (f *FragmentChain) earliestRelayParent() *inclusionemulator.RelayChainBlockInfo { + if len(f.bestChain.chain) > 0 { + lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1] + info := f.scope.Ancestor(lastCandidate.relayParent()) + if info != nil { + return info + } + + // if the relay parent is out of scope AND it is in the chain + // it must be a candidate pending availability + pending := f.scope.GetPendingAvailability(lastCandidate.candidateHash) + if pending == nil { + return nil + } + + return &pending.RelayParent + } + + earliest := f.scope.EarliestRelayParent() + return &earliest +} + +type possibleChild struct { + fragment *inclusionemulator.Fragment + candidateHash parachaintypes.CandidateHash + outputHeadDataHash common.Hash + parentHeadDataHash common.Hash +} + +// populateChain populates the fragment chain with candidates from the supplied `CandidateStorage`. +// Can be called by the `NewFragmentChain` or when backing a new candidate. When this is called +// it may cause the previous chain to be completely erased or it may add more than one candidate +func (f *FragmentChain) populateChain(storage *CandidateStorage) { + var cumulativeModifications *inclusionemulator.ConstraintModifications + if len(f.bestChain.chain) > 0 { + lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1] + cumulativeModifications = lastCandidate.cumulativeModifications + } else { + cumulativeModifications = inclusionemulator.NewConstraintModificationsIdentity() + } + + earliestRelayParent := f.earliestRelayParent() + if earliestRelayParent == nil { + return + } + + for { + if len(f.bestChain.chain) > int(f.scope.maxDepth) { + break + } + + childConstraints, err := f.scope.baseConstraints.ApplyModifications(cumulativeModifications) + if err != nil { + // TODO: include logger + fmt.Println("failed to apply modifications:", err) + break + } + + requiredHeadHash, err := childConstraints.RequiredParent.Hash() + if err != nil { + fmt.Println("failed while hashing 
required parent:", err) + } + + possibleChildren := make([]*possibleChild, 0) + // select the few possible backed/backable children which can be added to the chain right now + for candidateEntry := range storage.possibleBackedParaChildren(requiredHeadHash) { + // only select a candidate if: + // 1. it does not introduce a fork or a cycle + // 2. parent hash is correct + // 3. relay parent does not move backwards + // 4. all non-pending-availability candidates have relay-parent in the scope + // 5. candidate outputs fulfill constraints + + var relayParent inclusionemulator.RelayChainBlockInfo + var minRelayParent uint + + pending := f.scope.GetPendingAvailability(candidateEntry.candidateHash) + if pending != nil { + relayParent = pending.RelayParent + if len(f.bestChain.chain) == 0 { + minRelayParent = pending.RelayParent.Number + } else { + minRelayParent = earliestRelayParent.Number + } + } else { + info := f.scope.Ancestor(candidateEntry.relayParent) + if info == nil { + continue + } + + relayParent = *info + minRelayParent = earliestRelayParent.Number + } + + if err := f.checkCyclesOrInvalidTree(candidateEntry.outputHeadDataHash); err != nil { + fmt.Println("checking cycle or invalid tree:", err) + continue + } + + // require: candidates dont move backwards and only pending availability + // candidates can be out-of-scope. 
+ // + // earliest relay parent can be before the + + if relayParent.Number < minRelayParent { + // relay parent moved backwards + continue + } + + // don't add candidates if they're already present in the chain + // this can never happen, as candidates can only be duplicated + // if there's a cycle and we shouldnt have allowed for a cycle + // to be chained + if f.bestChain.Contains(candidateEntry.candidateHash) { + continue + } + + constraints := childConstraints.Clone() + if pending != nil { + // overwrite for candidates pending availability as a special-case + constraints.MinRelayParentNumber = pending.RelayParent.Number + } + + fragment, err := inclusionemulator.NewFragment(relayParent, constraints, candidateEntry.candidate) + if err != nil { + fmt.Println("failed to create fragment:", err) + continue + } + + possibleChildren = append(possibleChildren, &possibleChild{ + fragment: fragment, + candidateHash: candidateEntry.candidateHash, + outputHeadDataHash: candidateEntry.outputHeadDataHash, + parentHeadDataHash: candidateEntry.parentHeadDataHash, + }) + } + + if len(possibleChildren) == 0 { + break + } + + // choose the best candidate + bestCandidate := slices.MinFunc(possibleChildren, func(fst, snd *possibleChild) int { + // always pick a candidate pending availability as best. 
+ if f.scope.GetPendingAvailability(fst.candidateHash) != nil { + return -1 + } else if f.scope.GetPendingAvailability(snd.candidateHash) != nil { + return 1 + } else { + return forkSelectionRule(fst.candidateHash, snd.candidateHash) + } + }) + + // remove the candidate from storage + storage.removeCandidate(bestCandidate.candidateHash) + + // update the cumulative constraint modifications + cumulativeModifications.Stack(bestCandidate.fragment.ConstraintModifications()) + + // update the earliest relay parent + earliestRelayParent = &inclusionemulator.RelayChainBlockInfo{ + Hash: bestCandidate.fragment.RelayParent().Hash, + Number: bestCandidate.fragment.RelayParent().Number, + StorageRoot: bestCandidate.fragment.RelayParent().StorageRoot, + } + + node := &FragmentNode{ + fragment: bestCandidate.fragment, + candidateHash: bestCandidate.candidateHash, + parentHeadDataHash: bestCandidate.parentHeadDataHash, + outputHeadDataHash: bestCandidate.outputHeadDataHash, + cumulativeModifications: cumulativeModifications.Clone(), + } + + // add the candidate to the chain now + f.bestChain.Push(node) + } +} + +// checkCyclesOrInvalidTree checks whether a candidate outputting this head data would +// introduce a cycle or multiple paths to the same state. Trivial 0-length cycles are +// checked in `NewCandidateEntry`. 
+func (f *FragmentChain) checkCyclesOrInvalidTree(outputHeadDataHash common.Hash) error { + // this should catch a cycle where this candidate would point back to the parent + // of some candidate in the chain + _, ok := f.bestChain.byParentHead[outputHeadDataHash] + if ok { + return ErrCycle + } + + // multiple paths to the same state, which cannot happen for a chain + _, ok = f.bestChain.byOutputHead[outputHeadDataHash] + if ok { + return ErrMultiplePaths + } + + return nil +} diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 2690206698..13ed8e3071 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -262,7 +262,7 @@ func TestEarliestRelayParent(t *testing.T) { Hash: common.Hash{0x01}, Number: 10, } - baseConstraints := parachaintypes.Constraints{ + baseConstraints := &inclusionemulator.Constraints{ MinRelayParentNumber: 5, } ancestor := inclusionemulator.RelayChainBlockInfo{ @@ -288,7 +288,7 @@ func TestEarliestRelayParent(t *testing.T) { Hash: common.Hash{0x01}, Number: 10, } - baseConstraints := parachaintypes.Constraints{ + baseConstraints := &inclusionemulator.Constraints{ MinRelayParentNumber: 5, } return &Scope{ @@ -335,9 +335,9 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: inclusionemulator.ConstraintModifications{}, + cumulativeModifications: &inclusionemulator.ConstraintModifications{}, } - chain.Push(*node) + chain.Push(node) } return chain }, @@ -359,9 +359,9 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: 
common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: inclusionemulator.ConstraintModifications{}, + cumulativeModifications: &inclusionemulator.ConstraintModifications{}, } - chain.Push(*node) + chain.Push(node) } return chain }, @@ -383,9 +383,9 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: inclusionemulator.ConstraintModifications{}, + cumulativeModifications: &inclusionemulator.ConstraintModifications{}, } - chain.Push(*node) + chain.Push(node) } return chain }, diff --git a/dot/parachain/util/.DS_Store b/dot/parachain/util/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..f9e0685c8f2e305625d74c63e50bed948df668e9 GIT binary patch literal 6148 zcmeHKu};G<5Pb(7f>IR?`*Z=)G+E$AQT7%z7){&A+acyj-6pXI#}ftfT+*tZ1gpk zP)>3z9Xmsg(8Q=jql%{(Vs!Q=k1HKJL!(1H`4Io|XYwNX>zqI3a7bwwbtn)D^c6U^ z=2-9lEB-QrNq(OatxzBo_-6{psG3z%Zp!c0Z`;$mHnGfE)HJSBhep5m2w+3ck@Gxg c<4JwSm5!aE&Z7O9PK<{@79^@r;1?A50P1o;5&!@I literal 0 HcmV?d00001 diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index 9459b00306..99a3fc8ef8 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "iter" + "maps" "slices" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" @@ -205,6 +206,30 @@ type Constraints struct { FutureValidationCode *FutureValidationCode } +func (c *Constraints) Clone() *Constraints { + return &Constraints{ + MinRelayParentNumber: c.MinRelayParentNumber, + MaxPoVSize: c.MaxPoVSize, + MaxCodeSize: c.MaxCodeSize, + UmpRemaining: c.UmpRemaining, + UmpRemainingBytes: c.UmpRemainingBytes, + MaxUmpNumPerCandidate: 
c.MaxUmpNumPerCandidate, + DmpRemainingMessages: append([]uint(nil), c.DmpRemainingMessages...), + HrmpInbound: InboundHrmpLimitations{ + ValidWatermarks: append([]uint(nil), c.HrmpInbound.ValidWatermarks...), + }, + HrmpChannelsOut: maps.Clone(c.HrmpChannelsOut), + MaxHrmpNumPerCandidate: c.MaxHrmpNumPerCandidate, + RequiredParent: c.RequiredParent, + ValidationCodeHash: c.ValidationCodeHash, + UpgradeRestriction: c.UpgradeRestriction, + FutureValidationCode: &FutureValidationCode{ + BlockNumber: c.FutureValidationCode.BlockNumber, + ValidationCodeHash: c.FutureValidationCode.ValidationCodeHash, + }, + } +} + func (c *Constraints) CheckModifications(modifications *ConstraintModifications) error { if modifications.HrmpWatermark != nil && modifications.HrmpWatermark.Type == Trunk { if !slices.Contains(c.HrmpInbound.ValidWatermarks, modifications.HrmpWatermark.Watermark()) { @@ -268,6 +293,95 @@ func (c *Constraints) CheckModifications(modifications *ConstraintModifications) return nil } +func (c *Constraints) ApplyModifications(modifications *ConstraintModifications) (*Constraints, error) { + newConstraints := c.Clone() + + if modifications.RequiredParent != nil { + newConstraints.RequiredParent = *modifications.RequiredParent + } + + if modifications.HrmpWatermark != nil { + pos, found := slices.BinarySearch( + newConstraints.HrmpInbound.ValidWatermarks, + modifications.HrmpWatermark.Watermark()) + + if found { + // Exact match, so this is OK in all cases. + newConstraints.HrmpInbound.ValidWatermarks = newConstraints.HrmpInbound.ValidWatermarks[pos+1:] + } else { + switch modifications.HrmpWatermark.Type { + case Head: + // Updates to Head are always OK. + newConstraints.HrmpInbound.ValidWatermarks = newConstraints.HrmpInbound.ValidWatermarks[pos:] + case Trunk: + // Trunk update landing on disallowed watermark is not OK. 
+ return nil, &ErrDisallowedHrmpWatermark{blockNumber: modifications.HrmpWatermark.Block} + } + } + } + + for id, outboundHrmpMod := range modifications.OutboundHrmp { + outbound, ok := newConstraints.HrmpChannelsOut[id] + if !ok { + return nil, &ErrNoSuchHrmpChannel{id} + } + + if outboundHrmpMod.BytesSubmitted > outbound.BytesRemaining { + return nil, &ErrHrmpBytesOverflow{ + paraId: id, + bytesRemaining: outbound.BytesRemaining, + bytesSubmitted: outboundHrmpMod.BytesSubmitted, + } + } + + if outboundHrmpMod.MessagesSubmitted > outbound.MessagesRemaining { + return nil, &ErrHrmpMessagesOverflow{ + paraId: id, + messagesRemaining: outbound.MessagesRemaining, + messagesSubmitted: outboundHrmpMod.MessagesSubmitted, + } + } + + outbound.BytesRemaining -= outboundHrmpMod.BytesSubmitted + outbound.MessagesRemaining -= outboundHrmpMod.MessagesSubmitted + } + + if modifications.UmpMessagesSent > newConstraints.UmpRemaining { + return nil, &ErrUmpMessagesOverflow{ + messagesRemaining: newConstraints.UmpRemaining, + messagesSubmitted: modifications.UmpMessagesSent, + } + } + newConstraints.UmpRemaining -= modifications.UmpMessagesSent + + if modifications.UmpBytesSent > newConstraints.UmpRemainingBytes { + return nil, &ErrUmpBytesOverflow{ + bytesRemaining: newConstraints.UmpRemainingBytes, + bytesSubmitted: modifications.UmpBytesSent, + } + } + newConstraints.UmpRemainingBytes -= modifications.UmpBytesSent + + if modifications.DmpMessagesProcessed > uint(len(newConstraints.DmpRemainingMessages)) { + return nil, &ErrDmpMessagesUnderflow{ + messagesRemaining: uint(len(newConstraints.DmpRemainingMessages)), + messagesProcessed: modifications.DmpMessagesProcessed, + } + } else { + newConstraints.DmpRemainingMessages = newConstraints.DmpRemainingMessages[modifications.DmpMessagesProcessed:] + } + + if modifications.CodeUpgradeApplied { + if newConstraints.FutureValidationCode == nil { + return nil, ErrAppliedNonexistentCodeUpgrade + } + + newConstraints.ValidationCodeHash = 
newConstraints.FutureValidationCode.ValidationCodeHash + } + + return newConstraints, nil +} + func FromPrimitiveConstraints(pc parachaintypes.Constraints) *Constraints { hrmpChannelsOut := make(map[parachaintypes.ParaID]OutboundHrmpChannelLimitations) for k, v := range pc.HrmpChannelsOut { @@ -366,10 +480,22 @@ type ConstraintModifications struct { CodeUpgradeApplied bool } +func (cm *ConstraintModifications) Clone() *ConstraintModifications { + return &ConstraintModifications{ + RequiredParent: cm.RequiredParent, + HrmpWatermark: cm.HrmpWatermark, + OutboundHrmp: maps.Clone(cm.OutboundHrmp), + UmpMessagesSent: cm.UmpMessagesSent, + UmpBytesSent: cm.UmpBytesSent, + DmpMessagesProcessed: cm.DmpMessagesProcessed, + CodeUpgradeApplied: cm.CodeUpgradeApplied, + } +} + // Identity returns the 'identity' modifications: these can be applied to // any constraints and yield the exact same result. -func NewConstraintModificationsIdentity() ConstraintModifications { - return ConstraintModifications{ +func NewConstraintModificationsIdentity() *ConstraintModifications { + return &ConstraintModifications{ RequiredParent: nil, HrmpWatermark: nil, OutboundHrmp: make(map[parachaintypes.ParaID]OutboundHrmpChannelModification), @@ -426,6 +552,10 @@ func (f *Fragment) Candidate() ProspectiveCandidate { return f.candidate } +func (f *Fragment) ConstraintModifications() *ConstraintModifications { + return f.modifications +} + // NewFragment creates a new Fragment. This fails if the fragment isnt in line // with the operating constraints. That is, either its inputs or outputs fail // checks against the constraints. 
From 0388b02ddbe2c3f5647ef3f9e694eb8d35ecd1dd Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sat, 16 Nov 2024 10:52:06 -0400 Subject: [PATCH 07/31] feat: full `FragmentChain` implementation done --- .../fragment-chain/fragment_chain.go | 526 +++++++++++++++++- .../fragment-chain/fragment_chain_test.go | 57 ++ .../inclusion-emulator/inclusion_emulator.go | 28 +- 3 files changed, 591 insertions(+), 20 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index dd75953f78..8d5b057ce6 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -2,6 +2,7 @@ package fragmentchain import ( "bytes" + "container/list" "fmt" "iter" "slices" @@ -93,6 +94,42 @@ type CandidateStorage struct { byCandidateHash map[parachaintypes.CandidateHash]*CandidateEntry } +func (c *CandidateStorage) Clone() *CandidateStorage { + clone := NewCandidateStorage() + + for parentHead, candidates := range c.byParentHead { + clone.byParentHead[parentHead] = make(map[parachaintypes.CandidateHash]any) + for candidateHash := range candidates { + clone.byParentHead[parentHead][candidateHash] = struct{}{} + } + } + + for outputHead, candidates := range c.byOutputHead { + clone.byOutputHead[outputHead] = make(map[parachaintypes.CandidateHash]any) + for candidateHash := range candidates { + clone.byOutputHead[outputHead][candidateHash] = struct{}{} + } + } + + for candidateHash, entry := range c.byCandidateHash { + clone.byCandidateHash[candidateHash] = &CandidateEntry{ + candidateHash: entry.candidateHash, + parentHeadDataHash: entry.parentHeadDataHash, + outputHeadDataHash: entry.outputHeadDataHash, + relayParent: entry.relayParent, + candidate: inclusionemulator.ProspectiveCandidate{ + Commitments: entry.candidate.Commitments, + PersistedValidationData: 
entry.candidate.PersistedValidationData, + PoVHash: entry.candidate.PoVHash, + ValidationCodeHash: entry.candidate.ValidationCodeHash, + }, + state: entry.state, + } + } + + return clone +} + func NewCandidateStorage() *CandidateStorage { return &CandidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), @@ -115,8 +152,8 @@ func (c *CandidateStorage) AddPendingAvailabilityCandidate( } // Len return the number of stored candidate -func (c *CandidateStorage) Len() uint { - return uint(len(c.byCandidateHash)) +func (c *CandidateStorage) Len() int { + return len(c.byCandidateHash) } func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { @@ -143,6 +180,7 @@ func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { setOfCandidates[candidate.candidateHash] = struct{}{} c.byOutputHead[candidate.outputHeadDataHash] = setOfCandidates + c.byCandidateHash[candidate.candidateHash] = candidate return nil } @@ -277,13 +315,13 @@ func NewScopeWithAncestors( baseConstraints *inclusionemulator.Constraints, pendingAvailability []*PendindAvailability, maxDepth uint, - ancestors iter.Seq[inclusionemulator.RelayChainBlockInfo], + ancestors []inclusionemulator.RelayChainBlockInfo, ) (*Scope, error) { ancestorsMap := btree.NewMap[uint, inclusionemulator.RelayChainBlockInfo](100) ancestorsByHash := make(map[common.Hash]inclusionemulator.RelayChainBlockInfo) prev := relayParent.Number - for ancestor := range ancestors { + for _, ancestor := range ancestors { if prev == 0 { return nil, ErrUnexpectedAncestor{Number: ancestor.Number, Prev: prev} } @@ -484,6 +522,224 @@ func NewFragmentChain(scope *Scope, candidatesPendingAvailability *CandidateStor return fragmentChain } +// PopulateFromPrevious populates the `FragmentChain` given the new candidates pending +// availability and the optional previous fragment chain (of the previous relay parent) +func (f *FragmentChain) PopulateFromPrevious(prevFragmentChain 
*FragmentChain) { + prevStorage := prevFragmentChain.unconnected.Clone() + for _, candidate := range prevFragmentChain.bestChain.chain { + // if they used to be pending availability, dont add them. This is fine because: + // - if they still are pending availability, they have already been added to + // the new storage + // - if they were included, no point in keeping them + // + // This cannot happen for the candidates in the unconnected storage. The pending + // availability candidates will always be part of the best chain + pending := prevFragmentChain.scope.GetPendingAvailability(candidate.candidateHash) + if pending == nil { + prevStorage.addCandidateEntry(NewCandidateEntryFromFragment(candidate)) + } + } + + // first populate the best backable chain + f.populateChain(prevStorage) + + // now that we picked the best backable chain, trim the forks generated by candidates + // which are not present in the best chain + f.trimUneligibleForks(prevStorage, nil) + + // finally, keep any candidates which haven't been trimmed but still have potential + f.populateUnconnectedPotentialCandidates(prevStorage) +} + +func (f *FragmentChain) Scope() *Scope { + return f.scope +} + +func (f *FragmentChain) BestChainLen() int { + return len(f.bestChain.chain) +} + +func (f *FragmentChain) UnconnectedLen() int { + return f.unconnected.Len() +} + +func (f *FragmentChain) ContainsUnconnectedCandidate(candidate parachaintypes.CandidateHash) bool { + return f.unconnected.contains(candidate) +} + +// BestChainVec returns a vector of the chain's candidate hashes, in-order. +func (f *FragmentChain) BestChainVec() (hashes []parachaintypes.CandidateHash) { + hashes = make([]parachaintypes.CandidateHash, len(f.bestChain.chain)) + for idx, node := range f.bestChain.chain { + hashes[idx] = node.candidateHash + } + return hashes +} + +// Unconnected returns a vector of the unconnected potential candidate hashes, in arbitrary order. 
+func (f *FragmentChain) Unconnected() iter.Seq[*CandidateEntry] { + return f.unconnected.candidates() +} + +func (f *FragmentChain) IsCandidateBacked(hash parachaintypes.CandidateHash) bool { + if f.bestChain.Contains(hash) { + return true + } + + candidate := f.unconnected.byCandidateHash[hash] + return candidate != nil && candidate.state == Backed +} + +// CandidateBacked marks a candidate as backed. This can trigger a recreation of the best backable chain. +func (f *FragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.CandidateHash) { + // already backed + if f.bestChain.Contains(newlyBackedCandidate) { + return + } + + candidateEntry, ok := f.unconnected.byCandidateHash[newlyBackedCandidate] + if !ok { + // candidate is not in unconnected storage + return + } + + parentHeadDataHash := candidateEntry.parentHeadDataHash + f.unconnected.markBacked(newlyBackedCandidate) + + if !f.revertTo(parentHeadDataHash) { + // if nothing was reverted, there is nothing we can do for now + return + } + + prevStorage := f.unconnected.Clone() + f.unconnected = NewCandidateStorage() + + f.populateChain(prevStorage) + f.trimUneligibleForks(prevStorage, &parentHeadDataHash) + f.populateUnconnectedPotentialCandidates(prevStorage) +} + +// CanAddCandidateAsPotential checks if this candidate could be added in the future +func (f *FragmentChain) CanAddCandidateAsPotential(entry *CandidateEntry) error { + candidateHash := entry.candidateHash + if f.bestChain.Contains(candidateHash) || f.unconnected.contains(candidateHash) { + return ErrCandidateAlradyKnown + } + + return f.checkPotential(entry) +} + +// TryAddingSecondedCandidate tries to add a candidate as a seconded candidate, if the +// candidate has potential. 
It will never be added to the chain directly in the seconded +// state, it will only be part of the unconnected storage +func (f *FragmentChain) TryAddingSecondedCandidate(entry *CandidateEntry) error { + if entry.state == Backed { + return ErrIntroduceBackedCandidate + } + + err := f.CanAddCandidateAsPotential(entry) + if err != nil { + return err + } + + return f.unconnected.addCandidateEntry(entry) +} + +// GetHeadDataByHash tries to get the full head data associated with this hash +func (f *FragmentChain) GetHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { + reqParent := f.scope.baseConstraints.RequiredParent + reqParentHash, err := reqParent.Hash() + if err != nil { + return nil, fmt.Errorf("while hashing required parent: %w", err) + } + if reqParentHash == headDataHash { + return &reqParent, nil + } + + hasHeadDataInChain := false + if _, ok := f.bestChain.byParentHead[headDataHash]; ok { + hasHeadDataInChain = true + } else if _, ok := f.bestChain.byOutputHead[headDataHash]; ok { + hasHeadDataInChain = true + } + + if hasHeadDataInChain { + for _, candidate := range f.bestChain.chain { + if candidate.parentHeadDataHash == headDataHash { + headData := candidate. + fragment. + Candidate(). + PersistedValidationData. + ParentHead + return &headData, nil + } else if candidate.outputHeadDataHash == headDataHash { + headData := candidate.fragment.Candidate().Commitments.HeadData + return &headData, nil + } else { + continue + } + } + } + + return f.unconnected.headDataByHash(headDataHash), nil +} + +type CandidateAndRelayParent struct { + CandidateHash parachaintypes.CandidateHash + RealyParentHash common.Hash +} + +// FindBackableChain selects `count` candidates after the given `ancestors` which +// can be backed on chain next. 
The intention of the `ancestors` is to allow queries +// on the basis of one or more candidates which were previously pending availability +// becoming available or candidates timing out +func (f *FragmentChain) FindBackableChain( + ancestors map[parachaintypes.CandidateHash]struct{}, count uint32) []*CandidateAndRelayParent { + if count == 0 { + return nil + } + + basePos := f.findAncestorPath(ancestors) + actualEndIdx := min(basePos+int(count), len(f.bestChain.chain)) + res := make([]*CandidateAndRelayParent, 0, actualEndIdx-basePos) + + for _, elem := range f.bestChain.chain[basePos:actualEndIdx] { + // only supply candidates which are not yet pending availability. + // `ancestors` should have already contained them, but check just in case + if pending := f.scope.GetPendingAvailability(elem.candidateHash); pending == nil { + res = append(res, &CandidateAndRelayParent{ + CandidateHash: elem.candidateHash, + RealyParentHash: elem.relayParent(), + }) + } else { + break + } + } + + return res +} + +// findAncestorPath tries to orders the ancestors into a viable path from root to the last one. +// stops when the ancestors are all used or when a node in the chain is not present in the +// ancestors set. Returns the index in the chain were the search stopped +func (f *FragmentChain) findAncestorPath(ancestors map[parachaintypes.CandidateHash]struct{}) int { + if len(f.bestChain.chain) == 0 { + return 0 + } + + for idx, candidate := range f.bestChain.chain { + _, ok := ancestors[candidate.candidateHash] + if !ok { + return idx + } + delete(ancestors, candidate.candidateHash) + } + + // this means that we found the entire chain in the ancestor set. There wont be + // anything left to back. + return len(f.bestChain.chain) +} + // earliestRelayParent returns the earliest relay parent a new candidate can have in order // to be added to the chain right now. This is the relay parent of the latest candidate in // the chain. 
The value returned may not be valid if we want to add a candidate pending @@ -511,6 +767,227 @@ func (f *FragmentChain) earliestRelayParent() *inclusionemulator.RelayChainBlock return &earliest } +// earliestRelayParentPendingAvailability returns the earliest relay parent a potential +// candidate may have for it to ever be added to the chain. This is the relay parent of +// the last candidate pending availability or the earliest relay parent in scope. +func (f *FragmentChain) earliestRelayParentPendingAvailability() *inclusionemulator.RelayChainBlockInfo { + for i := len(f.bestChain.chain) - 1; i >= 0; i-- { + candidate := f.bestChain.chain[i] + if pending := f.scope.GetPendingAvailability(candidate.candidateHash); pending != nil { + return &pending.RelayParent + } + } + earliest := f.scope.EarliestRelayParent() + return &earliest +} + +// populateUnconnectedPotentialCandidates populates the unconnected potential candidate storage +// starting from a previous storage +func (f *FragmentChain) populateUnconnectedPotentialCandidates(oldStorage *CandidateStorage) { + for _, candidate := range oldStorage.byCandidateHash { + // sanity check, all pending availability candidates should be already present + // in the chain + if pending := f.scope.GetPendingAvailability(candidate.candidateHash); pending != nil { + continue + } + + // we can just use the error to check if we can add + // or not an entry since an error can legitimately + // happen when pruning stale candidates. 
+ err := f.CanAddCandidateAsPotential(candidate) + if err == nil { + _ = f.unconnected.addCandidateEntry(candidate) + } + } +} + +func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { + relayParent := candidate.relayParent + parentHeadHash := candidate.parentHeadDataHash + + // trivial 0-length cycle + if candidate.outputHeadDataHash == parentHeadHash { + return ErrZeroLengthCycle + } + + // Check if the relay parent is in scope + relayParentInfo := f.scope.Ancestor(relayParent) + if relayParentInfo == nil { + return ErrRelayParentNotInScope{ + relayParentA: relayParent, + relayParentB: f.scope.EarliestRelayParent().Hash, + } + } + + // Check if the relay parent moved backwards from the latest candidate pending availability + earliestRPOfPendingAvailability := f.earliestRelayParentPendingAvailability() + if relayParentInfo.Number < earliestRPOfPendingAvailability.Number { + return ErrRelayParentPrecedesCandidatePendingAvailability{ + relayParentA: relayParentInfo.Hash, + relayParentB: earliestRPOfPendingAvailability.Hash, + } + } + + // If it's a fork with a backed candidate in the current chain + if otherCandidateHash, ok := f.bestChain.byParentHead[parentHeadHash]; ok { + if f.scope.GetPendingAvailability(otherCandidateHash) != nil { + // Cannot accept a fork with a candidate pending availability + return ErrForkWithCandidatePendingAvailability{candidateHash: otherCandidateHash} + } + + // If the candidate is backed and in the current chain, accept only a candidate + // according to the fork selection rule + if forkSelectionRule(otherCandidateHash, candidate.candidateHash) == -1 { + return ErrForkChoiceRule{candidateHash: otherCandidateHash} + } + } + + // Try seeing if the parent candidate is in the current chain or if it is the latest + // included candidate. 
If so, get the constraints the candidate must satisfy + var constraints *inclusionemulator.Constraints + var maybeMinRelayParentNumber *uint + + requiredParentHash, err := f.scope.baseConstraints.RequiredParent.Hash() + if err != nil { + return fmt.Errorf("while hashing required parent: %w", err) + } + + if parentCandidateHash, ok := f.bestChain.byOutputHead[parentHeadHash]; ok { + var parentCandidate *FragmentNode + + for _, c := range f.bestChain.chain { + if c.candidateHash == parentCandidateHash { + parentCandidate = c + break + } + } + + if parentCandidate == nil { + return ErrParentCandidateNotFound + } + + var err error + constraints, err = f.scope.baseConstraints.ApplyModifications(parentCandidate.cumulativeModifications) + if err != nil { + return ErrComputeConstraints{modificationErr: err} + } + + if ancestor := f.scope.Ancestor(parentCandidate.relayParent()); ancestor != nil { + maybeMinRelayParentNumber = &ancestor.Number + } + } else if requiredParentHash == parentHeadHash { + // It builds on the latest included candidate + constraints = f.scope.baseConstraints.Clone() + } else { + // If the parent is not yet part of the chain, there's nothing else we can check for now + return nil + } + + // Check for cycles or invalid tree transitions + if err := f.checkCyclesOrInvalidTree(candidate.outputHeadDataHash); err != nil { + return err + } + + // Check against constraints if we have a full concrete candidate + _, err = inclusionemulator.CheckAgainstConstraints( + relayParentInfo, + constraints, + candidate.candidate.Commitments, + candidate.candidate.ValidationCodeHash, + candidate.candidate.PersistedValidationData, + ) + if err != nil { + return ErrCheckAgainstConstraints{fragmentValidityErr: err} + } + + if relayParentInfo.Number < constraints.MinRelayParentNumber { + return ErrRelayParentMovedBackwards + } + + if maybeMinRelayParentNumber != nil && relayParentInfo.Number < *maybeMinRelayParentNumber { + return ErrRelayParentMovedBackwards + } + + return 
nil +} + +// trimUneligibleForks once the backable chain was populated, trim the forks generated by candidate +// hashes which are not present in the best chain. Fan this out into a full breadth-first search. If +// starting point is not nil then start the search from the candidates haing this parent head hash. +func (f *FragmentChain) trimUneligibleForks(storage *CandidateStorage, startingPoint *common.Hash) { + type queueItem struct { + hash common.Hash + hasPotential bool + } + + queue := list.New() + + // start out with the candidates in the chain. They are all valid candidates. + if startingPoint != nil { + queue.PushBack(queueItem{hash: *startingPoint, hasPotential: true}) + } else { + if len(f.bestChain.chain) == 0 { + reqParentHeadHash, err := f.scope.baseConstraints.RequiredParent.Hash() + if err != nil { + panic(fmt.Sprintf("while hashing required parent: %s", err.Error())) + } + + queue.PushBack(queueItem{hash: reqParentHeadHash, hasPotential: true}) + } else { + for _, candidate := range f.bestChain.chain { + queue.PushBack(queueItem{hash: candidate.parentHeadDataHash, hasPotential: true}) + } + } + } + + // to make sure that cycles dont make us loop forever, keep track + // of the visited parent head hashes + visited := map[common.Hash]struct{}{} + + for queue.Len() > 0 { + // queue.PopFront() + parent := queue.Remove(queue.Front()).(queueItem) + visited[parent.hash] = struct{}{} + + children, ok := storage.byParentHead[parent.hash] + if !ok { + continue + } + + // cannot remove while iterating so store them here temporarily + var toRemove []parachaintypes.CandidateHash + + for childHash := range children { + child, ok := storage.byCandidateHash[childHash] + if !ok { + continue + } + + // already visited this child. either is a cycle or multipath that lead + // to the same candidate. 
either way, stop this branch to avoid looping + // forever + if _, ok = visited[child.outputHeadDataHash]; ok { + continue + } + + // only keep a candidate if its full ancestry was already kept as potential + // and this candidate itself has potential + if parent.hasPotential && f.checkPotential(child) == nil { + queue.PushBack(queueItem{hash: child.outputHeadDataHash, hasPotential: true}) + } else { + // otherwise, remove this candidate and continue looping for its children + // but mark the parent's potential as false. we only want to remove its children. + toRemove = append(toRemove, childHash) + queue.PushBack(queueItem{hash: child.outputHeadDataHash, hasPotential: false}) + } + } + + for _, hash := range toRemove { + storage.removeCandidate(hash) + } + } +} + type possibleChild struct { fragment *inclusionemulator.Fragment candidateHash parachaintypes.CandidateHash @@ -549,7 +1026,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { requiredHeadHash, err := childConstraints.RequiredParent.Hash() if err != nil { - fmt.Println("failed while hashing required parent:", err) + panic(fmt.Sprintf("failed while hashing required parent: %s", err.Error())) } possibleChildren := make([]*possibleChild, 0) @@ -562,12 +1039,12 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { // 4. all non-pending-availability candidates have relay-parent in the scope // 5. 
candidate outputs fulfill constraints - var relayParent inclusionemulator.RelayChainBlockInfo + var relayParent *inclusionemulator.RelayChainBlockInfo var minRelayParent uint pending := f.scope.GetPendingAvailability(candidateEntry.candidateHash) if pending != nil { - relayParent = pending.RelayParent + relayParent = &pending.RelayParent if len(f.bestChain.chain) == 0 { minRelayParent = pending.RelayParent.Number } else { @@ -579,7 +1056,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { continue } - relayParent = *info + relayParent = info minRelayParent = earliestRelayParent.Number } @@ -687,3 +1164,36 @@ func (f *FragmentChain) checkCyclesOrInvalidTree(outputHeadDataHash common.Hash) return nil } + +// revertTo reverts the best backable chain so that the last candidate will be one outputting the given +// `parent_head_hash`. If the `parent_head_hash` is exactly the required parent of the base +// constraints (builds on the latest included candidate), revert the entire chain. +// Return false if we couldn't find the parent head hash +func (f *FragmentChain) revertTo(parentHeadDataHash common.Hash) bool { + var removedItems []*FragmentNode = nil + + requiredParentHash, err := f.scope.baseConstraints.RequiredParent.Hash() + if err != nil { + panic(fmt.Sprintf("failed while hashing required parent: %s", err.Error())) + } + + if requiredParentHash == parentHeadDataHash { + removedItems = f.bestChain.Clear() + } + + if _, ok := f.bestChain.byOutputHead[parentHeadDataHash]; removedItems == nil && ok { + removedItems = f.bestChain.RevertToParentHash(parentHeadDataHash) + } + + if removedItems == nil { + return false + } + + // Even if it's empty, we need to return true, because we'll be able to add a new candidate + // to the chain. 
+ for _, node := range removedItems { + _ = f.unconnected.addCandidateEntry(NewCandidateEntryFromFragment(node)) + } + + return true +} diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 13ed8e3071..783544ebf4 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -6,6 +6,7 @@ import ( parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/pkg/scale" "github.com/stretchr/testify/assert" "github.com/tidwall/btree" ) @@ -419,3 +420,59 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { }) } } + +func TestFragmentChainWithFreshScope(t *testing.T) { + relayParent := inclusionemulator.RelayChainBlockInfo{ + Hash: common.Hash{0x00}, + Number: 0, + StorageRoot: common.Hash{0x00}, + } + + baseConstraints := &inclusionemulator.Constraints{ + RequiredParent: parachaintypes.HeadData{Data: []byte{byte(0)}}, + MinRelayParentNumber: 0, + ValidationCodeHash: parachaintypes.ValidationCodeHash(common.Hash{0x03}), + } + + scope, err := NewScopeWithAncestors(relayParent, baseConstraints, nil, 10, nil) + assert.NoError(t, err) + + candidateStorage := NewCandidateStorage() + + // Create 3 candidate entries forming a chain + for i := 0; i < 3; i++ { + candidateHash := parachaintypes.CandidateHash{Value: [32]byte{byte(i + 1)}} + parentHead := parachaintypes.HeadData{Data: []byte{byte(i)}} + outputHead := parachaintypes.HeadData{Data: []byte{byte(i + 1)}} + + persistedValidationData := parachaintypes.PersistedValidationData{ + ParentHead: parentHead, + } + + // Marshal and hash the persisted validation data + pvdBytes, err := scale.Marshal(persistedValidationData) + 
assert.NoError(t, err) + pvdHash, err := common.Blake2bHash(pvdBytes) + assert.NoError(t, err) + + committedCandidate := parachaintypes.CommittedCandidateReceipt{ + Descriptor: parachaintypes.CandidateDescriptor{ + RelayParent: common.Hash{0x00}, + PersistedValidationDataHash: pvdHash, + PovHash: common.Hash{0x02}, + ValidationCodeHash: parachaintypes.ValidationCodeHash(common.Hash{0x03}), + }, + Commitments: parachaintypes.CandidateCommitments{ + HeadData: outputHead, + }, + } + + err = candidateStorage.AddPendingAvailabilityCandidate(candidateHash, committedCandidate, persistedValidationData) + assert.NoError(t, err) + } + + fragmentChain := NewFragmentChain(scope, candidateStorage) + + // Check that the best chain contains 3 candidates + assert.Equal(t, 3, len(fragmentChain.bestChain.chain)) +} diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index 99a3fc8ef8..d2bad1b2c4 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -100,7 +100,7 @@ type ErrValidationCodeMismatch struct { } func (e *ErrValidationCodeMismatch) Error() string { - return fmt.Sprintf("ValidationCodeMismatch(Expected: %s, Got: %s)", e.expected, e.got) + return fmt.Sprintf("ValidationCodeMismatch(Expected: %v, Got: %v)", e.expected, e.got) } type ErrPersistedValidationDataMismatch struct { @@ -207,6 +207,13 @@ type Constraints struct { } func (c *Constraints) Clone() *Constraints { + var futureValidationCode *FutureValidationCode + if c.FutureValidationCode != nil { + futureValidationCode = &FutureValidationCode{ + BlockNumber: c.FutureValidationCode.BlockNumber, + ValidationCodeHash: c.FutureValidationCode.ValidationCodeHash, + } + } return &Constraints{ MinRelayParentNumber: c.MinRelayParentNumber, MaxPoVSize: c.MaxPoVSize, @@ -223,10 +230,7 @@ func (c *Constraints) Clone() *Constraints { RequiredParent: 
c.RequiredParent, ValidationCodeHash: c.ValidationCodeHash, UpgradeRestriction: c.UpgradeRestriction, - FutureValidationCode: &FutureValidationCode{ - BlockNumber: c.FutureValidationCode.BlockNumber, - ValidationCodeHash: c.FutureValidationCode.ValidationCodeHash, - }, + FutureValidationCode: futureValidationCode, } } @@ -538,13 +542,13 @@ func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { // Fragment represents another prospective parachain block // This is a type which guarantees that the candidate is valid under the operating constraints type Fragment struct { - relayParent RelayChainBlockInfo + relayParent *RelayChainBlockInfo operatingConstraints *Constraints candidate ProspectiveCandidate modifications *ConstraintModifications } -func (f *Fragment) RelayParent() RelayChainBlockInfo { +func (f *Fragment) RelayParent() *RelayChainBlockInfo { return f.relayParent } @@ -562,10 +566,10 @@ func (f *Fragment) ConstraintModifications() *ConstraintModifications { // This does not check that the collator signature is valid or wheter the PoV is // small enough. 
func NewFragment( - relayParent RelayChainBlockInfo, + relayParent *RelayChainBlockInfo, operatingConstraints *Constraints, candidate ProspectiveCandidate) (*Fragment, error) { - modifications, err := checkAgainstConstraints( + modifications, err := CheckAgainstConstraints( relayParent, operatingConstraints, candidate.Commitments, @@ -584,8 +588,8 @@ func NewFragment( }, nil } -func checkAgainstConstraints( - relayParent RelayChainBlockInfo, +func CheckAgainstConstraints( + relayParent *RelayChainBlockInfo, operatingConstraints *Constraints, commitments parachaintypes.CandidateCommitments, validationCodeHash parachaintypes.ValidationCodeHash, @@ -681,7 +685,7 @@ func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[para func validateAgainstConstraints( constraints *Constraints, - relayParent RelayChainBlockInfo, + relayParent *RelayChainBlockInfo, commitments parachaintypes.CandidateCommitments, persistedValidationData parachaintypes.PersistedValidationData, validationCodeHash parachaintypes.ValidationCodeHash, From 1b796b876fdca5eeb403891fcf85863c4ac6dbdd Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 19 Nov 2024 18:28:17 -0400 Subject: [PATCH 08/31] chore: wip fragment chain tests --- .../fragment-chain/errors.go | 4 +- .../fragment-chain/fragment_chain_test.go | 1175 +++++++++++++++++ 2 files changed, 1177 insertions(+), 2 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/errors.go b/dot/parachain/prospective-parachains/fragment-chain/errors.go index ed3b8bc62a..95a0e33951 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/errors.go +++ b/dot/parachain/prospective-parachains/fragment-chain/errors.go @@ -66,8 +66,8 @@ type ErrRelayParentNotInScope struct { } func (e ErrRelayParentNotInScope) Error() string { - return fmt.Sprintf("relay parent %x not in scope, earliest relay parent allowed %x", - e.relayParentA, e.relayParentB) + return fmt.Sprintf("relay parent %s not in scope, earliest 
relay parent allowed %s", + e.relayParentA.String(), e.relayParentB.String()) } type ErrUnexpectedAncestor struct { diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 783544ebf4..7afe579341 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -1,13 +1,17 @@ package fragmentchain import ( + "bytes" + "slices" "testing" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tidwall/btree" ) @@ -476,3 +480,1174 @@ func TestFragmentChainWithFreshScope(t *testing.T) { // Check that the best chain contains 3 candidates assert.Equal(t, 3, len(fragmentChain.bestChain.chain)) } + +func makeConstraints( + minRelayParentNumber uint, + validWatermarks []uint, + requiredParent parachaintypes.HeadData, +) *inclusionemulator.Constraints { + return &inclusionemulator.Constraints{ + MinRelayParentNumber: minRelayParentNumber, + MaxPoVSize: 1_000_000, + MaxCodeSize: 1_000_000, + UmpRemaining: 10, + UmpRemainingBytes: 1_000, + MaxUmpNumPerCandidate: 10, + DmpRemainingMessages: make([]uint, 10), + HrmpInbound: inclusionemulator.InboundHrmpLimitations{ + ValidWatermarks: validWatermarks, + }, + HrmpChannelsOut: make(map[parachaintypes.ParaID]inclusionemulator.OutboundHrmpChannelLimitations), + MaxHrmpNumPerCandidate: 0, + RequiredParent: requiredParent, + ValidationCodeHash: parachaintypes.ValidationCodeHash(common.BytesToHash(bytes.Repeat([]byte{42}, 32))), + UpgradeRestriction: nil, + FutureValidationCode: nil, + } +} + +func 
makeCommittedCandidate( + t *testing.T, + paraID parachaintypes.ParaID, + relayParent common.Hash, + relayParentNumber uint32, + parentHead parachaintypes.HeadData, + paraHead parachaintypes.HeadData, + hrmpWatermark uint32, +) (parachaintypes.PersistedValidationData, parachaintypes.CommittedCandidateReceipt) { + persistedValidationData := parachaintypes.PersistedValidationData{ + ParentHead: parentHead, + RelayParentNumber: relayParentNumber, + RelayParentStorageRoot: common.Hash{}, + MaxPovSize: 1_000_000, + } + + pvdBytes, err := scale.Marshal(persistedValidationData) + require.NoError(t, err) + + pvdHash, err := common.Blake2bHash(pvdBytes) + require.NoError(t, err) + + paraHeadHash, err := paraHead.Hash() + require.NoError(t, err) + + candidate := parachaintypes.CommittedCandidateReceipt{ + Descriptor: parachaintypes.CandidateDescriptor{ + ParaID: paraID, + RelayParent: relayParent, + Collator: parachaintypes.CollatorID([sr25519.PublicKeyLength]byte{}), + PersistedValidationDataHash: pvdHash, + PovHash: common.BytesToHash(bytes.Repeat([]byte{1}, 32)), + ErasureRoot: common.BytesToHash(bytes.Repeat([]byte{1}, 32)), + Signature: parachaintypes.CollatorSignature([sr25519.SignatureLength]byte{}), + ParaHead: paraHeadHash, + ValidationCodeHash: parachaintypes.ValidationCodeHash(common.BytesToHash(bytes.Repeat([]byte{42}, 32))), + }, + Commitments: parachaintypes.CandidateCommitments{ + UpwardMessages: []parachaintypes.UpwardMessage{}, + HorizontalMessages: []parachaintypes.OutboundHrmpMessage{}, + NewValidationCode: nil, + HeadData: paraHead, + ProcessedDownwardMessages: 1, + HrmpWatermark: hrmpWatermark, + }, + } + + return persistedValidationData, candidate +} + +func TestScopeRejectsAncestors(t *testing.T) { + tests := map[string]struct { + relayParent *inclusionemulator.RelayChainBlockInfo + ancestors []inclusionemulator.RelayChainBlockInfo + maxDepth uint + baseConstraints *inclusionemulator.Constraints + pendingAvailability []*PendindAvailability + 
expectedError error + }{ + "rejects_ancestor_that_skips_blocks": { + relayParent: &inclusionemulator.RelayChainBlockInfo{ + Number: 10, + Hash: common.BytesToHash(bytes.Repeat([]byte{0x10}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{0x69}, 32)), + }, + ancestors: []inclusionemulator.RelayChainBlockInfo{ + { + Number: 8, + Hash: common.BytesToHash(bytes.Repeat([]byte{0x08}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{0x69}, 69)), + }, + }, + maxDepth: 2, + baseConstraints: makeConstraints(8, []uint{8, 9}, + parachaintypes.HeadData{Data: []byte{0x01, 0x02, 0x03}}), + pendingAvailability: make([]*PendindAvailability, 0), + expectedError: ErrUnexpectedAncestor{Number: 8, Prev: 10}, + }, + "rejects_ancestor_for_zero_block": { + relayParent: &inclusionemulator.RelayChainBlockInfo{ + Number: 0, + Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + ancestors: []inclusionemulator.RelayChainBlockInfo{ + { + Number: 99999, + Hash: common.BytesToHash(bytes.Repeat([]byte{99}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + }, + maxDepth: 2, + baseConstraints: makeConstraints(0, []uint{}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), + pendingAvailability: make([]*PendindAvailability, 0), + expectedError: ErrUnexpectedAncestor{Number: 99999, Prev: 0}, + }, + "rejects_unordered_ancestors": { + relayParent: &inclusionemulator.RelayChainBlockInfo{ + Number: 5, + Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + ancestors: []inclusionemulator.RelayChainBlockInfo{ + { + Number: 4, + Hash: common.BytesToHash(bytes.Repeat([]byte{4}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 2, + Hash: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 3, + 
Hash: common.BytesToHash(bytes.Repeat([]byte{3}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + }, + maxDepth: 2, + baseConstraints: makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), + pendingAvailability: make([]*PendindAvailability, 0), + expectedError: ErrUnexpectedAncestor{Number: 2, Prev: 4}, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + scope, err := NewScopeWithAncestors(*tt.relayParent, tt.baseConstraints, tt.pendingAvailability, tt.maxDepth, tt.ancestors) + require.ErrorIs(t, err, tt.expectedError) + require.Nil(t, scope) + }) + } +} + +func TestScopeOnlyTakesAncestorsUpToMin(t *testing.T) { + relayParent := inclusionemulator.RelayChainBlockInfo{ + Number: 5, + Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + } + + ancestors := []inclusionemulator.RelayChainBlockInfo{ + { + Number: 4, + Hash: common.BytesToHash(bytes.Repeat([]byte{4}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 3, + Hash: common.BytesToHash(bytes.Repeat([]byte{3}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 2, + Hash: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + } + + maxDepth := uint(2) + baseConstraints := makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}) + pendingAvailability := make([]*PendindAvailability, 0) + + scope, err := NewScopeWithAncestors(relayParent, baseConstraints, pendingAvailability, maxDepth, ancestors) + require.NoError(t, err) + + assert.Equal(t, 2, scope.ancestors.Len()) + assert.Equal(t, 2, len(scope.ancestorsByHash)) +} + +func TestCandidateStorageMethods(t *testing.T) { + tests := map[string]struct { + runTest func(*testing.T) + }{ + "persistedValidationDataMismatch": { + runTest: func(t 
*testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + wrongPvd := pvd + wrongPvd.MaxPovSize = 0 + + candidateHash, err := candidate.Hash() + require.NoError(t, err) + + entry, err := NewCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, + candidate, wrongPvd, Seconded) + require.ErrorIs(t, err, ErrPersistedValidationDataMismatch) + require.Nil(t, entry) + }, + }, + + "zero_length_cycle": { + runTest: func(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + candidate.Commitments.HeadData = parachaintypes.HeadData{Data: bytes.Repeat([]byte{1}, 10)} + pvd.ParentHead = parachaintypes.HeadData{Data: bytes.Repeat([]byte{1}, 10)} + wrongPvdHash, err := pvd.Hash() + require.NoError(t, err) + + candidate.Descriptor.PersistedValidationDataHash = wrongPvdHash + + candidateHash, err := candidate.Hash() + require.NoError(t, err) + + entry, err := NewCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, + candidate, pvd, Seconded) + require.Nil(t, entry) + require.ErrorIs(t, err, ErrCandidateEntryZeroLengthCycle) + }, + }, + + "add_valid_candidate": { + runTest: func(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + + 
parentHeadHash, err := pvd.ParentHead.Hash() + require.NoError(t, err) + + entry, err := NewCandidateEntry(candidateHash, candidate, pvd, Seconded) + require.NoError(t, err) + + storage := NewCandidateStorage() + + t.Run("add_candidate_entry_as_seconded", func(t *testing.T) { + err = storage.addCandidateEntry(entry) + require.NoError(t, err) + require.True(t, storage.contains(candidateHash)) + + // should not have any possible backed candidate yet + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + assert.Fail(t, "expected no entries, but found one", entry) + } + + require.Equal(t, storage.headDataByHash(candidate.Descriptor.ParaHead), + &candidate.Commitments.HeadData) + require.Equal(t, storage.headDataByHash(parentHeadHash), &pvd.ParentHead) + + // re-add the candidate should fail + err = storage.addCandidateEntry(entry) + require.ErrorIs(t, err, ErrCandidateAlradyKnown) + }) + + t.Run("mark_candidate_entry_as_backed", func(t *testing.T) { + storage.markBacked(candidateHash) + // marking twice is fine + storage.markBacked(candidateHash) + + // here we should have 1 possible backed candidate when we + // use the parentHeadHash (parent of our current candidate) to query + possibleBackedCandidateHashes := make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{candidateHash}, possibleBackedCandidateHashes) + + // here we should have 0 possible backed candidate because we are + // using the candidate hash paraHead as base to query + possibleBackedCandidateHashes = make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(candidate.Descriptor.ParaHead) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Empty(t, possibleBackedCandidateHashes) + 
}) + + t.Run("remove_candidate_entry", func(t *testing.T) { + storage.removeCandidate(candidateHash) + // remove it twice should be fine + storage.removeCandidate(candidateHash) + + require.False(t, storage.contains(candidateHash)) + + // should not have any possible backed candidate anymore + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + assert.Fail(t, "expected no entries, but found one", entry) + } + + require.Nil(t, storage.headDataByHash(candidate.Descriptor.ParaHead)) + require.Nil(t, storage.headDataByHash(parentHeadHash)) + }) + }, + }, + + "add_pending_availability_candidate": { + runTest: func(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + + parentHeadHash, err := pvd.ParentHead.Hash() + require.NoError(t, err) + + storage := NewCandidateStorage() + err = storage.AddPendingAvailabilityCandidate(candidateHash, candidate, pvd) + require.NoError(t, err) + require.True(t, storage.contains(candidateHash)) + + // here we should have 1 possible backed candidate when we + // use the parentHeadHash (parent of our current candidate) to query + possibleBackedCandidateHashes := make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{candidateHash}, possibleBackedCandidateHashes) + + // here we should have 0 possible backed candidate because we are + // using the candidate hash paraHead as base to query + possibleBackedCandidateHashes = make([]parachaintypes.CandidateHash, 0) + 
for entry := range storage.possibleBackedParaChildren(candidate.Descriptor.ParaHead) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Empty(t, possibleBackedCandidateHashes) + + t.Run("add_seconded_candidate_to_create_fork", func(t *testing.T) { + pvd2, candidate2 := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{2, 3, 4}}, + 7, + ) + + hash2, err := candidate2.Hash() + require.NoError(t, err) + candidateHash2 := parachaintypes.CandidateHash{Value: hash2} + + candidateEntry2, err := NewCandidateEntry(candidateHash2, candidate2, pvd2, Seconded) + require.NoError(t, err) + + err = storage.addCandidateEntry(candidateEntry2) + require.NoError(t, err) + + // here we should have 1 possible backed candidate since + // the other candidate is seconded + possibleBackedCandidateHashes := make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{candidateHash}, possibleBackedCandidateHashes) + + // now mark it as backed + storage.markBacked(candidateHash2) + + // here we should have 1 possible backed candidate since + // the other candidate is seconded + possibleBackedCandidateHashes = make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateHash, candidateHash2}, possibleBackedCandidateHashes) + + }) + }, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + tt.runTest(t) + }) + } +} + +func TestInitAndPopulateFromEmpty(t *testing.T) { + 
baseConstraints := makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0a}}) + + scope, err := NewScopeWithAncestors( + inclusionemulator.RelayChainBlockInfo{ + Number: 1, + Hash: common.BytesToHash(bytes.Repeat([]byte{1}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), + }, + baseConstraints, + nil, + 4, + nil, + ) + require.NoError(t, err) + + chain := NewFragmentChain(scope, NewCandidateStorage()) + assert.Equal(t, 0, chain.BestChainLen()) + assert.Equal(t, 0, chain.UnconnectedLen()) + + newChain := NewFragmentChain(scope, NewCandidateStorage()) + newChain.PopulateFromPrevious(chain) + assert.Equal(t, 0, newChain.BestChainLen()) + assert.Equal(t, 0, newChain.UnconnectedLen()) +} + +func populateFromPreviousStorage(scope *Scope, storage *CandidateStorage) *FragmentChain { + chain := NewFragmentChain(scope, NewCandidateStorage()) + + // clone the value + prevChain := *chain + (&prevChain).unconnected = storage.Clone() + chain.PopulateFromPrevious(&prevChain) + return chain +} + +func TestPopulateAndCheckPotential(t *testing.T) { + storage := NewCandidateStorage() + paraID := parachaintypes.ParaID(5) + + relayParentAHash := common.BytesToHash(bytes.Repeat([]byte{1}, 32)) + relayParentBHash := common.BytesToHash(bytes.Repeat([]byte{2}, 32)) + relayParentCHash := common.BytesToHash(bytes.Repeat([]byte{3}, 32)) + + relayParentAInfo := &inclusionemulator.RelayChainBlockInfo{ + Number: 0, Hash: relayParentAHash, StorageRoot: common.Hash{}, + } + + relayParentBInfo := &inclusionemulator.RelayChainBlockInfo{ + Number: 1, Hash: relayParentBHash, StorageRoot: common.Hash{}, + } + + relayParentCInfo := &inclusionemulator.RelayChainBlockInfo{ + Number: 2, Hash: relayParentCHash, StorageRoot: common.Hash{}, + } + + // the ancestors must be in the reverse order + ancestors := []inclusionemulator.RelayChainBlockInfo{ + *relayParentBInfo, + *relayParentAInfo, + } + + firstParachainHead := parachaintypes.HeadData{Data: []byte{0x0a}} + 
baseConstraints := makeConstraints(0, []uint{0}, firstParachainHead) + + // helper function to hash the candidate and add its entry + // into the candidate storage + hashAndInsertCandididate := func(t *testing.T, storage *CandidateStorage, + candidate parachaintypes.CommittedCandidateReceipt, pvd parachaintypes.PersistedValidationData, state CandidateState) (parachaintypes.CandidateHash, *CandidateEntry) { + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + entry, err := NewCandidateEntry(candidateHash, candidate, pvd, state) + require.NoError(t, err) + err = storage.addCandidateEntry(entry) + require.NoError(t, err) + + return candidateHash, entry + } + + hashAndGetEntry := func(t *testing.T, candidate parachaintypes.CommittedCandidateReceipt, + pvd parachaintypes.PersistedValidationData, state CandidateState) (parachaintypes.CandidateHash, *CandidateEntry) { + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + entry, err := NewCandidateEntry(candidateHash, candidate, pvd, state) + require.NoError(t, err) + return candidateHash, entry + } + + // candidates A -> B -> C are all backed + candidateAParaHead := parachaintypes.HeadData{Data: []byte{0x0b}} + pvdA, candidateA := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + firstParachainHead, + candidateAParaHead, + uint32(relayParentAInfo.Number), + ) + + candidateAHash, candidateAEntry := hashAndInsertCandididate(t, storage, candidateA, pvdA, Backed) + + candidateBParaHead := parachaintypes.HeadData{Data: []byte{0x0c}} + pvdB, candidateB := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + candidateAParaHead, // defines candidate A as parent of candidate B + candidateBParaHead, + uint32(relayParentBInfo.Number), + ) + + candidateBHash, candidateBEntry := hashAndInsertCandididate(t, storage, 
candidateB, pvdB, Backed)
+
+ candidateCParaHead := parachaintypes.HeadData{Data: []byte{0x0d}}
+ pvdC, candidateC := makeCommittedCandidate(t, paraID,
+ relayParentCInfo.Hash, uint32(relayParentCInfo.Number),
+ candidateBParaHead,
+ candidateCParaHead,
+ uint32(relayParentCInfo.Number),
+ )
+
+ candidateCHash, candidateCEntry := hashAndInsertCandididate(t, storage, candidateC, pvdC, Backed)
+
+ t.Run("candidate_A_doesnt_adhere_to_base_constraints", func(t *testing.T) {
+ wrongConstraints := []inclusionemulator.Constraints{
+ // define a constraint that requires a parent head data
+ // that is different from candidate A parent head
+ *makeConstraints(relayParentAInfo.Number, []uint{relayParentAInfo.Number}, parachaintypes.HeadData{Data: []byte{0x0e}}),
+
+ // the min relay parent for candidate A is wrong
+ *makeConstraints(relayParentBInfo.Number, []uint{0}, firstParachainHead),
+ }
+
+ for _, wrongConstraint := range wrongConstraints {
+ scope, err := NewScopeWithAncestors(
+ *relayParentCInfo,
+ &wrongConstraint,
+ nil,
+ 4,
+ ancestors,
+ )
+ require.NoError(t, err)
+
+ chain := populateFromPreviousStorage(scope, storage)
+ require.Empty(t, chain.BestChainVec())
+
+ // if the min relay parent is wrong, candidate A can never become valid, otherwise
+ // if only the required parent doesn't match, candidate A is still a potential candidate
+ if wrongConstraint.MinRelayParentNumber == relayParentBInfo.Number {
+ // if A is not a potential candidate, its descendants will also not be added. 
+ require.Equal(t, chain.UnconnectedLen(), 0) + err := chain.CanAddCandidateAsPotential(candidateAEntry) + require.ErrorIs(t, err, ErrRelayParentNotInScope{ + relayParentA: relayParentAHash, // candidate A has relay parent A + relayParentB: relayParentBHash, // while the constraint is expecting at least relay parent B + }) + + // however if taken independently, both B and C still have potential + err = chain.CanAddCandidateAsPotential(candidateBEntry) + require.NoError(t, err) + err = chain.CanAddCandidateAsPotential(candidateCEntry) + require.NoError(t, err) + } else { + potentials := make([]parachaintypes.CandidateHash, 0) + for unconnected := range chain.Unconnected() { + potentials = append(potentials, unconnected.candidateHash) + } + + slices.SortStableFunc(potentials, func(i, j parachaintypes.CandidateHash) int { + return bytes.Compare(i.Value[:], j.Value[:]) + }) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateAHash, + candidateCHash, + candidateBHash, + }, potentials) + } + } + }) + + t.Run("depth_cases", func(t *testing.T) { + depthCases := map[string]struct { + depth []uint + expectedBestChain []parachaintypes.CandidateHash + expectedUnconnected map[parachaintypes.CandidateHash]struct{} + }{ + "0_depth_only_allows_one_candidate_but_keep_the_rest_as_potential": { + depth: []uint{0}, + expectedBestChain: []parachaintypes.CandidateHash{candidateAHash}, + expectedUnconnected: map[parachaintypes.CandidateHash]struct{}{ + candidateBHash: {}, + candidateCHash: {}, + }, + }, + "1_depth_allow_two_candidates": { + depth: []uint{1}, + expectedBestChain: []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, + expectedUnconnected: map[parachaintypes.CandidateHash]struct{}{ + candidateCHash: {}, + }, + }, + "2_more_depth_allow_all_candidates": { + depth: []uint{2, 3, 4, 5}, + expectedBestChain: []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, + expectedUnconnected: map[parachaintypes.CandidateHash]struct{}{}, + 
}, + } + + for tname, tt := range depthCases { + tt := tt + t.Run(tname, func(t *testing.T) { + // iterate over all the depth values + for _, depth := range tt.depth { + scope, err := NewScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + depth, + ancestors, + ) + require.NoError(t, err) + + chain := NewFragmentChain(scope, NewCandidateStorage()) + // individually each candidate is a potential candidate + require.NoError(t, chain.CanAddCandidateAsPotential(candidateAEntry)) + require.NoError(t, chain.CanAddCandidateAsPotential(candidateBEntry)) + require.NoError(t, chain.CanAddCandidateAsPotential(candidateCEntry)) + + chain = populateFromPreviousStorage(scope, storage) + require.Equal(t, tt.expectedBestChain, chain.BestChainVec()) + + // Check that the unconnected candidates are as expected + unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) + for unconnected := range chain.Unconnected() { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + assert.Equal(t, tt.expectedUnconnected, unconnectedHashes) + } + }) + } + }) + + t.Run("relay_parent_out_of_scope", func(t *testing.T) { + // candidate A has a relay parent out of scope. 
Candidates B and C
+ // will also be deleted since they form a chain with A
+ t.Run("candidate_A_relay_parent_out_of_scope", func(t *testing.T) {
+ newAncestors := []inclusionemulator.RelayChainBlockInfo{
+ *relayParentBInfo,
+ }
+
+ scope, err := NewScopeWithAncestors(
+ *relayParentCInfo,
+ baseConstraints,
+ nil,
+ 4,
+ newAncestors,
+ )
+ require.NoError(t, err)
+ chain := populateFromPreviousStorage(scope, storage)
+ require.Empty(t, chain.BestChainVec())
+ require.Equal(t, 0, chain.UnconnectedLen())
+
+ require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry),
+ ErrRelayParentNotInScope{
+ relayParentA: relayParentAHash,
+ relayParentB: relayParentBHash,
+ })
+
+ // however if taken independently, both B and C still have potential
+ require.NoError(t, chain.CanAddCandidateAsPotential(candidateBEntry))
+ require.NoError(t, chain.CanAddCandidateAsPotential(candidateCEntry))
+ })
+
+ t.Run("candidate_A_and_B_out_of_scope_C_still_potential", func(t *testing.T) {
+ scope, err := NewScopeWithAncestors(
+ *relayParentCInfo,
+ baseConstraints,
+ nil,
+ 4,
+ nil,
+ )
+ require.NoError(t, err)
+ chain := populateFromPreviousStorage(scope, storage)
+ require.Empty(t, chain.BestChainVec())
+ require.Equal(t, 0, chain.UnconnectedLen())
+
+ require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry),
+ ErrRelayParentNotInScope{
+ relayParentA: relayParentAHash,
+ relayParentB: relayParentCHash,
+ })
+
+ // here B's relay parent is also out of scope, so only C still has potential
+ require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateBEntry),
+ ErrRelayParentNotInScope{
+ relayParentA: relayParentBHash,
+ relayParentB: relayParentCHash,
+ })
+
+ require.NoError(t, chain.CanAddCandidateAsPotential(candidateCEntry))
+ })
+ })
+
+ t.Run("parachain_cycle_not_allowed", func(t *testing.T) {
+ // make C parent of parachain block A
+ modifiedStorage := storage.Clone()
+ modifiedStorage.removeCandidate(candidateCHash)
+
+ wrongPvdC, wrongCandidateC := 
makeCommittedCandidate(t, paraID,
+ relayParentCInfo.Hash, uint32(relayParentCInfo.Number),
+ candidateBParaHead, // defines candidate B as parent of candidate C
+ firstParachainHead, // defines this candidate para head output as the parent of candidate A
+ uint32(relayParentCInfo.Number),
+ )
+
+ _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, Backed)
+
+ scope, err := NewScopeWithAncestors(
+ *relayParentCInfo,
+ baseConstraints,
+ nil,
+ 4,
+ ancestors,
+ )
+ require.NoError(t, err)
+
+ chain := populateFromPreviousStorage(scope, modifiedStorage)
+ require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec())
+ require.Equal(t, 0, chain.UnconnectedLen())
+
+ err = chain.CanAddCandidateAsPotential(wrongCandidateCEntry)
+ require.ErrorIs(t, err, ErrCycle)
+
+ // However, if taken independently, C still has potential, since we don't know A and B.
+ chain = NewFragmentChain(scope, NewCandidateStorage())
+ require.NoError(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry))
+ })
+
+ t.Run("relay_parent_move_backwards_not_allowed", func(t *testing.T) {
+ // each candidate was built using a different, and contiguous, relay parent
+ // in this test we are going to change candidate C to have the same relay
+ // parent of candidate A, given that candidate B is one block ahead. 
+ modifiedStorage := storage.Clone()
+ modifiedStorage.removeCandidate(candidateCHash)
+
+ wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, paraID,
+ relayParentAInfo.Hash, uint32(relayParentAInfo.Number),
+ candidateBParaHead,
+ candidateCParaHead,
+ 0,
+ )
+
+ _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, Backed)
+
+ scope, err := NewScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 4, ancestors)
+ require.NoError(t, err)
+
+ chain := populateFromPreviousStorage(scope, modifiedStorage)
+ require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec())
+ require.Equal(t, 0, chain.UnconnectedLen())
+
+ require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), ErrRelayParentMovedBackwards)
+ })
+
+ t.Run("unconnected_candidate_C", func(t *testing.T) {
+ // candidate C is an unconnected candidate, C's relay parent is allowed to move
+ // backwards from B's relay parent, because C may later on trigger a reorg and
+ // B may get removed
+
+ modifiedStorage := storage.Clone()
+ modifiedStorage.removeCandidate(candidateCHash)
+
+ parenteHead := parachaintypes.HeadData{Data: []byte{0x0d}}
+ unconnectedCandidateCHead := parachaintypes.HeadData{Data: []byte{0x0e}}
+
+ unconnectedCPvd, unconnectedCandidateC := makeCommittedCandidate(t, paraID,
+ relayParentAInfo.Hash, uint32(relayParentAInfo.Number),
+ parenteHead,
+ unconnectedCandidateCHead,
+ 0,
+ )
+
+ unconnectedCandidateCHash, unconnectedCandidateCEntry := hashAndInsertCandididate(t,
+ modifiedStorage, unconnectedCandidateC, unconnectedCPvd, Backed)
+
+ scope, err := NewScopeWithAncestors(
+ *relayParentCInfo,
+ baseConstraints,
+ nil,
+ 4,
+ ancestors,
+ )
+ require.NoError(t, err)
+
+ chain := NewFragmentChain(scope, NewCandidateStorage())
+ require.NoError(t, chain.CanAddCandidateAsPotential(unconnectedCandidateCEntry))
+
+ chain = populateFromPreviousStorage(scope, modifiedStorage)
+ 
require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec())
+
+ unconnected := make(map[parachaintypes.CandidateHash]struct{})
+ for entry := range chain.Unconnected() {
+ unconnected[entry.candidateHash] = struct{}{}
+ }
+
+ require.Equal(t, map[parachaintypes.CandidateHash]struct{}{
+ unconnectedCandidateCHash: {},
+ }, unconnected)
+
+ t.Run("candidate_A_is_pending_availability_candidate_C_should_not_move_backwards", func(t *testing.T) {
+ // candidate A is pending availability and candidate C is an unconnected candidate, C's relay parent
+ // is not allowed to move backwards from A's relay parent because we're sure A will not get removed
+ // in the future, as it's already on-chain (unless it times out availability, a case for which we
+ // don't care to optimise for)
+ modifiedStorage.removeCandidate(candidateAHash)
+ modifiedAPvd, modifiedCandidateA := makeCommittedCandidate(t, paraID,
+ relayParentBInfo.Hash, uint32(relayParentBInfo.Number),
+ firstParachainHead,
+ candidateAParaHead,
+ uint32(relayParentBInfo.Number),
+ )
+
+ modifiedCandidateAHash, _ := hashAndInsertCandididate(t,
+ modifiedStorage, modifiedCandidateA, modifiedAPvd, Backed)
+
+ scope, err := NewScopeWithAncestors(
+ *relayParentCInfo,
+ baseConstraints,
+ []*PendindAvailability{
+ {CandidateHash: modifiedCandidateAHash, RelayParent: *relayParentBInfo},
+ },
+ 4,
+ ancestors,
+ )
+ require.NoError(t, err)
+
+ chain := populateFromPreviousStorage(scope, modifiedStorage)
+ require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.BestChainVec())
+ require.Equal(t, 0, chain.UnconnectedLen())
+
+ require.ErrorIs(t,
+ chain.CanAddCandidateAsPotential(unconnectedCandidateCEntry),
+ ErrRelayParentPrecedesCandidatePendingAvailability{
+ relayParentA: relayParentAHash,
+ relayParentB: relayParentBHash,
+ })
+ })
+ })
+
+ t.Run("cannot_fork_from_a_candidate_pending_availability", func(t *testing.T) {
+ modifiedStorage := 
storage.Clone() + modifiedStorage.removeCandidate(candidateCHash) + + modifiedStorage.removeCandidate(candidateAHash) + modifiedAPvd, modifiedCandidateA := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + firstParachainHead, + candidateAParaHead, + uint32(relayParentBInfo.Number), + ) + + modifiedCandidateAHash, _ := hashAndInsertCandididate(t, + modifiedStorage, modifiedCandidateA, modifiedAPvd, Backed) + + wrongCandidateCHead := parachaintypes.HeadData{Data: []byte{0x01}} + wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + firstParachainHead, + wrongCandidateCHead, + uint32(relayParentBInfo.Number), + ) + + wrongCandidateCHash, wrongCandidateCEntry := hashAndInsertCandididate(t, + modifiedStorage, wrongCandidateC, wrongPvdC, Backed) + + // does not matter if the fork selection rule picks the new candidate + // as the modified candidate A is pending availability + require.Equal(t, -1, forkSelectionRule(wrongCandidateCHash, modifiedCandidateAHash)) + + scope, err := NewScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + []*PendindAvailability{ + { + CandidateHash: modifiedCandidateAHash, + RelayParent: *relayParentBInfo, + }, + }, + 4, + ancestors, + ) + require.NoError(t, err) + chain := populateFromPreviousStorage(scope, modifiedStorage) + require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.BestChainVec()) + require.Equal(t, 0, chain.UnconnectedLen()) + require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), ErrForkWithCandidatePendingAvailability{ + candidateHash: modifiedCandidateAHash, + }) + }) + + t.Run("multiple_pending_availability_candidates", func(t *testing.T) { + validOptions := [][]*PendindAvailability{ + { + {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, + }, + { + {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, + 
{CandidateHash: candidateBHash, RelayParent: *relayParentBInfo}, + }, + { + {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, + {CandidateHash: candidateBHash, RelayParent: *relayParentBInfo}, + {CandidateHash: candidateCHash, RelayParent: *relayParentCInfo}, + }, + } + + for _, pending := range validOptions { + scope, err := NewScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + pending, + 3, + ancestors, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + assert.Equal(t, 0, chain.UnconnectedLen()) + } + }) + + t.Run("relay_parents_of_pending_availability_candidates_can_be_out_of_scope", func(t *testing.T) { + ancestorsWithoutA := []inclusionemulator.RelayChainBlockInfo{ + *relayParentBInfo, + } + + scope, err := NewScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + []*PendindAvailability{ + {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, + }, + 4, + ancestorsWithoutA, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + assert.Equal(t, 0, chain.UnconnectedLen()) + }) + + t.Run("relay_parents_of_pending_availability_candidates_cannot_move_backwards", func(t *testing.T) { + scope, err := NewScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + []*PendindAvailability{ + { + CandidateHash: candidateAHash, + RelayParent: inclusionemulator.RelayChainBlockInfo{ + Hash: relayParentAInfo.Hash, + Number: 1, + StorageRoot: relayParentAInfo.StorageRoot, + }, + }, + { + CandidateHash: candidateBHash, + RelayParent: inclusionemulator.RelayChainBlockInfo{ + Hash: relayParentBInfo.Hash, + Number: 0, + StorageRoot: relayParentBInfo.StorageRoot, + }, + }, + }, + 4, + []inclusionemulator.RelayChainBlockInfo{}, + ) 
+ require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + assert.Empty(t, chain.BestChainVec()) + assert.Equal(t, 0, chain.UnconnectedLen()) + }) + + t.Run("more_complex_case_with_multiple_candidates_and_constraints", func(t *testing.T) { + scope, err := NewScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + 2, + ancestors, + ) + require.NoError(t, err) + + // Candidate D + pvdD, candidateD := makeCommittedCandidate(t, paraID, + relayParentCInfo.Hash, uint32(relayParentCInfo.Number), + parachaintypes.HeadData{Data: []byte{0x0d}}, + parachaintypes.HeadData{Data: []byte{0x0e}}, + uint32(relayParentCInfo.Number), + ) + candidateDHash, candidateDEntry := hashAndGetEntry(t, candidateD, pvdD, Backed) + require.NoError(t, populateFromPreviousStorage(scope, storage). + CanAddCandidateAsPotential(candidateDEntry)) + require.NoError(t, storage.addCandidateEntry(candidateDEntry)) + + // Candidate F + pvdF, candidateF := makeCommittedCandidate(t, paraID, + relayParentCInfo.Hash, uint32(relayParentCInfo.Number), + parachaintypes.HeadData{Data: []byte{0x0f}}, + parachaintypes.HeadData{Data: []byte{0xf1}}, + 1000, + ) + candidateFHash, candidateFEntry := hashAndGetEntry(t, candidateF, pvdF, Seconded) + require.NoError(t, populateFromPreviousStorage(scope, storage). + CanAddCandidateAsPotential(candidateFEntry)) + require.NoError(t, storage.addCandidateEntry(candidateFEntry)) + + // Candidate A1 + pvdA1, candidateA1 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + firstParachainHead, + parachaintypes.HeadData{Data: []byte{0xb1}}, + uint32(relayParentAInfo.Number), + ) + candidateA1Hash, candidateA1Entry := hashAndGetEntry(t, candidateA1, pvdA1, Backed) + + // candidate A1 is created so that its hash is greater than the candidate A hash. + require.Equal(t, -1, forkSelectionRule(candidateAHash, candidateA1Hash)) + require.ErrorIs(t, populateFromPreviousStorage(scope, storage). 
+ CanAddCandidateAsPotential(candidateA1Entry), + ErrForkChoiceRule{candidateHash: candidateAHash}) + + require.NoError(t, storage.addCandidateEntry(candidateA1Entry)) + + // Candidate B1 + pvdB1, candidateB1 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + parachaintypes.HeadData{Data: []byte{0xb1}}, + parachaintypes.HeadData{Data: []byte{0xc1}}, + uint32(relayParentAInfo.Number), + ) + _, candidateB1Entry := hashAndGetEntry(t, candidateB1, pvdB1, Seconded) + require.NoError(t, populateFromPreviousStorage(scope, storage). + CanAddCandidateAsPotential(candidateB1Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateB1Entry)) + + // Candidate C1 + pvdC1, candidateC1 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + parachaintypes.HeadData{Data: []byte{0xc1}}, + parachaintypes.HeadData{Data: []byte{0xd1}}, + uint32(relayParentAInfo.Number), + ) + _, candidateC1Entry := hashAndGetEntry(t, candidateC1, pvdC1, Backed) + require.NoError(t, populateFromPreviousStorage(scope, storage). + CanAddCandidateAsPotential(candidateC1Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateC1Entry)) + + // Candidate C2 + pvdC2, candidateC2 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + parachaintypes.HeadData{Data: []byte{0xc1}}, + parachaintypes.HeadData{Data: []byte{0xd2}}, + uint32(relayParentAInfo.Number), + ) + + _, candidateC2Entry := hashAndGetEntry(t, candidateC2, pvdC2, Seconded) + require.NoError(t, populateFromPreviousStorage(scope, storage). 
+ CanAddCandidateAsPotential(candidateC2Entry)) + require.NoError(t, storage.addCandidateEntry(candidateC2Entry)) + + // Candidate A2 + pvdA2, candidateA2 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + firstParachainHead, + parachaintypes.HeadData{Data: []byte{0x0c9}}, + uint32(relayParentAInfo.Number), + ) + candidateA2Hash, candidateA2Entry := hashAndGetEntry(t, candidateA2, pvdA2, Seconded) + + require.Equal(t, -1, forkSelectionRule(candidateA2Hash, candidateAHash)) + require.NoError(t, populateFromPreviousStorage(scope, storage). + CanAddCandidateAsPotential(candidateA2Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateA2Entry)) + + // Candidate B2 + pvdB2, candidateB2 := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + parachaintypes.HeadData{Data: []byte{0x0c9}}, + parachaintypes.HeadData{Data: []byte{0xb4}}, + uint32(relayParentBInfo.Number), + ) + candidateB2Hash, candidateB2Entry := hashAndGetEntry(t, candidateB2, pvdB2, Backed) + require.NoError(t, populateFromPreviousStorage(scope, storage). 
+ CanAddCandidateAsPotential(candidateB2Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateB2Entry)) + + chain := populateFromPreviousStorage(scope, storage) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + + unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) + for unconnected := range chain.Unconnected() { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + expectedUnconnected := map[parachaintypes.CandidateHash]struct{}{ + candidateDHash: {}, + candidateFHash: {}, + candidateA2Hash: {}, + candidateB2Hash: {}, + } + assert.Equal(t, expectedUnconnected, unconnectedHashes) + + // Cannot add as potential an already present candidate (whether it's in the best chain or in unconnected storage) + assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlradyKnown) + assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateFEntry), ErrCandidateAlradyKnown) + }) +} From f11ee4f905afaceb0da4605a9a69f2b7439d7dd0 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 21 Nov 2024 15:33:00 -0400 Subject: [PATCH 09/31] chore: `TestPopulateAndCheckPotential` done --- .../fragment-chain/fragment_chain.go | 5 +- .../fragment-chain/fragment_chain_test.go | 259 +++++++++++++++++- .../inclusion-emulator/inclusion_emulator.go | 14 +- 3 files changed, 261 insertions(+), 17 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 8d5b057ce6..628a7e7969 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -214,7 +214,6 @@ func (c *CandidateStorage) markBacked(candidateHash parachaintypes.CandidateHash } entry.state = Backed - fmt.Println("candidate marked as backed") } func (c *CandidateStorage) 
contains(candidateHash parachaintypes.CandidateHash) bool { @@ -604,6 +603,7 @@ func (f *FragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.Cand } parentHeadDataHash := candidateEntry.parentHeadDataHash + f.unconnected.markBacked(newlyBackedCandidate) if !f.revertTo(parentHeadDataHash) { @@ -1002,7 +1002,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { var cumulativeModifications *inclusionemulator.ConstraintModifications if len(f.bestChain.chain) > 0 { lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1] - cumulativeModifications = lastCandidate.cumulativeModifications + cumulativeModifications = lastCandidate.cumulativeModifications.Clone() } else { cumulativeModifications = inclusionemulator.NewConstraintModificationsIdentity() } @@ -1069,7 +1069,6 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { // candidates can be out-of-scope. // // earliest relay parent can be before the - if relayParent.Number < minRelayParent { // relay parent moved backwards continue diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 7afe579341..4f0406f1e2 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -2,6 +2,7 @@ package fragmentchain import ( "bytes" + "errors" "slices" "testing" @@ -1523,10 +1524,11 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) // Candidate D + candidateDParaHead := parachaintypes.HeadData{Data: []byte{0x0e}} pvdD, candidateD := makeCommittedCandidate(t, paraID, relayParentCInfo.Hash, uint32(relayParentCInfo.Number), - parachaintypes.HeadData{Data: []byte{0x0d}}, - parachaintypes.HeadData{Data: []byte{0x0e}}, + candidateCParaHead, + candidateDParaHead, uint32(relayParentCInfo.Number), ) candidateDHash, candidateDEntry := 
hashAndGetEntry(t, candidateD, pvdD, Backed) @@ -1535,10 +1537,12 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, storage.addCandidateEntry(candidateDEntry)) // Candidate F + candidateEParaHead := parachaintypes.HeadData{Data: []byte{0x0f}} + candidateFParaHead := parachaintypes.HeadData{Data: []byte{0xf1}} pvdF, candidateF := makeCommittedCandidate(t, paraID, relayParentCInfo.Hash, uint32(relayParentCInfo.Number), - parachaintypes.HeadData{Data: []byte{0x0f}}, - parachaintypes.HeadData{Data: []byte{0xf1}}, + candidateEParaHead, + candidateFParaHead, 1000, ) candidateFHash, candidateFEntry := hashAndGetEntry(t, candidateF, pvdF, Seconded) @@ -1603,10 +1607,11 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, storage.addCandidateEntry(candidateC2Entry)) // Candidate A2 + candidateA2HeadData := parachaintypes.HeadData{Data: []byte{0x0c9}} pvdA2, candidateA2 := makeCommittedCandidate(t, paraID, relayParentAInfo.Hash, uint32(relayParentAInfo.Number), firstParachainHead, - parachaintypes.HeadData{Data: []byte{0x0c9}}, + candidateA2HeadData, uint32(relayParentAInfo.Number), ) candidateA2Hash, candidateA2Entry := hashAndGetEntry(t, candidateA2, pvdA2, Seconded) @@ -1618,10 +1623,11 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, storage.addCandidateEntry(candidateA2Entry)) // Candidate B2 + candidateB2HeadData := parachaintypes.HeadData{Data: []byte{0xb4}} pvdB2, candidateB2 := makeCommittedCandidate(t, paraID, relayParentBInfo.Hash, uint32(relayParentBInfo.Number), - parachaintypes.HeadData{Data: []byte{0x0c9}}, - parachaintypes.HeadData{Data: []byte{0xb4}}, + candidateA2HeadData, + candidateB2HeadData, uint32(relayParentBInfo.Number), ) candidateB2Hash, candidateB2Entry := hashAndGetEntry(t, candidateB2, pvdB2, Backed) @@ -1649,5 +1655,244 @@ func TestPopulateAndCheckPotential(t *testing.T) { // Cannot add as potential an already present candidate (whether it's in the best chain or in unconnected 
storage) assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlradyKnown) assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateFEntry), ErrCandidateAlradyKnown) + + t.Run("simulate_best_chain_reorg", func(t *testing.T) { + // back a2, the reversion should happen at the root. + chain := cloneFragmentChain(chain) + chain.CandidateBacked(candidateA2Hash) + + require.Equal(t, []parachaintypes.CandidateHash{candidateA2Hash, candidateB2Hash}, chain.BestChainVec()) + + // candidate F is kept as it was truly unconnected. The rest will be trimmed + unconnected := map[parachaintypes.CandidateHash]struct{}{} + for entry := range chain.Unconnected() { + unconnected[entry.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateFHash: {}, + }, unconnected) + + // candidates A1 and A will never have potential again + require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateA1Entry), ErrForkChoiceRule{ + candidateHash: candidateA2Hash, + }) + require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrForkChoiceRule{ + candidateHash: candidateA2Hash, + }) + }) + + t.Run("simulate_more_complex_reorg", func(t *testing.T) { + // a2 points to b2, which is backed + // a2 has underneath a subtree a2 -> b2 -> c3 and a2 -> b2 -> c4 + // b2 and c3 are backed, c4 is kept because it has a lower candidate hash than c3 + // backing c4 will cause a chain reorg + + // candidate c3 + candidateC3HeadData := parachaintypes.HeadData{Data: []byte{0xc2}} + candidateC3Pvd, candidateC3 := makeCommittedCandidate(t, paraID, + relayParentBHash, uint32(relayParentBInfo.Number), + candidateB2HeadData, + candidateC3HeadData, + uint32(relayParentBInfo.Number), + ) + + candidateC3Hash, candidateC3Entry := hashAndGetEntry(t, candidateC3, candidateC3Pvd, Seconded) + + // candidate c4 + candidateC4HeadData := parachaintypes.HeadData{Data: []byte{0xc3}} + candidateC4Pvd, candidateC4 := makeCommittedCandidate(t, 
paraID, + relayParentBHash, uint32(relayParentBInfo.Number), + candidateB2HeadData, + candidateC4HeadData, + uint32(relayParentBInfo.Number), + ) + + candidateC4Hash, candidateC4Entry := hashAndGetEntry(t, candidateC4, candidateC4Pvd, Seconded) + + // c4 should have a lower candidate hash than c3 + require.Equal(t, -1, forkSelectionRule(candidateC4Hash, candidateC3Hash)) + + storage := storage.Clone() + + require.NoError(t, storage.addCandidateEntry(candidateC3Entry)) + require.NoError(t, storage.addCandidateEntry(candidateC4Entry)) + + chain := populateFromPreviousStorage(scope, storage) + + // current best chain + // so we will cause a reorg when backing a2 and c3 + // and trigger another reorg when backing c4 + require.Equal(t, []parachaintypes.CandidateHash{ + candidateAHash, candidateBHash, candidateCHash, + }, chain.BestChainVec()) + + chain.CandidateBacked(candidateA2Hash) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateA2Hash, candidateB2Hash, + }, chain.BestChainVec()) + + chain.CandidateBacked(candidateC3Hash) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateA2Hash, candidateB2Hash, candidateC3Hash, + }, chain.BestChainVec()) + + // backing c4 will cause a reorg + chain.CandidateBacked(candidateC4Hash) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateA2Hash, candidateB2Hash, candidateC4Hash, + }, chain.BestChainVec()) + + unconnected := make(map[parachaintypes.CandidateHash]struct{}) + for entry := range chain.Unconnected() { + unconnected[entry.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateFHash: {}, + }, unconnected) + }) + + // candidate F has an invalid hrmp watermark, however it was not checked beforehand + // as we don't have its parent yet. Add its parent now (candidate E), this will not impact anything + // as E is not yet part of the best chain. 
+ candidateEPvd, candidateE := makeCommittedCandidate(t, paraID, + relayParentCHash, uint32(relayParentCInfo.Number), + candidateDParaHead, + candidateEParaHead, + uint32(relayParentCInfo.Number), + ) + + candidateEHash, _ := hashAndInsertCandididate(t, storage, candidateE, candidateEPvd, Seconded) + chain = populateFromPreviousStorage(scope, storage) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + + unconnected := make(map[parachaintypes.CandidateHash]struct{}) + for entry := range chain.Unconnected() { + unconnected[entry.candidateHash] = struct{}{} + } + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateDHash: {}, + candidateFHash: {}, + candidateA2Hash: {}, + candidateB2Hash: {}, + candidateEHash: {}, + }, unconnected) + + t.Run("simulate_candidates_A_B_C_are_pending_availability", func(t *testing.T) { + scope, err := NewScopeWithAncestors( + *relayParentCInfo, baseConstraints.Clone(), + []*PendindAvailability{ + {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, + {CandidateHash: candidateBHash, RelayParent: *relayParentBInfo}, + {CandidateHash: candidateCHash, RelayParent: *relayParentCInfo}, + }, + 2, + ancestors, + ) + require.NoError(t, err) + + // candidates A2, B2 will now be trimmed + chain := populateFromPreviousStorage(scope, storage) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + + unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) + for unconnected := range chain.Unconnected() { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateDHash: {}, + candidateFHash: {}, + candidateEHash: {}, + }, unconnectedHashes) + + // cannot add as potential an already pending availability candidate + require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), 
ErrCandidateAlradyKnown) + + // simulate the fact that candidate A, B and C have been included + baseConstraints := makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0d}}) + scope, err = NewScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 2, ancestors) + require.NoError(t, err) + + prevChain := chain + chain = NewFragmentChain(scope, NewCandidateStorage()) + chain.PopulateFromPrevious(prevChain) + require.Equal(t, []parachaintypes.CandidateHash{candidateDHash}, chain.BestChainVec()) + + unconnectedHashes = make(map[parachaintypes.CandidateHash]struct{}) + for unconnected := range chain.Unconnected() { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateEHash: {}, + candidateFHash: {}, + }, unconnectedHashes) + + // mark E as backed, F will be dropped for invalid watermark. + // empty unconnected candidates + chain.CandidateBacked(candidateEHash) + require.Equal(t, []parachaintypes.CandidateHash{candidateDHash, candidateEHash}, chain.BestChainVec()) + require.Zero(t, chain.UnconnectedLen()) + + var expectedErr error = &ErrCheckAgainstConstraints{ + fragmentValidityErr: &inclusionemulator.ErrOutputsInvalid{ + ModificationError: &inclusionemulator.ErrDisallowedHrmpWatermark{ + BlockNumber: 1000, + }, + }, + } + + errCheckAgainstConstraints := new(ErrCheckAgainstConstraints) + err = chain.CanAddCandidateAsPotential(candidateFEntry) + + require.True(t, errors.As(err, errCheckAgainstConstraints)) + require.Equal(t, errCheckAgainstConstraints, expectedErr) + }) }) } + +func cloneFragmentChain(original *FragmentChain) *FragmentChain { + // Clone the scope + clonedScope := &Scope{ + relayParent: original.scope.relayParent, + baseConstraints: original.scope.baseConstraints.Clone(), + pendindAvailability: append([]*PendindAvailability(nil), original.scope.pendindAvailability...), + maxDepth: original.scope.maxDepth, + ancestors: original.scope.ancestors.Copy(), 
+ ancestorsByHash: make(map[common.Hash]inclusionemulator.RelayChainBlockInfo), + } + + for k, v := range original.scope.ancestorsByHash { + clonedScope.ancestorsByHash[k] = v + } + + // Clone the best chain + clonedBestChain := NewBackedChain() + for _, node := range original.bestChain.chain { + clonedNode := &FragmentNode{ + fragment: node.fragment, + candidateHash: node.candidateHash, + parentHeadDataHash: node.parentHeadDataHash, + outputHeadDataHash: node.outputHeadDataHash, + cumulativeModifications: node.cumulativeModifications.Clone(), + } + clonedBestChain.Push(clonedNode) + } + + // Clone the unconnected storage + clonedUnconnected := original.unconnected.Clone() + + // Create the cloned fragment chain + clonedFragmentChain := &FragmentChain{ + scope: clonedScope, + bestChain: clonedBestChain, + unconnected: clonedUnconnected, + } + + return clonedFragmentChain +} diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index d2bad1b2c4..ddfe5fc8b9 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -26,11 +26,11 @@ type ProspectiveCandidate struct { } type ErrDisallowedHrmpWatermark struct { - blockNumber uint + BlockNumber uint } func (e *ErrDisallowedHrmpWatermark) Error() string { - return fmt.Sprintf("DisallowedHrmpWatermark(BlockNumber: %d)", e.blockNumber) + return fmt.Sprintf("DisallowedHrmpWatermark(BlockNumber: %d)", e.BlockNumber) } type ErrNoSuchHrmpChannel struct { @@ -113,11 +113,11 @@ func (e *ErrPersistedValidationDataMismatch) Error() string { } type ErrOutputsInvalid struct { - modificationError error + ModificationError error } func (e *ErrOutputsInvalid) Error() string { - return fmt.Sprintf("OutputsInvalid(ModificationError: %v)", e.modificationError) + return fmt.Sprintf("OutputsInvalid(ModificationError: %v)", e.ModificationError) } type ErrCodeSizeTooLarge struct { 
@@ -237,7 +237,7 @@ func (c *Constraints) Clone() *Constraints { func (c *Constraints) CheckModifications(modifications *ConstraintModifications) error { if modifications.HrmpWatermark != nil && modifications.HrmpWatermark.Type == Trunk { if !slices.Contains(c.HrmpInbound.ValidWatermarks, modifications.HrmpWatermark.Watermark()) { - return &ErrDisallowedHrmpWatermark{blockNumber: modifications.HrmpWatermark.Watermark()} + return &ErrDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Watermark()} } } @@ -319,7 +319,7 @@ func (c *Constraints) ApplyModifications(modifications *ConstraintModifications) newConstraints.HrmpInbound.ValidWatermarks = newConstraints.HrmpInbound.ValidWatermarks[pos:] case Trunk: // Trunk update landing on disallowed watermark is not OK. - return nil, &ErrDisallowedHrmpWatermark{blockNumber: modifications.HrmpWatermark.Block} + return nil, &ErrDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Block} } } } @@ -759,7 +759,7 @@ func validateAgainstConstraints( } if err := constraints.CheckModifications(modifications); err != nil { - return &ErrOutputsInvalid{modificationError: err} + return &ErrOutputsInvalid{ModificationError: err} } return nil From 6a001c82571e3b1477bdc4bf53349a8a18f6967a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 21 Nov 2024 18:07:25 -0400 Subject: [PATCH 10/31] feat: bring polkadot-sdk test coverage to fragment chain --- .../fragment-chain/fragment_chain.go | 1 + .../fragment-chain/fragment_chain_test.go | 278 ++++++++++++++++++ 2 files changed, 279 insertions(+) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 628a7e7969..3ee86e8db6 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -700,6 +700,7 @@ func (f *FragmentChain) FindBackableChain( } basePos := 
f.findAncestorPath(ancestors) + actualEndIdx := min(basePos+int(count), len(f.bestChain.chain)) res := make([]*CandidateAndRelayParent, 0, actualEndIdx-basePos) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 4f0406f1e2..ffa4a04b74 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -3,6 +3,8 @@ package fragmentchain import ( "bytes" "errors" + "maps" + "math/rand/v2" "slices" "testing" @@ -1896,3 +1898,279 @@ func cloneFragmentChain(original *FragmentChain) *FragmentChain { return clonedFragmentChain } + +func TestFindAncestorPathAndFindBackableChainEmptyBestChain(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{1}, 32)) + requiredParent := parachaintypes.HeadData{Data: []byte{0xff}} + maxDepth := uint(10) + + // Empty chain + baseConstraints := makeConstraints(0, []uint{0}, requiredParent) + + relayParentInfo := inclusionemulator.RelayChainBlockInfo{ + Number: 0, + Hash: relayParent, + StorageRoot: common.Hash{}, + } + + scope, err := NewScopeWithAncestors(relayParentInfo, baseConstraints, nil, maxDepth, nil) + require.NoError(t, err) + + chain := NewFragmentChain(scope, NewCandidateStorage()) + assert.Equal(t, 0, chain.BestChainLen()) + + assert.Equal(t, 0, chain.findAncestorPath(map[parachaintypes.CandidateHash]struct{}{})) + assert.Equal(t, []*CandidateAndRelayParent{}, chain.FindBackableChain(map[parachaintypes.CandidateHash]struct{}{}, 2)) + + // Invalid candidate + ancestors := map[parachaintypes.CandidateHash]struct{}{ + {Value: common.Hash{}}: {}, + } + assert.Equal(t, 0, chain.findAncestorPath(ancestors)) + assert.Equal(t, []*CandidateAndRelayParent{}, chain.FindBackableChain(ancestors, 2)) +} + +func TestFindAncestorPathAndFindBackableChain(t *testing.T) { + paraID := parachaintypes.ParaID(5) 
+ relayParent := common.BytesToHash(bytes.Repeat([]byte{1}, 32)) + requiredParent := parachaintypes.HeadData{Data: []byte{0xff}} + maxDepth := uint(5) + relayParentNumber := uint32(0) + relayParentStorageRoot := common.Hash{} + + type CandidateAndPVD struct { + candidate parachaintypes.CommittedCandidateReceipt + pvd parachaintypes.PersistedValidationData + } + + candidates := make([]*CandidateAndPVD, 0) + + // candidate 0 + candidate0Pvd, candidate0 := makeCommittedCandidate(t, paraID, + relayParent, 0, requiredParent, parachaintypes.HeadData{Data: []byte{0x00}}, relayParentNumber) + candidates = append(candidates, &CandidateAndPVD{candidate: candidate0, pvd: candidate0Pvd}) + + // candidate 1 to 5 + for i := 1; i <= 5; i++ { + candidatePvd, candidate := makeCommittedCandidate(t, paraID, + relayParent, 0, + parachaintypes.HeadData{Data: []byte{byte(i - 1)}}, + parachaintypes.HeadData{Data: []byte{byte(i)}}, + relayParentNumber) + candidates = append(candidates, &CandidateAndPVD{candidate: candidate, pvd: candidatePvd}) + } + + storage := NewCandidateStorage() + + for _, c := range candidates { + candidateHash, err := c.candidate.Hash() + require.NoError(t, err) + + entry, err := NewCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, c.candidate, c.pvd, Seconded) + require.NoError(t, err) + + err = storage.addCandidateEntry(entry) + require.NoError(t, err) + } + + candidateHashes := make([]parachaintypes.CandidateHash, 0) + for _, c := range candidates { + candidateHash, err := c.candidate.Hash() + require.NoError(t, err) + candidateHashes = append(candidateHashes, parachaintypes.CandidateHash{Value: candidateHash}) + } + + type Ancestors = map[parachaintypes.CandidateHash]struct{} + + hashes := func(from, to uint) []*CandidateAndRelayParent { + var output []*CandidateAndRelayParent + + for i := from; i < to; i++ { + output = append(output, &CandidateAndRelayParent{ + CandidateHash: candidateHashes[i], + RealyParentHash: relayParent, + }) + } + + 
return output + } + + relayParentInfo := inclusionemulator.RelayChainBlockInfo{ + Number: uint(relayParentNumber), + Hash: relayParent, + StorageRoot: relayParentStorageRoot, + } + + baseConstraints := makeConstraints(0, []uint{0}, requiredParent) + scope, err := NewScopeWithAncestors( + relayParentInfo, + baseConstraints, + nil, + maxDepth, + nil, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + + // for now candidates are only seconded, not backed, the best chain is empty + // and no candidate will be returned + + require.Equal(t, 6, len(candidateHashes)) + require.Equal(t, 0, chain.BestChainLen()) + require.Equal(t, 6, chain.UnconnectedLen()) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + } + + t.Run("couple_candidates_backed", func(t *testing.T) { + chain := cloneFragmentChain(chain) + chain.CandidateBacked(candidateHashes[5]) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + } + + chain.CandidateBacked(candidateHashes[3]) + chain.CandidateBacked(candidateHashes[4]) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + } + + chain.CandidateBacked(candidateHashes[1]) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + } + + chain.CandidateBacked(candidateHashes[0]) + require.Equal(t, hashes(0, 1), chain.FindBackableChain(make(Ancestors), 1)) + + for c := 2; c < 10; c++ { + require.Equal(t, hashes(0, 2), chain.FindBackableChain(make(Ancestors), uint32(c))) + } + + // now back the missing piece + chain.CandidateBacked(candidateHashes[2]) + require.Equal(t, 6, chain.BestChainLen()) + + for count := 0; count < 10; count++ { + var result []*CandidateAndRelayParent + if count > 6 { + result = hashes(0, 6) + } else { + for i 
:= 0; i < count && i < 6; i++ { + result = append(result, &CandidateAndRelayParent{ + CandidateHash: candidateHashes[i], + RealyParentHash: relayParent, + }) + } + } + require.Equal(t, result, chain.FindBackableChain(make(Ancestors), uint32(count))) + } + }) + + t.Run("back_all_candidates_in_random_order", func(t *testing.T) { + candidatesShuffled := make([]parachaintypes.CandidateHash, len(candidateHashes)) + for i := range candidateHashes { + candidatesShuffled[i] = parachaintypes.CandidateHash{ + Value: common.NewHash(candidateHashes[i].Value.ToBytes()), + } + } + + rand.Shuffle(len(candidatesShuffled), func(i, j int) { + candidatesShuffled[i], candidatesShuffled[j] = candidatesShuffled[j], candidatesShuffled[i] + }) + + for _, c := range candidatesShuffled { + chain.CandidateBacked(c) + storage.markBacked(c) + } + + // no ancestors supplied + require.Equal(t, 0, chain.findAncestorPath(make(Ancestors))) + require.Equal(t, []*CandidateAndRelayParent(nil), chain.FindBackableChain(make(Ancestors), 0)) + require.Equal(t, hashes(0, 1), chain.FindBackableChain(make(Ancestors), 1)) + require.Equal(t, hashes(0, 2), chain.FindBackableChain(make(Ancestors), 2)) + require.Equal(t, hashes(0, 5), chain.FindBackableChain(make(Ancestors), 5)) + + for count := 6; count < 10; count++ { + require.Equal(t, hashes(0, 6), chain.FindBackableChain(make(Ancestors), uint32(count))) + } + + // ancestors which is not part of the chain will be ignored + ancestors := make(Ancestors) + ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} + require.Equal(t, 0, chain.findAncestorPath(ancestors)) + require.Equal(t, hashes(0, 4), chain.FindBackableChain(ancestors, 4)) + + ancestors = make(Ancestors) + ancestors[candidateHashes[1]] = struct{}{} + ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} + require.Equal(t, 0, chain.findAncestorPath(ancestors)) + require.Equal(t, hashes(0, 4), chain.FindBackableChain(ancestors, 4)) + + ancestors = 
make(Ancestors) + ancestors[candidateHashes[0]] = struct{}{} + ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} + require.Equal(t, 1, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(1, 5), chain.FindBackableChain(ancestors, 4)) + + // ancestors which are part of the chain but don't form a path from root, will be ignored + ancestors = make(Ancestors) + ancestors[candidateHashes[1]] = struct{}{} + ancestors[candidateHashes[2]] = struct{}{} + require.Equal(t, 0, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(0, 4), chain.FindBackableChain(ancestors, 4)) + + // valid ancestors + ancestors = make(Ancestors) + ancestors[candidateHashes[2]] = struct{}{} + ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[1]] = struct{}{} + require.Equal(t, 3, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(3, 5), chain.FindBackableChain(maps.Clone(ancestors), 2)) + + for count := 3; count < 10; count++ { + require.Equal(t, hashes(3, 6), chain.FindBackableChain(maps.Clone(ancestors), uint32(count))) + } + + // valid ancestors with candidates which have been omitted due to timeouts + ancestors = make(Ancestors) + ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[2]] = struct{}{} + require.Equal(t, 1, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(1, 4), chain.FindBackableChain(maps.Clone(ancestors), 3)) + require.Equal(t, hashes(1, 5), chain.FindBackableChain(maps.Clone(ancestors), 4)) + + for count := 5; count < 10; count++ { + require.Equal(t, hashes(1, 6), chain.FindBackableChain(maps.Clone(ancestors), uint32(count))) + } + + ancestors = make(Ancestors) + ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[1]] = struct{}{} + ancestors[candidateHashes[3]] = struct{}{} + require.Equal(t, 2, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(2, 6), 
chain.FindBackableChain(maps.Clone(ancestors), 4)) + + require.Equal(t, hashes(0, 0), chain.FindBackableChain(maps.Clone(ancestors), 0)) + + // stop when we've found a candidate which is pending availability + scope, err := NewScopeWithAncestors(relayParentInfo, baseConstraints, + []*PendindAvailability{ + {CandidateHash: candidateHashes[3], RelayParent: relayParentInfo}, + }, + maxDepth, + nil, + ) + require.NoError(t, err) + chain = populateFromPreviousStorage(scope, storage) + ancestors = make(Ancestors) + ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[1]] = struct{}{} + require.Equal(t, hashes(2, 3), chain.FindBackableChain(maps.Clone(ancestors), 3)) + }) +} From 6b30bc64f2cbde5837fc2673883d5210d84b217a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 26 Nov 2024 18:44:04 -0400 Subject: [PATCH 11/31] chore: remove dsstore --- dot/parachain/util/.DS_Store | Bin 6148 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 dot/parachain/util/.DS_Store diff --git a/dot/parachain/util/.DS_Store b/dot/parachain/util/.DS_Store deleted file mode 100644 index f9e0685c8f2e305625d74c63e50bed948df668e9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKu};G<5Pb(7f>IR?`*Z=)G+E$AQT7%z7){&A+acyj-6pXI#}ftfT+*tZ1gpk zP)>3z9Xmsg(8Q=jql%{(Vs!Q=k1HKJL!(1H`4Io|XYwNX>zqI3a7bwwbtn)D^c6U^ z=2-9lEB-QrNq(OatxzBo_-6{psG3z%Zp!c0Z`;$mHnGfE)HJSBhep5m2w+3ck@Gxg c<4JwSm5!aE&Z7O9PK<{@79^@r;1?A50P1o;5&!@I From 5f01ccbe8a04fded6a69e63cc97d23497dbe5cef Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Wed, 27 Nov 2024 20:48:52 -0400 Subject: [PATCH 12/31] chore: change loopt --- .../prospective-parachains/fragment-chain/fragment_chain.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 3ee86e8db6..e06816cc7b 100644 --- 
a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -1013,10 +1013,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { return } - for { - if len(f.bestChain.chain) > int(f.scope.maxDepth) { - break - } + for len(f.bestChain.chain) > int(f.scope.maxDepth) { childConstraints, err := f.scope.baseConstraints.ApplyModifications(cumulativeModifications) if err != nil { From 5abd1e3bf247259fcea820e1ad93ff7afe0374ba Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 3 Dec 2024 16:01:05 -0400 Subject: [PATCH 13/31] chore: fix loop conditional --- .../prospective-parachains/fragment-chain/fragment_chain.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index e06816cc7b..1cd73d0c38 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -1013,8 +1013,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { return } - for len(f.bestChain.chain) > int(f.scope.maxDepth) { - + for len(f.bestChain.chain) < int(f.scope.maxDepth) { childConstraints, err := f.scope.baseConstraints.ApplyModifications(cumulativeModifications) if err != nil { // TODO: include logger From 54e0adb55a0614ec6fb4a8cd1a92506ff336e27e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 5 Dec 2024 11:08:07 -0400 Subject: [PATCH 14/31] chore: address comment --- .../fragment-chain/errors.go | 3 +- .../fragment-chain/fragment_chain.go | 53 ++--- .../fragment-chain/fragment_chain_test.go | 54 ++--- dot/parachain/types/async_backing.go | 32 ++- .../inclusion-emulator/inclusion_emulator.go | 195 ++++-------------- 5 files changed, 127 insertions(+), 210 deletions(-) diff --git 
a/dot/parachain/prospective-parachains/fragment-chain/errors.go b/dot/parachain/prospective-parachains/fragment-chain/errors.go index 95a0e33951..d0ed7f8000 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/errors.go +++ b/dot/parachain/prospective-parachains/fragment-chain/errors.go @@ -9,7 +9,7 @@ import ( ) var ( - ErrCandidateAlradyKnown = errors.New("candidate already known") + ErrCandidateAlreadyKnown = errors.New("candidate already known") ErrZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") ErrCycle = errors.New("candidate would introduce a cycle") ErrMultiplePaths = errors.New("candidate would introduce two paths to the same output state") @@ -17,7 +17,6 @@ var ( ErrParentCandidateNotFound = errors.New("could not find parent of the candidate") ErrRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") ErrPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") - ErrCandidateEntryZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. 
Would introduce a cycle") ) type ErrRelayParentPrecedesCandidatePendingAvailability struct { diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 1cd73d0c38..35dd903f6e 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -9,10 +9,13 @@ import ( parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" + "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/tidwall/btree" ) +var logger = log.NewFromGlobal(log.AddContext("pkg", "fragment_chain"), log.SetLevel(log.Debug)) + type CandidateState int const ( @@ -24,8 +27,7 @@ func forkSelectionRule(hash1, hash2 parachaintypes.CandidateHash) int { return bytes.Compare(hash1.Value[:], hash2.Value[:]) } -// CandidateEntry represents a candidate into the CandidateStorage -// TODO: Should CandidateEntry implements `HypotheticalOrConcreteCandidate` +// CandidateEntry represents a candidate in the CandidateStorage type CandidateEntry struct { candidateHash parachaintypes.CandidateHash parentHeadDataHash common.Hash @@ -56,7 +58,7 @@ func NewCandidateEntry( return nil, ErrPersistedValidationDataMismatch } - parendHeadDataHash, err := persistedValidationData.ParentHead.Hash() + parentHeadDataHash, err := persistedValidationData.ParentHead.Hash() if err != nil { return nil, fmt.Errorf("while hashing parent head data: %w", err) } @@ -66,13 +68,13 @@ func NewCandidateEntry( return nil, fmt.Errorf("while hashing output head data: %w", err) } - if parendHeadDataHash == outputHeadDataHash { - return nil, ErrCandidateEntryZeroLengthCycle + if parentHeadDataHash == outputHeadDataHash { + return nil, ErrZeroLengthCycle } return &CandidateEntry{ candidateHash: candidateHash, - 
parentHeadDataHash: parendHeadDataHash, + parentHeadDataHash: parentHeadDataHash, outputHeadDataHash: outputHeadDataHash, relayParent: candidate.Descriptor.RelayParent, state: state, @@ -159,12 +161,12 @@ func (c *CandidateStorage) Len() int { func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { _, ok := c.byCandidateHash[candidate.candidateHash] if ok { - return ErrCandidateAlradyKnown + return ErrCandidateAlreadyKnown } // updates the reference parent hash -> candidate // we don't check the `ok` value since the key can - // exists in the map but pointing to a nil hashset + // exists in the map but pointing to a nil map setOfCandidates := c.byParentHead[candidate.parentHeadDataHash] if setOfCandidates == nil { setOfCandidates = make(map[parachaintypes.CandidateHash]any) @@ -210,7 +212,7 @@ func (c *CandidateStorage) removeCandidate(candidateHash parachaintypes.Candidat func (c *CandidateStorage) markBacked(candidateHash parachaintypes.CandidateHash) { entry, ok := c.byCandidateHash[candidateHash] if !ok { - fmt.Println("candidate not found while marking as backed") + logger.Tracef("candidate not found while marking as backed") } entry.state = Backed @@ -274,9 +276,9 @@ func (c *CandidateStorage) possibleBackedParaChildren(parentHeadHash common.Hash } } -// PendindAvailability is a candidate on-chain but pending availability, for special +// PendingAvailability is a candidate on-chain but pending availability, for special // treatment in the `Scope` -type PendindAvailability struct { +type PendingAvailability struct { CandidateHash parachaintypes.CandidateHash RelayParent inclusionemulator.RelayChainBlockInfo } @@ -292,15 +294,15 @@ type Scope struct { // mapped by hash ancestorsByHash map[common.Hash]inclusionemulator.RelayChainBlockInfo // candidates pending availability at this block - pendindAvailability []*PendindAvailability + pendingAvailability []*PendingAvailability // the base constraints derived from the latest included 
candidate - baseConstraints *inclusionemulator.Constraints + baseConstraints *parachaintypes.Constraints // equal to `max_candidate_depth` maxDepth uint } // NewScopeWithAncestors defines a new scope, all arguments are straightforward -// expect ancestors. Ancestor should be in reverse order, starting with the parent +// except ancestors. Ancestor should be in reverse order, starting with the parent // of the relayParent, and proceeding backwards in block number decrements of 1. // Ancestors not following these conditions will be rejected. // @@ -311,8 +313,8 @@ type Scope struct { // should be provided. It is allowed to provide 0 ancestors. func NewScopeWithAncestors( relayParent inclusionemulator.RelayChainBlockInfo, - baseConstraints *inclusionemulator.Constraints, - pendingAvailability []*PendindAvailability, + baseConstraints *parachaintypes.Constraints, + pendingAvailability []*PendingAvailability, maxDepth uint, ancestors []inclusionemulator.RelayChainBlockInfo, ) (*Scope, error) { @@ -341,7 +343,7 @@ func NewScopeWithAncestors( return &Scope{ relayParent: relayParent, baseConstraints: baseConstraints, - pendindAvailability: pendingAvailability, + pendingAvailability: pendingAvailability, maxDepth: maxDepth, ancestors: ancestorsMap, ancestorsByHash: ancestorsByHash, @@ -370,8 +372,8 @@ func (s *Scope) Ancestor(hash common.Hash) *inclusionemulator.RelayChainBlockInf } // Whether the candidate in question is one pending availability in this scope. 
-func (s *Scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHash) *PendindAvailability { - for _, c := range s.pendindAvailability { +func (s *Scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHash) *PendingAvailability { + for _, c := range s.pendingAvailability { if c.CandidateHash == candidateHash { return c } @@ -623,7 +625,7 @@ func (f *FragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.Cand func (f *FragmentChain) CanAddCandidateAsPotential(entry *CandidateEntry) error { candidateHash := entry.candidateHash if f.bestChain.Contains(candidateHash) || f.unconnected.contains(candidateHash) { - return ErrCandidateAlradyKnown + return ErrCandidateAlreadyKnown } return f.checkPotential(entry) @@ -845,7 +847,7 @@ func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { // Try seeing if the parent candidate is in the current chain or if it is the latest // included candidate. If so, get the constraints the candidate must satisfy - var constraints *inclusionemulator.Constraints + var constraints *parachaintypes.Constraints var maybeMinRelayParentNumber *uint requiredParentHash, err := f.scope.baseConstraints.RequiredParent.Hash() @@ -868,7 +870,9 @@ func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { } var err error - constraints, err = f.scope.baseConstraints.ApplyModifications(parentCandidate.cumulativeModifications) + constraints, err = inclusionemulator.ApplyModifications( + f.scope.baseConstraints, + parentCandidate.cumulativeModifications) if err != nil { return ErrComputeConstraints{modificationErr: err} } @@ -914,7 +918,7 @@ func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { // trimUneligibleForks once the backable chain was populated, trim the forks generated by candidate // hashes which are not present in the best chain. Fan this out into a full breadth-first search. 
If -// starting point is not nil then start the search from the candidates haing this parent head hash. +// starting point is not nil then start the search from the candidates having this parent head hash. func (f *FragmentChain) trimUneligibleForks(storage *CandidateStorage, startingPoint *common.Hash) { type queueItem struct { hash common.Hash @@ -1014,7 +1018,8 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { } for len(f.bestChain.chain) < int(f.scope.maxDepth) { - childConstraints, err := f.scope.baseConstraints.ApplyModifications(cumulativeModifications) + childConstraints, err := inclusionemulator.ApplyModifications( + f.scope.baseConstraints, cumulativeModifications) if err != nil { // TODO: include logger fmt.Println("failed to apply modifications:", err) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index ffa4a04b74..cdae6a435a 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -270,7 +270,7 @@ func TestEarliestRelayParent(t *testing.T) { Hash: common.Hash{0x01}, Number: 10, } - baseConstraints := &inclusionemulator.Constraints{ + baseConstraints := ¶chaintypes.Constraints{ MinRelayParentNumber: 5, } ancestor := inclusionemulator.RelayChainBlockInfo{ @@ -296,7 +296,7 @@ func TestEarliestRelayParent(t *testing.T) { Hash: common.Hash{0x01}, Number: 10, } - baseConstraints := &inclusionemulator.Constraints{ + baseConstraints := ¶chaintypes.Constraints{ MinRelayParentNumber: 5, } return &Scope{ @@ -435,7 +435,7 @@ func TestFragmentChainWithFreshScope(t *testing.T) { StorageRoot: common.Hash{0x00}, } - baseConstraints := &inclusionemulator.Constraints{ + baseConstraints := ¶chaintypes.Constraints{ RequiredParent: parachaintypes.HeadData{Data: []byte{byte(0)}}, MinRelayParentNumber: 0, ValidationCodeHash: 
parachaintypes.ValidationCodeHash(common.Hash{0x03}), @@ -488,8 +488,8 @@ func makeConstraints( minRelayParentNumber uint, validWatermarks []uint, requiredParent parachaintypes.HeadData, -) *inclusionemulator.Constraints { - return &inclusionemulator.Constraints{ +) *parachaintypes.Constraints { + return ¶chaintypes.Constraints{ MinRelayParentNumber: minRelayParentNumber, MaxPoVSize: 1_000_000, MaxCodeSize: 1_000_000, @@ -497,10 +497,10 @@ func makeConstraints( UmpRemainingBytes: 1_000, MaxUmpNumPerCandidate: 10, DmpRemainingMessages: make([]uint, 10), - HrmpInbound: inclusionemulator.InboundHrmpLimitations{ + HrmpInbound: parachaintypes.InboundHrmpLimitations{ ValidWatermarks: validWatermarks, }, - HrmpChannelsOut: make(map[parachaintypes.ParaID]inclusionemulator.OutboundHrmpChannelLimitations), + HrmpChannelsOut: make(map[parachaintypes.ParaID]parachaintypes.OutboundHrmpChannelLimitations), MaxHrmpNumPerCandidate: 0, RequiredParent: requiredParent, ValidationCodeHash: parachaintypes.ValidationCodeHash(common.BytesToHash(bytes.Repeat([]byte{42}, 32))), @@ -564,8 +564,8 @@ func TestScopeRejectsAncestors(t *testing.T) { relayParent *inclusionemulator.RelayChainBlockInfo ancestors []inclusionemulator.RelayChainBlockInfo maxDepth uint - baseConstraints *inclusionemulator.Constraints - pendingAvailability []*PendindAvailability + baseConstraints *parachaintypes.Constraints + pendingAvailability []*PendingAvailability expectedError error }{ "rejects_ancestor_that_skips_blocks": { @@ -584,7 +584,7 @@ func TestScopeRejectsAncestors(t *testing.T) { maxDepth: 2, baseConstraints: makeConstraints(8, []uint{8, 9}, parachaintypes.HeadData{Data: []byte{0x01, 0x02, 0x03}}), - pendingAvailability: make([]*PendindAvailability, 0), + pendingAvailability: make([]*PendingAvailability, 0), expectedError: ErrUnexpectedAncestor{Number: 8, Prev: 10}, }, "rejects_ancestor_for_zero_block": { @@ -602,7 +602,7 @@ func TestScopeRejectsAncestors(t *testing.T) { }, maxDepth: 2, baseConstraints: 
makeConstraints(0, []uint{}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), - pendingAvailability: make([]*PendindAvailability, 0), + pendingAvailability: make([]*PendingAvailability, 0), expectedError: ErrUnexpectedAncestor{Number: 99999, Prev: 0}, }, "rejects_unordered_ancestors": { @@ -630,7 +630,7 @@ func TestScopeRejectsAncestors(t *testing.T) { }, maxDepth: 2, baseConstraints: makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), - pendingAvailability: make([]*PendindAvailability, 0), + pendingAvailability: make([]*PendingAvailability, 0), expectedError: ErrUnexpectedAncestor{Number: 2, Prev: 4}, }, } @@ -672,7 +672,7 @@ func TestScopeOnlyTakesAncestorsUpToMin(t *testing.T) { maxDepth := uint(2) baseConstraints := makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}) - pendingAvailability := make([]*PendindAvailability, 0) + pendingAvailability := make([]*PendingAvailability, 0) scope, err := NewScopeWithAncestors(relayParent, baseConstraints, pendingAvailability, maxDepth, ancestors) require.NoError(t, err) @@ -739,7 +739,7 @@ func TestCandidateStorageMethods(t *testing.T) { entry, err := NewCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, candidate, pvd, Seconded) require.Nil(t, entry) - require.ErrorIs(t, err, ErrCandidateEntryZeroLengthCycle) + require.ErrorIs(t, err, ErrZeroLengthCycle) }, }, @@ -785,7 +785,7 @@ func TestCandidateStorageMethods(t *testing.T) { // re-add the candidate should fail err = storage.addCandidateEntry(entry) - require.ErrorIs(t, err, ErrCandidateAlradyKnown) + require.ErrorIs(t, err, ErrCandidateAlreadyKnown) }) t.Run("mark_candidate_entry_as_backed", func(t *testing.T) { @@ -1052,7 +1052,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { candidateCHash, candidateCEntry := hashAndInsertCandididate(t, storage, candidateC, pvdC, Backed) t.Run("candidate_A_doesnt_adhere_to_base_constraints", func(t *testing.T) { - wrongConstraints := 
[]inclusionemulator.Constraints{ + wrongConstraints := []parachaintypes.Constraints{ // define a constraint that requires a parent head data // that is different from candidate A parent head *makeConstraints(relayParentAInfo.Number, []uint{relayParentAInfo.Number}, parachaintypes.HeadData{Data: []byte{0x0e}}), @@ -1358,7 +1358,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { scope, err := NewScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendindAvailability{ + []*PendingAvailability{ {CandidateHash: modifiedCandidateAHash, RelayParent: *relayParentBInfo}, }, 4, @@ -1412,7 +1412,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { scope, err := NewScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendindAvailability{ + []*PendingAvailability{ { CandidateHash: modifiedCandidateAHash, RelayParent: *relayParentBInfo, @@ -1431,7 +1431,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) t.Run("multiple_pending_availability_candidates", func(t *testing.T) { - validOptions := [][]*PendindAvailability{ + validOptions := [][]*PendingAvailability{ { {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, }, @@ -1470,7 +1470,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { scope, err := NewScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendindAvailability{ + []*PendingAvailability{ {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, }, 4, @@ -1487,7 +1487,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { scope, err := NewScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendindAvailability{ + []*PendingAvailability{ { CandidateHash: candidateAHash, RelayParent: inclusionemulator.RelayChainBlockInfo{ @@ -1655,8 +1655,8 @@ func TestPopulateAndCheckPotential(t *testing.T) { assert.Equal(t, expectedUnconnected, unconnectedHashes) // Cannot add as potential an already present candidate (whether it's in the best chain or in unconnected storage) - assert.ErrorIs(t, 
chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlradyKnown) - assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateFEntry), ErrCandidateAlradyKnown) + assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlreadyKnown) + assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateFEntry), ErrCandidateAlreadyKnown) t.Run("simulate_best_chain_reorg", func(t *testing.T) { // back a2, the reversion should happen at the root. @@ -1787,7 +1787,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { t.Run("simulate_candidates_A_B_C_are_pending_availability", func(t *testing.T) { scope, err := NewScopeWithAncestors( *relayParentCInfo, baseConstraints.Clone(), - []*PendindAvailability{ + []*PendingAvailability{ {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, {CandidateHash: candidateBHash, RelayParent: *relayParentBInfo}, {CandidateHash: candidateCHash, RelayParent: *relayParentCInfo}, @@ -1813,7 +1813,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, unconnectedHashes) // cannot add as potential an already pending availability candidate - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlradyKnown) + require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlreadyKnown) // simulate the fact that candidate A, B and C have been included baseConstraints := makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0d}}) @@ -1863,7 +1863,7 @@ func cloneFragmentChain(original *FragmentChain) *FragmentChain { clonedScope := &Scope{ relayParent: original.scope.relayParent, baseConstraints: original.scope.baseConstraints.Clone(), - pendindAvailability: append([]*PendindAvailability(nil), original.scope.pendindAvailability...), + pendingAvailability: append([]*PendingAvailability(nil), original.scope.pendingAvailability...), maxDepth: original.scope.maxDepth, ancestors: original.scope.ancestors.Copy(), ancestorsByHash: 
make(map[common.Hash]inclusionemulator.RelayChainBlockInfo), @@ -2160,7 +2160,7 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { // stop when we've found a candidate which is pending availability scope, err := NewScopeWithAncestors(relayParentInfo, baseConstraints, - []*PendindAvailability{ + []*PendingAvailability{ {CandidateHash: candidateHashes[3], RelayParent: relayParentInfo}, }, maxDepth, diff --git a/dot/parachain/types/async_backing.go b/dot/parachain/types/async_backing.go index 91b0044927..876ac1aa0d 100644 --- a/dot/parachain/types/async_backing.go +++ b/dot/parachain/types/async_backing.go @@ -3,6 +3,8 @@ package parachaintypes +import "maps" + // AsyncBackingParams contains the parameters for the async backing. type AsyncBackingParams struct { // The maximum number of para blocks between the para head in a relay parent @@ -70,8 +72,36 @@ type Constraints struct { FutureValidationCode *FutureValidationCode } -// FutureValidationCode represents a tuple of BlockNumber an ValidationCodeHash +// FutureValidationCode represents a tuple of BlockNumber and ValidationCodeHash type FutureValidationCode struct { BlockNumber uint ValidationCodeHash ValidationCodeHash } + +func (c *Constraints) Clone() *Constraints { + var futureValidationCode *FutureValidationCode + if c.FutureValidationCode != nil { + futureValidationCode = &FutureValidationCode{ + BlockNumber: c.FutureValidationCode.BlockNumber, + ValidationCodeHash: c.FutureValidationCode.ValidationCodeHash, + } + } + return &Constraints{ + MinRelayParentNumber: c.MinRelayParentNumber, + MaxPoVSize: c.MaxPoVSize, + MaxCodeSize: c.MaxCodeSize, + UmpRemaining: c.UmpRemaining, + UmpRemainingBytes: c.UmpRemainingBytes, + MaxUmpNumPerCandidate: c.MaxUmpNumPerCandidate, + DmpRemainingMessages: append([]uint(nil), c.DmpRemainingMessages...), + HrmpInbound: InboundHrmpLimitations{ + ValidWatermarks: append([]uint(nil), c.HrmpInbound.ValidWatermarks...), + }, + HrmpChannelsOut: 
maps.Clone(c.HrmpChannelsOut), + MaxHrmpNumPerCandidate: c.MaxHrmpNumPerCandidate, + RequiredParent: c.RequiredParent, + ValidationCodeHash: c.ValidationCodeHash, + UpgradeRestriction: c.UpgradeRestriction, + FutureValidationCode: futureValidationCode, + } +} diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index ddfe5fc8b9..e23d73fa26 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -43,8 +43,8 @@ func (e *ErrNoSuchHrmpChannel) Error() string { type ErrHrmpMessagesOverflow struct { paraId parachaintypes.ParaID - messagesRemaining uint - messagesSubmitted uint + messagesRemaining uint32 + messagesSubmitted uint32 } func (e *ErrHrmpMessagesOverflow) Error() string { @@ -53,8 +53,8 @@ func (e *ErrHrmpMessagesOverflow) Error() string { type ErrHrmpBytesOverflow struct { paraId parachaintypes.ParaID - bytesRemaining uint - bytesSubmitted uint + bytesRemaining uint32 + bytesSubmitted uint32 } func (e *ErrHrmpBytesOverflow) Error() string { @@ -62,8 +62,8 @@ func (e *ErrHrmpBytesOverflow) Error() string { } type ErrUmpMessagesOverflow struct { - messagesRemaining uint - messagesSubmitted uint + messagesRemaining uint32 + messagesSubmitted uint32 } func (e *ErrUmpMessagesOverflow) Error() string { @@ -71,8 +71,8 @@ func (e *ErrUmpMessagesOverflow) Error() string { } type ErrUmpBytesOverflow struct { - bytesRemaining uint - bytesSubmitted uint + bytesRemaining uint32 + bytesSubmitted uint32 } func (e *ErrUmpBytesOverflow) Error() string { @@ -80,8 +80,8 @@ func (e *ErrUmpBytesOverflow) Error() string { } type ErrDmpMessagesUnderflow struct { - messagesRemaining uint - messagesProcessed uint + messagesRemaining uint32 + messagesProcessed uint32 } func (e *ErrDmpMessagesUnderflow) Error() string { @@ -121,8 +121,8 @@ func (e *ErrOutputsInvalid) Error() string { } type ErrCodeSizeTooLarge struct { - 
maxAllowed uint - newSize uint + maxAllowed uint32 + newSize uint32 } func (e *ErrCodeSizeTooLarge) Error() string { @@ -139,8 +139,8 @@ func (e *ErrRelayParentTooOld) Error() string { } type ErrUmpMessagesPerCandidateOverflow struct { - messagesAllowed uint - messagesSubmitted uint + messagesAllowed uint32 + messagesSubmitted uint32 } func (e *ErrUmpMessagesPerCandidateOverflow) Error() string { @@ -148,8 +148,8 @@ func (e *ErrUmpMessagesPerCandidateOverflow) Error() string { } type ErrHrmpMessagesPerCandidateOverflow struct { - messagesAllowed uint - messagesSubmitted uint + messagesAllowed uint32 + messagesSubmitted uint32 } func (e *ErrHrmpMessagesPerCandidateOverflow) Error() string { @@ -171,70 +171,7 @@ type RelayChainBlockInfo struct { Number uint } -// Constraints on the actions that can be taken by a new parachain block. These -// limitations are implicitly associated with some particular parachain, which should -// be apparent from usage. -type Constraints struct { - // The minimum relay-parent number accepted under these constraints. - MinRelayParentNumber uint - // The maximum Proof-of-Validity size allowed, in bytes. - MaxPoVSize uint - // The maximum new validation code size allowed, in bytes. - MaxCodeSize uint - // The amount of UMP messages remaining. - UmpRemaining uint - // The amount of UMP bytes remaining. - UmpRemainingBytes uint - // The maximum number of UMP messages allowed per candidate. - MaxUmpNumPerCandidate uint - // Remaining DMP queue. Only includes sent-at block numbers. - DmpRemainingMessages []uint - // The limitations of all registered inbound HRMP channels. - HrmpInbound InboundHrmpLimitations - // The limitations of all registered outbound HRMP channels. - HrmpChannelsOut map[parachaintypes.ParaID]OutboundHrmpChannelLimitations - // The maximum number of HRMP messages allowed per candidate. - MaxHrmpNumPerCandidate uint - // The required parent head-data of the parachain. 
- RequiredParent parachaintypes.HeadData - // The expected validation-code-hash of this parachain. - ValidationCodeHash parachaintypes.ValidationCodeHash - // The code upgrade restriction signal as-of this parachain. - UpgradeRestriction parachaintypes.UpgradeRestriction - // The future validation code hash, if any, and at what relay-parent - // number the upgrade would be minimally applied. - FutureValidationCode *FutureValidationCode -} - -func (c *Constraints) Clone() *Constraints { - var futureValidationCode *FutureValidationCode - if c.FutureValidationCode != nil { - futureValidationCode = &FutureValidationCode{ - BlockNumber: c.FutureValidationCode.BlockNumber, - ValidationCodeHash: c.FutureValidationCode.ValidationCodeHash, - } - } - return &Constraints{ - MinRelayParentNumber: c.MinRelayParentNumber, - MaxPoVSize: c.MaxPoVSize, - MaxCodeSize: c.MaxCodeSize, - UmpRemaining: c.UmpRemaining, - UmpRemainingBytes: c.UmpRemainingBytes, - MaxUmpNumPerCandidate: c.MaxUmpNumPerCandidate, - DmpRemainingMessages: append([]uint(nil), c.DmpRemainingMessages...), - HrmpInbound: InboundHrmpLimitations{ - ValidWatermarks: append([]uint(nil), c.HrmpInbound.ValidWatermarks...), - }, - HrmpChannelsOut: maps.Clone(c.HrmpChannelsOut), - MaxHrmpNumPerCandidate: c.MaxHrmpNumPerCandidate, - RequiredParent: c.RequiredParent, - ValidationCodeHash: c.ValidationCodeHash, - UpgradeRestriction: c.UpgradeRestriction, - FutureValidationCode: futureValidationCode, - } -} - -func (c *Constraints) CheckModifications(modifications *ConstraintModifications) error { +func CheckModifications(c *parachaintypes.Constraints, modifications *ConstraintModifications) error { if modifications.HrmpWatermark != nil && modifications.HrmpWatermark.Type == Trunk { if !slices.Contains(c.HrmpInbound.ValidWatermarks, modifications.HrmpWatermark.Watermark()) { return &ErrDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Watermark()} @@ -285,7 +222,7 @@ func (c *Constraints) 
CheckModifications(modifications *ConstraintModifications) _, overflow = math.SafeSub(uint64(len(c.DmpRemainingMessages)), uint64(modifications.DmpMessagesProcessed)) if overflow { return &ErrDmpMessagesUnderflow{ - messagesRemaining: uint(len(c.DmpRemainingMessages)), + messagesRemaining: uint32(len(c.DmpRemainingMessages)), messagesProcessed: modifications.DmpMessagesProcessed, } } @@ -297,7 +234,8 @@ func (c *Constraints) CheckModifications(modifications *ConstraintModifications) return nil } -func (c *Constraints) ApplyModifications(modifications *ConstraintModifications) (*Constraints, error) { +func ApplyModifications(c *parachaintypes.Constraints, modifications *ConstraintModifications) ( + *parachaintypes.Constraints, error) { newConstraints := c.Clone() if modifications.RequiredParent != nil { @@ -366,9 +304,9 @@ func (c *Constraints) ApplyModifications(modifications *ConstraintModifications) } newConstraints.UmpRemainingBytes -= modifications.UmpBytesSent - if modifications.DmpMessagesProcessed > uint(len(newConstraints.DmpRemainingMessages)) { + if modifications.DmpMessagesProcessed > uint32(len(newConstraints.DmpRemainingMessages)) { return nil, &ErrDmpMessagesUnderflow{ - messagesRemaining: uint(len(newConstraints.DmpRemainingMessages)), + messagesRemaining: uint32(len(newConstraints.DmpRemainingMessages)), messagesProcessed: modifications.DmpMessagesProcessed, } } else { @@ -386,65 +324,10 @@ func (c *Constraints) ApplyModifications(modifications *ConstraintModifications) return newConstraints, nil } -func FromPrimitiveConstraints(pc parachaintypes.Constraints) *Constraints { - hrmpChannelsOut := make(map[parachaintypes.ParaID]OutboundHrmpChannelLimitations) - for k, v := range pc.HrmpChannelsOut { - hrmpChannelsOut[k] = OutboundHrmpChannelLimitations{ - BytesRemaining: uint(v.BytesRemaining), - MessagesRemaining: uint(v.MessagesRemaining), - } - } - - var futureValidationCode *FutureValidationCode - if pc.FutureValidationCode != nil { - 
futureValidationCode = &FutureValidationCode{ - BlockNumber: pc.FutureValidationCode.BlockNumber, - ValidationCodeHash: pc.FutureValidationCode.ValidationCodeHash, - } - } - - return &Constraints{ - MinRelayParentNumber: pc.MinRelayParentNumber, - MaxPoVSize: uint(pc.MaxPoVSize), - MaxCodeSize: uint(pc.MaxCodeSize), - UmpRemaining: uint(pc.UmpRemaining), - UmpRemainingBytes: uint(pc.UmpRemainingBytes), - MaxUmpNumPerCandidate: uint(pc.MaxUmpNumPerCandidate), - DmpRemainingMessages: pc.DmpRemainingMessages, - HrmpInbound: InboundHrmpLimitations{ - ValidWatermarks: pc.HrmpInbound.ValidWatermarks, - }, - HrmpChannelsOut: hrmpChannelsOut, - MaxHrmpNumPerCandidate: uint(pc.MaxHrmpNumPerCandidate), - RequiredParent: pc.RequiredParent, - ValidationCodeHash: pc.ValidationCodeHash, - UpgradeRestriction: pc.UpgradeRestriction, - FutureValidationCode: futureValidationCode, - } -} - -// InboundHrmpLimitations constraints on inbound HRMP channels -type InboundHrmpLimitations struct { - ValidWatermarks []uint -} - -// OutboundHrmpChannelLimitations constraints on outbound HRMP channels. -type OutboundHrmpChannelLimitations struct { - BytesRemaining uint - MessagesRemaining uint -} - -// FutureValidationCode represents the future validation code hash, if any, and at what relay-parent -// number the upgrade would be minimally applied. -type FutureValidationCode struct { - BlockNumber uint - ValidationCodeHash parachaintypes.ValidationCodeHash -} - // OutboundHrmpChannelModification represents modifications to outbound HRMP channels. type OutboundHrmpChannelModification struct { - BytesSubmitted uint - MessagesSubmitted uint + BytesSubmitted uint32 + MessagesSubmitted uint32 } // HrmpWatermarkUpdate represents an update to the HRMP Watermark. @@ -475,11 +358,11 @@ type ConstraintModifications struct { // Outbound HRMP channel modifications. OutboundHrmp map[parachaintypes.ParaID]OutboundHrmpChannelModification // The amount of UMP XCM messages sent. 
`UMPSignal` and separator are excluded. - UmpMessagesSent uint + UmpMessagesSent uint32 // The amount of UMP XCM bytes sent. `UMPSignal` and separator are excluded. - UmpBytesSent uint + UmpBytesSent uint32 // The amount of DMP messages processed. - DmpMessagesProcessed uint + DmpMessagesProcessed uint32 // Whether a pending code upgrade has been applied. CodeUpgradeApplied bool } @@ -543,7 +426,7 @@ func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { // This is a type which guarantees that the candidate is valid under the operating constraints type Fragment struct { relayParent *RelayChainBlockInfo - operatingConstraints *Constraints + operatingConstraints *parachaintypes.Constraints candidate ProspectiveCandidate modifications *ConstraintModifications } @@ -567,7 +450,7 @@ func (f *Fragment) ConstraintModifications() *ConstraintModifications { // small enough. func NewFragment( relayParent *RelayChainBlockInfo, - operatingConstraints *Constraints, + operatingConstraints *parachaintypes.Constraints, candidate ProspectiveCandidate) (*Fragment, error) { modifications, err := CheckAgainstConstraints( relayParent, @@ -590,7 +473,7 @@ func NewFragment( func CheckAgainstConstraints( relayParent *RelayChainBlockInfo, - operatingConstraints *Constraints, + operatingConstraints *parachaintypes.Constraints, commitments parachaintypes.CandidateCommitments, validationCodeHash parachaintypes.ValidationCodeHash, persistedValidationData parachaintypes.PersistedValidationData, @@ -631,7 +514,7 @@ func CheckAgainstConstraints( record = OutboundHrmpChannelModification{} } - record.BytesSubmitted += uint(len(message.Data)) + record.BytesSubmitted += uint32(len(message.Data)) record.MessagesSubmitted++ outboundHrmp[recipientParaID] = record } @@ -645,9 +528,9 @@ func CheckAgainstConstraints( RequiredParent: &commitments.HeadData, HrmpWatermark: &hrmpWatermark, OutboundHrmp: outboundHrmp, - UmpMessagesSent: uint(umpMessagesSent), - UmpBytesSent: 
uint(umpBytesSent), - DmpMessagesProcessed: uint(commitments.ProcessedDownwardMessages), + UmpMessagesSent: uint32(umpMessagesSent), + UmpBytesSent: uint32(umpBytesSent), + DmpMessagesProcessed: commitments.ProcessedDownwardMessages, CodeUpgradeApplied: codeUpgradeApplied, } @@ -684,7 +567,7 @@ func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[para } func validateAgainstConstraints( - constraints *Constraints, + constraints *parachaintypes.Constraints, relayParent *RelayChainBlockInfo, commitments parachaintypes.CandidateCommitments, persistedValidationData parachaintypes.PersistedValidationData, @@ -731,10 +614,10 @@ func validateAgainstConstraints( announcedCodeSize = len(*commitments.NewValidationCode) } - if uint(announcedCodeSize) > constraints.MaxCodeSize { + if uint32(announcedCodeSize) > constraints.MaxCodeSize { return &ErrCodeSizeTooLarge{ maxAllowed: constraints.MaxCodeSize, - newSize: uint(announcedCodeSize), + newSize: uint32(announcedCodeSize), } } @@ -747,7 +630,7 @@ func validateAgainstConstraints( if len(commitments.HorizontalMessages) > int(constraints.MaxHrmpNumPerCandidate) { return &ErrHrmpMessagesPerCandidateOverflow{ messagesAllowed: constraints.MaxHrmpNumPerCandidate, - messagesSubmitted: uint(len(commitments.HorizontalMessages)), + messagesSubmitted: uint32(len(commitments.HorizontalMessages)), } } @@ -758,7 +641,7 @@ func validateAgainstConstraints( } } - if err := constraints.CheckModifications(modifications); err != nil { + if err := CheckModifications(constraints, modifications); err != nil { return &ErrOutputsInvalid{ModificationError: err} } From 78dfef1e2eb825014e66cf189a999659dd7ecd02 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 5 Dec 2024 20:16:35 -0400 Subject: [PATCH 15/31] chore: address comments --- .../fragment-chain/fragment_chain.go | 61 ++++++++---------- .../fragment-chain/fragment_chain_test.go | 64 ++++++++++--------- dot/parachain/types/async_backing.go | 2 +- 
dot/parachain/types/types.go | 45 +++++++++++-- .../inclusion-emulator/inclusion_emulator.go | 44 ++++++------- 5 files changed, 125 insertions(+), 91 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 35dd903f6e..422a26ce3e 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -16,7 +16,7 @@ import ( var logger = log.NewFromGlobal(log.AddContext("pkg", "fragment_chain"), log.SetLevel(log.Debug)) -type CandidateState int +type CandidateState byte const ( Seconded CandidateState = iota @@ -33,10 +33,8 @@ type CandidateEntry struct { parentHeadDataHash common.Hash outputHeadDataHash common.Hash relayParent common.Hash - // TODO: this is under a Arc smart pointer, should we - // have that here? maybe some specialized struct that protects the underlying data? - candidate inclusionemulator.ProspectiveCandidate - state CandidateState + candidate *inclusionemulator.ProspectiveCandidate + state CandidateState } func (c *CandidateEntry) Hash() parachaintypes.CandidateHash { @@ -51,7 +49,7 @@ func NewCandidateEntry( ) (*CandidateEntry, error) { pvdHash, err := persistedValidationData.Hash() if err != nil { - return nil, fmt.Errorf("while hashing persisted validation data: %w", err) + return nil, fmt.Errorf("hashing persisted validation data: %w", err) } if pvdHash != candidate.Descriptor.PersistedValidationDataHash { @@ -60,12 +58,12 @@ func NewCandidateEntry( parentHeadDataHash, err := persistedValidationData.ParentHead.Hash() if err != nil { - return nil, fmt.Errorf("while hashing parent head data: %w", err) + return nil, fmt.Errorf("hashing parent head data: %w", err) } outputHeadDataHash, err := candidate.Commitments.HeadData.Hash() if err != nil { - return nil, fmt.Errorf("while hashing output head data: %w", err) + return nil, 
fmt.Errorf("hashing output head data: %w", err) } if parentHeadDataHash == outputHeadDataHash { @@ -78,7 +76,7 @@ func NewCandidateEntry( outputHeadDataHash: outputHeadDataHash, relayParent: candidate.Descriptor.RelayParent, state: state, - candidate: inclusionemulator.ProspectiveCandidate{ + candidate: &inclusionemulator.ProspectiveCandidate{ Commitments: candidate.Commitments, PersistedValidationData: persistedValidationData, PoVHash: candidate.Descriptor.PovHash, @@ -91,8 +89,8 @@ func NewCandidateEntry( // their relay-parents and their backing states. This does not assume any restriction on whether // or not candidates form a chain. Useful for storing all kinds of candidates. type CandidateStorage struct { - byParentHead map[common.Hash]map[parachaintypes.CandidateHash]any - byOutputHead map[common.Hash]map[parachaintypes.CandidateHash]any + byParentHead map[common.Hash]map[parachaintypes.CandidateHash]struct{} + byOutputHead map[common.Hash]map[parachaintypes.CandidateHash]struct{} byCandidateHash map[parachaintypes.CandidateHash]*CandidateEntry } @@ -100,14 +98,14 @@ func (c *CandidateStorage) Clone() *CandidateStorage { clone := NewCandidateStorage() for parentHead, candidates := range c.byParentHead { - clone.byParentHead[parentHead] = make(map[parachaintypes.CandidateHash]any) + clone.byParentHead[parentHead] = make(map[parachaintypes.CandidateHash]struct{}) for candidateHash := range candidates { clone.byParentHead[parentHead][candidateHash] = struct{}{} } } for outputHead, candidates := range c.byOutputHead { - clone.byOutputHead[outputHead] = make(map[parachaintypes.CandidateHash]any) + clone.byOutputHead[outputHead] = make(map[parachaintypes.CandidateHash]struct{}) for candidateHash := range candidates { clone.byOutputHead[outputHead][candidateHash] = struct{}{} } @@ -119,13 +117,8 @@ func (c *CandidateStorage) Clone() *CandidateStorage { parentHeadDataHash: entry.parentHeadDataHash, outputHeadDataHash: entry.outputHeadDataHash, relayParent: 
entry.relayParent, - candidate: inclusionemulator.ProspectiveCandidate{ - Commitments: entry.candidate.Commitments, - PersistedValidationData: entry.candidate.PersistedValidationData, - PoVHash: entry.candidate.PoVHash, - ValidationCodeHash: entry.candidate.ValidationCodeHash, - }, - state: entry.state, + candidate: entry.candidate, + state: entry.state, } } @@ -134,8 +127,8 @@ func (c *CandidateStorage) Clone() *CandidateStorage { func NewCandidateStorage() *CandidateStorage { return &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } } @@ -150,7 +143,11 @@ func (c *CandidateStorage) AddPendingAvailabilityCandidate( return err } - return c.addCandidateEntry(entry) + if err := c.addCandidateEntry(entry); err != nil { + return fmt.Errorf("adding pending availability candidate: %w", err) + } + + return nil } // Len return the number of stored candidate @@ -169,7 +166,7 @@ func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { // exists in the map but pointing to a nil map setOfCandidates := c.byParentHead[candidate.parentHeadDataHash] if setOfCandidates == nil { - setOfCandidates = make(map[parachaintypes.CandidateHash]any) + setOfCandidates = make(map[parachaintypes.CandidateHash]struct{}) } setOfCandidates[candidate.candidateHash] = struct{}{} c.byParentHead[candidate.parentHeadDataHash] = setOfCandidates @@ -177,7 +174,7 @@ func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { // udpates the reference output hash -> candidate setOfCandidates = c.byOutputHead[candidate.outputHeadDataHash] if setOfCandidates == nil { - setOfCandidates = 
make(map[parachaintypes.CandidateHash]any) + setOfCandidates = make(map[parachaintypes.CandidateHash]struct{}) } setOfCandidates[candidate.candidateHash] = struct{}{} c.byOutputHead[candidate.outputHeadDataHash] = setOfCandidates @@ -218,11 +215,6 @@ func (c *CandidateStorage) markBacked(candidateHash parachaintypes.CandidateHash entry.state = Backed } -func (c *CandidateStorage) contains(candidateHash parachaintypes.CandidateHash) bool { - _, ok := c.byCandidateHash[candidateHash] - return ok -} - // candidates returns an iterator over references to the stored candidates, in arbitrary order. func (c *CandidateStorage) candidates() iter.Seq[*CandidateEntry] { return func(yield func(*CandidateEntry) bool) { @@ -564,8 +556,9 @@ func (f *FragmentChain) UnconnectedLen() int { return f.unconnected.Len() } -func (f *FragmentChain) ContainsUnconnectedCandidate(candidate parachaintypes.CandidateHash) bool { - return f.unconnected.contains(candidate) +func (f *FragmentChain) ContainsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { + _, ok := f.unconnected.byCandidateHash[candidateHash] + return ok } // BestChainVec returns a vector of the chain's candidate hashes, in-order. 
@@ -624,7 +617,9 @@ func (f *FragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.Cand // CanAddCandidateAsPotential checks if this candidate could be added in the future func (f *FragmentChain) CanAddCandidateAsPotential(entry *CandidateEntry) error { candidateHash := entry.candidateHash - if f.bestChain.Contains(candidateHash) || f.unconnected.contains(candidateHash) { + + _, existsInCandidateStorage := f.unconnected.byCandidateHash[candidateHash] + if f.bestChain.Contains(candidateHash) || existsInCandidateStorage { return ErrCandidateAlreadyKnown } diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index cdae6a435a..48bcb51974 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -20,8 +20,8 @@ import ( func TestCandidateStorage_RemoveCandidate(t *testing.T) { storage := &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } @@ -37,8 +37,8 @@ func TestCandidateStorage_RemoveCandidate(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} 
storage.removeCandidate(candidateHash) @@ -54,8 +54,8 @@ func TestCandidateStorage_RemoveCandidate(t *testing.T) { func TestCandidateStorage_MarkBacked(t *testing.T) { storage := &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } @@ -71,8 +71,8 @@ func TestCandidateStorage_MarkBacked(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} storage.markBacked(candidateHash) @@ -88,8 +88,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { "find_head_data_of_first_candidate_using_output_head_data_hash": { setup: func() *CandidateStorage { storage := &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } @@ -102,7 +102,7 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - candidate: inclusionemulator.ProspectiveCandidate{ + 
candidate: &inclusionemulator.ProspectiveCandidate{ Commitments: parachaintypes.CandidateCommitments{ HeadData: headData, }, @@ -110,8 +110,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} return storage }, @@ -121,8 +121,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { "find_head_data_using_parent_head_data_hash_from_second_candidate": { setup: func() *CandidateStorage { storage := &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } @@ -135,7 +135,7 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - candidate: inclusionemulator.ProspectiveCandidate{ + candidate: &inclusionemulator.ProspectiveCandidate{ PersistedValidationData: parachaintypes.PersistedValidationData{ ParentHead: headData, }, @@ -143,8 +143,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]any{candidateHash: 
struct{}{}} + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} return storage }, @@ -154,8 +154,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { "use_nonexistent_hash_and_should_get_nil": { setup: func() *CandidateStorage { storage := &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } return storage @@ -166,8 +166,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { "insert_0_candidates_and_try_to_find_but_should_get_nil": { setup: func() *CandidateStorage { return &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } }, @@ -195,8 +195,8 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { "insert_2_candidates_for_same_parent_one_seconded_one_backed": { setup: func() *CandidateStorage { storage := &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: 
make(map[parachaintypes.CandidateHash]*CandidateEntry), } @@ -223,7 +223,7 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { storage.byCandidateHash[candidateHash1] = entry1 storage.byCandidateHash[candidateHash2] = entry2 - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]any{ + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{ candidateHash1: struct{}{}, candidateHash2: struct{}{}, } @@ -236,8 +236,8 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { "insert_nothing_and_call_function_should_return_nothing": { setup: func() *CandidateStorage { return &CandidateStorage{ - byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), - byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]any), + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), } }, @@ -772,7 +772,8 @@ func TestCandidateStorageMethods(t *testing.T) { t.Run("add_candidate_entry_as_seconded", func(t *testing.T) { err = storage.addCandidateEntry(entry) require.NoError(t, err) - require.True(t, storage.contains(candidateHash)) + _, ok := storage.byCandidateHash[candidateHash] + require.True(t, ok) // should not have any possible backed candidate yet for entry := range storage.possibleBackedParaChildren(parentHeadHash) { @@ -817,7 +818,8 @@ func TestCandidateStorageMethods(t *testing.T) { // remove it twice should be fine storage.removeCandidate(candidateHash) - require.False(t, storage.contains(candidateHash)) + _, ok := storage.byCandidateHash[candidateHash] + require.False(t, ok) // should not have any possible backed candidate anymore for entry := range storage.possibleBackedParaChildren(parentHeadHash) { @@ -854,7 +856,9 @@ func TestCandidateStorageMethods(t *testing.T) { storage := 
NewCandidateStorage() err = storage.AddPendingAvailabilityCandidate(candidateHash, candidate, pvd) require.NoError(t, err) - require.True(t, storage.contains(candidateHash)) + + _, ok := storage.byCandidateHash[candidateHash] + require.True(t, ok) // here we should have 1 possible backed candidate when we // use the parentHeadHash (parent of our current candidate) to query diff --git a/dot/parachain/types/async_backing.go b/dot/parachain/types/async_backing.go index 876ac1aa0d..967f404a93 100644 --- a/dot/parachain/types/async_backing.go +++ b/dot/parachain/types/async_backing.go @@ -66,7 +66,7 @@ type Constraints struct { // The expected validation-code-hash of this parachain. ValidationCodeHash ValidationCodeHash // The code upgrade restriction signal as-of this parachain. - UpgradeRestriction UpgradeRestriction + UpgradeRestriction *UpgradeRestriction // The future validation code hash, if any, and at what relay-parent // number the upgrade would be minimally applied. FutureValidationCode *FutureValidationCode diff --git a/dot/parachain/types/types.go b/dot/parachain/types/types.go index e14a72850e..bcbd370e98 100644 --- a/dot/parachain/types/types.go +++ b/dot/parachain/types/types.go @@ -747,18 +747,53 @@ type Subsystem interface { Stop() } +type Present struct{} + // UpgradeRestriction a possible restriction that prevents a parachain // from performing an upgrade // TODO: should be scale encoded/decoded -type UpgradeRestriction interface { - isUpgradeRestriction() +type UpgradeRestriction struct { + inner any } -var _ UpgradeRestriction = (*Present)(nil) +type UpgradeRestrictionValues interface { + Present +} -type Present struct{} +func setMyVaryingDataType[Value UpgradeRestrictionValues](mvdt *UpgradeRestriction, value Value) { + mvdt.inner = value +} -func (*Present) isUpgradeRestriction() {} +func (mvdt *UpgradeRestriction) SetValue(value any) (err error) { + switch value := value.(type) { + case Present: + setMyVaryingDataType(mvdt, value) + return + 
default: + return fmt.Errorf("unsupported type") + } +} + +func (mvdt UpgradeRestriction) IndexValue() (index uint, value any, err error) { + switch mvdt.inner.(type) { + case Present: + return 0, mvdt.inner, nil + } + return 0, nil, scale.ErrUnsupportedVaryingDataTypeValue +} + +func (mvdt UpgradeRestriction) Value() (value any, err error) { + _, value, err = mvdt.IndexValue() + return +} + +func (mvdt UpgradeRestriction) ValueAt(index uint) (value any, err error) { + switch index { + case 0: + return Present{}, nil + } + return nil, scale.ErrUnknownVaryingDataTypeValue +} // CandidateHashAndRelayParent is a pair of candidate hash and relay parent hash type CandidateHashAndRelayParent struct { diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index e23d73fa26..df71b75690 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -34,31 +34,31 @@ func (e *ErrDisallowedHrmpWatermark) Error() string { } type ErrNoSuchHrmpChannel struct { - paraId parachaintypes.ParaID + paraID parachaintypes.ParaID } func (e *ErrNoSuchHrmpChannel) Error() string { - return fmt.Sprintf("NoSuchHrmpChannel(ParaId: %d)", e.paraId) + return fmt.Sprintf("NoSuchHrmpChannel(ParaId: %d)", e.paraID) } type ErrHrmpMessagesOverflow struct { - paraId parachaintypes.ParaID + paraID parachaintypes.ParaID messagesRemaining uint32 messagesSubmitted uint32 } func (e *ErrHrmpMessagesOverflow) Error() string { - return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", e.paraId, e.messagesRemaining, e.messagesSubmitted) + return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", e.paraID, e.messagesRemaining, e.messagesSubmitted) } type ErrHrmpBytesOverflow struct { - paraId parachaintypes.ParaID + paraID parachaintypes.ParaID bytesRemaining uint32 
bytesSubmitted uint32 } func (e *ErrHrmpBytesOverflow) Error() string { - return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", e.paraId, e.bytesRemaining, e.bytesSubmitted) + return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", e.paraID, e.bytesRemaining, e.bytesSubmitted) } type ErrUmpMessagesOverflow struct { @@ -181,13 +181,13 @@ func CheckModifications(c *parachaintypes.Constraints, modifications *Constraint for id, outboundHrmpMod := range modifications.OutboundHrmp { outbound, ok := c.HrmpChannelsOut[id] if !ok { - return &ErrNoSuchHrmpChannel{paraId: id} + return &ErrNoSuchHrmpChannel{paraID: id} } _, overflow := math.SafeSub(uint64(outbound.BytesRemaining), uint64(outboundHrmpMod.BytesSubmitted)) if overflow { return &ErrHrmpBytesOverflow{ - paraId: id, + paraID: id, bytesRemaining: outbound.BytesRemaining, bytesSubmitted: outboundHrmpMod.BytesSubmitted, } @@ -196,7 +196,7 @@ func CheckModifications(c *parachaintypes.Constraints, modifications *Constraint _, overflow = math.SafeSub(uint64(outbound.MessagesRemaining), uint64(outboundHrmpMod.MessagesSubmitted)) if overflow { return &ErrHrmpMessagesOverflow{ - paraId: id, + paraID: id, messagesRemaining: outbound.MessagesRemaining, messagesSubmitted: outboundHrmpMod.MessagesSubmitted, } @@ -270,7 +270,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint if outboundHrmpMod.BytesSubmitted > outbound.BytesRemaining { return nil, &ErrHrmpBytesOverflow{ - paraId: id, + paraID: id, bytesRemaining: outbound.BytesRemaining, bytesSubmitted: outboundHrmpMod.BytesSubmitted, } @@ -278,7 +278,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint if outboundHrmpMod.MessagesSubmitted > outbound.MessagesRemaining { return nil, &ErrHrmpMessagesOverflow{ - paraId: id, + paraID: id, messagesRemaining: outbound.MessagesRemaining, messagesSubmitted: outboundHrmpMod.MessagesSubmitted, } @@ 
-383,13 +383,7 @@ func (cm *ConstraintModifications) Clone() *ConstraintModifications { // any constraints and yield the exact same result. func NewConstraintModificationsIdentity() *ConstraintModifications { return &ConstraintModifications{ - RequiredParent: nil, - HrmpWatermark: nil, - OutboundHrmp: make(map[parachaintypes.ParaID]OutboundHrmpChannelModification), - UmpMessagesSent: 0, - UmpBytesSent: 0, - DmpMessagesProcessed: 0, - CodeUpgradeApplied: false, + OutboundHrmp: make(map[parachaintypes.ParaID]OutboundHrmpChannelModification), } } @@ -427,7 +421,7 @@ func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { type Fragment struct { relayParent *RelayChainBlockInfo operatingConstraints *parachaintypes.Constraints - candidate ProspectiveCandidate + candidate *ProspectiveCandidate modifications *ConstraintModifications } @@ -435,7 +429,7 @@ func (f *Fragment) RelayParent() *RelayChainBlockInfo { return f.relayParent } -func (f *Fragment) Candidate() ProspectiveCandidate { +func (f *Fragment) Candidate() *ProspectiveCandidate { return f.candidate } @@ -451,7 +445,8 @@ func (f *Fragment) ConstraintModifications() *ConstraintModifications { func NewFragment( relayParent *RelayChainBlockInfo, operatingConstraints *parachaintypes.Constraints, - candidate ProspectiveCandidate) (*Fragment, error) { + candidate *ProspectiveCandidate) (*Fragment, error) { + modifications, err := CheckAgainstConstraints( relayParent, operatingConstraints, @@ -603,7 +598,12 @@ func validateAgainstConstraints( } if commitments.NewValidationCode != nil { - switch constraints.UpgradeRestriction.(type) { + restriction, err := constraints.UpgradeRestriction.Value() + if err != nil { + return fmt.Errorf("while getting upgrade restriction: %w", err) + } + + switch restriction.(type) { case *parachaintypes.Present: return ErrCodeUpgradeRestricted } From e55b6ce3fc84be8fa9ba4d8f117c15220dc5fc84 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 5 Dec 2024 20:17:32 
-0400 Subject: [PATCH 16/31] chore: test snakecase --- .../fragment-chain/fragment_chain_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 48bcb51974..1fb469b982 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -264,7 +264,7 @@ func TestEarliestRelayParent(t *testing.T) { setup func() *Scope expect inclusionemulator.RelayChainBlockInfo }{ - "returns from ancestors": { + "returns_from_ancestors": { setup: func() *Scope { relayParent := inclusionemulator.RelayChainBlockInfo{ Hash: common.Hash{0x01}, @@ -290,7 +290,7 @@ func TestEarliestRelayParent(t *testing.T) { Number: 9, }, }, - "returns relayParent": { + "returns_relayParent": { setup: func() *Scope { relayParent := inclusionemulator.RelayChainBlockInfo{ Hash: common.Hash{0x01}, From 2f60868440bd90aa0e179006db05c674468ffdc8 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 6 Dec 2024 10:01:09 -0400 Subject: [PATCH 17/31] wip: fixing failing tests --- .../fragment-chain/fragment_chain_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 1fb469b982..1a0eba0435 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "maps" - "math/rand/v2" + "math/rand" "slices" "testing" @@ -2101,7 +2101,8 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { require.Equal(t, hashes(0, 5), chain.FindBackableChain(make(Ancestors), 5)) for count := 6; count < 10; count++ { - 
require.Equal(t, hashes(0, 6), chain.FindBackableChain(make(Ancestors), uint32(count))) + backableChain := chain.FindBackableChain(make(Ancestors), uint32(count)) + require.Equal(t, hashes(0, 6), backableChain) } // ancestors which is not part of the chain will be ignored From 041f161dfc9a76032a1d19f0972e80660dc7b212 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 9 Dec 2024 13:48:41 -0400 Subject: [PATCH 18/31] chore: fix `TestScopeOnlyTakesAncestorsUpToMin` test --- .../fragment-chain/fragment_chain_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 1a0eba0435..40ee836627 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -671,7 +671,7 @@ func TestScopeOnlyTakesAncestorsUpToMin(t *testing.T) { } maxDepth := uint(2) - baseConstraints := makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}) + baseConstraints := makeConstraints(3, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}) pendingAvailability := make([]*PendingAvailability, 0) scope, err := NewScopeWithAncestors(relayParent, baseConstraints, pendingAvailability, maxDepth, ancestors) From 082bb9c3668410bbc9d6fa9a2786fa84320c2651 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 9 Dec 2024 13:56:29 -0400 Subject: [PATCH 19/31] chore: loop until maxDepth + 1 --- .../prospective-parachains/fragment-chain/fragment_chain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 422a26ce3e..9279079fcf 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ 
b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -1012,7 +1012,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { return } - for len(f.bestChain.chain) < int(f.scope.maxDepth) { + for len(f.bestChain.chain) < int(f.scope.maxDepth)+1 { childConstraints, err := inclusionemulator.ApplyModifications( f.scope.baseConstraints, cumulativeModifications) if err != nil { From 7d716c7f884022de122a6b6c529b72563be2ee9a Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 9 Dec 2024 16:34:31 -0400 Subject: [PATCH 20/31] chore: gofmt --- .../fragment-chain/fragment_chain_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 40ee836627..22216ee6dd 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -37,8 +37,8 @@ func TestCandidateStorage_RemoveCandidate(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} storage.removeCandidate(candidateHash) @@ -71,8 +71,8 @@ func TestCandidateStorage_MarkBacked(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + 
storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} storage.markBacked(candidateHash) @@ -110,8 +110,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} return storage }, @@ -143,8 +143,8 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { } storage.byCandidateHash[candidateHash] = entry - storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} - storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: struct{}{}} + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} + storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}} return storage }, @@ -224,8 +224,8 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { storage.byCandidateHash[candidateHash1] = entry1 storage.byCandidateHash[candidateHash2] = entry2 storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{ - candidateHash1: struct{}{}, - candidateHash2: struct{}{}, + candidateHash1: {}, + candidateHash2: {}, } return storage From 1dfeb76607dbf5eb93ee6bdc98bd50023dd35bed Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 9 Dec 2024 16:45:37 -0400 Subject: [PATCH 21/31] chore: address lint warns --- .../fragment-chain/errors.go | 8 ++--- 
.../fragment-chain/fragment_chain.go | 4 +-- .../fragment-chain/fragment_chain_test.go | 29 ++++++++++++++----- .../inclusion-emulator/inclusion_emulator.go | 26 ++++++++++------- 4 files changed, 44 insertions(+), 23 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/errors.go b/dot/parachain/prospective-parachains/fragment-chain/errors.go index d0ed7f8000..ec63fcc228 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/errors.go +++ b/dot/parachain/prospective-parachains/fragment-chain/errors.go @@ -10,13 +10,13 @@ import ( var ( ErrCandidateAlreadyKnown = errors.New("candidate already known") - ErrZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") + ErrZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") //nolint:lll ErrCycle = errors.New("candidate would introduce a cycle") ErrMultiplePaths = errors.New("candidate would introduce two paths to the same output state") - ErrIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. It should first be introduced as Seconded") + ErrIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. 
It should first be introduced as Seconded") //nolint:lll ErrParentCandidateNotFound = errors.New("could not find parent of the candidate") - ErrRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") - ErrPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") + ErrRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") //nolint:lll + ErrPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") //nolint:lll ) type ErrRelayParentPrecedesCandidatePendingAvailability struct { diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 9279079fcf..7da10cc552 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -529,7 +529,7 @@ func (f *FragmentChain) PopulateFromPrevious(prevFragmentChain *FragmentChain) { // availability candidates will always be part of the best chain pending := prevFragmentChain.scope.GetPendingAvailability(candidate.candidateHash) if pending == nil { - prevStorage.addCandidateEntry(NewCandidateEntryFromFragment(candidate)) + _ = prevStorage.addCandidateEntry(NewCandidateEntryFromFragment(candidate)) } } @@ -1034,7 +1034,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { // 2. parent hash is correct // 3. relay parent does not move backwards // 4. all non-pending-availability candidates have relay-parent in the scope - // 5. candidate outputs fulfill constraints + // 5. 
candidate outputs fulfil constraints var relayParent *inclusionemulator.RelayChainBlockInfo var minRelayParent uint diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 22216ee6dd..2147e02b9d 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -230,8 +230,12 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { return storage }, - hash: common.Hash{4, 5, 6}, - expected: []*CandidateEntry{{candidateHash: parachaintypes.CandidateHash{Value: common.Hash{10, 11, 12}}, parentHeadDataHash: common.Hash{4, 5, 6}, outputHeadDataHash: common.Hash{13, 14, 15}, state: Backed}}, + hash: common.Hash{4, 5, 6}, + expected: []*CandidateEntry{{candidateHash: parachaintypes.CandidateHash{ + Value: common.Hash{10, 11, 12}}, + parentHeadDataHash: common.Hash{4, 5, 6}, + outputHeadDataHash: common.Hash{13, 14, 15}, state: Backed}, + }, }, "insert_nothing_and_call_function_should_return_nothing": { setup: func() *CandidateStorage { @@ -638,7 +642,12 @@ func TestScopeRejectsAncestors(t *testing.T) { for name, tt := range tests { tt := tt t.Run(name, func(t *testing.T) { - scope, err := NewScopeWithAncestors(*tt.relayParent, tt.baseConstraints, tt.pendingAvailability, tt.maxDepth, tt.ancestors) + scope, err := NewScopeWithAncestors( + *tt.relayParent, + tt.baseConstraints, + tt.pendingAvailability, + tt.maxDepth, + tt.ancestors) require.ErrorIs(t, err, tt.expectedError) require.Nil(t, scope) }) @@ -1002,7 +1011,10 @@ func TestPopulateAndCheckPotential(t *testing.T) { // helper function to hash the candidate and add its entry // into the candidate storage hashAndInsertCandididate := func(t *testing.T, storage *CandidateStorage, - candidate parachaintypes.CommittedCandidateReceipt, pvd parachaintypes.PersistedValidationData, state 
CandidateState) (parachaintypes.CandidateHash, *CandidateEntry) { + candidate parachaintypes.CommittedCandidateReceipt, + pvd parachaintypes.PersistedValidationData, state CandidateState) ( + parachaintypes.CandidateHash, *CandidateEntry) { + hash, err := candidate.Hash() require.NoError(t, err) candidateHash := parachaintypes.CandidateHash{Value: hash} @@ -1059,7 +1071,8 @@ func TestPopulateAndCheckPotential(t *testing.T) { wrongConstraints := []parachaintypes.Constraints{ // define a constraint that requires a parent head data // that is different from candidate A parent head - *makeConstraints(relayParentAInfo.Number, []uint{relayParentAInfo.Number}, parachaintypes.HeadData{Data: []byte{0x0e}}), + *makeConstraints(relayParentAInfo.Number, + []uint{relayParentAInfo.Number}, parachaintypes.HeadData{Data: []byte{0x0e}}), // the min relay parent for candidate A is wrong *makeConstraints(relayParentBInfo.Number, []uint{0}, firstParachainHead), @@ -1081,7 +1094,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // if the min relay parent is wrong, candidate A can never become valid, otherwise // if only the required parent doesnt match, candidate A still a potential candidate if wrongConstraint.MinRelayParentNumber == relayParentBInfo.Number { - // if A is not a potential candidate, its decendants will also not be added. + // if A is not a potential candidate, its descendants will also not be added. 
require.Equal(t, chain.UnconnectedLen(), 0) err := chain.CanAddCandidateAsPotential(candidateAEntry) require.ErrorIs(t, err, ErrRelayParentNotInScope{ @@ -1803,7 +1816,9 @@ func TestPopulateAndCheckPotential(t *testing.T) { // candidates A2, B2 will now be trimmed chain := populateFromPreviousStorage(scope, storage) - require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + require.Equal(t, + []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, + chain.BestChainVec()) unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) for unconnected := range chain.Unconnected() { diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go index df71b75690..70260007a1 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/util/inclusion-emulator/inclusion_emulator.go @@ -17,7 +17,6 @@ import ( // without pinning it to a particular session. For example, commitments are // represented here, but the erasure-root is not. This means that, prospective // candidates are not correlated to any session in particular. -// TODO: should we have a specialized struct to simulate an Arc? 
type ProspectiveCandidate struct { Commitments parachaintypes.CandidateCommitments PersistedValidationData parachaintypes.PersistedValidationData @@ -48,7 +47,8 @@ type ErrHrmpMessagesOverflow struct { } func (e *ErrHrmpMessagesOverflow) Error() string { - return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", e.paraID, e.messagesRemaining, e.messagesSubmitted) + return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", + e.paraID, e.messagesRemaining, e.messagesSubmitted) } type ErrHrmpBytesOverflow struct { @@ -58,7 +58,8 @@ type ErrHrmpBytesOverflow struct { } func (e *ErrHrmpBytesOverflow) Error() string { - return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", e.paraID, e.bytesRemaining, e.bytesSubmitted) + return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", + e.paraID, e.bytesRemaining, e.bytesSubmitted) } type ErrUmpMessagesOverflow struct { @@ -67,7 +68,8 @@ type ErrUmpMessagesOverflow struct { } func (e *ErrUmpMessagesOverflow) Error() string { - return fmt.Sprintf("UmpMessagesOverflow(MessagesRemaining: %d, MessagesSubmitted: %d)", e.messagesRemaining, e.messagesSubmitted) + return fmt.Sprintf("UmpMessagesOverflow(MessagesRemaining: %d, MessagesSubmitted: %d)", + e.messagesRemaining, e.messagesSubmitted) } type ErrUmpBytesOverflow struct { @@ -85,7 +87,8 @@ type ErrDmpMessagesUnderflow struct { } func (e *ErrDmpMessagesUnderflow) Error() string { - return fmt.Sprintf("DmpMessagesUnderflow(MessagesRemaining: %d, MessagesProcessed: %d)", e.messagesRemaining, e.messagesProcessed) + return fmt.Sprintf("DmpMessagesUnderflow(MessagesRemaining: %d, MessagesProcessed: %d)", + e.messagesRemaining, e.messagesProcessed) } var ( @@ -144,7 +147,8 @@ type ErrUmpMessagesPerCandidateOverflow struct { } func (e *ErrUmpMessagesPerCandidateOverflow) Error() string { - return 
fmt.Sprintf("UmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", e.messagesAllowed, e.messagesSubmitted) + return fmt.Sprintf("UmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", + e.messagesAllowed, e.messagesSubmitted) } type ErrHrmpMessagesPerCandidateOverflow struct { @@ -153,7 +157,8 @@ type ErrHrmpMessagesPerCandidateOverflow struct { } func (e *ErrHrmpMessagesPerCandidateOverflow) Error() string { - return fmt.Sprintf("HrmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", e.messagesAllowed, e.messagesSubmitted) + return fmt.Sprintf("HrmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", + e.messagesAllowed, e.messagesSubmitted) } type ErrHrmpMessagesDescendingOrDuplicate struct { @@ -440,7 +445,7 @@ func (f *Fragment) ConstraintModifications() *ConstraintModifications { // NewFragment creates a new Fragment. This fails if the fragment isnt in line // with the operating constraints. That is, either its inputs or outputs fail // checks against the constraints. -// This does not check that the collator signature is valid or wheter the PoV is +// This does not check that the collator signature is valid or whether the PoV is // small enough. 
func NewFragment( relayParent *RelayChainBlockInfo, @@ -556,7 +561,8 @@ func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[para return } } - return + + return //nolint:staticcheck } } } @@ -573,7 +579,7 @@ func validateAgainstConstraints( ParentHead: constraints.RequiredParent, RelayParentNumber: uint32(relayParent.Number), RelayParentStorageRoot: relayParent.StorageRoot, - MaxPovSize: uint32(constraints.MaxPoVSize), + MaxPovSize: constraints.MaxPoVSize, } if !expectedPVD.Equal(persistedValidationData) { From ba8ede8762dab65e023f906be6c2ac51e105ce90 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 10 Dec 2024 09:17:21 -0400 Subject: [PATCH 22/31] chore: removed `Unconnected` method --- .../fragment-chain/fragment_chain.go | 5 ----- .../fragment-chain/fragment_chain_test.go | 18 +++++++++--------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 7da10cc552..e3680de4ea 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -570,11 +570,6 @@ func (f *FragmentChain) BestChainVec() (hashes []parachaintypes.CandidateHash) { return hashes } -// Unconnected returns a vector of the unconnected potential candidate hashes, in arbitrary order. 
-func (f *FragmentChain) Unconnected() iter.Seq[*CandidateEntry] { - return f.unconnected.candidates() -} - func (f *FragmentChain) IsCandidateBacked(hash parachaintypes.CandidateHash) bool { if f.bestChain.Contains(hash) { return true diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go index 2147e02b9d..a1bb331da4 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go @@ -1109,7 +1109,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) } else { potentials := make([]parachaintypes.CandidateHash, 0) - for unconnected := range chain.Unconnected() { + for _, unconnected := range chain.unconnected.byCandidateHash { potentials = append(potentials, unconnected.candidateHash) } @@ -1179,7 +1179,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // Check that the unconnected candidates are as expected unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) - for unconnected := range chain.Unconnected() { + for _, unconnected := range chain.unconnected.byCandidateHash { unconnectedHashes[unconnected.candidateHash] = struct{}{} } @@ -1348,7 +1348,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec()) unconnected := make(map[parachaintypes.CandidateHash]struct{}) - for entry := range chain.Unconnected() { + for _, entry := range chain.unconnected.byCandidateHash { unconnected[entry.candidateHash] = struct{}{} } @@ -1659,7 +1659,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) - for unconnected := range 
chain.Unconnected() { + for _, unconnected := range chain.unconnected.byCandidateHash { unconnectedHashes[unconnected.candidateHash] = struct{}{} } @@ -1684,7 +1684,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // candidate F is kept as it was truly unconnected. The rest will be trimmed unconnected := map[parachaintypes.CandidateHash]struct{}{} - for entry := range chain.Unconnected() { + for _, entry := range chain.unconnected.byCandidateHash { unconnected[entry.candidateHash] = struct{}{} } @@ -1766,7 +1766,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, chain.BestChainVec()) unconnected := make(map[parachaintypes.CandidateHash]struct{}) - for entry := range chain.Unconnected() { + for _, entry := range chain.unconnected.byCandidateHash { unconnected[entry.candidateHash] = struct{}{} } @@ -1790,7 +1790,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) unconnected := make(map[parachaintypes.CandidateHash]struct{}) - for entry := range chain.Unconnected() { + for _, entry := range chain.unconnected.byCandidateHash { unconnected[entry.candidateHash] = struct{}{} } require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ @@ -1821,7 +1821,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { chain.BestChainVec()) unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) - for unconnected := range chain.Unconnected() { + for _, unconnected := range chain.unconnected.byCandidateHash { unconnectedHashes[unconnected.candidateHash] = struct{}{} } @@ -1845,7 +1845,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, []parachaintypes.CandidateHash{candidateDHash}, chain.BestChainVec()) unconnectedHashes = make(map[parachaintypes.CandidateHash]struct{}) - for unconnected := range chain.Unconnected() { + for _, unconnected := range chain.unconnected.byCandidateHash { 
unconnectedHashes[unconnected.candidateHash] = struct{}{} } From ef35ba9cd7b33538f295976d4e902697e236f355 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 10 Dec 2024 09:41:08 -0400 Subject: [PATCH 23/31] chore: remove `candidates` method --- .../fragment-chain/fragment_chain.go | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index e3680de4ea..9e180c4ea6 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -23,6 +23,8 @@ const ( Backed ) +// forkSelectionRule compares 2 candidate hashes, the result will be +// 0 if hash1 == hash2, -1 if hash1 < hash2, and +1 if hash1 > hash2 func forkSelectionRule(hash1, hash2 parachaintypes.CandidateHash) int { return bytes.Compare(hash1.Value[:], hash2.Value[:]) } @@ -144,7 +146,7 @@ func (c *CandidateStorage) AddPendingAvailabilityCandidate( } if err := c.addCandidateEntry(entry); err != nil { - return fmt.Errorf("adding pending availability candidate: %w", err) + return fmt.Errorf("adding candidate entry: %w", err) } return nil @@ -155,6 +157,10 @@ func (c *CandidateStorage) Len() int { return len(c.byCandidateHash) } +// addCandidateEntry inserts a new entry in the storage map, where the candidate hash +// is the key and the *CandidateEntry is the value, also it create other links, the +// parent head hash points to the candidate hash also the output head hash points to the +// candidate hash func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { _, ok := c.byCandidateHash[candidate.candidateHash] if ok { @@ -183,6 +189,9 @@ func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { return nil } +// removeCandidate removes the candidate entry from the storage based on candidateHash +// 
it also removes the parent head hash entry that points to candidateHash and +// removes the output head hash entry that points to candidateHash func (c *CandidateStorage) removeCandidate(candidateHash parachaintypes.CandidateHash) { entry, ok := c.byCandidateHash[candidateHash] if !ok { @@ -215,17 +224,6 @@ func (c *CandidateStorage) markBacked(candidateHash parachaintypes.CandidateHash entry.state = Backed } -// candidates returns an iterator over references to the stored candidates, in arbitrary order. -func (c *CandidateStorage) candidates() iter.Seq[*CandidateEntry] { - return func(yield func(*CandidateEntry) bool) { - for _, entry := range c.byCandidateHash { - if !yield(entry) { - return - } - } - } -} - func (c *CandidateStorage) headDataByHash(hash common.Hash) *parachaintypes.HeadData { // first, search for candidates outputting this head data and extract the head data // from their commitments if they exist. From 6e281642bc70d6e3369c9dd45120856266e3f649 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Tue, 10 Dec 2024 12:08:55 -0400 Subject: [PATCH 24/31] chore: added logs --- .../fragment-chain/fragment_chain.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go index 9e180c4ea6..e8b1bb326f 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go @@ -1009,8 +1009,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { childConstraints, err := inclusionemulator.ApplyModifications( f.scope.baseConstraints, cumulativeModifications) if err != nil { - // TODO: include logger - fmt.Println("failed to apply modifications:", err) + logger.Warnf("failed to apply modifications: %s", err.Error()) break } @@ -1051,7 +1050,7 @@ func (f *FragmentChain) populateChain(storage 
*CandidateStorage) { } if err := f.checkCyclesOrInvalidTree(candidateEntry.outputHeadDataHash); err != nil { - fmt.Println("checking cycle or invalid tree:", err) + logger.Warnf("failed while checking cycle or invalid tree: %s", err.Error()) continue } @@ -1080,7 +1079,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { fragment, err := inclusionemulator.NewFragment(relayParent, constraints, candidateEntry.candidate) if err != nil { - fmt.Println("failed to create fragment:", err) + logger.Warnf("failed to create fragment: %s", err.Error()) continue } From 30062cf6b6fee71bb29fcb425176483d8d19ec90 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 12 Dec 2024 10:22:53 -0400 Subject: [PATCH 25/31] chore: address comments --- .../{fragment-chain => }/errors.go | 2 +- .../{fragment-chain => }/fragment_chain.go | 297 ++++++------ .../fragment_chain_test.go | 427 +++++++++--------- .../inclusion_emulator.go | 17 +- dot/parachain/types/async_backing.go | 10 +- 5 files changed, 369 insertions(+), 384 deletions(-) rename dot/parachain/prospective-parachains/{fragment-chain => }/errors.go (98%) rename dot/parachain/prospective-parachains/{fragment-chain => }/fragment_chain.go (81%) rename dot/parachain/prospective-parachains/{fragment-chain => }/fragment_chain_test.go (86%) rename dot/parachain/{util/inclusion-emulator => prospective-parachains}/inclusion_emulator.go (97%) diff --git a/dot/parachain/prospective-parachains/fragment-chain/errors.go b/dot/parachain/prospective-parachains/errors.go similarity index 98% rename from dot/parachain/prospective-parachains/fragment-chain/errors.go rename to dot/parachain/prospective-parachains/errors.go index ec63fcc228..bcc3c6e62c 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/errors.go +++ b/dot/parachain/prospective-parachains/errors.go @@ -1,4 +1,4 @@ -package fragmentchain +package prospectiveparachains import ( "errors" diff --git 
a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go b/dot/parachain/prospective-parachains/fragment_chain.go similarity index 81% rename from dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go rename to dot/parachain/prospective-parachains/fragment_chain.go index e8b1bb326f..f9a0d714ba 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment_chain.go @@ -1,4 +1,4 @@ -package fragmentchain +package prospectiveparachains import ( "bytes" @@ -8,47 +8,40 @@ import ( "slices" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" - "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/tidwall/btree" ) -var logger = log.NewFromGlobal(log.AddContext("pkg", "fragment_chain"), log.SetLevel(log.Debug)) - -type CandidateState byte +type candidateState byte const ( - Seconded CandidateState = iota - Backed + seconded candidateState = iota + backed ) -// forkSelectionRule compares 2 candidate hashes, the result will be -// 0 if hash1 == hash2, -1 if hash1 < hash2, and +1 if hash1 > hash2 +// forkSelectionRule does a normal comparision between 2 candidate hashes +// and returns -1 if the first hash is lower than the second one meaning that +// the first hash will be choosen as the best candidate. 
func forkSelectionRule(hash1, hash2 parachaintypes.CandidateHash) int { return bytes.Compare(hash1.Value[:], hash2.Value[:]) } -// CandidateEntry represents a candidate in the CandidateStorage -type CandidateEntry struct { +// candidateEntry represents a candidate in the candidateStorage +type candidateEntry struct { candidateHash parachaintypes.CandidateHash parentHeadDataHash common.Hash outputHeadDataHash common.Hash relayParent common.Hash - candidate *inclusionemulator.ProspectiveCandidate - state CandidateState -} - -func (c *CandidateEntry) Hash() parachaintypes.CandidateHash { - return c.candidateHash + candidate *ProspectiveCandidate + state candidateState } -func NewCandidateEntry( +func newCandidateEntry( candidateHash parachaintypes.CandidateHash, candidate parachaintypes.CommittedCandidateReceipt, persistedValidationData parachaintypes.PersistedValidationData, - state CandidateState, -) (*CandidateEntry, error) { + state candidateState, +) (*candidateEntry, error) { pvdHash, err := persistedValidationData.Hash() if err != nil { return nil, fmt.Errorf("hashing persisted validation data: %w", err) @@ -72,13 +65,13 @@ func NewCandidateEntry( return nil, ErrZeroLengthCycle } - return &CandidateEntry{ + return &candidateEntry{ candidateHash: candidateHash, parentHeadDataHash: parentHeadDataHash, outputHeadDataHash: outputHeadDataHash, relayParent: candidate.Descriptor.RelayParent, state: state, - candidate: &inclusionemulator.ProspectiveCandidate{ + candidate: &ProspectiveCandidate{ Commitments: candidate.Commitments, PersistedValidationData: persistedValidationData, PoVHash: candidate.Descriptor.PovHash, @@ -87,17 +80,17 @@ func NewCandidateEntry( }, nil } -// CandidateStorage is an utility for storing candidates and information about them such as +// candidateStorage is an utility for storing candidates and information about them such as // their relay-parents and their backing states. 
This does not assume any restriction on whether // or not candidates form a chain. Useful for storing all kinds of candidates. -type CandidateStorage struct { +type candidateStorage struct { byParentHead map[common.Hash]map[parachaintypes.CandidateHash]struct{} byOutputHead map[common.Hash]map[parachaintypes.CandidateHash]struct{} - byCandidateHash map[parachaintypes.CandidateHash]*CandidateEntry + byCandidateHash map[parachaintypes.CandidateHash]*candidateEntry } -func (c *CandidateStorage) Clone() *CandidateStorage { - clone := NewCandidateStorage() +func (c *candidateStorage) Clone() *candidateStorage { + clone := newCandidateStorage() for parentHead, candidates := range c.byParentHead { clone.byParentHead[parentHead] = make(map[parachaintypes.CandidateHash]struct{}) @@ -114,7 +107,7 @@ func (c *CandidateStorage) Clone() *CandidateStorage { } for candidateHash, entry := range c.byCandidateHash { - clone.byCandidateHash[candidateHash] = &CandidateEntry{ + clone.byCandidateHash[candidateHash] = &candidateEntry{ candidateHash: entry.candidateHash, parentHeadDataHash: entry.parentHeadDataHash, outputHeadDataHash: entry.outputHeadDataHash, @@ -127,20 +120,20 @@ func (c *CandidateStorage) Clone() *CandidateStorage { return clone } -func NewCandidateStorage() *CandidateStorage { - return &CandidateStorage{ +func newCandidateStorage() *candidateStorage { + return &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } } -func (c *CandidateStorage) AddPendingAvailabilityCandidate( +func (c *candidateStorage) AddPendingAvailabilityCandidate( candidateHash parachaintypes.CandidateHash, candidate parachaintypes.CommittedCandidateReceipt, persistedValidationData parachaintypes.PersistedValidationData, ) 
error { - entry, err := NewCandidateEntry(candidateHash, candidate, persistedValidationData, Backed) + entry, err := newCandidateEntry(candidateHash, candidate, persistedValidationData, backed) if err != nil { return err } @@ -153,15 +146,15 @@ func (c *CandidateStorage) AddPendingAvailabilityCandidate( } // Len return the number of stored candidate -func (c *CandidateStorage) Len() int { +func (c *candidateStorage) Len() int { return len(c.byCandidateHash) } // addCandidateEntry inserts a new entry in the storage map, where the candidate hash -// is the key and the *CandidateEntry is the value, also it create other links, the +// is the key and the *candidateEntry is the value, also it create other links, the // parent head hash points to the candidate hash also the output head hash points to the // candidate hash -func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { +func (c *candidateStorage) addCandidateEntry(candidate *candidateEntry) error { _, ok := c.byCandidateHash[candidate.candidateHash] if ok { return ErrCandidateAlreadyKnown @@ -192,7 +185,7 @@ func (c *CandidateStorage) addCandidateEntry(candidate *CandidateEntry) error { // removeCandidate removes the candidate entry from the storage based on candidateHash // it also removes the parent head hash entry that points to candidateHash and // removes the output head hash entry that points to candidateHash -func (c *CandidateStorage) removeCandidate(candidateHash parachaintypes.CandidateHash) { +func (c *candidateStorage) removeCandidate(candidateHash parachaintypes.CandidateHash) { entry, ok := c.byCandidateHash[candidateHash] if !ok { return @@ -215,16 +208,16 @@ func (c *CandidateStorage) removeCandidate(candidateHash parachaintypes.Candidat } } -func (c *CandidateStorage) markBacked(candidateHash parachaintypes.CandidateHash) { +func (c *candidateStorage) markBacked(candidateHash parachaintypes.CandidateHash) { entry, ok := c.byCandidateHash[candidateHash] if !ok { 
logger.Tracef("candidate not found while marking as backed") } - entry.state = Backed + entry.state = backed } -func (c *CandidateStorage) headDataByHash(hash common.Hash) *parachaintypes.HeadData { +func (c *candidateStorage) headDataByHash(hash common.Hash) *parachaintypes.HeadData { // first, search for candidates outputting this head data and extract the head data // from their commitments if they exist. // otherwise, search for candidates building upon this head data and extract the @@ -249,15 +242,15 @@ func (c *CandidateStorage) headDataByHash(hash common.Hash) *parachaintypes.Head return nil } -func (c *CandidateStorage) possibleBackedParaChildren(parentHeadHash common.Hash) iter.Seq[*CandidateEntry] { - return func(yield func(*CandidateEntry) bool) { +func (c *candidateStorage) possibleBackedParaChildren(parentHeadHash common.Hash) iter.Seq[*candidateEntry] { + return func(yield func(*candidateEntry) bool) { seqOfCandidateHashes, ok := c.byParentHead[parentHeadHash] if !ok { return } for candidateHash := range seqOfCandidateHashes { - if entry, ok := c.byCandidateHash[candidateHash]; ok && entry.state == Backed { + if entry, ok := c.byCandidateHash[candidateHash]; ok && entry.state == backed { if !yield(entry) { return } @@ -266,32 +259,32 @@ func (c *CandidateStorage) possibleBackedParaChildren(parentHeadHash common.Hash } } -// PendingAvailability is a candidate on-chain but pending availability, for special -// treatment in the `Scope` -type PendingAvailability struct { - CandidateHash parachaintypes.CandidateHash - RelayParent inclusionemulator.RelayChainBlockInfo +// pendingAvailability is a candidate on-chain but pending availability, for special +// treatment in the `scope` +type pendingAvailability struct { + candidateHash parachaintypes.CandidateHash + relayParent RelayChainBlockInfo } // The scope of a fragment chain -type Scope struct { +type scope struct { // the relay parent we're currently building on top of - relayParent 
inclusionemulator.RelayChainBlockInfo + relayParent RelayChainBlockInfo // the other relay parents candidates are allowed to build upon, // mapped by the block number - ancestors *btree.Map[uint, inclusionemulator.RelayChainBlockInfo] + ancestors *btree.Map[uint, RelayChainBlockInfo] // the other relay parents candidates are allowed to build upon, // mapped by hash - ancestorsByHash map[common.Hash]inclusionemulator.RelayChainBlockInfo + ancestorsByHash map[common.Hash]RelayChainBlockInfo // candidates pending availability at this block - pendingAvailability []*PendingAvailability + pendingAvailability []*pendingAvailability // the base constraints derived from the latest included candidate baseConstraints *parachaintypes.Constraints // equal to `max_candidate_depth` maxDepth uint } -// NewScopeWithAncestors defines a new scope, all arguments are straightforward +// newScopeWithAncestors defines a new scope, all arguments are straightforward // except ancestors. Ancestor should be in reverse order, starting with the parent // of the relayParent, and proceeding backwards in block number decrements of 1. // Ancestors not following these conditions will be rejected. @@ -301,15 +294,15 @@ type Scope struct { // // Only ancestor whose children have the same session id as the relay parent's children // should be provided. It is allowed to provide 0 ancestors. 
-func NewScopeWithAncestors( - relayParent inclusionemulator.RelayChainBlockInfo, +func newScopeWithAncestors( + relayParent RelayChainBlockInfo, baseConstraints *parachaintypes.Constraints, - pendingAvailability []*PendingAvailability, + pendingAvailability []*pendingAvailability, maxDepth uint, - ancestors []inclusionemulator.RelayChainBlockInfo, -) (*Scope, error) { - ancestorsMap := btree.NewMap[uint, inclusionemulator.RelayChainBlockInfo](100) - ancestorsByHash := make(map[common.Hash]inclusionemulator.RelayChainBlockInfo) + ancestors []RelayChainBlockInfo, +) (*scope, error) { + ancestorsMap := btree.NewMap[uint, RelayChainBlockInfo](100) + ancestorsByHash := make(map[common.Hash]RelayChainBlockInfo) prev := relayParent.Number for _, ancestor := range ancestors { @@ -330,7 +323,7 @@ func NewScopeWithAncestors( ancestorsMap.Set(ancestor.Number, ancestor) } - return &Scope{ + return &scope{ relayParent: relayParent, baseConstraints: baseConstraints, pendingAvailability: pendingAvailability, @@ -341,7 +334,7 @@ func NewScopeWithAncestors( } // EarliestRelayParent gets the earliest relay-parent allowed in the scope of the fragment chain. -func (s *Scope) EarliestRelayParent() inclusionemulator.RelayChainBlockInfo { +func (s *scope) EarliestRelayParent() RelayChainBlockInfo { if iter := s.ancestors.Iter(); iter.Next() { return iter.Value() } @@ -349,7 +342,7 @@ func (s *Scope) EarliestRelayParent() inclusionemulator.RelayChainBlockInfo { } // Ancestor gets the relay ancestor of the fragment chain by hash. -func (s *Scope) Ancestor(hash common.Hash) *inclusionemulator.RelayChainBlockInfo { +func (s *scope) Ancestor(hash common.Hash) *RelayChainBlockInfo { if hash == s.relayParent.Hash { return &s.relayParent } @@ -362,9 +355,9 @@ func (s *Scope) Ancestor(hash common.Hash) *inclusionemulator.RelayChainBlockInf } // Whether the candidate in question is one pending availability in this scope. 
-func (s *Scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHash) *PendingAvailability { +func (s *scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHash) *pendingAvailability { for _, c := range s.pendingAvailability { - if c.CandidateHash == candidateHash { + if c.candidateHash == candidateHash { return c } } @@ -373,38 +366,38 @@ func (s *Scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHas // Fragment node is a node that belongs to a `BackedChain`. It holds constraints based on // the ancestors in the chain -type FragmentNode struct { - fragment *inclusionemulator.Fragment +type fragmentNode struct { + fragment *Fragment candidateHash parachaintypes.CandidateHash - cumulativeModifications *inclusionemulator.ConstraintModifications + cumulativeModifications *ConstraintModifications parentHeadDataHash common.Hash outputHeadDataHash common.Hash } -func (f *FragmentNode) relayParent() common.Hash { +func (f *fragmentNode) relayParent() common.Hash { return f.fragment.RelayParent().Hash } -// NewCandidateEntryFromFragment creates a candidate entry from a fragment, we dont need -// to perform the checks done in `NewCandidateEntry` since a `FragmentNode` always comes -// from a `CandidateEntry` -func NewCandidateEntryFromFragment(node *FragmentNode) *CandidateEntry { - return &CandidateEntry{ +// newCandidateEntryFromFragment creates a candidate entry from a fragment, we dont need +// to perform the checks done in `newCandidateEntry` since a `fragmentNode` always comes +// from a `candidateEntry` +func newCandidateEntryFromFragment(node *fragmentNode) *candidateEntry { + return &candidateEntry{ candidateHash: node.candidateHash, parentHeadDataHash: node.parentHeadDataHash, outputHeadDataHash: node.outputHeadDataHash, candidate: node.fragment.Candidate(), relayParent: node.relayParent(), // a fragment node is always backed - state: Backed, + state: backed, } } -// BackedChain is a chain of backed/backable 
candidates +// backedChain is a chain of backed/backable candidates // Includes candidates pending availability and candidates which may be backed on-chain -type BackedChain struct { +type backedChain struct { // holds the candidate chain - chain []*FragmentNode + chain []*fragmentNode // index from parent head data to the candidate that has that head data as parent // only contains the candidates present in the `chain` @@ -418,23 +411,23 @@ type BackedChain struct { candidates map[parachaintypes.CandidateHash]struct{} } -func NewBackedChain() *BackedChain { - return &BackedChain{ - chain: make([]*FragmentNode, 0), +func newBackedChain() *backedChain { + return &backedChain{ + chain: make([]*fragmentNode, 0), byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), candidates: make(map[parachaintypes.CandidateHash]struct{}), } } -func (bc *BackedChain) Push(candidate *FragmentNode) { +func (bc *backedChain) Push(candidate *fragmentNode) { bc.candidates[candidate.candidateHash] = struct{}{} bc.byParentHead[candidate.parentHeadDataHash] = candidate.candidateHash bc.byOutputHead[candidate.outputHeadDataHash] = candidate.candidateHash bc.chain = append(bc.chain, candidate) } -func (bc *BackedChain) Clear() []*FragmentNode { +func (bc *backedChain) Clear() []*fragmentNode { bc.byParentHead = make(map[common.Hash]parachaintypes.CandidateHash) bc.byOutputHead = make(map[common.Hash]parachaintypes.CandidateHash) bc.candidates = make(map[parachaintypes.CandidateHash]struct{}) @@ -444,7 +437,7 @@ func (bc *BackedChain) Clear() []*FragmentNode { return oldChain } -func (bc *BackedChain) RevertToParentHash(parentHeadDataHash common.Hash) []*FragmentNode { +func (bc *backedChain) RevertToParentHash(parentHeadDataHash common.Hash) []*fragmentNode { foundIndex := -1 for i := 0; i < len(bc.chain); i++ { @@ -462,7 +455,7 @@ func (bc *BackedChain) RevertToParentHash(parentHeadDataHash common.Hash) []*Fra if 
foundIndex != -1 { // drain the elements from the found index until // the end of the slice and return them - removed := make([]*FragmentNode, len(bc.chain)-(foundIndex+1)) + removed := make([]*fragmentNode, len(bc.chain)-(foundIndex+1)) copy(removed, bc.chain[foundIndex+1:]) bc.chain = slices.Delete(bc.chain, foundIndex+1, len(bc.chain)) @@ -472,7 +465,7 @@ func (bc *BackedChain) RevertToParentHash(parentHeadDataHash common.Hash) []*Fra return nil } -func (bc *BackedChain) Contains(hash parachaintypes.CandidateHash) bool { +func (bc *backedChain) Contains(hash parachaintypes.CandidateHash) bool { _, ok := bc.candidates[hash] return ok } @@ -481,30 +474,30 @@ func (bc *BackedChain) Contains(hash parachaintypes.CandidateHash) bool { // best backable candidate chain, as well as potential candidates which could // become connected to the chain in the future or which could even overwrite // the existing chain -type FragmentChain struct { +type fragmentChain struct { // the current scope, which dictates the on-chain operating constraints that // all future candidates must ad-here to. - scope *Scope + scope *scope // the current best chain of backable candidates. It only contains candidates // which build on top of each other and which have reached the backing quorum. // In the presence of potential forks, this chain will pick a fork according to // the `forkSelectionRule` - bestChain *BackedChain + bestChain *backedChain // the potential candidate storage. Contains candidates which are not yet part of // the `chain` but may become in the future. These can form any tree shape as well // as contain unconnected candidates for which we don't know the parent. 
- unconnected *CandidateStorage + unconnected *candidateStorage } -// NewFragmentChain createa a new fragment chain with the given scope and populates it with +// newFragmentChain createa a new fragment chain with the given scope and populates it with // the candidates pending availability -func NewFragmentChain(scope *Scope, candidatesPendingAvailability *CandidateStorage) *FragmentChain { - fragmentChain := &FragmentChain{ +func newFragmentChain(scope *scope, candidatesPendingAvailability *candidateStorage) *fragmentChain { + fragmentChain := &fragmentChain{ scope: scope, - bestChain: NewBackedChain(), - unconnected: NewCandidateStorage(), + bestChain: newBackedChain(), + unconnected: newCandidateStorage(), } // we only need to populate the best backable chain. Candidates pending availability @@ -513,9 +506,9 @@ func NewFragmentChain(scope *Scope, candidatesPendingAvailability *CandidateStor return fragmentChain } -// PopulateFromPrevious populates the `FragmentChain` given the new candidates pending +// PopulateFromPrevious populates the `fragmentChain` given the new candidates pending // availability and the optional previous fragment chain (of the previous relay parent) -func (f *FragmentChain) PopulateFromPrevious(prevFragmentChain *FragmentChain) { +func (f *fragmentChain) PopulateFromPrevious(prevFragmentChain *fragmentChain) { prevStorage := prevFragmentChain.unconnected.Clone() for _, candidate := range prevFragmentChain.bestChain.chain { // if they used to be pending availability, dont add them. 
This is fine because: @@ -527,7 +520,7 @@ func (f *FragmentChain) PopulateFromPrevious(prevFragmentChain *FragmentChain) { // availability candidates will always be part of the best chain pending := prevFragmentChain.scope.GetPendingAvailability(candidate.candidateHash) if pending == nil { - _ = prevStorage.addCandidateEntry(NewCandidateEntryFromFragment(candidate)) + _ = prevStorage.addCandidateEntry(newCandidateEntryFromFragment(candidate)) } } @@ -542,25 +535,25 @@ func (f *FragmentChain) PopulateFromPrevious(prevFragmentChain *FragmentChain) { f.populateUnconnectedPotentialCandidates(prevStorage) } -func (f *FragmentChain) Scope() *Scope { +func (f *fragmentChain) Scope() *scope { return f.scope } -func (f *FragmentChain) BestChainLen() int { +func (f *fragmentChain) BestChainLen() int { return len(f.bestChain.chain) } -func (f *FragmentChain) UnconnectedLen() int { +func (f *fragmentChain) UnconnectedLen() int { return f.unconnected.Len() } -func (f *FragmentChain) ContainsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { +func (f *fragmentChain) ContainsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { _, ok := f.unconnected.byCandidateHash[candidateHash] return ok } // BestChainVec returns a vector of the chain's candidate hashes, in-order. 
-func (f *FragmentChain) BestChainVec() (hashes []parachaintypes.CandidateHash) { +func (f *fragmentChain) BestChainVec() (hashes []parachaintypes.CandidateHash) { hashes = make([]parachaintypes.CandidateHash, len(f.bestChain.chain)) for idx, node := range f.bestChain.chain { hashes[idx] = node.candidateHash @@ -568,17 +561,17 @@ func (f *FragmentChain) BestChainVec() (hashes []parachaintypes.CandidateHash) { return hashes } -func (f *FragmentChain) IsCandidateBacked(hash parachaintypes.CandidateHash) bool { +func (f *fragmentChain) IsCandidateBacked(hash parachaintypes.CandidateHash) bool { if f.bestChain.Contains(hash) { return true } candidate := f.unconnected.byCandidateHash[hash] - return candidate != nil && candidate.state == Backed + return candidate != nil && candidate.state == backed } // CandidateBacked marks a candidate as backed. This can trigger a recreation of the best backable chain. -func (f *FragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.CandidateHash) { +func (f *fragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.CandidateHash) { // already backed if f.bestChain.Contains(newlyBackedCandidate) { return @@ -600,7 +593,7 @@ func (f *FragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.Cand } prevStorage := f.unconnected.Clone() - f.unconnected = NewCandidateStorage() + f.unconnected = newCandidateStorage() f.populateChain(prevStorage) f.trimUneligibleForks(prevStorage, &parentHeadDataHash) @@ -608,7 +601,7 @@ func (f *FragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.Cand } // CanAddCandidateAsPotential checks if this candidate could be added in the future -func (f *FragmentChain) CanAddCandidateAsPotential(entry *CandidateEntry) error { +func (f *fragmentChain) CanAddCandidateAsPotential(entry *candidateEntry) error { candidateHash := entry.candidateHash _, existsInCandidateStorage := f.unconnected.byCandidateHash[candidateHash] @@ -622,8 +615,8 @@ func (f *FragmentChain) 
CanAddCandidateAsPotential(entry *CandidateEntry) error // TryAddingSecondedCandidate tries to add a candidate as a seconded candidate, if the // candidate has potential. It will never be added to the chain directly in the seconded // state, it will only be part of the unconnected storage -func (f *FragmentChain) TryAddingSecondedCandidate(entry *CandidateEntry) error { - if entry.state == Backed { +func (f *fragmentChain) TryAddingSecondedCandidate(entry *candidateEntry) error { + if entry.state == backed { return ErrIntroduceBackedCandidate } @@ -636,7 +629,7 @@ func (f *FragmentChain) TryAddingSecondedCandidate(entry *CandidateEntry) error } // GetHeadDataByHash tries to get the full head data associated with this hash -func (f *FragmentChain) GetHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { +func (f *fragmentChain) GetHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { reqParent := f.scope.baseConstraints.RequiredParent reqParentHash, err := reqParent.Hash() if err != nil { @@ -674,17 +667,17 @@ func (f *FragmentChain) GetHeadDataByHash(headDataHash common.Hash) (*parachaint return f.unconnected.headDataByHash(headDataHash), nil } -type CandidateAndRelayParent struct { - CandidateHash parachaintypes.CandidateHash - RealyParentHash common.Hash +type candidateAndRelayParent struct { + candidateHash parachaintypes.CandidateHash + realyParentHash common.Hash } // FindBackableChain selects `count` candidates after the given `ancestors` which // can be backed on chain next. 
The intention of the `ancestors` is to allow queries // on the basis of one or more candidates which were previously pending availability // becoming available or candidates timing out -func (f *FragmentChain) FindBackableChain( - ancestors map[parachaintypes.CandidateHash]struct{}, count uint32) []*CandidateAndRelayParent { +func (f *fragmentChain) FindBackableChain( + ancestors map[parachaintypes.CandidateHash]struct{}, count uint32) []*candidateAndRelayParent { if count == 0 { return nil } @@ -692,15 +685,15 @@ func (f *FragmentChain) FindBackableChain( basePos := f.findAncestorPath(ancestors) actualEndIdx := min(basePos+int(count), len(f.bestChain.chain)) - res := make([]*CandidateAndRelayParent, 0, actualEndIdx-basePos) + res := make([]*candidateAndRelayParent, 0, actualEndIdx-basePos) for _, elem := range f.bestChain.chain[basePos:actualEndIdx] { // only supply candidates which are not yet pending availability. // `ancestors` should have already contained them, but check just in case if pending := f.scope.GetPendingAvailability(elem.candidateHash); pending == nil { - res = append(res, &CandidateAndRelayParent{ - CandidateHash: elem.candidateHash, - RealyParentHash: elem.relayParent(), + res = append(res, &candidateAndRelayParent{ + candidateHash: elem.candidateHash, + realyParentHash: elem.relayParent(), }) } else { break @@ -713,7 +706,7 @@ func (f *FragmentChain) FindBackableChain( // findAncestorPath tries to orders the ancestors into a viable path from root to the last one. // stops when the ancestors are all used or when a node in the chain is not present in the // ancestors set. 
Returns the index in the chain were the search stopped -func (f *FragmentChain) findAncestorPath(ancestors map[parachaintypes.CandidateHash]struct{}) int { +func (f *fragmentChain) findAncestorPath(ancestors map[parachaintypes.CandidateHash]struct{}) int { if len(f.bestChain.chain) == 0 { return 0 } @@ -736,7 +729,7 @@ func (f *FragmentChain) findAncestorPath(ancestors map[parachaintypes.CandidateH // the chain. The value returned may not be valid if we want to add a candidate pending // availability, which may have a relay parent which is out of scope, special handling // is needed in that case. -func (f *FragmentChain) earliestRelayParent() *inclusionemulator.RelayChainBlockInfo { +func (f *fragmentChain) earliestRelayParent() *RelayChainBlockInfo { if len(f.bestChain.chain) > 0 { lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1] info := f.scope.Ancestor(lastCandidate.relayParent()) @@ -751,7 +744,7 @@ func (f *FragmentChain) earliestRelayParent() *inclusionemulator.RelayChainBlock return nil } - return &pending.RelayParent + return &pending.relayParent } earliest := f.scope.EarliestRelayParent() @@ -761,11 +754,11 @@ func (f *FragmentChain) earliestRelayParent() *inclusionemulator.RelayChainBlock // earliestRelayParentPendingAvailability returns the earliest relay parent a potential // candidate may have for it to ever be added to the chain. This is the relay parent of // the last candidate pending availability or the earliest relay parent in scope. 
-func (f *FragmentChain) earliestRelayParentPendingAvailability() *inclusionemulator.RelayChainBlockInfo { +func (f *fragmentChain) earliestRelayParentPendingAvailability() *RelayChainBlockInfo { for i := len(f.bestChain.chain) - 1; i >= 0; i-- { candidate := f.bestChain.chain[i] if pending := f.scope.GetPendingAvailability(candidate.candidateHash); pending != nil { - return &pending.RelayParent + return &pending.relayParent } } earliest := f.scope.EarliestRelayParent() @@ -774,7 +767,7 @@ func (f *FragmentChain) earliestRelayParentPendingAvailability() *inclusionemula // populateUnconnectedPotentialCandidates populates the unconnected potential candidate storage // starting from a previous storage -func (f *FragmentChain) populateUnconnectedPotentialCandidates(oldStorage *CandidateStorage) { +func (f *fragmentChain) populateUnconnectedPotentialCandidates(oldStorage *candidateStorage) { for _, candidate := range oldStorage.byCandidateHash { // sanity check, all pending availability candidates should be already present // in the chain @@ -792,7 +785,7 @@ func (f *FragmentChain) populateUnconnectedPotentialCandidates(oldStorage *Candi } } -func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { +func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { relayParent := candidate.relayParent parentHeadHash := candidate.parentHeadDataHash @@ -844,7 +837,7 @@ func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { } if parentCandidateHash, ok := f.bestChain.byOutputHead[parentHeadHash]; ok { - var parentCandidate *FragmentNode + var parentCandidate *fragmentNode for _, c := range f.bestChain.chain { if c.candidateHash == parentCandidateHash { @@ -858,7 +851,7 @@ func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { } var err error - constraints, err = inclusionemulator.ApplyModifications( + constraints, err = ApplyModifications( f.scope.baseConstraints, parentCandidate.cumulativeModifications) if err 
!= nil { @@ -882,7 +875,7 @@ func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { } // Check against constraints if we have a full concrete candidate - _, err = inclusionemulator.CheckAgainstConstraints( + _, err = CheckAgainstConstraints( relayParentInfo, constraints, candidate.candidate.Commitments, @@ -907,7 +900,7 @@ func (f *FragmentChain) checkPotential(candidate *CandidateEntry) error { // trimUneligibleForks once the backable chain was populated, trim the forks generated by candidate // hashes which are not present in the best chain. Fan this out into a full breadth-first search. If // starting point is not nil then start the search from the candidates having this parent head hash. -func (f *FragmentChain) trimUneligibleForks(storage *CandidateStorage, startingPoint *common.Hash) { +func (f *fragmentChain) trimUneligibleForks(storage *candidateStorage, startingPoint *common.Hash) { type queueItem struct { hash common.Hash hasPotential bool @@ -982,22 +975,22 @@ func (f *FragmentChain) trimUneligibleForks(storage *CandidateStorage, startingP } type possibleChild struct { - fragment *inclusionemulator.Fragment + fragment *Fragment candidateHash parachaintypes.CandidateHash outputHeadDataHash common.Hash parentHeadDataHash common.Hash } -// populateChain populates the fragment chain with candidates from the supplied `CandidateStorage`. -// Can be called by the `NewFragmentChain` or when backing a new candidate. When this is called +// populateChain populates the fragment chain with candidates from the supplied `candidateStorage`. +// Can be called by the `newFragmentChain` or when backing a new candidate. 
When this is called // it may cause the previous chain to be completely erased or it may add more than one candidate -func (f *FragmentChain) populateChain(storage *CandidateStorage) { - var cumulativeModifications *inclusionemulator.ConstraintModifications +func (f *fragmentChain) populateChain(storage *candidateStorage) { + var cumulativeModifications *ConstraintModifications if len(f.bestChain.chain) > 0 { lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1] cumulativeModifications = lastCandidate.cumulativeModifications.Clone() } else { - cumulativeModifications = inclusionemulator.NewConstraintModificationsIdentity() + cumulativeModifications = NewConstraintModificationsIdentity() } earliestRelayParent := f.earliestRelayParent() @@ -1006,7 +999,7 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { } for len(f.bestChain.chain) < int(f.scope.maxDepth)+1 { - childConstraints, err := inclusionemulator.ApplyModifications( + childConstraints, err := ApplyModifications( f.scope.baseConstraints, cumulativeModifications) if err != nil { logger.Warnf("failed to apply modifications: %s", err.Error()) @@ -1028,14 +1021,14 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { // 4. all non-pending-availability candidates have relay-parent in the scope // 5. 
candidate outputs fulfil constraints - var relayParent *inclusionemulator.RelayChainBlockInfo + var relayParent *RelayChainBlockInfo var minRelayParent uint pending := f.scope.GetPendingAvailability(candidateEntry.candidateHash) if pending != nil { - relayParent = &pending.RelayParent + relayParent = &pending.relayParent if len(f.bestChain.chain) == 0 { - minRelayParent = pending.RelayParent.Number + minRelayParent = pending.relayParent.Number } else { minRelayParent = earliestRelayParent.Number } @@ -1074,10 +1067,10 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { constraints := childConstraints.Clone() if pending != nil { // overwrite for candidates pending availability as a special-case - constraints.MinRelayParentNumber = pending.RelayParent.Number + constraints.MinRelayParentNumber = pending.relayParent.Number } - fragment, err := inclusionemulator.NewFragment(relayParent, constraints, candidateEntry.candidate) + fragment, err := NewFragment(relayParent, constraints, candidateEntry.candidate) if err != nil { logger.Warnf("failed to create fragment: %s", err.Error()) continue @@ -1114,13 +1107,13 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { cumulativeModifications.Stack(bestCandidate.fragment.ConstraintModifications()) // update the earliest relay parent - earliestRelayParent = &inclusionemulator.RelayChainBlockInfo{ + earliestRelayParent = &RelayChainBlockInfo{ Hash: bestCandidate.fragment.RelayParent().Hash, Number: bestCandidate.fragment.RelayParent().Number, StorageRoot: bestCandidate.fragment.RelayParent().StorageRoot, } - node := &FragmentNode{ + node := &fragmentNode{ fragment: bestCandidate.fragment, candidateHash: bestCandidate.candidateHash, parentHeadDataHash: bestCandidate.parentHeadDataHash, @@ -1135,8 +1128,8 @@ func (f *FragmentChain) populateChain(storage *CandidateStorage) { // checkCyclesOrInvalidTree checks whether a candidate outputting this head data would // introduce a cycle or multiple paths 
to the same state. Trivial 0-length cycles are -// checked in `NewCandidateEntry`. -func (f *FragmentChain) checkCyclesOrInvalidTree(outputHeadDataHash common.Hash) error { +// checked in `newCandidateEntry`. +func (f *fragmentChain) checkCyclesOrInvalidTree(outputHeadDataHash common.Hash) error { // this should catch a cycle where this candidate would point back to the parent // of some candidate in the chain _, ok := f.bestChain.byParentHead[outputHeadDataHash] @@ -1157,8 +1150,8 @@ func (f *FragmentChain) checkCyclesOrInvalidTree(outputHeadDataHash common.Hash) // `parent_head_hash`. If the `parent_head_hash` is exactly the required parent of the base // constraints (builds on the latest included candidate), revert the entire chain. // Return false if we couldn't find the parent head hash -func (f *FragmentChain) revertTo(parentHeadDataHash common.Hash) bool { - var removedItems []*FragmentNode = nil +func (f *fragmentChain) revertTo(parentHeadDataHash common.Hash) bool { + var removedItems []*fragmentNode = nil requiredParentHash, err := f.scope.baseConstraints.RequiredParent.Hash() if err != nil { @@ -1180,7 +1173,7 @@ func (f *FragmentChain) revertTo(parentHeadDataHash common.Hash) bool { // Even if it's empty, we need to return true, because we'll be able to add a new candidate // to the chain. 
for _, node := range removedItems { - _ = f.unconnected.addCandidateEntry(NewCandidateEntryFromFragment(node)) + _ = f.unconnected.addCandidateEntry(newCandidateEntryFromFragment(node)) } return true diff --git a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment_chain_test.go similarity index 86% rename from dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go rename to dot/parachain/prospective-parachains/fragment_chain_test.go index a1bb331da4..4eb1075ee5 100644 --- a/dot/parachain/prospective-parachains/fragment-chain/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment_chain_test.go @@ -1,4 +1,4 @@ -package fragmentchain +package prospectiveparachains import ( "bytes" @@ -9,7 +9,6 @@ import ( "testing" parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" - inclusionemulator "github.com/ChainSafe/gossamer/dot/parachain/util/inclusion-emulator" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/pkg/scale" @@ -19,21 +18,21 @@ import ( ) func TestCandidateStorage_RemoveCandidate(t *testing.T) { - storage := &CandidateStorage{ + storage := &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} parentHeadHash := common.Hash{4, 5, 6} outputHeadHash := common.Hash{7, 8, 9} - entry := &CandidateEntry{ + entry := &candidateEntry{ candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - state: Backed, + state: backed, } storage.byCandidateHash[candidateHash] = entry @@ -53,21 +52,21 
@@ func TestCandidateStorage_RemoveCandidate(t *testing.T) { } func TestCandidateStorage_MarkBacked(t *testing.T) { - storage := &CandidateStorage{ + storage := &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} parentHeadHash := common.Hash{4, 5, 6} outputHeadHash := common.Hash{7, 8, 9} - entry := &CandidateEntry{ + entry := &candidateEntry{ candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - state: Seconded, + state: seconded, } storage.byCandidateHash[candidateHash] = entry @@ -76,21 +75,21 @@ func TestCandidateStorage_MarkBacked(t *testing.T) { storage.markBacked(candidateHash) - assert.Equal(t, Backed, entry.state, "candidate state should be marked as backed") + assert.Equal(t, backed, entry.state, "candidate state should be marked as backed") } func TestCandidateStorage_HeadDataByHash(t *testing.T) { tests := map[string]struct { - setup func() *CandidateStorage + setup func() *candidateStorage hash common.Hash expected *parachaintypes.HeadData }{ "find_head_data_of_first_candidate_using_output_head_data_hash": { - setup: func() *CandidateStorage { - storage := &CandidateStorage{ + setup: func() *candidateStorage { + storage := &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} @@ -98,11 +97,11 @@ func 
TestCandidateStorage_HeadDataByHash(t *testing.T) { outputHeadHash := common.Hash{7, 8, 9} headData := parachaintypes.HeadData{Data: []byte{10, 11, 12}} - entry := &CandidateEntry{ + entry := &candidateEntry{ candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - candidate: &inclusionemulator.ProspectiveCandidate{ + candidate: &ProspectiveCandidate{ Commitments: parachaintypes.CandidateCommitments{ HeadData: headData, }, @@ -119,11 +118,11 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { expected: ¶chaintypes.HeadData{Data: []byte{10, 11, 12}}, }, "find_head_data_using_parent_head_data_hash_from_second_candidate": { - setup: func() *CandidateStorage { - storage := &CandidateStorage{ + setup: func() *candidateStorage { + storage := &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } candidateHash := parachaintypes.CandidateHash{Value: common.Hash{13, 14, 15}} @@ -131,11 +130,11 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { outputHeadHash := common.Hash{19, 20, 21} headData := parachaintypes.HeadData{Data: []byte{22, 23, 24}} - entry := &CandidateEntry{ + entry := &candidateEntry{ candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - candidate: &inclusionemulator.ProspectiveCandidate{ + candidate: &ProspectiveCandidate{ PersistedValidationData: parachaintypes.PersistedValidationData{ ParentHead: headData, }, @@ -152,11 +151,11 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { expected: ¶chaintypes.HeadData{Data: []byte{22, 23, 24}}, }, "use_nonexistent_hash_and_should_get_nil": { - setup: func() *CandidateStorage { - storage := &CandidateStorage{ + setup: func() 
*candidateStorage { + storage := &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } return storage }, @@ -164,11 +163,11 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { expected: nil, }, "insert_0_candidates_and_try_to_find_but_should_get_nil": { - setup: func() *CandidateStorage { - return &CandidateStorage{ + setup: func() *candidateStorage { + return &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } }, hash: common.Hash{7, 8, 9}, @@ -188,16 +187,16 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { tests := map[string]struct { - setup func() *CandidateStorage + setup func() *candidateStorage hash common.Hash - expected []*CandidateEntry + expected []*candidateEntry }{ "insert_2_candidates_for_same_parent_one_seconded_one_backed": { - setup: func() *CandidateStorage { - storage := &CandidateStorage{ + setup: func() *candidateStorage { + storage := &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } candidateHash1 := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} @@ -207,18 +206,18 @@ func TestCandidateStorage_PossibleBackedParaChildren(t 
*testing.T) { candidateHash2 := parachaintypes.CandidateHash{Value: common.Hash{10, 11, 12}} outputHeadHash2 := common.Hash{13, 14, 15} - entry1 := &CandidateEntry{ + entry1 := &candidateEntry{ candidateHash: candidateHash1, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash1, - state: Seconded, + state: seconded, } - entry2 := &CandidateEntry{ + entry2 := &candidateEntry{ candidateHash: candidateHash2, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash2, - state: Backed, + state: backed, } storage.byCandidateHash[candidateHash1] = entry1 @@ -231,18 +230,18 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { return storage }, hash: common.Hash{4, 5, 6}, - expected: []*CandidateEntry{{candidateHash: parachaintypes.CandidateHash{ + expected: []*candidateEntry{{candidateHash: parachaintypes.CandidateHash{ Value: common.Hash{10, 11, 12}}, parentHeadDataHash: common.Hash{4, 5, 6}, - outputHeadDataHash: common.Hash{13, 14, 15}, state: Backed}, + outputHeadDataHash: common.Hash{13, 14, 15}, state: backed}, }, }, "insert_nothing_and_call_function_should_return_nothing": { - setup: func() *CandidateStorage { - return &CandidateStorage{ + setup: func() *candidateStorage { + return &candidateStorage{ byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), - byCandidateHash: make(map[parachaintypes.CandidateHash]*CandidateEntry), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), } }, hash: common.Hash{4, 5, 6}, @@ -254,7 +253,7 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { tt := tt t.Run(name, func(t *testing.T) { storage := tt.setup() - var result []*CandidateEntry + var result []*candidateEntry for entry := range storage.possibleBackedParaChildren(tt.hash) { result = append(result, entry) } @@ -265,51 +264,51 @@ func TestCandidateStorage_PossibleBackedParaChildren(t 
*testing.T) { func TestEarliestRelayParent(t *testing.T) { tests := map[string]struct { - setup func() *Scope - expect inclusionemulator.RelayChainBlockInfo + setup func() *scope + expect RelayChainBlockInfo }{ "returns_from_ancestors": { - setup: func() *Scope { - relayParent := inclusionemulator.RelayChainBlockInfo{ + setup: func() *scope { + relayParent := RelayChainBlockInfo{ Hash: common.Hash{0x01}, Number: 10, } baseConstraints := ¶chaintypes.Constraints{ MinRelayParentNumber: 5, } - ancestor := inclusionemulator.RelayChainBlockInfo{ + ancestor := RelayChainBlockInfo{ Hash: common.Hash{0x02}, Number: 9, } - ancestorsMap := btree.NewMap[uint, inclusionemulator.RelayChainBlockInfo](100) + ancestorsMap := btree.NewMap[uint, RelayChainBlockInfo](100) ancestorsMap.Set(ancestor.Number, ancestor) - return &Scope{ + return &scope{ relayParent: relayParent, baseConstraints: baseConstraints, ancestors: ancestorsMap, } }, - expect: inclusionemulator.RelayChainBlockInfo{ + expect: RelayChainBlockInfo{ Hash: common.Hash{0x02}, Number: 9, }, }, "returns_relayParent": { - setup: func() *Scope { - relayParent := inclusionemulator.RelayChainBlockInfo{ + setup: func() *scope { + relayParent := RelayChainBlockInfo{ Hash: common.Hash{0x01}, Number: 10, } baseConstraints := ¶chaintypes.Constraints{ MinRelayParentNumber: 5, } - return &Scope{ + return &scope{ relayParent: relayParent, baseConstraints: baseConstraints, - ancestors: btree.NewMap[uint, inclusionemulator.RelayChainBlockInfo](100), + ancestors: btree.NewMap[uint, RelayChainBlockInfo](100), } }, - expect: inclusionemulator.RelayChainBlockInfo{ + expect: RelayChainBlockInfo{ Hash: common.Hash{0x01}, Number: 10, }, @@ -328,26 +327,26 @@ func TestEarliestRelayParent(t *testing.T) { func TestBackedChain_RevertToParentHash(t *testing.T) { tests := map[string]struct { - setup func() *BackedChain + setup func() *backedChain hash common.Hash expectedChainSize int expectedRemovedFragments int }{ "revert_to_parent_at_pos_2": { - 
setup: func() *BackedChain { - chain := &BackedChain{ - chain: make([]*FragmentNode, 0), + setup: func() *backedChain { + chain := &backedChain{ + chain: make([]*fragmentNode, 0), byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), candidates: make(map[parachaintypes.CandidateHash]struct{}), } for i := 0; i < 5; i++ { - node := &FragmentNode{ + node := &fragmentNode{ candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: &inclusionemulator.ConstraintModifications{}, + cumulativeModifications: &ConstraintModifications{}, } chain.Push(node) } @@ -358,20 +357,20 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { expectedRemovedFragments: 2, }, "revert_to_parent_at_pos_0": { - setup: func() *BackedChain { - chain := &BackedChain{ - chain: make([]*FragmentNode, 0), + setup: func() *backedChain { + chain := &backedChain{ + chain: make([]*fragmentNode, 0), byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), candidates: make(map[parachaintypes.CandidateHash]struct{}), } for i := 0; i < 2; i++ { - node := &FragmentNode{ + node := &fragmentNode{ candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: &inclusionemulator.ConstraintModifications{}, + cumulativeModifications: &ConstraintModifications{}, } chain.Push(node) } @@ -382,20 +381,20 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { expectedRemovedFragments: 1, }, "no_node_removed": { - setup: func() *BackedChain { - chain := &BackedChain{ - chain: make([]*FragmentNode, 0), + setup: func() *backedChain { + chain := &backedChain{ + chain: make([]*fragmentNode, 0), byParentHead: 
make(map[common.Hash]parachaintypes.CandidateHash), byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), candidates: make(map[parachaintypes.CandidateHash]struct{}), } for i := 0; i < 3; i++ { - node := &FragmentNode{ + node := &fragmentNode{ candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: &inclusionemulator.ConstraintModifications{}, + cumulativeModifications: &ConstraintModifications{}, } chain.Push(node) } @@ -433,7 +432,7 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { } func TestFragmentChainWithFreshScope(t *testing.T) { - relayParent := inclusionemulator.RelayChainBlockInfo{ + relayParent := RelayChainBlockInfo{ Hash: common.Hash{0x00}, Number: 0, StorageRoot: common.Hash{0x00}, @@ -445,10 +444,10 @@ func TestFragmentChainWithFreshScope(t *testing.T) { ValidationCodeHash: parachaintypes.ValidationCodeHash(common.Hash{0x03}), } - scope, err := NewScopeWithAncestors(relayParent, baseConstraints, nil, 10, nil) + scope, err := newScopeWithAncestors(relayParent, baseConstraints, nil, 10, nil) assert.NoError(t, err) - candidateStorage := NewCandidateStorage() + candidateStorage := newCandidateStorage() // Create 3 candidate entries forming a chain for i := 0; i < 3; i++ { @@ -482,7 +481,7 @@ func TestFragmentChainWithFreshScope(t *testing.T) { assert.NoError(t, err) } - fragmentChain := NewFragmentChain(scope, candidateStorage) + fragmentChain := newFragmentChain(scope, candidateStorage) // Check that the best chain contains 3 candidates assert.Equal(t, 3, len(fragmentChain.bestChain.chain)) @@ -565,20 +564,20 @@ func makeCommittedCandidate( func TestScopeRejectsAncestors(t *testing.T) { tests := map[string]struct { - relayParent *inclusionemulator.RelayChainBlockInfo - ancestors []inclusionemulator.RelayChainBlockInfo + relayParent *RelayChainBlockInfo + ancestors []RelayChainBlockInfo maxDepth uint 
baseConstraints *parachaintypes.Constraints - pendingAvailability []*PendingAvailability + pendingAvailability []*pendingAvailability expectedError error }{ "rejects_ancestor_that_skips_blocks": { - relayParent: &inclusionemulator.RelayChainBlockInfo{ + relayParent: &RelayChainBlockInfo{ Number: 10, Hash: common.BytesToHash(bytes.Repeat([]byte{0x10}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{0x69}, 32)), }, - ancestors: []inclusionemulator.RelayChainBlockInfo{ + ancestors: []RelayChainBlockInfo{ { Number: 8, Hash: common.BytesToHash(bytes.Repeat([]byte{0x08}, 32)), @@ -588,16 +587,16 @@ func TestScopeRejectsAncestors(t *testing.T) { maxDepth: 2, baseConstraints: makeConstraints(8, []uint{8, 9}, parachaintypes.HeadData{Data: []byte{0x01, 0x02, 0x03}}), - pendingAvailability: make([]*PendingAvailability, 0), + pendingAvailability: make([]*pendingAvailability, 0), expectedError: ErrUnexpectedAncestor{Number: 8, Prev: 10}, }, "rejects_ancestor_for_zero_block": { - relayParent: &inclusionemulator.RelayChainBlockInfo{ + relayParent: &RelayChainBlockInfo{ Number: 0, Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), }, - ancestors: []inclusionemulator.RelayChainBlockInfo{ + ancestors: []RelayChainBlockInfo{ { Number: 99999, Hash: common.BytesToHash(bytes.Repeat([]byte{99}, 32)), @@ -606,16 +605,16 @@ func TestScopeRejectsAncestors(t *testing.T) { }, maxDepth: 2, baseConstraints: makeConstraints(0, []uint{}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), - pendingAvailability: make([]*PendingAvailability, 0), + pendingAvailability: make([]*pendingAvailability, 0), expectedError: ErrUnexpectedAncestor{Number: 99999, Prev: 0}, }, "rejects_unordered_ancestors": { - relayParent: &inclusionemulator.RelayChainBlockInfo{ + relayParent: &RelayChainBlockInfo{ Number: 5, Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), }, - 
ancestors: []inclusionemulator.RelayChainBlockInfo{ + ancestors: []RelayChainBlockInfo{ { Number: 4, Hash: common.BytesToHash(bytes.Repeat([]byte{4}, 32)), @@ -634,7 +633,7 @@ func TestScopeRejectsAncestors(t *testing.T) { }, maxDepth: 2, baseConstraints: makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), - pendingAvailability: make([]*PendingAvailability, 0), + pendingAvailability: make([]*pendingAvailability, 0), expectedError: ErrUnexpectedAncestor{Number: 2, Prev: 4}, }, } @@ -642,7 +641,7 @@ func TestScopeRejectsAncestors(t *testing.T) { for name, tt := range tests { tt := tt t.Run(name, func(t *testing.T) { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *tt.relayParent, tt.baseConstraints, tt.pendingAvailability, @@ -655,13 +654,13 @@ func TestScopeRejectsAncestors(t *testing.T) { } func TestScopeOnlyTakesAncestorsUpToMin(t *testing.T) { - relayParent := inclusionemulator.RelayChainBlockInfo{ + relayParent := RelayChainBlockInfo{ Number: 5, Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), } - ancestors := []inclusionemulator.RelayChainBlockInfo{ + ancestors := []RelayChainBlockInfo{ { Number: 4, Hash: common.BytesToHash(bytes.Repeat([]byte{4}, 32)), @@ -681,9 +680,9 @@ func TestScopeOnlyTakesAncestorsUpToMin(t *testing.T) { maxDepth := uint(2) baseConstraints := makeConstraints(3, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}) - pendingAvailability := make([]*PendingAvailability, 0) + pendingAvailability := make([]*pendingAvailability, 0) - scope, err := NewScopeWithAncestors(relayParent, baseConstraints, pendingAvailability, maxDepth, ancestors) + scope, err := newScopeWithAncestors(relayParent, baseConstraints, pendingAvailability, maxDepth, ancestors) require.NoError(t, err) assert.Equal(t, 2, scope.ancestors.Len()) @@ -714,8 +713,8 @@ func TestCandidateStorageMethods(t *testing.T) { candidateHash, err := 
candidate.Hash() require.NoError(t, err) - entry, err := NewCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, - candidate, wrongPvd, Seconded) + entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, + candidate, wrongPvd, seconded) require.ErrorIs(t, err, ErrPersistedValidationDataMismatch) require.Nil(t, entry) }, @@ -745,8 +744,8 @@ func TestCandidateStorageMethods(t *testing.T) { candidateHash, err := candidate.Hash() require.NoError(t, err) - entry, err := NewCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, - candidate, pvd, Seconded) + entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, + candidate, pvd, seconded) require.Nil(t, entry) require.ErrorIs(t, err, ErrZeroLengthCycle) }, @@ -773,10 +772,10 @@ func TestCandidateStorageMethods(t *testing.T) { parentHeadHash, err := pvd.ParentHead.Hash() require.NoError(t, err) - entry, err := NewCandidateEntry(candidateHash, candidate, pvd, Seconded) + entry, err := newCandidateEntry(candidateHash, candidate, pvd, seconded) require.NoError(t, err) - storage := NewCandidateStorage() + storage := newCandidateStorage() t.Run("add_candidate_entry_as_seconded", func(t *testing.T) { err = storage.addCandidateEntry(entry) @@ -862,7 +861,7 @@ func TestCandidateStorageMethods(t *testing.T) { parentHeadHash, err := pvd.ParentHead.Hash() require.NoError(t, err) - storage := NewCandidateStorage() + storage := newCandidateStorage() err = storage.AddPendingAvailabilityCandidate(candidateHash, candidate, pvd) require.NoError(t, err) @@ -902,7 +901,7 @@ func TestCandidateStorageMethods(t *testing.T) { require.NoError(t, err) candidateHash2 := parachaintypes.CandidateHash{Value: hash2} - candidateEntry2, err := NewCandidateEntry(candidateHash2, candidate2, pvd2, Seconded) + candidateEntry2, err := newCandidateEntry(candidateHash2, candidate2, pvd2, seconded) require.NoError(t, err) err = storage.addCandidateEntry(candidateEntry2) @@ -946,8 
+945,8 @@ func TestCandidateStorageMethods(t *testing.T) { func TestInitAndPopulateFromEmpty(t *testing.T) { baseConstraints := makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0a}}) - scope, err := NewScopeWithAncestors( - inclusionemulator.RelayChainBlockInfo{ + scope, err := newScopeWithAncestors( + RelayChainBlockInfo{ Number: 1, Hash: common.BytesToHash(bytes.Repeat([]byte{1}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), @@ -959,18 +958,18 @@ func TestInitAndPopulateFromEmpty(t *testing.T) { ) require.NoError(t, err) - chain := NewFragmentChain(scope, NewCandidateStorage()) + chain := newFragmentChain(scope, newCandidateStorage()) assert.Equal(t, 0, chain.BestChainLen()) assert.Equal(t, 0, chain.UnconnectedLen()) - newChain := NewFragmentChain(scope, NewCandidateStorage()) + newChain := newFragmentChain(scope, newCandidateStorage()) newChain.PopulateFromPrevious(chain) assert.Equal(t, 0, newChain.BestChainLen()) assert.Equal(t, 0, newChain.UnconnectedLen()) } -func populateFromPreviousStorage(scope *Scope, storage *CandidateStorage) *FragmentChain { - chain := NewFragmentChain(scope, NewCandidateStorage()) +func populateFromPreviousStorage(scope *scope, storage *candidateStorage) *fragmentChain { + chain := newFragmentChain(scope, newCandidateStorage()) // clone the value prevChain := *chain @@ -980,27 +979,27 @@ func populateFromPreviousStorage(scope *Scope, storage *CandidateStorage) *Fragm } func TestPopulateAndCheckPotential(t *testing.T) { - storage := NewCandidateStorage() + storage := newCandidateStorage() paraID := parachaintypes.ParaID(5) relayParentAHash := common.BytesToHash(bytes.Repeat([]byte{1}, 32)) relayParentBHash := common.BytesToHash(bytes.Repeat([]byte{2}, 32)) relayParentCHash := common.BytesToHash(bytes.Repeat([]byte{3}, 32)) - relayParentAInfo := &inclusionemulator.RelayChainBlockInfo{ + relayParentAInfo := &RelayChainBlockInfo{ Number: 0, Hash: relayParentAHash, StorageRoot: common.Hash{}, } - 
relayParentBInfo := &inclusionemulator.RelayChainBlockInfo{ + relayParentBInfo := &RelayChainBlockInfo{ Number: 1, Hash: relayParentBHash, StorageRoot: common.Hash{}, } - relayParentCInfo := &inclusionemulator.RelayChainBlockInfo{ + relayParentCInfo := &RelayChainBlockInfo{ Number: 2, Hash: relayParentCHash, StorageRoot: common.Hash{}, } // the ancestors must be in the reverse order - ancestors := []inclusionemulator.RelayChainBlockInfo{ + ancestors := []RelayChainBlockInfo{ *relayParentBInfo, *relayParentAInfo, } @@ -1010,15 +1009,15 @@ func TestPopulateAndCheckPotential(t *testing.T) { // helper function to hash the candidate and add its entry // into the candidate storage - hashAndInsertCandididate := func(t *testing.T, storage *CandidateStorage, + hashAndInsertCandididate := func(t *testing.T, storage *candidateStorage, candidate parachaintypes.CommittedCandidateReceipt, - pvd parachaintypes.PersistedValidationData, state CandidateState) ( - parachaintypes.CandidateHash, *CandidateEntry) { + pvd parachaintypes.PersistedValidationData, state candidateState) ( + parachaintypes.CandidateHash, *candidateEntry) { hash, err := candidate.Hash() require.NoError(t, err) candidateHash := parachaintypes.CandidateHash{Value: hash} - entry, err := NewCandidateEntry(candidateHash, candidate, pvd, state) + entry, err := newCandidateEntry(candidateHash, candidate, pvd, state) require.NoError(t, err) err = storage.addCandidateEntry(entry) require.NoError(t, err) @@ -1027,11 +1026,11 @@ func TestPopulateAndCheckPotential(t *testing.T) { } hashAndGetEntry := func(t *testing.T, candidate parachaintypes.CommittedCandidateReceipt, - pvd parachaintypes.PersistedValidationData, state CandidateState) (parachaintypes.CandidateHash, *CandidateEntry) { + pvd parachaintypes.PersistedValidationData, state candidateState) (parachaintypes.CandidateHash, *candidateEntry) { hash, err := candidate.Hash() require.NoError(t, err) candidateHash := parachaintypes.CandidateHash{Value: hash} - entry, 
err := NewCandidateEntry(candidateHash, candidate, pvd, state) + entry, err := newCandidateEntry(candidateHash, candidate, pvd, state) require.NoError(t, err) return candidateHash, entry } @@ -1045,7 +1044,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentAInfo.Number), ) - candidateAHash, candidateAEntry := hashAndInsertCandididate(t, storage, candidateA, pvdA, Backed) + candidateAHash, candidateAEntry := hashAndInsertCandididate(t, storage, candidateA, pvdA, backed) candidateBParaHead := parachaintypes.HeadData{Data: []byte{0x0c}} pvdB, candidateB := makeCommittedCandidate(t, paraID, @@ -1055,7 +1054,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentBInfo.Number), ) - candidateBHash, candidateBEntry := hashAndInsertCandididate(t, storage, candidateB, pvdB, Backed) + candidateBHash, candidateBEntry := hashAndInsertCandididate(t, storage, candidateB, pvdB, backed) candidateCParaHead := parachaintypes.HeadData{Data: []byte{0x0d}} pvdC, candidateC := makeCommittedCandidate(t, paraID, @@ -1065,7 +1064,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentCInfo.Number), ) - candidateCHash, candidateCEntry := hashAndInsertCandididate(t, storage, candidateC, pvdC, Backed) + candidateCHash, candidateCEntry := hashAndInsertCandididate(t, storage, candidateC, pvdC, backed) t.Run("candidate_A_doesnt_adhere_to_base_constraints", func(t *testing.T) { wrongConstraints := []parachaintypes.Constraints{ @@ -1079,7 +1078,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { } for _, wrongConstraint := range wrongConstraints { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, &wrongConstraint, nil, @@ -1159,7 +1158,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { t.Run(tname, func(t *testing.T) { // iterate over all the depth values for _, depth := range tt.depth { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, 
baseConstraints, nil, @@ -1168,7 +1167,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) require.NoError(t, err) - chain := NewFragmentChain(scope, NewCandidateStorage()) + chain := newFragmentChain(scope, newCandidateStorage()) // individually each candidate is a potential candidate require.NoError(t, chain.CanAddCandidateAsPotential(candidateAEntry)) require.NoError(t, chain.CanAddCandidateAsPotential(candidateBEntry)) @@ -1193,11 +1192,11 @@ func TestPopulateAndCheckPotential(t *testing.T) { // candidate A has a relay parent out of scope. Candidates B and C // will also be deleted since they form a chain with A t.Run("candidate_A_relay_parent_out_of_scope", func(t *testing.T) { - newAncestors := []inclusionemulator.RelayChainBlockInfo{ + newAncestors := []RelayChainBlockInfo{ *relayParentBInfo, } - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, nil, @@ -1221,7 +1220,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) t.Run("candidate_A_and_B_out_of_scope_C_still_potential", func(t *testing.T) { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, nil, @@ -1262,9 +1261,9 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentCInfo.Number), ) - _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, Backed) + _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, backed) - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, nil, @@ -1281,7 +1280,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.ErrorIs(t, err, ErrCycle) // However, if taken independently, C still has potential, since we don't know A and B. 
- chain = NewFragmentChain(scope, NewCandidateStorage()) + chain = newFragmentChain(scope, newCandidateStorage()) require.NoError(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry)) }) @@ -1299,9 +1298,9 @@ func TestPopulateAndCheckPotential(t *testing.T) { 0, ) - _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, Backed) + _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, backed) - scope, err := NewScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 4, ancestors) + scope, err := newScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 4, ancestors) require.NoError(t, err) chain := populateFromPreviousStorage(scope, modifiedStorage) @@ -1330,9 +1329,9 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) unconnectedCandidateCHash, unconnectedCandidateCEntry := hashAndInsertCandididate(t, - modifiedStorage, unconnectedCandidateC, unconnectedCPvd, Backed) + modifiedStorage, unconnectedCandidateC, unconnectedCPvd, backed) - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, nil, @@ -1341,7 +1340,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) require.NoError(t, err) - chain := NewFragmentChain(scope, NewCandidateStorage()) + chain := newFragmentChain(scope, newCandidateStorage()) require.NoError(t, chain.CanAddCandidateAsPotential(unconnectedCandidateCEntry)) chain = populateFromPreviousStorage(scope, modifiedStorage) @@ -1370,13 +1369,13 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) modifiedCandidateAHash, _ := hashAndInsertCandididate(t, - modifiedStorage, modifiedCandidateA, modifiedAPvd, Backed) + modifiedStorage, modifiedCandidateA, modifiedAPvd, backed) - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendingAvailability{ - {CandidateHash: modifiedCandidateAHash, RelayParent: 
*relayParentBInfo}, + []*pendingAvailability{ + {candidateHash: modifiedCandidateAHash, relayParent: *relayParentBInfo}, }, 4, ancestors, @@ -1409,7 +1408,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) modifiedCandidateAHash, _ := hashAndInsertCandididate(t, - modifiedStorage, modifiedCandidateA, modifiedAPvd, Backed) + modifiedStorage, modifiedCandidateA, modifiedAPvd, backed) wrongCandidateCHead := parachaintypes.HeadData{Data: []byte{0x01}} wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, paraID, @@ -1420,19 +1419,19 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) wrongCandidateCHash, wrongCandidateCEntry := hashAndInsertCandididate(t, - modifiedStorage, wrongCandidateC, wrongPvdC, Backed) + modifiedStorage, wrongCandidateC, wrongPvdC, backed) // does not matter if the fork selection rule picks the new candidate // as the modified candidate A is pending availability require.Equal(t, -1, forkSelectionRule(wrongCandidateCHash, modifiedCandidateAHash)) - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendingAvailability{ + []*pendingAvailability{ { - CandidateHash: modifiedCandidateAHash, - RelayParent: *relayParentBInfo, + candidateHash: modifiedCandidateAHash, + relayParent: *relayParentBInfo, }, }, 4, @@ -1448,23 +1447,23 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) t.Run("multiple_pending_availability_candidates", func(t *testing.T) { - validOptions := [][]*PendingAvailability{ + validOptions := [][]*pendingAvailability{ { - {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, }, { - {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, - {CandidateHash: candidateBHash, RelayParent: *relayParentBInfo}, + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, + {candidateHash: candidateBHash, relayParent: *relayParentBInfo}, }, { - {CandidateHash: 
candidateAHash, RelayParent: *relayParentAInfo}, - {CandidateHash: candidateBHash, RelayParent: *relayParentBInfo}, - {CandidateHash: candidateCHash, RelayParent: *relayParentCInfo}, + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, + {candidateHash: candidateBHash, relayParent: *relayParentBInfo}, + {candidateHash: candidateCHash, relayParent: *relayParentCInfo}, }, } for _, pending := range validOptions { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, pending, @@ -1480,15 +1479,15 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) t.Run("relay_parents_of_pending_availability_candidates_can_be_out_of_scope", func(t *testing.T) { - ancestorsWithoutA := []inclusionemulator.RelayChainBlockInfo{ + ancestorsWithoutA := []RelayChainBlockInfo{ *relayParentBInfo, } - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendingAvailability{ - {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, + []*pendingAvailability{ + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, }, 4, ancestorsWithoutA, @@ -1501,21 +1500,21 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) t.Run("relay_parents_of_pending_availability_candidates_cannot_move_backwards", func(t *testing.T) { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, - []*PendingAvailability{ + []*pendingAvailability{ { - CandidateHash: candidateAHash, - RelayParent: inclusionemulator.RelayChainBlockInfo{ + candidateHash: candidateAHash, + relayParent: RelayChainBlockInfo{ Hash: relayParentAInfo.Hash, Number: 1, StorageRoot: relayParentAInfo.StorageRoot, }, }, { - CandidateHash: candidateBHash, - RelayParent: inclusionemulator.RelayChainBlockInfo{ + candidateHash: candidateBHash, + relayParent: RelayChainBlockInfo{ Hash: relayParentBInfo.Hash, Number: 0, StorageRoot: 
relayParentBInfo.StorageRoot, @@ -1523,7 +1522,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, }, 4, - []inclusionemulator.RelayChainBlockInfo{}, + []RelayChainBlockInfo{}, ) require.NoError(t, err) @@ -1533,7 +1532,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) t.Run("more_complex_case_with_multiple_candidates_and_constraints", func(t *testing.T) { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints, nil, @@ -1550,7 +1549,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { candidateDParaHead, uint32(relayParentCInfo.Number), ) - candidateDHash, candidateDEntry := hashAndGetEntry(t, candidateD, pvdD, Backed) + candidateDHash, candidateDEntry := hashAndGetEntry(t, candidateD, pvdD, backed) require.NoError(t, populateFromPreviousStorage(scope, storage). CanAddCandidateAsPotential(candidateDEntry)) require.NoError(t, storage.addCandidateEntry(candidateDEntry)) @@ -1564,7 +1563,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { candidateFParaHead, 1000, ) - candidateFHash, candidateFEntry := hashAndGetEntry(t, candidateF, pvdF, Seconded) + candidateFHash, candidateFEntry := hashAndGetEntry(t, candidateF, pvdF, seconded) require.NoError(t, populateFromPreviousStorage(scope, storage). CanAddCandidateAsPotential(candidateFEntry)) require.NoError(t, storage.addCandidateEntry(candidateFEntry)) @@ -1576,7 +1575,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { parachaintypes.HeadData{Data: []byte{0xb1}}, uint32(relayParentAInfo.Number), ) - candidateA1Hash, candidateA1Entry := hashAndGetEntry(t, candidateA1, pvdA1, Backed) + candidateA1Hash, candidateA1Entry := hashAndGetEntry(t, candidateA1, pvdA1, backed) // candidate A1 is created so that its hash is greater than the candidate A hash. 
require.Equal(t, -1, forkSelectionRule(candidateAHash, candidateA1Hash)) @@ -1593,7 +1592,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { parachaintypes.HeadData{Data: []byte{0xc1}}, uint32(relayParentAInfo.Number), ) - _, candidateB1Entry := hashAndGetEntry(t, candidateB1, pvdB1, Seconded) + _, candidateB1Entry := hashAndGetEntry(t, candidateB1, pvdB1, seconded) require.NoError(t, populateFromPreviousStorage(scope, storage). CanAddCandidateAsPotential(candidateB1Entry)) @@ -1606,7 +1605,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { parachaintypes.HeadData{Data: []byte{0xd1}}, uint32(relayParentAInfo.Number), ) - _, candidateC1Entry := hashAndGetEntry(t, candidateC1, pvdC1, Backed) + _, candidateC1Entry := hashAndGetEntry(t, candidateC1, pvdC1, backed) require.NoError(t, populateFromPreviousStorage(scope, storage). CanAddCandidateAsPotential(candidateC1Entry)) @@ -1620,7 +1619,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentAInfo.Number), ) - _, candidateC2Entry := hashAndGetEntry(t, candidateC2, pvdC2, Seconded) + _, candidateC2Entry := hashAndGetEntry(t, candidateC2, pvdC2, seconded) require.NoError(t, populateFromPreviousStorage(scope, storage). CanAddCandidateAsPotential(candidateC2Entry)) require.NoError(t, storage.addCandidateEntry(candidateC2Entry)) @@ -1633,7 +1632,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { candidateA2HeadData, uint32(relayParentAInfo.Number), ) - candidateA2Hash, candidateA2Entry := hashAndGetEntry(t, candidateA2, pvdA2, Seconded) + candidateA2Hash, candidateA2Entry := hashAndGetEntry(t, candidateA2, pvdA2, seconded) require.Equal(t, -1, forkSelectionRule(candidateA2Hash, candidateAHash)) require.NoError(t, populateFromPreviousStorage(scope, storage). 
@@ -1649,7 +1648,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { candidateB2HeadData, uint32(relayParentBInfo.Number), ) - candidateB2Hash, candidateB2Entry := hashAndGetEntry(t, candidateB2, pvdB2, Backed) + candidateB2Hash, candidateB2Entry := hashAndGetEntry(t, candidateB2, pvdB2, backed) require.NoError(t, populateFromPreviousStorage(scope, storage). CanAddCandidateAsPotential(candidateB2Entry)) @@ -1716,7 +1715,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentBInfo.Number), ) - candidateC3Hash, candidateC3Entry := hashAndGetEntry(t, candidateC3, candidateC3Pvd, Seconded) + candidateC3Hash, candidateC3Entry := hashAndGetEntry(t, candidateC3, candidateC3Pvd, seconded) // candidate c4 candidateC4HeadData := parachaintypes.HeadData{Data: []byte{0xc3}} @@ -1727,7 +1726,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentBInfo.Number), ) - candidateC4Hash, candidateC4Entry := hashAndGetEntry(t, candidateC4, candidateC4Pvd, Seconded) + candidateC4Hash, candidateC4Entry := hashAndGetEntry(t, candidateC4, candidateC4Pvd, seconded) // c4 should have a lower candidate hash than c3 require.Equal(t, -1, forkSelectionRule(candidateC4Hash, candidateC3Hash)) @@ -1785,7 +1784,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { uint32(relayParentCInfo.Number), ) - candidateEHash, _ := hashAndInsertCandididate(t, storage, candidateE, candidateEPvd, Seconded) + candidateEHash, _ := hashAndInsertCandididate(t, storage, candidateE, candidateEPvd, seconded) chain = populateFromPreviousStorage(scope, storage) require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) @@ -1802,12 +1801,12 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, unconnected) t.Run("simulate_candidates_A_B_C_are_pending_availability", func(t *testing.T) { - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( *relayParentCInfo, baseConstraints.Clone(), - 
[]*PendingAvailability{ - {CandidateHash: candidateAHash, RelayParent: *relayParentAInfo}, - {CandidateHash: candidateBHash, RelayParent: *relayParentBInfo}, - {CandidateHash: candidateCHash, RelayParent: *relayParentCInfo}, + []*pendingAvailability{ + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, + {candidateHash: candidateBHash, relayParent: *relayParentBInfo}, + {candidateHash: candidateCHash, relayParent: *relayParentCInfo}, }, 2, ancestors, @@ -1836,11 +1835,11 @@ func TestPopulateAndCheckPotential(t *testing.T) { // simulate the fact that candidate A, B and C have been included baseConstraints := makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0d}}) - scope, err = NewScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 2, ancestors) + scope, err = newScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 2, ancestors) require.NoError(t, err) prevChain := chain - chain = NewFragmentChain(scope, NewCandidateStorage()) + chain = newFragmentChain(scope, newCandidateStorage()) chain.PopulateFromPrevious(prevChain) require.Equal(t, []parachaintypes.CandidateHash{candidateDHash}, chain.BestChainVec()) @@ -1861,8 +1860,8 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Zero(t, chain.UnconnectedLen()) var expectedErr error = &ErrCheckAgainstConstraints{ - fragmentValidityErr: &inclusionemulator.ErrOutputsInvalid{ - ModificationError: &inclusionemulator.ErrDisallowedHrmpWatermark{ + fragmentValidityErr: &ErrOutputsInvalid{ + ModificationError: &ErrDisallowedHrmpWatermark{ BlockNumber: 1000, }, }, @@ -1877,15 +1876,15 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) } -func cloneFragmentChain(original *FragmentChain) *FragmentChain { +func cloneFragmentChain(original *fragmentChain) *fragmentChain { // Clone the scope - clonedScope := &Scope{ + clonedScope := &scope{ relayParent: original.scope.relayParent, baseConstraints: original.scope.baseConstraints.Clone(), - pendingAvailability: 
append([]*PendingAvailability(nil), original.scope.pendingAvailability...), + pendingAvailability: append([]*pendingAvailability(nil), original.scope.pendingAvailability...), maxDepth: original.scope.maxDepth, ancestors: original.scope.ancestors.Copy(), - ancestorsByHash: make(map[common.Hash]inclusionemulator.RelayChainBlockInfo), + ancestorsByHash: make(map[common.Hash]RelayChainBlockInfo), } for k, v := range original.scope.ancestorsByHash { @@ -1893,9 +1892,9 @@ func cloneFragmentChain(original *FragmentChain) *FragmentChain { } // Clone the best chain - clonedBestChain := NewBackedChain() + clonedBestChain := newBackedChain() for _, node := range original.bestChain.chain { - clonedNode := &FragmentNode{ + clonedNode := &fragmentNode{ fragment: node.fragment, candidateHash: node.candidateHash, parentHeadDataHash: node.parentHeadDataHash, @@ -1909,7 +1908,7 @@ func cloneFragmentChain(original *FragmentChain) *FragmentChain { clonedUnconnected := original.unconnected.Clone() // Create the cloned fragment chain - clonedFragmentChain := &FragmentChain{ + clonedFragmentChain := &fragmentChain{ scope: clonedScope, bestChain: clonedBestChain, unconnected: clonedUnconnected, @@ -1926,27 +1925,27 @@ func TestFindAncestorPathAndFindBackableChainEmptyBestChain(t *testing.T) { // Empty chain baseConstraints := makeConstraints(0, []uint{0}, requiredParent) - relayParentInfo := inclusionemulator.RelayChainBlockInfo{ + relayParentInfo := RelayChainBlockInfo{ Number: 0, Hash: relayParent, StorageRoot: common.Hash{}, } - scope, err := NewScopeWithAncestors(relayParentInfo, baseConstraints, nil, maxDepth, nil) + scope, err := newScopeWithAncestors(relayParentInfo, baseConstraints, nil, maxDepth, nil) require.NoError(t, err) - chain := NewFragmentChain(scope, NewCandidateStorage()) + chain := newFragmentChain(scope, newCandidateStorage()) assert.Equal(t, 0, chain.BestChainLen()) assert.Equal(t, 0, chain.findAncestorPath(map[parachaintypes.CandidateHash]struct{}{})) - 
assert.Equal(t, []*CandidateAndRelayParent{}, chain.FindBackableChain(map[parachaintypes.CandidateHash]struct{}{}, 2)) + assert.Equal(t, []*candidateAndRelayParent{}, chain.FindBackableChain(map[parachaintypes.CandidateHash]struct{}{}, 2)) // Invalid candidate ancestors := map[parachaintypes.CandidateHash]struct{}{ {Value: common.Hash{}}: {}, } assert.Equal(t, 0, chain.findAncestorPath(ancestors)) - assert.Equal(t, []*CandidateAndRelayParent{}, chain.FindBackableChain(ancestors, 2)) + assert.Equal(t, []*candidateAndRelayParent{}, chain.FindBackableChain(ancestors, 2)) } func TestFindAncestorPathAndFindBackableChain(t *testing.T) { @@ -1979,13 +1978,13 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { candidates = append(candidates, &CandidateAndPVD{candidate: candidate, pvd: candidatePvd}) } - storage := NewCandidateStorage() + storage := newCandidateStorage() for _, c := range candidates { candidateHash, err := c.candidate.Hash() require.NoError(t, err) - entry, err := NewCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, c.candidate, c.pvd, Seconded) + entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, c.candidate, c.pvd, seconded) require.NoError(t, err) err = storage.addCandidateEntry(entry) @@ -2001,27 +2000,27 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { type Ancestors = map[parachaintypes.CandidateHash]struct{} - hashes := func(from, to uint) []*CandidateAndRelayParent { - var output []*CandidateAndRelayParent + hashes := func(from, to uint) []*candidateAndRelayParent { + var output []*candidateAndRelayParent for i := from; i < to; i++ { - output = append(output, &CandidateAndRelayParent{ - CandidateHash: candidateHashes[i], - RealyParentHash: relayParent, + output = append(output, &candidateAndRelayParent{ + candidateHash: candidateHashes[i], + realyParentHash: relayParent, }) } return output } - relayParentInfo := inclusionemulator.RelayChainBlockInfo{ + relayParentInfo := 
RelayChainBlockInfo{ Number: uint(relayParentNumber), Hash: relayParent, StorageRoot: relayParentStorageRoot, } baseConstraints := makeConstraints(0, []uint{0}, requiredParent) - scope, err := NewScopeWithAncestors( + scope, err := newScopeWithAncestors( relayParentInfo, baseConstraints, nil, @@ -2076,14 +2075,14 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { require.Equal(t, 6, chain.BestChainLen()) for count := 0; count < 10; count++ { - var result []*CandidateAndRelayParent + var result []*candidateAndRelayParent if count > 6 { result = hashes(0, 6) } else { for i := 0; i < count && i < 6; i++ { - result = append(result, &CandidateAndRelayParent{ - CandidateHash: candidateHashes[i], - RealyParentHash: relayParent, + result = append(result, &candidateAndRelayParent{ + candidateHash: candidateHashes[i], + realyParentHash: relayParent, }) } } @@ -2110,7 +2109,7 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { // no ancestors supplied require.Equal(t, 0, chain.findAncestorPath(make(Ancestors))) - require.Equal(t, []*CandidateAndRelayParent(nil), chain.FindBackableChain(make(Ancestors), 0)) + require.Equal(t, []*candidateAndRelayParent(nil), chain.FindBackableChain(make(Ancestors), 0)) require.Equal(t, hashes(0, 1), chain.FindBackableChain(make(Ancestors), 1)) require.Equal(t, hashes(0, 2), chain.FindBackableChain(make(Ancestors), 2)) require.Equal(t, hashes(0, 5), chain.FindBackableChain(make(Ancestors), 5)) @@ -2179,9 +2178,9 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { require.Equal(t, hashes(0, 0), chain.FindBackableChain(maps.Clone(ancestors), 0)) // stop when we've found a candidate which is pending availability - scope, err := NewScopeWithAncestors(relayParentInfo, baseConstraints, - []*PendingAvailability{ - {CandidateHash: candidateHashes[3], RelayParent: relayParentInfo}, + scope, err := newScopeWithAncestors(relayParentInfo, baseConstraints, + []*pendingAvailability{ + {candidateHash: 
candidateHashes[3], relayParent: relayParentInfo}, }, maxDepth, nil, diff --git a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go b/dot/parachain/prospective-parachains/inclusion_emulator.go similarity index 97% rename from dot/parachain/util/inclusion-emulator/inclusion_emulator.go rename to dot/parachain/prospective-parachains/inclusion_emulator.go index 70260007a1..5bec335766 100644 --- a/dot/parachain/util/inclusion-emulator/inclusion_emulator.go +++ b/dot/parachain/prospective-parachains/inclusion_emulator.go @@ -1,4 +1,4 @@ -package inclusionemulator +package prospectiveparachains import ( "bytes" @@ -106,15 +106,6 @@ func (e *ErrValidationCodeMismatch) Error() string { return fmt.Sprintf("ValidationCodeMismatch(Expected: %v, Got: %v)", e.expected, e.got) } -type ErrPersistedValidationDataMismatch struct { - expected parachaintypes.PersistedValidationData - got parachaintypes.PersistedValidationData -} - -func (e *ErrPersistedValidationDataMismatch) Error() string { - return fmt.Sprintf("PersistedValidationDataMismatch(Expected: %v, Got: %v)", e.expected, e.got) -} - type ErrOutputsInvalid struct { ModificationError error } @@ -583,10 +574,8 @@ func validateAgainstConstraints( } if !expectedPVD.Equal(persistedValidationData) { - return &ErrPersistedValidationDataMismatch{ - expected: expectedPVD, - got: persistedValidationData, - } + return fmt.Errorf("%w, expected: %v, got: %v", + ErrPersistedValidationDataMismatch, expectedPVD, persistedValidationData) } if constraints.ValidationCodeHash != validationCodeHash { diff --git a/dot/parachain/types/async_backing.go b/dot/parachain/types/async_backing.go index 967f404a93..e35ec994dd 100644 --- a/dot/parachain/types/async_backing.go +++ b/dot/parachain/types/async_backing.go @@ -3,7 +3,10 @@ package parachaintypes -import "maps" +import ( + "maps" + "slices" +) // AsyncBackingParams contains the parameters for the async backing. 
type AsyncBackingParams struct { @@ -86,6 +89,7 @@ func (c *Constraints) Clone() *Constraints { ValidationCodeHash: c.FutureValidationCode.ValidationCodeHash, } } + return &Constraints{ MinRelayParentNumber: c.MinRelayParentNumber, MaxPoVSize: c.MaxPoVSize, @@ -93,9 +97,9 @@ func (c *Constraints) Clone() *Constraints { UmpRemaining: c.UmpRemaining, UmpRemainingBytes: c.UmpRemainingBytes, MaxUmpNumPerCandidate: c.MaxUmpNumPerCandidate, - DmpRemainingMessages: append([]uint(nil), c.DmpRemainingMessages...), + DmpRemainingMessages: slices.Clone(c.DmpRemainingMessages), HrmpInbound: InboundHrmpLimitations{ - ValidWatermarks: append([]uint(nil), c.HrmpInbound.ValidWatermarks...), + ValidWatermarks: slices.Clone(c.HrmpInbound.ValidWatermarks), }, HrmpChannelsOut: maps.Clone(c.HrmpChannelsOut), MaxHrmpNumPerCandidate: c.MaxHrmpNumPerCandidate, From 42a7345de3aa794a5b00bc28b57e4d156bb56b23 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 12 Dec 2024 10:24:13 -0400 Subject: [PATCH 26/31] chore: fix misspelling --- dot/parachain/prospective-parachains/fragment_chain.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dot/parachain/prospective-parachains/fragment_chain.go b/dot/parachain/prospective-parachains/fragment_chain.go index f9a0d714ba..aa8c7ef72e 100644 --- a/dot/parachain/prospective-parachains/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment_chain.go @@ -19,9 +19,9 @@ const ( backed ) -// forkSelectionRule does a normal comparision between 2 candidate hashes +// forkSelectionRule does a normal comparison between 2 candidate hashes // and returns -1 if the first hash is lower than the second one meaning that -// the first hash will be choosen as the best candidate. +// the first hash will be chosen as the best candidate. 
func forkSelectionRule(hash1, hash2 parachaintypes.CandidateHash) int { return bytes.Compare(hash1.Value[:], hash2.Value[:]) } From 71085e7c4cfdc5419b0f14a60362e1677e63e10f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Thu, 12 Dec 2024 12:58:07 -0400 Subject: [PATCH 27/31] chore: make types unexported --- .../prospective-parachains/errors.go | 44 +++++++++---------- .../prospective-parachains/fragment_chain.go | 38 ++++++++-------- .../fragment_chain_test.go | 44 +++++++++---------- .../inclusion_emulator.go | 2 +- dot/parachain/types/types.go | 4 +- 5 files changed, 67 insertions(+), 65 deletions(-) diff --git a/dot/parachain/prospective-parachains/errors.go b/dot/parachain/prospective-parachains/errors.go index bcc3c6e62c..42c4ed7f24 100644 --- a/dot/parachain/prospective-parachains/errors.go +++ b/dot/parachain/prospective-parachains/errors.go @@ -9,73 +9,73 @@ import ( ) var ( - ErrCandidateAlreadyKnown = errors.New("candidate already known") - ErrZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") //nolint:lll - ErrCycle = errors.New("candidate would introduce a cycle") - ErrMultiplePaths = errors.New("candidate would introduce two paths to the same output state") - ErrIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. It should first be introduced as Seconded") //nolint:lll - ErrParentCandidateNotFound = errors.New("could not find parent of the candidate") - ErrRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") //nolint:lll - ErrPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") //nolint:lll + errCandidateAlreadyKnown = errors.New("candidate already known") + errZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. 
Would introduce a cycle") //nolint:lll + errCycle = errors.New("candidate would introduce a cycle") + errMultiplePaths = errors.New("candidate would introduce two paths to the same output state") + errIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. It should first be introduced as Seconded") //nolint:lll + errParentCandidateNotFound = errors.New("could not find parent of the candidate") + errRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") //nolint:lll + errPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") //nolint:lll ) -type ErrRelayParentPrecedesCandidatePendingAvailability struct { +type errRelayParentPrecedesCandidatePendingAvailability struct { relayParentA, relayParentB common.Hash } -func (e ErrRelayParentPrecedesCandidatePendingAvailability) Error() string { +func (e errRelayParentPrecedesCandidatePendingAvailability) Error() string { return fmt.Sprintf("relay parent %x of the candidate precedes the relay parent %x of a pending availability candidate", e.relayParentA, e.relayParentB) } -type ErrForkWithCandidatePendingAvailability struct { +type errForkWithCandidatePendingAvailability struct { candidateHash parachaintypes.CandidateHash } -func (e ErrForkWithCandidatePendingAvailability) Error() string { +func (e errForkWithCandidatePendingAvailability) Error() string { return fmt.Sprintf("candidate would introduce a fork with a pending availability candidate: %x", e.candidateHash.Value) } -type ErrForkChoiceRule struct { +type errForkChoiceRule struct { candidateHash parachaintypes.CandidateHash } -func (e ErrForkChoiceRule) Error() string { +func (e errForkChoiceRule) Error() string { return fmt.Sprintf("fork selection rule favours another candidate: %x", e.candidateHash.Value) } -type ErrComputeConstraints struct { +type errComputeConstraints struct { modificationErr error 
} -func (e ErrComputeConstraints) Error() string { +func (e errComputeConstraints) Error() string { return fmt.Sprintf("could not compute candidate constraints: %s", e.modificationErr) } -type ErrCheckAgainstConstraints struct { +type errCheckAgainstConstraints struct { fragmentValidityErr error } -func (e ErrCheckAgainstConstraints) Error() string { +func (e errCheckAgainstConstraints) Error() string { return fmt.Sprintf("candidate violates constraints: %s", e.fragmentValidityErr) } -type ErrRelayParentNotInScope struct { +type errRelayParentNotInScope struct { relayParentA, relayParentB common.Hash } -func (e ErrRelayParentNotInScope) Error() string { +func (e errRelayParentNotInScope) Error() string { return fmt.Sprintf("relay parent %s not in scope, earliest relay parent allowed %s", e.relayParentA.String(), e.relayParentB.String()) } -type ErrUnexpectedAncestor struct { +type errUnexpectedAncestor struct { // The block number that this error occurred at Number uint // The previous seen block number, which did not match `number`. 
Prev uint } -func (e ErrUnexpectedAncestor) Error() string { +func (e errUnexpectedAncestor) Error() string { return fmt.Sprintf("unexpected ancestor %d, expected %d", e.Number, e.Prev) } diff --git a/dot/parachain/prospective-parachains/fragment_chain.go b/dot/parachain/prospective-parachains/fragment_chain.go index aa8c7ef72e..729792f40f 100644 --- a/dot/parachain/prospective-parachains/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment_chain.go @@ -48,7 +48,7 @@ func newCandidateEntry( } if pvdHash != candidate.Descriptor.PersistedValidationDataHash { - return nil, ErrPersistedValidationDataMismatch + return nil, errPersistedValidationDataMismatch } parentHeadDataHash, err := persistedValidationData.ParentHead.Hash() @@ -62,7 +62,7 @@ func newCandidateEntry( } if parentHeadDataHash == outputHeadDataHash { - return nil, ErrZeroLengthCycle + return nil, errZeroLengthCycle } return &candidateEntry{ @@ -157,7 +157,7 @@ func (c *candidateStorage) Len() int { func (c *candidateStorage) addCandidateEntry(candidate *candidateEntry) error { _, ok := c.byCandidateHash[candidate.candidateHash] if ok { - return ErrCandidateAlreadyKnown + return errCandidateAlreadyKnown } // updates the reference parent hash -> candidate @@ -307,11 +307,11 @@ func newScopeWithAncestors( prev := relayParent.Number for _, ancestor := range ancestors { if prev == 0 { - return nil, ErrUnexpectedAncestor{Number: ancestor.Number, Prev: prev} + return nil, errUnexpectedAncestor{Number: ancestor.Number, Prev: prev} } if ancestor.Number != prev-1 { - return nil, ErrUnexpectedAncestor{Number: ancestor.Number, Prev: prev} + return nil, errUnexpectedAncestor{Number: ancestor.Number, Prev: prev} } if prev == baseConstraints.MinRelayParentNumber { @@ -606,7 +606,7 @@ func (f *fragmentChain) CanAddCandidateAsPotential(entry *candidateEntry) error _, existsInCandidateStorage := f.unconnected.byCandidateHash[candidateHash] if f.bestChain.Contains(candidateHash) || existsInCandidateStorage 
{ - return ErrCandidateAlreadyKnown + return errCandidateAlreadyKnown } return f.checkPotential(entry) @@ -617,7 +617,7 @@ func (f *fragmentChain) CanAddCandidateAsPotential(entry *candidateEntry) error // state, it will only be part of the unconnected storage func (f *fragmentChain) TryAddingSecondedCandidate(entry *candidateEntry) error { if entry.state == backed { - return ErrIntroduceBackedCandidate + return errIntroduceBackedCandidate } err := f.CanAddCandidateAsPotential(entry) @@ -791,13 +791,13 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { // trivial 0-length cycle if candidate.outputHeadDataHash == parentHeadHash { - return ErrZeroLengthCycle + return errZeroLengthCycle } // Check if the relay parent is in scope relayParentInfo := f.scope.Ancestor(relayParent) if relayParentInfo == nil { - return ErrRelayParentNotInScope{ + return errRelayParentNotInScope{ relayParentA: relayParent, relayParentB: f.scope.EarliestRelayParent().Hash, } @@ -806,7 +806,7 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { // Check if the relay parent moved backwards from the latest candidate pending availability earliestRPOfPendingAvailability := f.earliestRelayParentPendingAvailability() if relayParentInfo.Number < earliestRPOfPendingAvailability.Number { - return ErrRelayParentPrecedesCandidatePendingAvailability{ + return errRelayParentPrecedesCandidatePendingAvailability{ relayParentA: relayParentInfo.Hash, relayParentB: earliestRPOfPendingAvailability.Hash, } @@ -816,13 +816,13 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { if otherCandidateHash, ok := f.bestChain.byParentHead[parentHeadHash]; ok { if f.scope.GetPendingAvailability(otherCandidateHash) != nil { // Cannot accept a fork with a candidate pending availability - return ErrForkWithCandidatePendingAvailability{candidateHash: otherCandidateHash} + return errForkWithCandidatePendingAvailability{candidateHash: otherCandidateHash} } 
// If the candidate is backed and in the current chain, accept only a candidate // according to the fork selection rule if forkSelectionRule(otherCandidateHash, candidate.candidateHash) == -1 { - return ErrForkChoiceRule{candidateHash: otherCandidateHash} + return errForkChoiceRule{candidateHash: otherCandidateHash} } } @@ -847,7 +847,7 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { } if parentCandidate == nil { - return ErrParentCandidateNotFound + return errParentCandidateNotFound } var err error @@ -855,7 +855,7 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { f.scope.baseConstraints, parentCandidate.cumulativeModifications) if err != nil { - return ErrComputeConstraints{modificationErr: err} + return errComputeConstraints{modificationErr: err} } if ancestor := f.scope.Ancestor(parentCandidate.relayParent()); ancestor != nil { @@ -883,15 +883,15 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { candidate.candidate.PersistedValidationData, ) if err != nil { - return ErrCheckAgainstConstraints{fragmentValidityErr: err} + return errCheckAgainstConstraints{fragmentValidityErr: err} } if relayParentInfo.Number < constraints.MinRelayParentNumber { - return ErrRelayParentMovedBackwards + return errRelayParentMovedBackwards } if maybeMinRelayParentNumber != nil && relayParentInfo.Number < *maybeMinRelayParentNumber { - return ErrRelayParentMovedBackwards + return errRelayParentMovedBackwards } return nil @@ -1134,13 +1134,13 @@ func (f *fragmentChain) checkCyclesOrInvalidTree(outputHeadDataHash common.Hash) // of some candidate in the chain _, ok := f.bestChain.byParentHead[outputHeadDataHash] if ok { - return ErrCycle + return errCycle } // multiple paths to the same state, which cannot happen for a chain _, ok = f.bestChain.byOutputHead[outputHeadDataHash] if ok { - return ErrMultiplePaths + return errMultiplePaths } return nil diff --git 
a/dot/parachain/prospective-parachains/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment_chain_test.go index 4eb1075ee5..ccf6cbd301 100644 --- a/dot/parachain/prospective-parachains/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment_chain_test.go @@ -588,7 +588,7 @@ func TestScopeRejectsAncestors(t *testing.T) { baseConstraints: makeConstraints(8, []uint{8, 9}, parachaintypes.HeadData{Data: []byte{0x01, 0x02, 0x03}}), pendingAvailability: make([]*pendingAvailability, 0), - expectedError: ErrUnexpectedAncestor{Number: 8, Prev: 10}, + expectedError: errUnexpectedAncestor{Number: 8, Prev: 10}, }, "rejects_ancestor_for_zero_block": { relayParent: &RelayChainBlockInfo{ @@ -606,7 +606,7 @@ func TestScopeRejectsAncestors(t *testing.T) { maxDepth: 2, baseConstraints: makeConstraints(0, []uint{}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), pendingAvailability: make([]*pendingAvailability, 0), - expectedError: ErrUnexpectedAncestor{Number: 99999, Prev: 0}, + expectedError: errUnexpectedAncestor{Number: 99999, Prev: 0}, }, "rejects_unordered_ancestors": { relayParent: &RelayChainBlockInfo{ @@ -634,7 +634,7 @@ func TestScopeRejectsAncestors(t *testing.T) { maxDepth: 2, baseConstraints: makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), pendingAvailability: make([]*pendingAvailability, 0), - expectedError: ErrUnexpectedAncestor{Number: 2, Prev: 4}, + expectedError: errUnexpectedAncestor{Number: 2, Prev: 4}, }, } @@ -715,7 +715,7 @@ func TestCandidateStorageMethods(t *testing.T) { entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, candidate, wrongPvd, seconded) - require.ErrorIs(t, err, ErrPersistedValidationDataMismatch) + require.ErrorIs(t, err, errPersistedValidationDataMismatch) require.Nil(t, entry) }, }, @@ -747,7 +747,7 @@ func TestCandidateStorageMethods(t *testing.T) { entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, 
candidate, pvd, seconded) require.Nil(t, entry) - require.ErrorIs(t, err, ErrZeroLengthCycle) + require.ErrorIs(t, err, errZeroLengthCycle) }, }, @@ -794,7 +794,7 @@ func TestCandidateStorageMethods(t *testing.T) { // re-add the candidate should fail err = storage.addCandidateEntry(entry) - require.ErrorIs(t, err, ErrCandidateAlreadyKnown) + require.ErrorIs(t, err, errCandidateAlreadyKnown) }) t.Run("mark_candidate_entry_as_backed", func(t *testing.T) { @@ -1096,7 +1096,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // if A is not a potential candidate, its descendants will also not be added. require.Equal(t, chain.UnconnectedLen(), 0) err := chain.CanAddCandidateAsPotential(candidateAEntry) - require.ErrorIs(t, err, ErrRelayParentNotInScope{ + require.ErrorIs(t, err, errRelayParentNotInScope{ relayParentA: relayParentAHash, // candidate A has relay parent A relayParentB: relayParentBHash, // while the constraint is expecting at least relay parent B }) @@ -1209,7 +1209,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, 0, chain.UnconnectedLen()) require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), - ErrRelayParentNotInScope{ + errRelayParentNotInScope{ relayParentA: relayParentAHash, relayParentB: relayParentBHash, }) @@ -1233,14 +1233,14 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, 0, chain.UnconnectedLen()) require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), - ErrRelayParentNotInScope{ + errRelayParentNotInScope{ relayParentA: relayParentAHash, relayParentB: relayParentCHash, }) // however if taken indepently, both B and C still have potential require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateBEntry), - ErrRelayParentNotInScope{ + errRelayParentNotInScope{ relayParentA: relayParentBHash, relayParentB: relayParentCHash, }) @@ -1277,7 +1277,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, 0, chain.UnconnectedLen()) err = 
chain.CanAddCandidateAsPotential(wrongCandidateCEntry) - require.ErrorIs(t, err, ErrCycle) + require.ErrorIs(t, err, errCycle) // However, if taken independently, C still has potential, since we don't know A and B. chain = newFragmentChain(scope, newCandidateStorage()) @@ -1307,7 +1307,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec()) require.Equal(t, 0, chain.UnconnectedLen()) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), ErrRelayParentMovedBackwards) + require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), errRelayParentMovedBackwards) }) t.Run("unconnected_candidate_C", func(t *testing.T) { @@ -1388,7 +1388,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.ErrorIs(t, chain.CanAddCandidateAsPotential(unconnectedCandidateCEntry), - ErrRelayParentPrecedesCandidatePendingAvailability{ + errRelayParentPrecedesCandidatePendingAvailability{ relayParentA: relayParentAHash, relayParentB: relayParentBHash, }) @@ -1441,7 +1441,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { chain := populateFromPreviousStorage(scope, modifiedStorage) require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.BestChainVec()) require.Equal(t, 0, chain.UnconnectedLen()) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), ErrForkWithCandidatePendingAvailability{ + require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), errForkWithCandidatePendingAvailability{ candidateHash: modifiedCandidateAHash, }) }) @@ -1581,7 +1581,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, -1, forkSelectionRule(candidateAHash, candidateA1Hash)) require.ErrorIs(t, populateFromPreviousStorage(scope, storage). 
CanAddCandidateAsPotential(candidateA1Entry), - ErrForkChoiceRule{candidateHash: candidateAHash}) + errForkChoiceRule{candidateHash: candidateAHash}) require.NoError(t, storage.addCandidateEntry(candidateA1Entry)) @@ -1671,8 +1671,8 @@ func TestPopulateAndCheckPotential(t *testing.T) { assert.Equal(t, expectedUnconnected, unconnectedHashes) // Cannot add as potential an already present candidate (whether it's in the best chain or in unconnected storage) - assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlreadyKnown) - assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateFEntry), ErrCandidateAlreadyKnown) + assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) + assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateFEntry), errCandidateAlreadyKnown) t.Run("simulate_best_chain_reorg", func(t *testing.T) { // back a2, the reversion should happen at the root. @@ -1692,10 +1692,10 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, unconnected) // candidates A1 and A will never have potential again - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateA1Entry), ErrForkChoiceRule{ + require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateA1Entry), errForkChoiceRule{ candidateHash: candidateA2Hash, }) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrForkChoiceRule{ + require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), errForkChoiceRule{ candidateHash: candidateA2Hash, }) }) @@ -1831,7 +1831,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, unconnectedHashes) // cannot add as potential an already pending availability candidate - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), ErrCandidateAlreadyKnown) + require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) // simulate the fact that candidate A, B and C have been included baseConstraints := 
makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0d}}) @@ -1859,7 +1859,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, []parachaintypes.CandidateHash{candidateDHash, candidateEHash}, chain.BestChainVec()) require.Zero(t, chain.UnconnectedLen()) - var expectedErr error = &ErrCheckAgainstConstraints{ + var expectedErr error = &errCheckAgainstConstraints{ fragmentValidityErr: &ErrOutputsInvalid{ ModificationError: &ErrDisallowedHrmpWatermark{ BlockNumber: 1000, @@ -1867,7 +1867,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, } - errCheckAgainstConstraints := new(ErrCheckAgainstConstraints) + errCheckAgainstConstraints := new(errCheckAgainstConstraints) err = chain.CanAddCandidateAsPotential(candidateFEntry) require.True(t, errors.As(err, errCheckAgainstConstraints)) diff --git a/dot/parachain/prospective-parachains/inclusion_emulator.go b/dot/parachain/prospective-parachains/inclusion_emulator.go index 5bec335766..0018d60937 100644 --- a/dot/parachain/prospective-parachains/inclusion_emulator.go +++ b/dot/parachain/prospective-parachains/inclusion_emulator.go @@ -575,7 +575,7 @@ func validateAgainstConstraints( if !expectedPVD.Equal(persistedValidationData) { return fmt.Errorf("%w, expected: %v, got: %v", - ErrPersistedValidationDataMismatch, expectedPVD, persistedValidationData) + errPersistedValidationDataMismatch, expectedPVD, persistedValidationData) } if constraints.ValidationCodeHash != validationCodeHash { diff --git a/dot/parachain/types/types.go b/dot/parachain/types/types.go index bcbd370e98..17f1e2f800 100644 --- a/dot/parachain/types/types.go +++ b/dot/parachain/types/types.go @@ -747,11 +747,13 @@ type Subsystem interface { Stop() } +// Present is a variant of UpgradeRestriction enumerator that signals +// a upgrade restriction is present and there are no details about its +// specifics nor how long it could last type Present struct{} // UpgradeRestriction a possible restriction that prevents a 
parachain // from performing an upgrade -// TODO: should be scale encoded/decoded type UpgradeRestriction struct { inner any } From da18a0b005942bfd476da810819e513d33626f3e Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Fri, 13 Dec 2024 10:17:21 -0400 Subject: [PATCH 28/31] Trigger Build From d16b056073f905f82c6d6ccacf0102c631705a12 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sat, 14 Dec 2024 14:44:32 -0400 Subject: [PATCH 29/31] chore: addressing comments --- .../prospective-parachains/errors.go | 139 ++++++- .../prospective-parachains/fragment_chain.go | 164 ++++----- .../fragment_chain_test.go | 342 +++++++++--------- .../inclusion_emulator.go | 288 ++++----------- dot/parachain/types/async_backing.go | 2 +- 5 files changed, 456 insertions(+), 479 deletions(-) diff --git a/dot/parachain/prospective-parachains/errors.go b/dot/parachain/prospective-parachains/errors.go index 42c4ed7f24..58cbdedcd3 100644 --- a/dot/parachain/prospective-parachains/errors.go +++ b/dot/parachain/prospective-parachains/errors.go @@ -17,6 +17,9 @@ var ( errParentCandidateNotFound = errors.New("could not find parent of the candidate") errRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") //nolint:lll errPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") //nolint:lll + errAppliedNonexistentCodeUpgrade = errors.New("applied non existent code upgrade") + errDmpAdvancementRule = errors.New("dmp advancement rule") + errCodeUpgradeRestricted = errors.New("code upgrade restricted") ) type errRelayParentPrecedesCandidatePendingAvailability struct { @@ -71,11 +74,141 @@ func (e errRelayParentNotInScope) Error() string { type errUnexpectedAncestor struct { // The block number that this error occurred at - Number uint + number uint // The previous seen block number, which did not match `number`. 
- Prev uint + prev uint } func (e errUnexpectedAncestor) Error() string { - return fmt.Sprintf("unexpected ancestor %d, expected %d", e.Number, e.Prev) + return fmt.Sprintf("unexpected ancestor %d, expected %d", e.number, e.prev) +} + +type errDisallowedHrmpWatermark struct { + BlockNumber uint +} + +func (e *errDisallowedHrmpWatermark) Error() string { + return fmt.Sprintf("DisallowedHrmpWatermark(BlockNumber: %d)", e.BlockNumber) +} + +type errNoSuchHrmpChannel struct { + paraID parachaintypes.ParaID +} + +func (e *errNoSuchHrmpChannel) Error() string { + return fmt.Sprintf("NoSuchHrmpChannel(ParaId: %d)", e.paraID) +} + +type errHrmpMessagesOverflow struct { + paraID parachaintypes.ParaID + messagesRemaining uint32 + messagesSubmitted uint32 +} + +func (e *errHrmpMessagesOverflow) Error() string { + return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", + e.paraID, e.messagesRemaining, e.messagesSubmitted) +} + +type errHrmpBytesOverflow struct { + paraID parachaintypes.ParaID + bytesRemaining uint32 + bytesSubmitted uint32 +} + +func (e *errHrmpBytesOverflow) Error() string { + return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", + e.paraID, e.bytesRemaining, e.bytesSubmitted) +} + +type errUmpMessagesOverflow struct { + messagesRemaining uint32 + messagesSubmitted uint32 +} + +func (e *errUmpMessagesOverflow) Error() string { + return fmt.Sprintf("UmpMessagesOverflow(MessagesRemaining: %d, MessagesSubmitted: %d)", + e.messagesRemaining, e.messagesSubmitted) +} + +type errUmpBytesOverflow struct { + bytesRemaining uint32 + bytesSubmitted uint32 +} + +func (e *errUmpBytesOverflow) Error() string { + return fmt.Sprintf("UmpBytesOverflow(BytesRemaining: %d, BytesSubmitted: %d)", e.bytesRemaining, e.bytesSubmitted) +} + +type errDmpMessagesUnderflow struct { + messagesRemaining uint32 + messagesProcessed uint32 +} + +func (e *errDmpMessagesUnderflow) Error() string { + return 
fmt.Sprintf("DmpMessagesUnderflow(MessagesRemaining: %d, MessagesProcessed: %d)", + e.messagesRemaining, e.messagesProcessed) +} + +type errValidationCodeMismatch struct { + expected parachaintypes.ValidationCodeHash + got parachaintypes.ValidationCodeHash +} + +func (e *errValidationCodeMismatch) Error() string { + return fmt.Sprintf("ValidationCodeMismatch(Expected: %v, Got: %v)", e.expected, e.got) +} + +type errOutputsInvalid struct { + ModificationError error +} + +func (e *errOutputsInvalid) Error() string { + return fmt.Sprintf("OutputsInvalid(ModificationError: %v)", e.ModificationError) +} + +type errCodeSizeTooLarge struct { + maxAllowed uint32 + newSize uint32 +} + +func (e *errCodeSizeTooLarge) Error() string { + return fmt.Sprintf("CodeSizeTooLarge(MaxAllowed: %d, NewSize: %d)", e.maxAllowed, e.newSize) +} + +type errRelayParentTooOld struct { + minAllowed uint + current uint +} + +func (e *errRelayParentTooOld) Error() string { + return fmt.Sprintf("RelayParentTooOld(MinAllowed: %d, Current: %d)", e.minAllowed, e.current) +} + +type errUmpMessagesPerCandidateOverflow struct { + messagesAllowed uint32 + messagesSubmitted uint32 +} + +func (e *errUmpMessagesPerCandidateOverflow) Error() string { + return fmt.Sprintf("UmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", + e.messagesAllowed, e.messagesSubmitted) +} + +type errHrmpMessagesPerCandidateOverflow struct { + messagesAllowed uint32 + messagesSubmitted uint32 +} + +func (e *errHrmpMessagesPerCandidateOverflow) Error() string { + return fmt.Sprintf("HrmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", + e.messagesAllowed, e.messagesSubmitted) +} + +type errHrmpMessagesDescendingOrDuplicate struct { + index uint +} + +func (e *errHrmpMessagesDescendingOrDuplicate) Error() string { + return fmt.Sprintf("HrmpMessagesDescendingOrDuplicate(Index: %d)", e.index) } diff --git a/dot/parachain/prospective-parachains/fragment_chain.go 
b/dot/parachain/prospective-parachains/fragment_chain.go index 729792f40f..6717067663 100644 --- a/dot/parachain/prospective-parachains/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment_chain.go @@ -32,7 +32,7 @@ type candidateEntry struct { parentHeadDataHash common.Hash outputHeadDataHash common.Hash relayParent common.Hash - candidate *ProspectiveCandidate + candidate *prospectiveCandidate state candidateState } @@ -71,7 +71,7 @@ func newCandidateEntry( outputHeadDataHash: outputHeadDataHash, relayParent: candidate.Descriptor.RelayParent, state: state, - candidate: &ProspectiveCandidate{ + candidate: &prospectiveCandidate{ Commitments: candidate.Commitments, PersistedValidationData: persistedValidationData, PoVHash: candidate.Descriptor.PovHash, @@ -89,7 +89,7 @@ type candidateStorage struct { byCandidateHash map[parachaintypes.CandidateHash]*candidateEntry } -func (c *candidateStorage) Clone() *candidateStorage { +func (c *candidateStorage) clone() *candidateStorage { clone := newCandidateStorage() for parentHead, candidates := range c.byParentHead { @@ -128,7 +128,7 @@ func newCandidateStorage() *candidateStorage { } } -func (c *candidateStorage) AddPendingAvailabilityCandidate( +func (c *candidateStorage) addPendingAvailabilityCandidate( candidateHash parachaintypes.CandidateHash, candidate parachaintypes.CommittedCandidateReceipt, persistedValidationData parachaintypes.PersistedValidationData, @@ -146,7 +146,7 @@ func (c *candidateStorage) AddPendingAvailabilityCandidate( } // Len return the number of stored candidate -func (c *candidateStorage) Len() int { +func (c *candidateStorage) len() int { return len(c.byCandidateHash) } @@ -263,19 +263,19 @@ func (c *candidateStorage) possibleBackedParaChildren(parentHeadHash common.Hash // treatment in the `scope` type pendingAvailability struct { candidateHash parachaintypes.CandidateHash - relayParent RelayChainBlockInfo + relayParent relayChainBlockInfo } // The scope of a fragment chain type 
scope struct { // the relay parent we're currently building on top of - relayParent RelayChainBlockInfo + relayParent relayChainBlockInfo // the other relay parents candidates are allowed to build upon, // mapped by the block number - ancestors *btree.Map[uint, RelayChainBlockInfo] + ancestors *btree.Map[uint, relayChainBlockInfo] // the other relay parents candidates are allowed to build upon, // mapped by hash - ancestorsByHash map[common.Hash]RelayChainBlockInfo + ancestorsByHash map[common.Hash]relayChainBlockInfo // candidates pending availability at this block pendingAvailability []*pendingAvailability // the base constraints derived from the latest included candidate @@ -295,23 +295,23 @@ type scope struct { // Only ancestor whose children have the same session id as the relay parent's children // should be provided. It is allowed to provide 0 ancestors. func newScopeWithAncestors( - relayParent RelayChainBlockInfo, + relayParent relayChainBlockInfo, baseConstraints *parachaintypes.Constraints, pendingAvailability []*pendingAvailability, maxDepth uint, - ancestors []RelayChainBlockInfo, + ancestors []relayChainBlockInfo, ) (*scope, error) { - ancestorsMap := btree.NewMap[uint, RelayChainBlockInfo](100) - ancestorsByHash := make(map[common.Hash]RelayChainBlockInfo) + ancestorsMap := btree.NewMap[uint, relayChainBlockInfo](100) + ancestorsByHash := make(map[common.Hash]relayChainBlockInfo) prev := relayParent.Number for _, ancestor := range ancestors { if prev == 0 { - return nil, errUnexpectedAncestor{Number: ancestor.Number, Prev: prev} + return nil, errUnexpectedAncestor{number: ancestor.Number, prev: prev} } if ancestor.Number != prev-1 { - return nil, errUnexpectedAncestor{Number: ancestor.Number, Prev: prev} + return nil, errUnexpectedAncestor{number: ancestor.Number, prev: prev} } if prev == baseConstraints.MinRelayParentNumber { @@ -333,8 +333,8 @@ func newScopeWithAncestors( }, nil } -// EarliestRelayParent gets the earliest relay-parent allowed in 
the scope of the fragment chain. -func (s *scope) EarliestRelayParent() RelayChainBlockInfo { +// earliestRelayParent gets the earliest relay-parent allowed in the scope of the fragment chain. +func (s *scope) earliestRelayParent() relayChainBlockInfo { if iter := s.ancestors.Iter(); iter.Next() { return iter.Value() } @@ -342,7 +342,7 @@ func (s *scope) EarliestRelayParent() RelayChainBlockInfo { } // Ancestor gets the relay ancestor of the fragment chain by hash. -func (s *scope) Ancestor(hash common.Hash) *RelayChainBlockInfo { +func (s *scope) ancestor(hash common.Hash) *relayChainBlockInfo { if hash == s.relayParent.Hash { return &s.relayParent } @@ -355,7 +355,7 @@ func (s *scope) Ancestor(hash common.Hash) *RelayChainBlockInfo { } // Whether the candidate in question is one pending availability in this scope. -func (s *scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHash) *pendingAvailability { +func (s *scope) getPendingAvailability(candidateHash parachaintypes.CandidateHash) *pendingAvailability { for _, c := range s.pendingAvailability { if c.candidateHash == candidateHash { return c @@ -369,7 +369,7 @@ func (s *scope) GetPendingAvailability(candidateHash parachaintypes.CandidateHas type fragmentNode struct { fragment *Fragment candidateHash parachaintypes.CandidateHash - cumulativeModifications *ConstraintModifications + cumulativeModifications *constraintModifications parentHeadDataHash common.Hash outputHeadDataHash common.Hash } @@ -420,14 +420,14 @@ func newBackedChain() *backedChain { } } -func (bc *backedChain) Push(candidate *fragmentNode) { +func (bc *backedChain) push(candidate *fragmentNode) { bc.candidates[candidate.candidateHash] = struct{}{} bc.byParentHead[candidate.parentHeadDataHash] = candidate.candidateHash bc.byOutputHead[candidate.outputHeadDataHash] = candidate.candidateHash bc.chain = append(bc.chain, candidate) } -func (bc *backedChain) Clear() []*fragmentNode { +func (bc *backedChain) clear() []*fragmentNode { 
bc.byParentHead = make(map[common.Hash]parachaintypes.CandidateHash) bc.byOutputHead = make(map[common.Hash]parachaintypes.CandidateHash) bc.candidates = make(map[parachaintypes.CandidateHash]struct{}) @@ -437,7 +437,7 @@ func (bc *backedChain) Clear() []*fragmentNode { return oldChain } -func (bc *backedChain) RevertToParentHash(parentHeadDataHash common.Hash) []*fragmentNode { +func (bc *backedChain) revertToParentHash(parentHeadDataHash common.Hash) []*fragmentNode { foundIndex := -1 for i := 0; i < len(bc.chain); i++ { @@ -465,11 +465,6 @@ func (bc *backedChain) RevertToParentHash(parentHeadDataHash common.Hash) []*fra return nil } -func (bc *backedChain) Contains(hash parachaintypes.CandidateHash) bool { - _, ok := bc.candidates[hash] - return ok -} - // this is a fragment chain specific to an active leaf. It holds the current // best backable candidate chain, as well as potential candidates which could // become connected to the chain in the future or which could even overwrite @@ -506,10 +501,10 @@ func newFragmentChain(scope *scope, candidatesPendingAvailability *candidateStor return fragmentChain } -// PopulateFromPrevious populates the `fragmentChain` given the new candidates pending +// populateFromPrevious populates the `fragmentChain` given the new candidates pending // availability and the optional previous fragment chain (of the previous relay parent) -func (f *fragmentChain) PopulateFromPrevious(prevFragmentChain *fragmentChain) { - prevStorage := prevFragmentChain.unconnected.Clone() +func (f *fragmentChain) populateFromPrevious(prevFragmentChain *fragmentChain) { + prevStorage := prevFragmentChain.unconnected.clone() for _, candidate := range prevFragmentChain.bestChain.chain { // if they used to be pending availability, dont add them. 
This is fine because: // - if they still are pending availability, they have already been added to @@ -518,7 +513,7 @@ func (f *fragmentChain) PopulateFromPrevious(prevFragmentChain *fragmentChain) { // // This cannot happen for the candidates in the unconnected storage. The pending // availability candidates will always be part of the best chain - pending := prevFragmentChain.scope.GetPendingAvailability(candidate.candidateHash) + pending := prevFragmentChain.scope.getPendingAvailability(candidate.candidateHash) if pending == nil { _ = prevStorage.addCandidateEntry(newCandidateEntryFromFragment(candidate)) } @@ -535,25 +530,17 @@ func (f *fragmentChain) PopulateFromPrevious(prevFragmentChain *fragmentChain) { f.populateUnconnectedPotentialCandidates(prevStorage) } -func (f *fragmentChain) Scope() *scope { - return f.scope -} - -func (f *fragmentChain) BestChainLen() int { +func (f *fragmentChain) bestChainLen() int { return len(f.bestChain.chain) } -func (f *fragmentChain) UnconnectedLen() int { - return f.unconnected.Len() -} - -func (f *fragmentChain) ContainsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { +func (f *fragmentChain) containsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { _, ok := f.unconnected.byCandidateHash[candidateHash] return ok } -// BestChainVec returns a vector of the chain's candidate hashes, in-order. -func (f *fragmentChain) BestChainVec() (hashes []parachaintypes.CandidateHash) { +// bestChainVec returns a vector of the chain's candidate hashes, in-order. 
+func (f *fragmentChain) bestChainVec() (hashes []parachaintypes.CandidateHash) { hashes = make([]parachaintypes.CandidateHash, len(f.bestChain.chain)) for idx, node := range f.bestChain.chain { hashes[idx] = node.candidateHash @@ -561,8 +548,8 @@ func (f *fragmentChain) BestChainVec() (hashes []parachaintypes.CandidateHash) { return hashes } -func (f *fragmentChain) IsCandidateBacked(hash parachaintypes.CandidateHash) bool { - if f.bestChain.Contains(hash) { +func (f *fragmentChain) isCandidateBacked(hash parachaintypes.CandidateHash) bool { + if _, ok := f.bestChain.candidates[hash]; ok { return true } @@ -570,10 +557,10 @@ func (f *fragmentChain) IsCandidateBacked(hash parachaintypes.CandidateHash) boo return candidate != nil && candidate.state == backed } -// CandidateBacked marks a candidate as backed. This can trigger a recreation of the best backable chain. -func (f *fragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.CandidateHash) { +// candidateBacked marks a candidate as backed. This can trigger a recreation of the best backable chain. 
+func (f *fragmentChain) candidateBacked(newlyBackedCandidate parachaintypes.CandidateHash) { // already backed - if f.bestChain.Contains(newlyBackedCandidate) { + if _, ok := f.bestChain.candidates[newlyBackedCandidate]; ok { return } @@ -592,7 +579,7 @@ func (f *fragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.Cand return } - prevStorage := f.unconnected.Clone() + prevStorage := f.unconnected.clone() f.unconnected = newCandidateStorage() f.populateChain(prevStorage) @@ -600,27 +587,28 @@ func (f *fragmentChain) CandidateBacked(newlyBackedCandidate parachaintypes.Cand f.populateUnconnectedPotentialCandidates(prevStorage) } -// CanAddCandidateAsPotential checks if this candidate could be added in the future -func (f *fragmentChain) CanAddCandidateAsPotential(entry *candidateEntry) error { +// canAddCandidateAsPotential checks if this candidate could be added in the future +func (f *fragmentChain) canAddCandidateAsPotential(entry *candidateEntry) error { candidateHash := entry.candidateHash _, existsInCandidateStorage := f.unconnected.byCandidateHash[candidateHash] - if f.bestChain.Contains(candidateHash) || existsInCandidateStorage { + _, existsInBestChain := f.bestChain.candidates[candidateHash] + if existsInBestChain || existsInCandidateStorage { return errCandidateAlreadyKnown } return f.checkPotential(entry) } -// TryAddingSecondedCandidate tries to add a candidate as a seconded candidate, if the +// tryAddingSecondedCandidate tries to add a candidate as a seconded candidate, if the // candidate has potential. 
It will never be added to the chain directly in the seconded // state, it will only be part of the unconnected storage -func (f *fragmentChain) TryAddingSecondedCandidate(entry *candidateEntry) error { +func (f *fragmentChain) tryAddingSecondedCandidate(entry *candidateEntry) error { if entry.state == backed { return errIntroduceBackedCandidate } - err := f.CanAddCandidateAsPotential(entry) + err := f.canAddCandidateAsPotential(entry) if err != nil { return err } @@ -628,8 +616,8 @@ func (f *fragmentChain) TryAddingSecondedCandidate(entry *candidateEntry) error return f.unconnected.addCandidateEntry(entry) } -// GetHeadDataByHash tries to get the full head data associated with this hash -func (f *fragmentChain) GetHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { +// getHeadDataByHash tries to get the full head data associated with this hash +func (f *fragmentChain) getHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { reqParent := f.scope.baseConstraints.RequiredParent reqParentHash, err := reqParent.Hash() if err != nil { @@ -672,11 +660,11 @@ type candidateAndRelayParent struct { realyParentHash common.Hash } -// FindBackableChain selects `count` candidates after the given `ancestors` which +// findBackableChain selects `count` candidates after the given `ancestors` which // can be backed on chain next. The intention of the `ancestors` is to allow queries // on the basis of one or more candidates which were previously pending availability // becoming available or candidates timing out -func (f *fragmentChain) FindBackableChain( +func (f *fragmentChain) findBackableChain( ancestors map[parachaintypes.CandidateHash]struct{}, count uint32) []*candidateAndRelayParent { if count == 0 { return nil @@ -690,7 +678,7 @@ func (f *fragmentChain) FindBackableChain( for _, elem := range f.bestChain.chain[basePos:actualEndIdx] { // only supply candidates which are not yet pending availability. 
// `ancestors` should have already contained them, but check just in case - if pending := f.scope.GetPendingAvailability(elem.candidateHash); pending == nil { + if pending := f.scope.getPendingAvailability(elem.candidateHash); pending == nil { res = append(res, &candidateAndRelayParent{ candidateHash: elem.candidateHash, realyParentHash: elem.relayParent(), @@ -729,17 +717,17 @@ func (f *fragmentChain) findAncestorPath(ancestors map[parachaintypes.CandidateH // the chain. The value returned may not be valid if we want to add a candidate pending // availability, which may have a relay parent which is out of scope, special handling // is needed in that case. -func (f *fragmentChain) earliestRelayParent() *RelayChainBlockInfo { +func (f *fragmentChain) earliestRelayParent() *relayChainBlockInfo { if len(f.bestChain.chain) > 0 { lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1] - info := f.scope.Ancestor(lastCandidate.relayParent()) + info := f.scope.ancestor(lastCandidate.relayParent()) if info != nil { return info } // if the relay parent is out of scope AND it is in the chain // it must be a candidate pending availability - pending := f.scope.GetPendingAvailability(lastCandidate.candidateHash) + pending := f.scope.getPendingAvailability(lastCandidate.candidateHash) if pending == nil { return nil } @@ -747,21 +735,21 @@ func (f *fragmentChain) earliestRelayParent() *RelayChainBlockInfo { return &pending.relayParent } - earliest := f.scope.EarliestRelayParent() + earliest := f.scope.earliestRelayParent() return &earliest } // earliestRelayParentPendingAvailability returns the earliest relay parent a potential // candidate may have for it to ever be added to the chain. This is the relay parent of // the last candidate pending availability or the earliest relay parent in scope. 
-func (f *fragmentChain) earliestRelayParentPendingAvailability() *RelayChainBlockInfo { +func (f *fragmentChain) earliestRelayParentPendingAvailability() *relayChainBlockInfo { for i := len(f.bestChain.chain) - 1; i >= 0; i-- { candidate := f.bestChain.chain[i] - if pending := f.scope.GetPendingAvailability(candidate.candidateHash); pending != nil { + if pending := f.scope.getPendingAvailability(candidate.candidateHash); pending != nil { return &pending.relayParent } } - earliest := f.scope.EarliestRelayParent() + earliest := f.scope.earliestRelayParent() return &earliest } @@ -771,14 +759,14 @@ func (f *fragmentChain) populateUnconnectedPotentialCandidates(oldStorage *candi for _, candidate := range oldStorage.byCandidateHash { // sanity check, all pending availability candidates should be already present // in the chain - if pending := f.scope.GetPendingAvailability(candidate.candidateHash); pending != nil { + if pending := f.scope.getPendingAvailability(candidate.candidateHash); pending != nil { continue } // we can just use the error to check if we can add // or not an entry since an error can legitimately // happen when pruning stale candidates. 
- err := f.CanAddCandidateAsPotential(candidate) + err := f.canAddCandidateAsPotential(candidate) if err == nil { _ = f.unconnected.addCandidateEntry(candidate) } @@ -795,11 +783,11 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { } // Check if the relay parent is in scope - relayParentInfo := f.scope.Ancestor(relayParent) + relayParentInfo := f.scope.ancestor(relayParent) if relayParentInfo == nil { return errRelayParentNotInScope{ relayParentA: relayParent, - relayParentB: f.scope.EarliestRelayParent().Hash, + relayParentB: f.scope.earliestRelayParent().Hash, } } @@ -814,7 +802,7 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { // If it's a fork with a backed candidate in the current chain if otherCandidateHash, ok := f.bestChain.byParentHead[parentHeadHash]; ok { - if f.scope.GetPendingAvailability(otherCandidateHash) != nil { + if f.scope.getPendingAvailability(otherCandidateHash) != nil { // Cannot accept a fork with a candidate pending availability return errForkWithCandidatePendingAvailability{candidateHash: otherCandidateHash} } @@ -851,14 +839,14 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { } var err error - constraints, err = ApplyModifications( + constraints, err = applyModifications( f.scope.baseConstraints, parentCandidate.cumulativeModifications) if err != nil { return errComputeConstraints{modificationErr: err} } - if ancestor := f.scope.Ancestor(parentCandidate.relayParent()); ancestor != nil { + if ancestor := f.scope.ancestor(parentCandidate.relayParent()); ancestor != nil { maybeMinRelayParentNumber = &ancestor.Number } } else if requiredParentHash == parentHeadHash { @@ -875,7 +863,7 @@ func (f *fragmentChain) checkPotential(candidate *candidateEntry) error { } // Check against constraints if we have a full concrete candidate - _, err = CheckAgainstConstraints( + _, err = checkAgainstConstraints( relayParentInfo, constraints, candidate.candidate.Commitments, 
@@ -985,7 +973,7 @@ type possibleChild struct { // Can be called by the `newFragmentChain` or when backing a new candidate. When this is called // it may cause the previous chain to be completely erased or it may add more than one candidate func (f *fragmentChain) populateChain(storage *candidateStorage) { - var cumulativeModifications *ConstraintModifications + var cumulativeModifications *constraintModifications if len(f.bestChain.chain) > 0 { lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1] cumulativeModifications = lastCandidate.cumulativeModifications.Clone() @@ -999,7 +987,7 @@ func (f *fragmentChain) populateChain(storage *candidateStorage) { } for len(f.bestChain.chain) < int(f.scope.maxDepth)+1 { - childConstraints, err := ApplyModifications( + childConstraints, err := applyModifications( f.scope.baseConstraints, cumulativeModifications) if err != nil { logger.Warnf("failed to apply modifications: %s", err.Error()) @@ -1021,10 +1009,10 @@ func (f *fragmentChain) populateChain(storage *candidateStorage) { // 4. all non-pending-availability candidates have relay-parent in the scope // 5. 
candidate outputs fulfil constraints - var relayParent *RelayChainBlockInfo + var relayParent *relayChainBlockInfo var minRelayParent uint - pending := f.scope.GetPendingAvailability(candidateEntry.candidateHash) + pending := f.scope.getPendingAvailability(candidateEntry.candidateHash) if pending != nil { relayParent = &pending.relayParent if len(f.bestChain.chain) == 0 { @@ -1033,7 +1021,7 @@ func (f *fragmentChain) populateChain(storage *candidateStorage) { minRelayParent = earliestRelayParent.Number } } else { - info := f.scope.Ancestor(candidateEntry.relayParent) + info := f.scope.ancestor(candidateEntry.relayParent) if info == nil { continue } @@ -1060,7 +1048,7 @@ func (f *fragmentChain) populateChain(storage *candidateStorage) { // this can never happen, as candidates can only be duplicated // if there's a cycle and we shouldnt have allowed for a cycle // to be chained - if f.bestChain.Contains(candidateEntry.candidateHash) { + if _, ok := f.bestChain.candidates[candidateEntry.candidateHash]; ok { continue } @@ -1091,9 +1079,9 @@ func (f *fragmentChain) populateChain(storage *candidateStorage) { // choose the best candidate bestCandidate := slices.MinFunc(possibleChildren, func(fst, snd *possibleChild) int { // always pick a candidate pending availability as best. 
- if f.scope.GetPendingAvailability(fst.candidateHash) != nil { + if f.scope.getPendingAvailability(fst.candidateHash) != nil { return -1 - } else if f.scope.GetPendingAvailability(snd.candidateHash) != nil { + } else if f.scope.getPendingAvailability(snd.candidateHash) != nil { return 1 } else { return forkSelectionRule(fst.candidateHash, snd.candidateHash) @@ -1107,7 +1095,7 @@ func (f *fragmentChain) populateChain(storage *candidateStorage) { cumulativeModifications.Stack(bestCandidate.fragment.ConstraintModifications()) // update the earliest relay parent - earliestRelayParent = &RelayChainBlockInfo{ + earliestRelayParent = &relayChainBlockInfo{ Hash: bestCandidate.fragment.RelayParent().Hash, Number: bestCandidate.fragment.RelayParent().Number, StorageRoot: bestCandidate.fragment.RelayParent().StorageRoot, @@ -1122,7 +1110,7 @@ func (f *fragmentChain) populateChain(storage *candidateStorage) { } // add the candidate to the chain now - f.bestChain.Push(node) + f.bestChain.push(node) } } @@ -1159,11 +1147,11 @@ func (f *fragmentChain) revertTo(parentHeadDataHash common.Hash) bool { } if requiredParentHash == parentHeadDataHash { - removedItems = f.bestChain.Clear() + removedItems = f.bestChain.clear() } if _, ok := f.bestChain.byOutputHead[parentHeadDataHash]; removedItems == nil && ok { - removedItems = f.bestChain.RevertToParentHash(parentHeadDataHash) + removedItems = f.bestChain.revertToParentHash(parentHeadDataHash) } if removedItems == nil { diff --git a/dot/parachain/prospective-parachains/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment_chain_test.go index ccf6cbd301..b9e5939134 100644 --- a/dot/parachain/prospective-parachains/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment_chain_test.go @@ -101,7 +101,7 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - candidate: &ProspectiveCandidate{ + 
candidate: &prospectiveCandidate{ Commitments: parachaintypes.CandidateCommitments{ HeadData: headData, }, @@ -134,7 +134,7 @@ func TestCandidateStorage_HeadDataByHash(t *testing.T) { candidateHash: candidateHash, parentHeadDataHash: parentHeadHash, outputHeadDataHash: outputHeadHash, - candidate: &ProspectiveCandidate{ + candidate: &prospectiveCandidate{ PersistedValidationData: parachaintypes.PersistedValidationData{ ParentHead: headData, }, @@ -265,22 +265,22 @@ func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { func TestEarliestRelayParent(t *testing.T) { tests := map[string]struct { setup func() *scope - expect RelayChainBlockInfo + expect relayChainBlockInfo }{ "returns_from_ancestors": { setup: func() *scope { - relayParent := RelayChainBlockInfo{ + relayParent := relayChainBlockInfo{ Hash: common.Hash{0x01}, Number: 10, } baseConstraints := ¶chaintypes.Constraints{ MinRelayParentNumber: 5, } - ancestor := RelayChainBlockInfo{ + ancestor := relayChainBlockInfo{ Hash: common.Hash{0x02}, Number: 9, } - ancestorsMap := btree.NewMap[uint, RelayChainBlockInfo](100) + ancestorsMap := btree.NewMap[uint, relayChainBlockInfo](100) ancestorsMap.Set(ancestor.Number, ancestor) return &scope{ relayParent: relayParent, @@ -288,14 +288,14 @@ func TestEarliestRelayParent(t *testing.T) { ancestors: ancestorsMap, } }, - expect: RelayChainBlockInfo{ + expect: relayChainBlockInfo{ Hash: common.Hash{0x02}, Number: 9, }, }, "returns_relayParent": { setup: func() *scope { - relayParent := RelayChainBlockInfo{ + relayParent := relayChainBlockInfo{ Hash: common.Hash{0x01}, Number: 10, } @@ -305,10 +305,10 @@ func TestEarliestRelayParent(t *testing.T) { return &scope{ relayParent: relayParent, baseConstraints: baseConstraints, - ancestors: btree.NewMap[uint, RelayChainBlockInfo](100), + ancestors: btree.NewMap[uint, relayChainBlockInfo](100), } }, - expect: RelayChainBlockInfo{ + expect: relayChainBlockInfo{ Hash: common.Hash{0x01}, Number: 10, }, @@ -319,7 +319,7 
@@ func TestEarliestRelayParent(t *testing.T) { tt := tt t.Run(name, func(t *testing.T) { scope := tt.setup() - result := scope.EarliestRelayParent() + result := scope.earliestRelayParent() assert.Equal(t, tt.expect, result) }) } @@ -346,9 +346,9 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: &ConstraintModifications{}, + cumulativeModifications: &constraintModifications{}, } - chain.Push(node) + chain.push(node) } return chain }, @@ -370,9 +370,9 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: &ConstraintModifications{}, + cumulativeModifications: &constraintModifications{}, } - chain.Push(node) + chain.push(node) } return chain }, @@ -394,9 +394,9 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { candidateHash: parachaintypes.CandidateHash{Value: common.Hash{byte(i)}}, parentHeadDataHash: common.Hash{byte(i)}, outputHeadDataHash: common.Hash{byte(i + 1)}, - cumulativeModifications: &ConstraintModifications{}, + cumulativeModifications: &constraintModifications{}, } - chain.Push(node) + chain.push(node) } return chain }, @@ -410,7 +410,7 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { tt := tt t.Run(name, func(t *testing.T) { backedChain := tt.setup() - removedNodes := backedChain.RevertToParentHash(tt.hash) + removedNodes := backedChain.revertToParentHash(tt.hash) // Check the number of removed nodes assert.Equal(t, tt.expectedRemovedFragments, len(removedNodes)) @@ -432,7 +432,7 @@ func TestBackedChain_RevertToParentHash(t *testing.T) { } func TestFragmentChainWithFreshScope(t *testing.T) { - relayParent := RelayChainBlockInfo{ + relayParent := 
relayChainBlockInfo{ Hash: common.Hash{0x00}, Number: 0, StorageRoot: common.Hash{0x00}, @@ -477,7 +477,7 @@ func TestFragmentChainWithFreshScope(t *testing.T) { }, } - err = candidateStorage.AddPendingAvailabilityCandidate(candidateHash, committedCandidate, persistedValidationData) + err = candidateStorage.addPendingAvailabilityCandidate(candidateHash, committedCandidate, persistedValidationData) assert.NoError(t, err) } @@ -564,20 +564,20 @@ func makeCommittedCandidate( func TestScopeRejectsAncestors(t *testing.T) { tests := map[string]struct { - relayParent *RelayChainBlockInfo - ancestors []RelayChainBlockInfo + relayParent *relayChainBlockInfo + ancestors []relayChainBlockInfo maxDepth uint baseConstraints *parachaintypes.Constraints pendingAvailability []*pendingAvailability expectedError error }{ "rejects_ancestor_that_skips_blocks": { - relayParent: &RelayChainBlockInfo{ + relayParent: &relayChainBlockInfo{ Number: 10, Hash: common.BytesToHash(bytes.Repeat([]byte{0x10}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{0x69}, 32)), }, - ancestors: []RelayChainBlockInfo{ + ancestors: []relayChainBlockInfo{ { Number: 8, Hash: common.BytesToHash(bytes.Repeat([]byte{0x08}, 32)), @@ -588,15 +588,15 @@ func TestScopeRejectsAncestors(t *testing.T) { baseConstraints: makeConstraints(8, []uint{8, 9}, parachaintypes.HeadData{Data: []byte{0x01, 0x02, 0x03}}), pendingAvailability: make([]*pendingAvailability, 0), - expectedError: errUnexpectedAncestor{Number: 8, Prev: 10}, + expectedError: errUnexpectedAncestor{number: 8, prev: 10}, }, "rejects_ancestor_for_zero_block": { - relayParent: &RelayChainBlockInfo{ + relayParent: &relayChainBlockInfo{ Number: 0, Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), }, - ancestors: []RelayChainBlockInfo{ + ancestors: []relayChainBlockInfo{ { Number: 99999, Hash: common.BytesToHash(bytes.Repeat([]byte{99}, 32)), @@ -606,15 +606,15 @@ func 
TestScopeRejectsAncestors(t *testing.T) { maxDepth: 2, baseConstraints: makeConstraints(0, []uint{}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), pendingAvailability: make([]*pendingAvailability, 0), - expectedError: errUnexpectedAncestor{Number: 99999, Prev: 0}, + expectedError: errUnexpectedAncestor{number: 99999, prev: 0}, }, "rejects_unordered_ancestors": { - relayParent: &RelayChainBlockInfo{ + relayParent: &relayChainBlockInfo{ Number: 5, Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), }, - ancestors: []RelayChainBlockInfo{ + ancestors: []relayChainBlockInfo{ { Number: 4, Hash: common.BytesToHash(bytes.Repeat([]byte{4}, 32)), @@ -634,7 +634,7 @@ func TestScopeRejectsAncestors(t *testing.T) { maxDepth: 2, baseConstraints: makeConstraints(0, []uint{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), pendingAvailability: make([]*pendingAvailability, 0), - expectedError: errUnexpectedAncestor{Number: 2, Prev: 4}, + expectedError: errUnexpectedAncestor{number: 2, prev: 4}, }, } @@ -654,13 +654,13 @@ func TestScopeRejectsAncestors(t *testing.T) { } func TestScopeOnlyTakesAncestorsUpToMin(t *testing.T) { - relayParent := RelayChainBlockInfo{ + relayParent := relayChainBlockInfo{ Number: 5, Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), } - ancestors := []RelayChainBlockInfo{ + ancestors := []relayChainBlockInfo{ { Number: 4, Hash: common.BytesToHash(bytes.Repeat([]byte{4}, 32)), @@ -862,7 +862,7 @@ func TestCandidateStorageMethods(t *testing.T) { require.NoError(t, err) storage := newCandidateStorage() - err = storage.AddPendingAvailabilityCandidate(candidateHash, candidate, pvd) + err = storage.addPendingAvailabilityCandidate(candidateHash, candidate, pvd) require.NoError(t, err) _, ok := storage.byCandidateHash[candidateHash] @@ -946,7 +946,7 @@ func TestInitAndPopulateFromEmpty(t *testing.T) { baseConstraints := 
makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0a}}) scope, err := newScopeWithAncestors( - RelayChainBlockInfo{ + relayChainBlockInfo{ Number: 1, Hash: common.BytesToHash(bytes.Repeat([]byte{1}, 32)), StorageRoot: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), @@ -959,13 +959,13 @@ func TestInitAndPopulateFromEmpty(t *testing.T) { require.NoError(t, err) chain := newFragmentChain(scope, newCandidateStorage()) - assert.Equal(t, 0, chain.BestChainLen()) - assert.Equal(t, 0, chain.UnconnectedLen()) + assert.Equal(t, 0, chain.bestChainLen()) + assert.Equal(t, 0, chain.unconnected.len()) newChain := newFragmentChain(scope, newCandidateStorage()) - newChain.PopulateFromPrevious(chain) - assert.Equal(t, 0, newChain.BestChainLen()) - assert.Equal(t, 0, newChain.UnconnectedLen()) + newChain.populateFromPrevious(chain) + assert.Equal(t, 0, newChain.bestChainLen()) + assert.Equal(t, 0, newChain.unconnected.len()) } func populateFromPreviousStorage(scope *scope, storage *candidateStorage) *fragmentChain { @@ -973,8 +973,8 @@ func populateFromPreviousStorage(scope *scope, storage *candidateStorage) *fragm // clone the value prevChain := *chain - (&prevChain).unconnected = storage.Clone() - chain.PopulateFromPrevious(&prevChain) + (&prevChain).unconnected = storage.clone() + chain.populateFromPrevious(&prevChain) return chain } @@ -986,20 +986,20 @@ func TestPopulateAndCheckPotential(t *testing.T) { relayParentBHash := common.BytesToHash(bytes.Repeat([]byte{2}, 32)) relayParentCHash := common.BytesToHash(bytes.Repeat([]byte{3}, 32)) - relayParentAInfo := &RelayChainBlockInfo{ + relayParentAInfo := &relayChainBlockInfo{ Number: 0, Hash: relayParentAHash, StorageRoot: common.Hash{}, } - relayParentBInfo := &RelayChainBlockInfo{ + relayParentBInfo := &relayChainBlockInfo{ Number: 1, Hash: relayParentBHash, StorageRoot: common.Hash{}, } - relayParentCInfo := &RelayChainBlockInfo{ + relayParentCInfo := &relayChainBlockInfo{ Number: 2, Hash: relayParentCHash, 
StorageRoot: common.Hash{}, } // the ancestors must be in the reverse order - ancestors := []RelayChainBlockInfo{ + ancestors := []relayChainBlockInfo{ *relayParentBInfo, *relayParentAInfo, } @@ -1088,23 +1088,23 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) chain := populateFromPreviousStorage(scope, storage) - require.Empty(t, chain.BestChainVec()) + require.Empty(t, chain.bestChainVec()) // if the min relay parent is wrong, candidate A can never become valid, otherwise // if only the required parent doesnt match, candidate A still a potential candidate if wrongConstraint.MinRelayParentNumber == relayParentBInfo.Number { // if A is not a potential candidate, its descendants will also not be added. - require.Equal(t, chain.UnconnectedLen(), 0) - err := chain.CanAddCandidateAsPotential(candidateAEntry) + require.Equal(t, chain.unconnected.len(), 0) + err := chain.canAddCandidateAsPotential(candidateAEntry) require.ErrorIs(t, err, errRelayParentNotInScope{ relayParentA: relayParentAHash, // candidate A has relay parent A relayParentB: relayParentBHash, // while the constraint is expecting at least relay parent B }) // however if taken independently, both B and C still have potential - err = chain.CanAddCandidateAsPotential(candidateBEntry) + err = chain.canAddCandidateAsPotential(candidateBEntry) require.NoError(t, err) - err = chain.CanAddCandidateAsPotential(candidateCEntry) + err = chain.canAddCandidateAsPotential(candidateCEntry) require.NoError(t, err) } else { potentials := make([]parachaintypes.CandidateHash, 0) @@ -1169,12 +1169,12 @@ func TestPopulateAndCheckPotential(t *testing.T) { chain := newFragmentChain(scope, newCandidateStorage()) // individually each candidate is a potential candidate - require.NoError(t, chain.CanAddCandidateAsPotential(candidateAEntry)) - require.NoError(t, chain.CanAddCandidateAsPotential(candidateBEntry)) - require.NoError(t, chain.CanAddCandidateAsPotential(candidateCEntry)) + require.NoError(t, 
chain.canAddCandidateAsPotential(candidateAEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateBEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateCEntry)) chain = populateFromPreviousStorage(scope, storage) - require.Equal(t, tt.expectedBestChain, chain.BestChainVec()) + require.Equal(t, tt.expectedBestChain, chain.bestChainVec()) // Check that the unconnected candidates are as expected unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) @@ -1192,7 +1192,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // candidate A has a relay parent out of scope. Candidates B and C // will also be deleted since they form a chain with A t.Run("candidate_A_relay_parent_out_of_scope", func(t *testing.T) { - newAncestors := []RelayChainBlockInfo{ + newAncestors := []relayChainBlockInfo{ *relayParentBInfo, } @@ -1205,18 +1205,18 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) require.NoError(t, err) chain := populateFromPreviousStorage(scope, storage) - require.Empty(t, chain.BestChainVec()) - require.Equal(t, 0, chain.UnconnectedLen()) + require.Empty(t, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), errRelayParentNotInScope{ relayParentA: relayParentAHash, relayParentB: relayParentBHash, }) // however if taken indepently, both B and C still have potential - require.NoError(t, chain.CanAddCandidateAsPotential(candidateBEntry)) - require.NoError(t, chain.CanAddCandidateAsPotential(candidateCEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateBEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateCEntry)) }) t.Run("candidate_A_and_B_out_of_scope_C_still_potential", func(t *testing.T) { @@ -1229,29 +1229,29 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) require.NoError(t, err) chain := 
populateFromPreviousStorage(scope, storage) - require.Empty(t, chain.BestChainVec()) - require.Equal(t, 0, chain.UnconnectedLen()) + require.Empty(t, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), errRelayParentNotInScope{ relayParentA: relayParentAHash, relayParentB: relayParentCHash, }) // however if taken indepently, both B and C still have potential - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateBEntry), + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateBEntry), errRelayParentNotInScope{ relayParentA: relayParentBHash, relayParentB: relayParentCHash, }) - require.NoError(t, chain.CanAddCandidateAsPotential(candidateCEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateCEntry)) }) }) t.Run("parachain_cycle_not_allowed", func(t *testing.T) { // make C parent of parachain block A - modifiedStorage := storage.Clone() + modifiedStorage := storage.clone() modifiedStorage.removeCandidate(candidateCHash) wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, paraID, @@ -1273,22 +1273,22 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) chain := populateFromPreviousStorage(scope, modifiedStorage) - require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec()) - require.Equal(t, 0, chain.UnconnectedLen()) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) - err = chain.CanAddCandidateAsPotential(wrongCandidateCEntry) + err = chain.canAddCandidateAsPotential(wrongCandidateCEntry) require.ErrorIs(t, err, errCycle) // However, if taken independently, C still has potential, since we don't know A and B. 
chain = newFragmentChain(scope, newCandidateStorage()) - require.NoError(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(wrongCandidateCEntry)) }) t.Run("relay_parent_move_backwards_not_allowed", func(t *testing.T) { // each candidate was build using a different, and contigous, relay parent // in this test we are going to change candidate C to have the same relay // parent of candidate A, given that candidate B is one block ahead. - modifiedStorage := storage.Clone() + modifiedStorage := storage.clone() modifiedStorage.removeCandidate(candidateCHash) wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, paraID, @@ -1304,10 +1304,10 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) chain := populateFromPreviousStorage(scope, modifiedStorage) - require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec()) - require.Equal(t, 0, chain.UnconnectedLen()) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), errRelayParentMovedBackwards) + require.ErrorIs(t, chain.canAddCandidateAsPotential(wrongCandidateCEntry), errRelayParentMovedBackwards) }) t.Run("unconnected_candidate_C", func(t *testing.T) { @@ -1315,7 +1315,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // backwards from B's relay parent, because C may latter on trigger a reorg and // B may get removed - modifiedStorage := storage.Clone() + modifiedStorage := storage.clone() modifiedStorage.removeCandidate(candidateCHash) parenteHead := parachaintypes.HeadData{Data: []byte{0x0d}} @@ -1341,10 +1341,10 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) chain := newFragmentChain(scope, newCandidateStorage()) - require.NoError(t, 
chain.CanAddCandidateAsPotential(unconnectedCandidateCEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(unconnectedCandidateCEntry)) chain = populateFromPreviousStorage(scope, modifiedStorage) - require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.BestChainVec()) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.bestChainVec()) unconnected := make(map[parachaintypes.CandidateHash]struct{}) for _, entry := range chain.unconnected.byCandidateHash { @@ -1383,11 +1383,11 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) chain := populateFromPreviousStorage(scope, modifiedStorage) - require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.BestChainVec()) - require.Equal(t, 0, chain.UnconnectedLen()) + require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) require.ErrorIs(t, - chain.CanAddCandidateAsPotential(unconnectedCandidateCEntry), + chain.canAddCandidateAsPotential(unconnectedCandidateCEntry), errRelayParentPrecedesCandidatePendingAvailability{ relayParentA: relayParentAHash, relayParentB: relayParentBHash, @@ -1396,7 +1396,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }) t.Run("cannot_fork_from_a_candidate_pending_availability", func(t *testing.T) { - modifiedStorage := storage.Clone() + modifiedStorage := storage.clone() modifiedStorage.removeCandidate(candidateCHash) modifiedStorage.removeCandidate(candidateAHash) @@ -1439,9 +1439,9 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) require.NoError(t, err) chain := populateFromPreviousStorage(scope, modifiedStorage) - require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.BestChainVec()) - require.Equal(t, 0, chain.UnconnectedLen()) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(wrongCandidateCEntry), 
errForkWithCandidatePendingAvailability{ + require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) + require.ErrorIs(t, chain.canAddCandidateAsPotential(wrongCandidateCEntry), errForkWithCandidatePendingAvailability{ candidateHash: modifiedCandidateAHash, }) }) @@ -1473,13 +1473,13 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) chain := populateFromPreviousStorage(scope, storage) - assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) - assert.Equal(t, 0, chain.UnconnectedLen()) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) + assert.Equal(t, 0, chain.unconnected.len()) } }) t.Run("relay_parents_of_pending_availability_candidates_can_be_out_of_scope", func(t *testing.T) { - ancestorsWithoutA := []RelayChainBlockInfo{ + ancestorsWithoutA := []relayChainBlockInfo{ *relayParentBInfo, } @@ -1495,8 +1495,8 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.NoError(t, err) chain := populateFromPreviousStorage(scope, storage) - assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) - assert.Equal(t, 0, chain.UnconnectedLen()) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) + assert.Equal(t, 0, chain.unconnected.len()) }) t.Run("relay_parents_of_pending_availability_candidates_cannot_move_backwards", func(t *testing.T) { @@ -1506,7 +1506,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { []*pendingAvailability{ { candidateHash: candidateAHash, - relayParent: RelayChainBlockInfo{ + relayParent: relayChainBlockInfo{ Hash: relayParentAInfo.Hash, Number: 1, StorageRoot: relayParentAInfo.StorageRoot, @@ -1514,7 +1514,7 @@ func TestPopulateAndCheckPotential(t 
*testing.T) { }, { candidateHash: candidateBHash, - relayParent: RelayChainBlockInfo{ + relayParent: relayChainBlockInfo{ Hash: relayParentBInfo.Hash, Number: 0, StorageRoot: relayParentBInfo.StorageRoot, @@ -1522,13 +1522,13 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, }, 4, - []RelayChainBlockInfo{}, + []relayChainBlockInfo{}, ) require.NoError(t, err) chain := populateFromPreviousStorage(scope, storage) - assert.Empty(t, chain.BestChainVec()) - assert.Equal(t, 0, chain.UnconnectedLen()) + assert.Empty(t, chain.bestChainVec()) + assert.Equal(t, 0, chain.unconnected.len()) }) t.Run("more_complex_case_with_multiple_candidates_and_constraints", func(t *testing.T) { @@ -1551,7 +1551,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) candidateDHash, candidateDEntry := hashAndGetEntry(t, candidateD, pvdD, backed) require.NoError(t, populateFromPreviousStorage(scope, storage). - CanAddCandidateAsPotential(candidateDEntry)) + canAddCandidateAsPotential(candidateDEntry)) require.NoError(t, storage.addCandidateEntry(candidateDEntry)) // Candidate F @@ -1565,7 +1565,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) candidateFHash, candidateFEntry := hashAndGetEntry(t, candidateF, pvdF, seconded) require.NoError(t, populateFromPreviousStorage(scope, storage). - CanAddCandidateAsPotential(candidateFEntry)) + canAddCandidateAsPotential(candidateFEntry)) require.NoError(t, storage.addCandidateEntry(candidateFEntry)) // Candidate A1 @@ -1580,7 +1580,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // candidate A1 is created so that its hash is greater than the candidate A hash. require.Equal(t, -1, forkSelectionRule(candidateAHash, candidateA1Hash)) require.ErrorIs(t, populateFromPreviousStorage(scope, storage). 
- CanAddCandidateAsPotential(candidateA1Entry), + canAddCandidateAsPotential(candidateA1Entry), errForkChoiceRule{candidateHash: candidateAHash}) require.NoError(t, storage.addCandidateEntry(candidateA1Entry)) @@ -1594,7 +1594,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) _, candidateB1Entry := hashAndGetEntry(t, candidateB1, pvdB1, seconded) require.NoError(t, populateFromPreviousStorage(scope, storage). - CanAddCandidateAsPotential(candidateB1Entry)) + canAddCandidateAsPotential(candidateB1Entry)) require.NoError(t, storage.addCandidateEntry(candidateB1Entry)) @@ -1607,7 +1607,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) _, candidateC1Entry := hashAndGetEntry(t, candidateC1, pvdC1, backed) require.NoError(t, populateFromPreviousStorage(scope, storage). - CanAddCandidateAsPotential(candidateC1Entry)) + canAddCandidateAsPotential(candidateC1Entry)) require.NoError(t, storage.addCandidateEntry(candidateC1Entry)) @@ -1621,7 +1621,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { _, candidateC2Entry := hashAndGetEntry(t, candidateC2, pvdC2, seconded) require.NoError(t, populateFromPreviousStorage(scope, storage). - CanAddCandidateAsPotential(candidateC2Entry)) + canAddCandidateAsPotential(candidateC2Entry)) require.NoError(t, storage.addCandidateEntry(candidateC2Entry)) // Candidate A2 @@ -1636,7 +1636,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { require.Equal(t, -1, forkSelectionRule(candidateA2Hash, candidateAHash)) require.NoError(t, populateFromPreviousStorage(scope, storage). - CanAddCandidateAsPotential(candidateA2Entry)) + canAddCandidateAsPotential(candidateA2Entry)) require.NoError(t, storage.addCandidateEntry(candidateA2Entry)) @@ -1650,12 +1650,12 @@ func TestPopulateAndCheckPotential(t *testing.T) { ) candidateB2Hash, candidateB2Entry := hashAndGetEntry(t, candidateB2, pvdB2, backed) require.NoError(t, populateFromPreviousStorage(scope, storage). 
- CanAddCandidateAsPotential(candidateB2Entry)) + canAddCandidateAsPotential(candidateB2Entry)) require.NoError(t, storage.addCandidateEntry(candidateB2Entry)) chain := populateFromPreviousStorage(scope, storage) - assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) for _, unconnected := range chain.unconnected.byCandidateHash { @@ -1671,15 +1671,15 @@ func TestPopulateAndCheckPotential(t *testing.T) { assert.Equal(t, expectedUnconnected, unconnectedHashes) // Cannot add as potential an already present candidate (whether it's in the best chain or in unconnected storage) - assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) - assert.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateFEntry), errCandidateAlreadyKnown) + assert.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) + assert.ErrorIs(t, chain.canAddCandidateAsPotential(candidateFEntry), errCandidateAlreadyKnown) t.Run("simulate_best_chain_reorg", func(t *testing.T) { // back a2, the reversion should happen at the root. chain := cloneFragmentChain(chain) - chain.CandidateBacked(candidateA2Hash) + chain.candidateBacked(candidateA2Hash) - require.Equal(t, []parachaintypes.CandidateHash{candidateA2Hash, candidateB2Hash}, chain.BestChainVec()) + require.Equal(t, []parachaintypes.CandidateHash{candidateA2Hash, candidateB2Hash}, chain.bestChainVec()) // candidate F is kept as it was truly unconnected. 
The rest will be trimmed unconnected := map[parachaintypes.CandidateHash]struct{}{} @@ -1692,10 +1692,10 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, unconnected) // candidates A1 and A will never have potential again - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateA1Entry), errForkChoiceRule{ + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateA1Entry), errForkChoiceRule{ candidateHash: candidateA2Hash, }) - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), errForkChoiceRule{ + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), errForkChoiceRule{ candidateHash: candidateA2Hash, }) }) @@ -1731,7 +1731,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { // c4 should have a lower candidate hash than c3 require.Equal(t, -1, forkSelectionRule(candidateC4Hash, candidateC3Hash)) - storage := storage.Clone() + storage := storage.clone() require.NoError(t, storage.addCandidateEntry(candidateC3Entry)) require.NoError(t, storage.addCandidateEntry(candidateC4Entry)) @@ -1743,26 +1743,26 @@ func TestPopulateAndCheckPotential(t *testing.T) { // and trigger another reorg when backing c4 require.Equal(t, []parachaintypes.CandidateHash{ candidateAHash, candidateBHash, candidateCHash, - }, chain.BestChainVec()) + }, chain.bestChainVec()) - chain.CandidateBacked(candidateA2Hash) + chain.candidateBacked(candidateA2Hash) require.Equal(t, []parachaintypes.CandidateHash{ candidateA2Hash, candidateB2Hash, - }, chain.BestChainVec()) + }, chain.bestChainVec()) - chain.CandidateBacked(candidateC3Hash) + chain.candidateBacked(candidateC3Hash) require.Equal(t, []parachaintypes.CandidateHash{ candidateA2Hash, candidateB2Hash, candidateC3Hash, - }, chain.BestChainVec()) + }, chain.bestChainVec()) // backing c4 will cause a reorg - chain.CandidateBacked(candidateC4Hash) + chain.candidateBacked(candidateC4Hash) require.Equal(t, []parachaintypes.CandidateHash{ candidateA2Hash, candidateB2Hash, candidateC4Hash, - }, 
chain.BestChainVec()) + }, chain.bestChainVec()) unconnected := make(map[parachaintypes.CandidateHash]struct{}) for _, entry := range chain.unconnected.byCandidateHash { @@ -1786,7 +1786,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { candidateEHash, _ := hashAndInsertCandididate(t, storage, candidateE, candidateEPvd, seconded) chain = populateFromPreviousStorage(scope, storage) - require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.BestChainVec()) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) unconnected := make(map[parachaintypes.CandidateHash]struct{}) for _, entry := range chain.unconnected.byCandidateHash { @@ -1817,7 +1817,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { chain := populateFromPreviousStorage(scope, storage) require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, - chain.BestChainVec()) + chain.bestChainVec()) unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) for _, unconnected := range chain.unconnected.byCandidateHash { @@ -1831,7 +1831,7 @@ func TestPopulateAndCheckPotential(t *testing.T) { }, unconnectedHashes) // cannot add as potential an already pending availability candidate - require.ErrorIs(t, chain.CanAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) // simulate the fact that candidate A, B and C have been included baseConstraints := makeConstraints(0, []uint{0}, parachaintypes.HeadData{Data: []byte{0x0d}}) @@ -1840,8 +1840,8 @@ func TestPopulateAndCheckPotential(t *testing.T) { prevChain := chain chain = newFragmentChain(scope, newCandidateStorage()) - chain.PopulateFromPrevious(prevChain) - require.Equal(t, []parachaintypes.CandidateHash{candidateDHash}, chain.BestChainVec()) + chain.populateFromPrevious(prevChain) + 
require.Equal(t, []parachaintypes.CandidateHash{candidateDHash}, chain.bestChainVec()) unconnectedHashes = make(map[parachaintypes.CandidateHash]struct{}) for _, unconnected := range chain.unconnected.byCandidateHash { @@ -1855,20 +1855,20 @@ func TestPopulateAndCheckPotential(t *testing.T) { // mark E as backed, F will be dropped for invalid watermark. // empty unconnected candidates - chain.CandidateBacked(candidateEHash) - require.Equal(t, []parachaintypes.CandidateHash{candidateDHash, candidateEHash}, chain.BestChainVec()) - require.Zero(t, chain.UnconnectedLen()) + chain.candidateBacked(candidateEHash) + require.Equal(t, []parachaintypes.CandidateHash{candidateDHash, candidateEHash}, chain.bestChainVec()) + require.Zero(t, chain.unconnected.len()) var expectedErr error = &errCheckAgainstConstraints{ - fragmentValidityErr: &ErrOutputsInvalid{ - ModificationError: &ErrDisallowedHrmpWatermark{ + fragmentValidityErr: &errOutputsInvalid{ + ModificationError: &errDisallowedHrmpWatermark{ BlockNumber: 1000, }, }, } errCheckAgainstConstraints := new(errCheckAgainstConstraints) - err = chain.CanAddCandidateAsPotential(candidateFEntry) + err = chain.canAddCandidateAsPotential(candidateFEntry) require.True(t, errors.As(err, errCheckAgainstConstraints)) require.Equal(t, errCheckAgainstConstraints, expectedErr) @@ -1884,7 +1884,7 @@ func cloneFragmentChain(original *fragmentChain) *fragmentChain { pendingAvailability: append([]*pendingAvailability(nil), original.scope.pendingAvailability...), maxDepth: original.scope.maxDepth, ancestors: original.scope.ancestors.Copy(), - ancestorsByHash: make(map[common.Hash]RelayChainBlockInfo), + ancestorsByHash: make(map[common.Hash]relayChainBlockInfo), } for k, v := range original.scope.ancestorsByHash { @@ -1901,11 +1901,11 @@ func cloneFragmentChain(original *fragmentChain) *fragmentChain { outputHeadDataHash: node.outputHeadDataHash, cumulativeModifications: node.cumulativeModifications.Clone(), } - 
clonedBestChain.Push(clonedNode) + clonedBestChain.push(clonedNode) } // Clone the unconnected storage - clonedUnconnected := original.unconnected.Clone() + clonedUnconnected := original.unconnected.clone() // Create the cloned fragment chain clonedFragmentChain := &fragmentChain{ @@ -1925,7 +1925,7 @@ func TestFindAncestorPathAndFindBackableChainEmptyBestChain(t *testing.T) { // Empty chain baseConstraints := makeConstraints(0, []uint{0}, requiredParent) - relayParentInfo := RelayChainBlockInfo{ + relayParentInfo := relayChainBlockInfo{ Number: 0, Hash: relayParent, StorageRoot: common.Hash{}, @@ -1935,17 +1935,17 @@ func TestFindAncestorPathAndFindBackableChainEmptyBestChain(t *testing.T) { require.NoError(t, err) chain := newFragmentChain(scope, newCandidateStorage()) - assert.Equal(t, 0, chain.BestChainLen()) + assert.Equal(t, 0, chain.bestChainLen()) assert.Equal(t, 0, chain.findAncestorPath(map[parachaintypes.CandidateHash]struct{}{})) - assert.Equal(t, []*candidateAndRelayParent{}, chain.FindBackableChain(map[parachaintypes.CandidateHash]struct{}{}, 2)) + assert.Equal(t, []*candidateAndRelayParent{}, chain.findBackableChain(map[parachaintypes.CandidateHash]struct{}{}, 2)) // Invalid candidate ancestors := map[parachaintypes.CandidateHash]struct{}{ {Value: common.Hash{}}: {}, } assert.Equal(t, 0, chain.findAncestorPath(ancestors)) - assert.Equal(t, []*candidateAndRelayParent{}, chain.FindBackableChain(ancestors, 2)) + assert.Equal(t, []*candidateAndRelayParent{}, chain.findBackableChain(ancestors, 2)) } func TestFindAncestorPathAndFindBackableChain(t *testing.T) { @@ -2013,7 +2013,7 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { return output } - relayParentInfo := RelayChainBlockInfo{ + relayParentInfo := relayChainBlockInfo{ Number: uint(relayParentNumber), Hash: relayParent, StorageRoot: relayParentStorageRoot, @@ -2035,44 +2035,44 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { // and no candidate will be returned 
require.Equal(t, 6, len(candidateHashes)) - require.Equal(t, 0, chain.BestChainLen()) - require.Equal(t, 6, chain.UnconnectedLen()) + require.Equal(t, 0, chain.bestChainLen()) + require.Equal(t, 6, chain.unconnected.len()) for count := 0; count < 10; count++ { - require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) } t.Run("couple_candidates_backed", func(t *testing.T) { chain := cloneFragmentChain(chain) - chain.CandidateBacked(candidateHashes[5]) + chain.candidateBacked(candidateHashes[5]) for count := 0; count < 10; count++ { - require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) } - chain.CandidateBacked(candidateHashes[3]) - chain.CandidateBacked(candidateHashes[4]) + chain.candidateBacked(candidateHashes[3]) + chain.candidateBacked(candidateHashes[4]) for count := 0; count < 10; count++ { - require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) } - chain.CandidateBacked(candidateHashes[1]) + chain.candidateBacked(candidateHashes[1]) for count := 0; count < 10; count++ { - require.Equal(t, 0, len(chain.FindBackableChain(make(Ancestors), uint32(count)))) + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) } - chain.CandidateBacked(candidateHashes[0]) - require.Equal(t, hashes(0, 1), chain.FindBackableChain(make(Ancestors), 1)) + chain.candidateBacked(candidateHashes[0]) + require.Equal(t, hashes(0, 1), chain.findBackableChain(make(Ancestors), 1)) for c := 2; c < 10; c++ { - require.Equal(t, hashes(0, 2), chain.FindBackableChain(make(Ancestors), uint32(c))) + require.Equal(t, hashes(0, 2), chain.findBackableChain(make(Ancestors), uint32(c))) } // now back the missing piece - 
chain.CandidateBacked(candidateHashes[2]) - require.Equal(t, 6, chain.BestChainLen()) + chain.candidateBacked(candidateHashes[2]) + require.Equal(t, 6, chain.bestChainLen()) for count := 0; count < 10; count++ { var result []*candidateAndRelayParent @@ -2086,7 +2086,7 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { }) } } - require.Equal(t, result, chain.FindBackableChain(make(Ancestors), uint32(count))) + require.Equal(t, result, chain.findBackableChain(make(Ancestors), uint32(count))) } }) @@ -2103,19 +2103,19 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { }) for _, c := range candidatesShuffled { - chain.CandidateBacked(c) + chain.candidateBacked(c) storage.markBacked(c) } // no ancestors supplied require.Equal(t, 0, chain.findAncestorPath(make(Ancestors))) - require.Equal(t, []*candidateAndRelayParent(nil), chain.FindBackableChain(make(Ancestors), 0)) - require.Equal(t, hashes(0, 1), chain.FindBackableChain(make(Ancestors), 1)) - require.Equal(t, hashes(0, 2), chain.FindBackableChain(make(Ancestors), 2)) - require.Equal(t, hashes(0, 5), chain.FindBackableChain(make(Ancestors), 5)) + require.Equal(t, []*candidateAndRelayParent(nil), chain.findBackableChain(make(Ancestors), 0)) + require.Equal(t, hashes(0, 1), chain.findBackableChain(make(Ancestors), 1)) + require.Equal(t, hashes(0, 2), chain.findBackableChain(make(Ancestors), 2)) + require.Equal(t, hashes(0, 5), chain.findBackableChain(make(Ancestors), 5)) for count := 6; count < 10; count++ { - backableChain := chain.FindBackableChain(make(Ancestors), uint32(count)) + backableChain := chain.findBackableChain(make(Ancestors), uint32(count)) require.Equal(t, hashes(0, 6), backableChain) } @@ -2123,26 +2123,26 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { ancestors := make(Ancestors) ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} require.Equal(t, 0, chain.findAncestorPath(ancestors)) - require.Equal(t, hashes(0, 4), 
chain.FindBackableChain(ancestors, 4)) + require.Equal(t, hashes(0, 4), chain.findBackableChain(ancestors, 4)) ancestors = make(Ancestors) ancestors[candidateHashes[1]] = struct{}{} ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} require.Equal(t, 0, chain.findAncestorPath(ancestors)) - require.Equal(t, hashes(0, 4), chain.FindBackableChain(ancestors, 4)) + require.Equal(t, hashes(0, 4), chain.findBackableChain(ancestors, 4)) ancestors = make(Ancestors) ancestors[candidateHashes[0]] = struct{}{} ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} require.Equal(t, 1, chain.findAncestorPath(maps.Clone(ancestors))) - require.Equal(t, hashes(1, 5), chain.FindBackableChain(ancestors, 4)) + require.Equal(t, hashes(1, 5), chain.findBackableChain(ancestors, 4)) // ancestors which are part of the chain but don't form a path from root, will be ignored ancestors = make(Ancestors) ancestors[candidateHashes[1]] = struct{}{} ancestors[candidateHashes[2]] = struct{}{} require.Equal(t, 0, chain.findAncestorPath(maps.Clone(ancestors))) - require.Equal(t, hashes(0, 4), chain.FindBackableChain(ancestors, 4)) + require.Equal(t, hashes(0, 4), chain.findBackableChain(ancestors, 4)) // valid ancestors ancestors = make(Ancestors) @@ -2150,10 +2150,10 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { ancestors[candidateHashes[0]] = struct{}{} ancestors[candidateHashes[1]] = struct{}{} require.Equal(t, 3, chain.findAncestorPath(maps.Clone(ancestors))) - require.Equal(t, hashes(3, 5), chain.FindBackableChain(maps.Clone(ancestors), 2)) + require.Equal(t, hashes(3, 5), chain.findBackableChain(maps.Clone(ancestors), 2)) for count := 3; count < 10; count++ { - require.Equal(t, hashes(3, 6), chain.FindBackableChain(maps.Clone(ancestors), uint32(count))) + require.Equal(t, hashes(3, 6), chain.findBackableChain(maps.Clone(ancestors), uint32(count))) } // valid ancestors with candidates which have been omitted due to timeouts @@ 
-2161,11 +2161,11 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { ancestors[candidateHashes[0]] = struct{}{} ancestors[candidateHashes[2]] = struct{}{} require.Equal(t, 1, chain.findAncestorPath(maps.Clone(ancestors))) - require.Equal(t, hashes(1, 4), chain.FindBackableChain(maps.Clone(ancestors), 3)) - require.Equal(t, hashes(1, 5), chain.FindBackableChain(maps.Clone(ancestors), 4)) + require.Equal(t, hashes(1, 4), chain.findBackableChain(maps.Clone(ancestors), 3)) + require.Equal(t, hashes(1, 5), chain.findBackableChain(maps.Clone(ancestors), 4)) for count := 5; count < 10; count++ { - require.Equal(t, hashes(1, 6), chain.FindBackableChain(maps.Clone(ancestors), uint32(count))) + require.Equal(t, hashes(1, 6), chain.findBackableChain(maps.Clone(ancestors), uint32(count))) } ancestors = make(Ancestors) @@ -2173,9 +2173,9 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { ancestors[candidateHashes[1]] = struct{}{} ancestors[candidateHashes[3]] = struct{}{} require.Equal(t, 2, chain.findAncestorPath(maps.Clone(ancestors))) - require.Equal(t, hashes(2, 6), chain.FindBackableChain(maps.Clone(ancestors), 4)) + require.Equal(t, hashes(2, 6), chain.findBackableChain(maps.Clone(ancestors), 4)) - require.Equal(t, hashes(0, 0), chain.FindBackableChain(maps.Clone(ancestors), 0)) + require.Equal(t, hashes(0, 0), chain.findBackableChain(maps.Clone(ancestors), 0)) // stop when we've found a candidate which is pending availability scope, err := newScopeWithAncestors(relayParentInfo, baseConstraints, @@ -2190,6 +2190,6 @@ func TestFindAncestorPathAndFindBackableChain(t *testing.T) { ancestors = make(Ancestors) ancestors[candidateHashes[0]] = struct{}{} ancestors[candidateHashes[1]] = struct{}{} - require.Equal(t, hashes(2, 3), chain.FindBackableChain(maps.Clone(ancestors), 3)) + require.Equal(t, hashes(2, 3), chain.findBackableChain(maps.Clone(ancestors), 3)) }) } diff --git a/dot/parachain/prospective-parachains/inclusion_emulator.go 
b/dot/parachain/prospective-parachains/inclusion_emulator.go index 0018d60937..f1dd020482 100644 --- a/dot/parachain/prospective-parachains/inclusion_emulator.go +++ b/dot/parachain/prospective-parachains/inclusion_emulator.go @@ -2,7 +2,6 @@ package prospectiveparachains import ( "bytes" - "errors" "fmt" "iter" "maps" @@ -13,176 +12,40 @@ import ( "github.com/ethereum/go-ethereum/common/math" ) -// ProspectiveCandidate includes key informations that represents a candidate +// prospectiveCandidate includes key informations that represents a candidate // without pinning it to a particular session. For example, commitments are // represented here, but the erasure-root is not. This means that, prospective // candidates are not correlated to any session in particular. -type ProspectiveCandidate struct { +type prospectiveCandidate struct { Commitments parachaintypes.CandidateCommitments PersistedValidationData parachaintypes.PersistedValidationData PoVHash common.Hash ValidationCodeHash parachaintypes.ValidationCodeHash } -type ErrDisallowedHrmpWatermark struct { - BlockNumber uint -} - -func (e *ErrDisallowedHrmpWatermark) Error() string { - return fmt.Sprintf("DisallowedHrmpWatermark(BlockNumber: %d)", e.BlockNumber) -} - -type ErrNoSuchHrmpChannel struct { - paraID parachaintypes.ParaID -} - -func (e *ErrNoSuchHrmpChannel) Error() string { - return fmt.Sprintf("NoSuchHrmpChannel(ParaId: %d)", e.paraID) -} - -type ErrHrmpMessagesOverflow struct { - paraID parachaintypes.ParaID - messagesRemaining uint32 - messagesSubmitted uint32 -} - -func (e *ErrHrmpMessagesOverflow) Error() string { - return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", - e.paraID, e.messagesRemaining, e.messagesSubmitted) -} - -type ErrHrmpBytesOverflow struct { - paraID parachaintypes.ParaID - bytesRemaining uint32 - bytesSubmitted uint32 -} - -func (e *ErrHrmpBytesOverflow) Error() string { - return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, 
BytesRemaining: %d, BytesSubmitted: %d)", - e.paraID, e.bytesRemaining, e.bytesSubmitted) -} - -type ErrUmpMessagesOverflow struct { - messagesRemaining uint32 - messagesSubmitted uint32 -} - -func (e *ErrUmpMessagesOverflow) Error() string { - return fmt.Sprintf("UmpMessagesOverflow(MessagesRemaining: %d, MessagesSubmitted: %d)", - e.messagesRemaining, e.messagesSubmitted) -} - -type ErrUmpBytesOverflow struct { - bytesRemaining uint32 - bytesSubmitted uint32 -} - -func (e *ErrUmpBytesOverflow) Error() string { - return fmt.Sprintf("UmpBytesOverflow(BytesRemaining: %d, BytesSubmitted: %d)", e.bytesRemaining, e.bytesSubmitted) -} - -type ErrDmpMessagesUnderflow struct { - messagesRemaining uint32 - messagesProcessed uint32 -} - -func (e *ErrDmpMessagesUnderflow) Error() string { - return fmt.Sprintf("DmpMessagesUnderflow(MessagesRemaining: %d, MessagesProcessed: %d)", - e.messagesRemaining, e.messagesProcessed) -} - -var ( - ErrAppliedNonexistentCodeUpgrade = errors.New("AppliedNonexistentCodeUpgrade()") - ErrDmpAdvancementRule = errors.New("DmpAdvancementRule()") - ErrCodeUpgradeRestricted = errors.New("CodeUpgradeRestricted()") -) - -type ErrValidationCodeMismatch struct { - expected parachaintypes.ValidationCodeHash - got parachaintypes.ValidationCodeHash -} - -func (e *ErrValidationCodeMismatch) Error() string { - return fmt.Sprintf("ValidationCodeMismatch(Expected: %v, Got: %v)", e.expected, e.got) -} - -type ErrOutputsInvalid struct { - ModificationError error -} - -func (e *ErrOutputsInvalid) Error() string { - return fmt.Sprintf("OutputsInvalid(ModificationError: %v)", e.ModificationError) -} - -type ErrCodeSizeTooLarge struct { - maxAllowed uint32 - newSize uint32 -} - -func (e *ErrCodeSizeTooLarge) Error() string { - return fmt.Sprintf("CodeSizeTooLarge(MaxAllowed: %d, NewSize: %d)", e.maxAllowed, e.newSize) -} - -type ErrRelayParentTooOld struct { - minAllowed uint - current uint -} - -func (e *ErrRelayParentTooOld) Error() string { - return 
fmt.Sprintf("RelayParentTooOld(MinAllowed: %d, Current: %d)", e.minAllowed, e.current) -} - -type ErrUmpMessagesPerCandidateOverflow struct { - messagesAllowed uint32 - messagesSubmitted uint32 -} - -func (e *ErrUmpMessagesPerCandidateOverflow) Error() string { - return fmt.Sprintf("UmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", - e.messagesAllowed, e.messagesSubmitted) -} - -type ErrHrmpMessagesPerCandidateOverflow struct { - messagesAllowed uint32 - messagesSubmitted uint32 -} - -func (e *ErrHrmpMessagesPerCandidateOverflow) Error() string { - return fmt.Sprintf("HrmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", - e.messagesAllowed, e.messagesSubmitted) -} - -type ErrHrmpMessagesDescendingOrDuplicate struct { - index uint -} - -func (e *ErrHrmpMessagesDescendingOrDuplicate) Error() string { - return fmt.Sprintf("HrmpMessagesDescendingOrDuplicate(Index: %d)", e.index) -} - -// RelayChainBlockInfo contains minimum information about a relay-chain block. -type RelayChainBlockInfo struct { +// relayChainBlockInfo contains minimum information about a relay-chain block. 
+type relayChainBlockInfo struct { Hash common.Hash StorageRoot common.Hash Number uint } -func CheckModifications(c *parachaintypes.Constraints, modifications *ConstraintModifications) error { +func checkModifications(c *parachaintypes.Constraints, modifications *constraintModifications) error { if modifications.HrmpWatermark != nil && modifications.HrmpWatermark.Type == Trunk { if !slices.Contains(c.HrmpInbound.ValidWatermarks, modifications.HrmpWatermark.Watermark()) { - return &ErrDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Watermark()} + return &errDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Watermark()} } } for id, outboundHrmpMod := range modifications.OutboundHrmp { outbound, ok := c.HrmpChannelsOut[id] if !ok { - return &ErrNoSuchHrmpChannel{paraID: id} + return &errNoSuchHrmpChannel{paraID: id} } _, overflow := math.SafeSub(uint64(outbound.BytesRemaining), uint64(outboundHrmpMod.BytesSubmitted)) if overflow { - return &ErrHrmpBytesOverflow{ + return &errHrmpBytesOverflow{ paraID: id, bytesRemaining: outbound.BytesRemaining, bytesSubmitted: outboundHrmpMod.BytesSubmitted, @@ -191,7 +54,7 @@ func CheckModifications(c *parachaintypes.Constraints, modifications *Constraint _, overflow = math.SafeSub(uint64(outbound.MessagesRemaining), uint64(outboundHrmpMod.MessagesSubmitted)) if overflow { - return &ErrHrmpMessagesOverflow{ + return &errHrmpMessagesOverflow{ paraID: id, messagesRemaining: outbound.MessagesRemaining, messagesSubmitted: outboundHrmpMod.MessagesSubmitted, @@ -201,7 +64,7 @@ func CheckModifications(c *parachaintypes.Constraints, modifications *Constraint _, overflow := math.SafeSub(uint64(c.UmpRemaining), uint64(modifications.UmpMessagesSent)) if overflow { - return &ErrUmpMessagesOverflow{ + return &errUmpMessagesOverflow{ messagesRemaining: c.UmpRemaining, messagesSubmitted: modifications.UmpMessagesSent, } @@ -209,7 +72,7 @@ func CheckModifications(c *parachaintypes.Constraints, modifications 
*Constraint _, overflow = math.SafeSub(uint64(c.UmpRemainingBytes), uint64(modifications.UmpBytesSent)) if overflow { - return &ErrUmpBytesOverflow{ + return &errUmpBytesOverflow{ bytesRemaining: c.UmpRemainingBytes, bytesSubmitted: modifications.UmpBytesSent, } @@ -217,20 +80,20 @@ func CheckModifications(c *parachaintypes.Constraints, modifications *Constraint _, overflow = math.SafeSub(uint64(len(c.DmpRemainingMessages)), uint64(modifications.DmpMessagesProcessed)) if overflow { - return &ErrDmpMessagesUnderflow{ + return &errDmpMessagesUnderflow{ messagesRemaining: uint32(len(c.DmpRemainingMessages)), messagesProcessed: modifications.DmpMessagesProcessed, } } if c.FutureValidationCode == nil && modifications.CodeUpgradeApplied { - return ErrAppliedNonexistentCodeUpgrade + return errAppliedNonexistentCodeUpgrade } return nil } -func ApplyModifications(c *parachaintypes.Constraints, modifications *ConstraintModifications) ( +func applyModifications(c *parachaintypes.Constraints, modifications *constraintModifications) ( *parachaintypes.Constraints, error) { newConstraints := c.Clone() @@ -253,7 +116,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint newConstraints.HrmpInbound.ValidWatermarks = newConstraints.HrmpInbound.ValidWatermarks[pos:] case Trunk: // Trunk update landing on disallowed watermark is not OK. 
- return nil, &ErrDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Block} + return nil, &errDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Block} } } } @@ -261,11 +124,11 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint for id, outboundHrmpMod := range modifications.OutboundHrmp { outbound, ok := newConstraints.HrmpChannelsOut[id] if !ok { - return nil, &ErrNoSuchHrmpChannel{id} + return nil, &errNoSuchHrmpChannel{id} } if outboundHrmpMod.BytesSubmitted > outbound.BytesRemaining { - return nil, &ErrHrmpBytesOverflow{ + return nil, &errHrmpBytesOverflow{ paraID: id, bytesRemaining: outbound.BytesRemaining, bytesSubmitted: outboundHrmpMod.BytesSubmitted, @@ -273,7 +136,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint } if outboundHrmpMod.MessagesSubmitted > outbound.MessagesRemaining { - return nil, &ErrHrmpMessagesOverflow{ + return nil, &errHrmpMessagesOverflow{ paraID: id, messagesRemaining: outbound.MessagesRemaining, messagesSubmitted: outboundHrmpMod.MessagesSubmitted, @@ -285,7 +148,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint } if modifications.UmpMessagesSent > newConstraints.UmpRemaining { - return nil, &ErrUmpMessagesOverflow{ + return nil, &errUmpMessagesOverflow{ messagesRemaining: newConstraints.UmpRemaining, messagesSubmitted: modifications.UmpMessagesSent, } @@ -293,7 +156,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint newConstraints.UmpRemaining -= modifications.UmpMessagesSent if modifications.UmpBytesSent > newConstraints.UmpRemainingBytes { - return nil, &ErrUmpBytesOverflow{ + return nil, &errUmpBytesOverflow{ bytesRemaining: newConstraints.UmpRemainingBytes, bytesSubmitted: modifications.UmpBytesSent, } @@ -301,7 +164,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint newConstraints.UmpRemainingBytes -= 
modifications.UmpBytesSent if modifications.DmpMessagesProcessed > uint32(len(newConstraints.DmpRemainingMessages)) { - return nil, &ErrDmpMessagesUnderflow{ + return nil, &errDmpMessagesUnderflow{ messagesRemaining: uint32(len(newConstraints.DmpRemainingMessages)), messagesProcessed: modifications.DmpMessagesProcessed, } @@ -311,7 +174,7 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint if modifications.CodeUpgradeApplied { if newConstraints.FutureValidationCode == nil { - return nil, ErrAppliedNonexistentCodeUpgrade + return nil, errAppliedNonexistentCodeUpgrade } newConstraints.ValidationCodeHash = newConstraints.FutureValidationCode.ValidationCodeHash @@ -320,39 +183,39 @@ func ApplyModifications(c *parachaintypes.Constraints, modifications *Constraint return newConstraints, nil } -// OutboundHrmpChannelModification represents modifications to outbound HRMP channels. -type OutboundHrmpChannelModification struct { +// outboundHrmpChannelModification represents modifications to outbound HRMP channels. +type outboundHrmpChannelModification struct { BytesSubmitted uint32 MessagesSubmitted uint32 } -// HrmpWatermarkUpdate represents an update to the HRMP Watermark. -type HrmpWatermarkUpdate struct { - Type HrmpWatermarkUpdateType +// hrmpWatermarkUpdate represents an update to the HRMP Watermark. +type hrmpWatermarkUpdate struct { + Type hrmpWatermarkUpdateType Block uint } -// HrmpWatermarkUpdateType defines the type of HrmpWatermarkUpdate. -type HrmpWatermarkUpdateType int +// hrmpWatermarkUpdateType defines the type of HrmpWatermarkUpdate. +type hrmpWatermarkUpdateType int const ( - Head HrmpWatermarkUpdateType = iota + Head hrmpWatermarkUpdateType = iota Trunk ) // Watermark returns the block number of the HRMP Watermark update. 
-func (h HrmpWatermarkUpdate) Watermark() uint { +func (h hrmpWatermarkUpdate) Watermark() uint { return h.Block } -// ConstraintModifications represents modifications to constraints as a result of prospective candidates. -type ConstraintModifications struct { +// constraintModifications represents modifications to constraints as a result of prospective candidates. +type constraintModifications struct { // The required parent head to build upon. RequiredParent *parachaintypes.HeadData // The new HRMP watermark. - HrmpWatermark *HrmpWatermarkUpdate + HrmpWatermark *hrmpWatermarkUpdate // Outbound HRMP channel modifications. - OutboundHrmp map[parachaintypes.ParaID]OutboundHrmpChannelModification + OutboundHrmp map[parachaintypes.ParaID]outboundHrmpChannelModification // The amount of UMP XCM messages sent. `UMPSignal` and separator are excluded. UmpMessagesSent uint32 // The amount of UMP XCM bytes sent. `UMPSignal` and separator are excluded. @@ -363,8 +226,8 @@ type ConstraintModifications struct { CodeUpgradeApplied bool } -func (cm *ConstraintModifications) Clone() *ConstraintModifications { - return &ConstraintModifications{ +func (cm *constraintModifications) Clone() *constraintModifications { + return &constraintModifications{ RequiredParent: cm.RequiredParent, HrmpWatermark: cm.HrmpWatermark, OutboundHrmp: maps.Clone(cm.OutboundHrmp), @@ -377,16 +240,16 @@ func (cm *ConstraintModifications) Clone() *ConstraintModifications { // Identity returns the 'identity' modifications: these can be applied to // any constraints and yield the exact same result. 
-func NewConstraintModificationsIdentity() *ConstraintModifications { - return &ConstraintModifications{ - OutboundHrmp: make(map[parachaintypes.ParaID]OutboundHrmpChannelModification), +func NewConstraintModificationsIdentity() *constraintModifications { + return &constraintModifications{ + OutboundHrmp: make(map[parachaintypes.ParaID]outboundHrmpChannelModification), } } // Stack stacks other modifications on top of these. This does no sanity-checking, so if // `other` is garbage relative to `self`, then the new value will be garbage as well. // This is an addition which is not commutative. -func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { +func (cm *constraintModifications) Stack(other *constraintModifications) { if other.RequiredParent != nil { cm.RequiredParent = other.RequiredParent } @@ -398,7 +261,7 @@ func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { for id, mods := range other.OutboundHrmp { record, ok := cm.OutboundHrmp[id] if !ok { - record = OutboundHrmpChannelModification{} + record = outboundHrmpChannelModification{} } record.BytesSubmitted += mods.BytesSubmitted @@ -415,21 +278,21 @@ func (cm *ConstraintModifications) Stack(other *ConstraintModifications) { // Fragment represents another prospective parachain block // This is a type which guarantees that the candidate is valid under the operating constraints type Fragment struct { - relayParent *RelayChainBlockInfo + relayParent *relayChainBlockInfo operatingConstraints *parachaintypes.Constraints - candidate *ProspectiveCandidate - modifications *ConstraintModifications + candidate *prospectiveCandidate + modifications *constraintModifications } -func (f *Fragment) RelayParent() *RelayChainBlockInfo { +func (f *Fragment) RelayParent() *relayChainBlockInfo { return f.relayParent } -func (f *Fragment) Candidate() *ProspectiveCandidate { +func (f *Fragment) Candidate() *prospectiveCandidate { return f.candidate } -func (f *Fragment) 
ConstraintModifications() *ConstraintModifications { +func (f *Fragment) ConstraintModifications() *constraintModifications { return f.modifications } @@ -439,11 +302,11 @@ func (f *Fragment) ConstraintModifications() *ConstraintModifications { // This does not check that the collator signature is valid or whether the PoV is // small enough. func NewFragment( - relayParent *RelayChainBlockInfo, + relayParent *relayChainBlockInfo, operatingConstraints *parachaintypes.Constraints, - candidate *ProspectiveCandidate) (*Fragment, error) { + candidate *prospectiveCandidate) (*Fragment, error) { - modifications, err := CheckAgainstConstraints( + modifications, err := checkAgainstConstraints( relayParent, operatingConstraints, candidate.Commitments, @@ -462,13 +325,13 @@ func NewFragment( }, nil } -func CheckAgainstConstraints( - relayParent *RelayChainBlockInfo, +func checkAgainstConstraints( + relayParent *relayChainBlockInfo, operatingConstraints *parachaintypes.Constraints, commitments parachaintypes.CandidateCommitments, validationCodeHash parachaintypes.ValidationCodeHash, persistedValidationData parachaintypes.PersistedValidationData, -) (*ConstraintModifications, error) { +) (*constraintModifications, error) { upwardMessages := make([]parachaintypes.UpwardMessage, 0) // filter UMP signals for upwardMessage := range skipUmpSignals(commitments.UpwardMessages) { @@ -481,7 +344,7 @@ func CheckAgainstConstraints( umpBytesSent += len(message) } - hrmpWatermark := HrmpWatermarkUpdate{ + hrmpWatermark := hrmpWatermarkUpdate{ Type: Trunk, Block: uint(commitments.HrmpWatermark), } @@ -490,19 +353,19 @@ func CheckAgainstConstraints( hrmpWatermark.Type = Head } - outboundHrmp := make(map[parachaintypes.ParaID]OutboundHrmpChannelModification) + outboundHrmp := make(map[parachaintypes.ParaID]outboundHrmpChannelModification) var lastRecipient *parachaintypes.ParaID for i, message := range commitments.HorizontalMessages { if lastRecipient != nil && *lastRecipient >= 
parachaintypes.ParaID(message.Recipient) { - return nil, &ErrHrmpMessagesDescendingOrDuplicate{index: uint(i)} + return nil, &errHrmpMessagesDescendingOrDuplicate{index: uint(i)} } recipientParaID := parachaintypes.ParaID(message.Recipient) lastRecipient = &recipientParaID record, ok := outboundHrmp[recipientParaID] if !ok { - record = OutboundHrmpChannelModification{} + record = outboundHrmpChannelModification{} } record.BytesSubmitted += uint32(len(message.Data)) @@ -515,7 +378,7 @@ func CheckAgainstConstraints( codeUpgradeApplied = relayParent.Number >= operatingConstraints.FutureValidationCode.BlockNumber } - modifications := &ConstraintModifications{ + modifications := &constraintModifications{ RequiredParent: &commitments.HeadData, HrmpWatermark: &hrmpWatermark, OutboundHrmp: outboundHrmp, @@ -540,11 +403,9 @@ func CheckAgainstConstraints( return modifications, nil } -// UmpSeparator is a constant used to separate UMP signals. -var UmpSeparator = []byte{} - // skipUmpSignals is a utility function for skipping the UMP signals. 
func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[parachaintypes.UpwardMessage] { + var UmpSeparator = []byte{} return func(yield func(parachaintypes.UpwardMessage) bool) { for _, message := range upwardMessages { if !bytes.Equal([]byte(message), UmpSeparator) { @@ -560,11 +421,11 @@ func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[para func validateAgainstConstraints( constraints *parachaintypes.Constraints, - relayParent *RelayChainBlockInfo, + relayParent *relayChainBlockInfo, commitments parachaintypes.CandidateCommitments, persistedValidationData parachaintypes.PersistedValidationData, validationCodeHash parachaintypes.ValidationCodeHash, - modifications *ConstraintModifications, + modifications *constraintModifications, ) error { expectedPVD := parachaintypes.PersistedValidationData{ ParentHead: constraints.RequiredParent, @@ -579,28 +440,23 @@ func validateAgainstConstraints( } if constraints.ValidationCodeHash != validationCodeHash { - return &ErrValidationCodeMismatch{ + return &errValidationCodeMismatch{ expected: constraints.ValidationCodeHash, got: validationCodeHash, } } if relayParent.Number < constraints.MinRelayParentNumber { - return &ErrRelayParentTooOld{ + return &errRelayParentTooOld{ minAllowed: constraints.MinRelayParentNumber, current: relayParent.Number, } } if commitments.NewValidationCode != nil { - restriction, err := constraints.UpgradeRestriction.Value() - if err != nil { - return fmt.Errorf("while getting upgrade restriction: %w", err) - } - - switch restriction.(type) { + switch constraints.UpgradeRestriction.(type) { case *parachaintypes.Present: - return ErrCodeUpgradeRestricted + return errCodeUpgradeRestricted } } @@ -610,7 +466,7 @@ func validateAgainstConstraints( } if uint32(announcedCodeSize) > constraints.MaxCodeSize { - return &ErrCodeSizeTooLarge{ + return &errCodeSizeTooLarge{ maxAllowed: constraints.MaxCodeSize, newSize: uint32(announcedCodeSize), } @@ -618,26 +474,26 
@@ func validateAgainstConstraints( if modifications.DmpMessagesProcessed == 0 { if len(constraints.DmpRemainingMessages) > 0 && constraints.DmpRemainingMessages[0] <= relayParent.Number { - return ErrDmpAdvancementRule + return errDmpAdvancementRule } } if len(commitments.HorizontalMessages) > int(constraints.MaxHrmpNumPerCandidate) { - return &ErrHrmpMessagesPerCandidateOverflow{ + return &errHrmpMessagesPerCandidateOverflow{ messagesAllowed: constraints.MaxHrmpNumPerCandidate, messagesSubmitted: uint32(len(commitments.HorizontalMessages)), } } if modifications.UmpMessagesSent > constraints.MaxUmpNumPerCandidate { - return &ErrUmpMessagesPerCandidateOverflow{ + return &errUmpMessagesPerCandidateOverflow{ messagesAllowed: constraints.MaxUmpNumPerCandidate, messagesSubmitted: modifications.UmpMessagesSent, } } - if err := CheckModifications(constraints, modifications); err != nil { - return &ErrOutputsInvalid{ModificationError: err} + if err := checkModifications(constraints, modifications); err != nil { + return &errOutputsInvalid{ModificationError: err} } return nil diff --git a/dot/parachain/types/async_backing.go b/dot/parachain/types/async_backing.go index e35ec994dd..b7a635e4b7 100644 --- a/dot/parachain/types/async_backing.go +++ b/dot/parachain/types/async_backing.go @@ -69,7 +69,7 @@ type Constraints struct { // The expected validation-code-hash of this parachain. ValidationCodeHash ValidationCodeHash // The code upgrade restriction signal as-of this parachain. - UpgradeRestriction *UpgradeRestriction + UpgradeRestriction UpgradeRestriction // The future validation code hash, if any, and at what relay-parent // number the upgrade would be minimally applied. 
FutureValidationCode *FutureValidationCode From e5d8019f2fd6c6983a848c509768b40fa22b9345 Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Sat, 14 Dec 2024 15:27:42 -0400 Subject: [PATCH 30/31] chore: small comments --- dot/parachain/prospective-parachains/inclusion_emulator.go | 6 +++++- dot/parachain/types/async_backing.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/dot/parachain/prospective-parachains/inclusion_emulator.go b/dot/parachain/prospective-parachains/inclusion_emulator.go index f1dd020482..648f3415a8 100644 --- a/dot/parachain/prospective-parachains/inclusion_emulator.go +++ b/dot/parachain/prospective-parachains/inclusion_emulator.go @@ -454,7 +454,11 @@ func validateAgainstConstraints( } if commitments.NewValidationCode != nil { - switch constraints.UpgradeRestriction.(type) { + restriction, err := constraints.UpgradeRestriction.Value() + if err != nil { + return fmt.Errorf("while retrieving value: %w", err) + } + switch restriction.(type) { case *parachaintypes.Present: return errCodeUpgradeRestricted } diff --git a/dot/parachain/types/async_backing.go b/dot/parachain/types/async_backing.go index b7a635e4b7..e35ec994dd 100644 --- a/dot/parachain/types/async_backing.go +++ b/dot/parachain/types/async_backing.go @@ -69,7 +69,7 @@ type Constraints struct { // The expected validation-code-hash of this parachain. ValidationCodeHash ValidationCodeHash // The code upgrade restriction signal as-of this parachain. - UpgradeRestriction UpgradeRestriction + UpgradeRestriction *UpgradeRestriction // The future validation code hash, if any, and at what relay-parent // number the upgrade would be minimally applied. 
FutureValidationCode *FutureValidationCode From 3ba5603248206cff819c2f364aa102bb1d44f22f Mon Sep 17 00:00:00 2001 From: EclesioMeloJunior Date: Mon, 6 Jan 2025 15:37:01 -0400 Subject: [PATCH 31/31] chore: nolint unused funcs --- dot/parachain/prospective-parachains/errors.go | 2 +- dot/parachain/prospective-parachains/fragment_chain.go | 8 ++++---- .../prospective-parachains/fragment_chain_test.go | 10 ++++++---- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/dot/parachain/prospective-parachains/errors.go b/dot/parachain/prospective-parachains/errors.go index 56704a1c96..2b20aeadeb 100644 --- a/dot/parachain/prospective-parachains/errors.go +++ b/dot/parachain/prospective-parachains/errors.go @@ -13,7 +13,7 @@ var ( errZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") //nolint:lll errCycle = errors.New("candidate would introduce a cycle") errMultiplePaths = errors.New("candidate would introduce two paths to the same output state") - errIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. It should first be introduced as Seconded") //nolint:lll + errIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. 
It should first be introduced as Seconded") //nolint:lll,unused errParentCandidateNotFound = errors.New("could not find parent of the candidate") errRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") //nolint:lll errPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") //nolint:lll diff --git a/dot/parachain/prospective-parachains/fragment_chain.go b/dot/parachain/prospective-parachains/fragment_chain.go index dfe48a7732..407d6b88d7 100644 --- a/dot/parachain/prospective-parachains/fragment_chain.go +++ b/dot/parachain/prospective-parachains/fragment_chain.go @@ -534,7 +534,7 @@ func (f *fragmentChain) bestChainLen() int { return len(f.bestChain.chain) } -func (f *fragmentChain) containsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { +func (f *fragmentChain) containsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { //nolint:unused _, ok := f.unconnected.byCandidateHash[candidateHash] return ok } @@ -548,7 +548,7 @@ func (f *fragmentChain) bestChainVec() (hashes []parachaintypes.CandidateHash) { return hashes } -func (f *fragmentChain) isCandidateBacked(hash parachaintypes.CandidateHash) bool { +func (f *fragmentChain) isCandidateBacked(hash parachaintypes.CandidateHash) bool { //nolint:unused if _, ok := f.bestChain.candidates[hash]; ok { return true } @@ -603,7 +603,7 @@ func (f *fragmentChain) canAddCandidateAsPotential(entry *candidateEntry) error // tryAddingSecondedCandidate tries to add a candidate as a seconded candidate, if the // candidate has potential. 
It will never be added to the chain directly in the seconded // state, it will only be part of the unconnected storage -func (f *fragmentChain) tryAddingSecondedCandidate(entry *candidateEntry) error { +func (f *fragmentChain) tryAddingSecondedCandidate(entry *candidateEntry) error { //nolint:unused if entry.state == backed { return errIntroduceBackedCandidate } @@ -617,7 +617,7 @@ func (f *fragmentChain) tryAddingSecondedCandidate(entry *candidateEntry) error } // getHeadDataByHash tries to get the full head data associated with this hash -func (f *fragmentChain) getHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { +func (f *fragmentChain) getHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { //nolint:unused reqParent := f.scope.baseConstraints.RequiredParent reqParentHash, err := reqParent.Hash() if err != nil { diff --git a/dot/parachain/prospective-parachains/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment_chain_test.go index b7fa51c1c6..7951565557 100644 --- a/dot/parachain/prospective-parachains/fragment_chain_test.go +++ b/dot/parachain/prospective-parachains/fragment_chain_test.go @@ -603,8 +603,9 @@ func TestScopeRejectsAncestors(t *testing.T) { StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), }, }, - maxDepth: 2, - baseConstraints: makeConstraints(0, []parachaintypes.BlockNumber{}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), + maxDepth: 2, + baseConstraints: makeConstraints(0, []parachaintypes.BlockNumber{}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}), pendingAvailability: make([]*pendingAvailability, 0), expectedError: errUnexpectedAncestor{number: 99999, prev: 0}, }, @@ -631,8 +632,9 @@ func TestScopeRejectsAncestors(t *testing.T) { StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), }, }, - maxDepth: 2, - baseConstraints: makeConstraints(0, []parachaintypes.BlockNumber{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}), + maxDepth: 2, + 
baseConstraints: makeConstraints(0, []parachaintypes.BlockNumber{2}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}), pendingAvailability: make([]*pendingAvailability, 0), expectedError: errUnexpectedAncestor{number: 2, prev: 4}, },