diff --git a/dot/parachain/prospective-parachains/errors.go b/dot/parachain/prospective-parachains/errors.go new file mode 100644 index 0000000000..2b20aeadeb --- /dev/null +++ b/dot/parachain/prospective-parachains/errors.go @@ -0,0 +1,214 @@ +package prospectiveparachains + +import ( + "errors" + "fmt" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + "github.com/ChainSafe/gossamer/lib/common" +) + +var ( + errCandidateAlreadyKnown = errors.New("candidate already known") + errZeroLengthCycle = errors.New("candidate's parent head is equal to its output head. Would introduce a cycle") //nolint:lll + errCycle = errors.New("candidate would introduce a cycle") + errMultiplePaths = errors.New("candidate would introduce two paths to the same output state") + errIntroduceBackedCandidate = errors.New("attempting to directly introduce a Backed candidate. It should first be introduced as Seconded") //nolint:lll,unused + errParentCandidateNotFound = errors.New("could not find parent of the candidate") + errRelayParentMovedBackwards = errors.New("relay parent would move backwards from the latest candidate in the chain") //nolint:lll + errPersistedValidationDataMismatch = errors.New("candidate does not match the persisted validation data provided alongside it") //nolint:lll + errAppliedNonexistentCodeUpgrade = errors.New("applied non existent code upgrade") + errDmpAdvancementRule = errors.New("dmp advancement rule") + errCodeUpgradeRestricted = errors.New("code upgrade restricted") +) + +type errRelayParentPrecedesCandidatePendingAvailability struct { + relayParentA, relayParentB common.Hash +} + +func (e errRelayParentPrecedesCandidatePendingAvailability) Error() string { + return fmt.Sprintf("relay parent %x of the candidate precedes the relay parent %x of a pending availability candidate", + e.relayParentA, e.relayParentB) +} + +type errForkWithCandidatePendingAvailability struct { + candidateHash parachaintypes.CandidateHash +} + +func (e 
errForkWithCandidatePendingAvailability) Error() string { + return fmt.Sprintf("candidate would introduce a fork with a pending availability candidate: %x", e.candidateHash.Value) +} + +type errForkChoiceRule struct { + candidateHash parachaintypes.CandidateHash +} + +func (e errForkChoiceRule) Error() string { + return fmt.Sprintf("fork selection rule favours another candidate: %x", e.candidateHash.Value) +} + +type errComputeConstraints struct { + modificationErr error +} + +func (e errComputeConstraints) Error() string { + return fmt.Sprintf("could not compute candidate constraints: %s", e.modificationErr) +} + +type errCheckAgainstConstraints struct { + fragmentValidityErr error +} + +func (e errCheckAgainstConstraints) Error() string { + return fmt.Sprintf("candidate violates constraints: %s", e.fragmentValidityErr) +} + +type errRelayParentNotInScope struct { + relayParentA, relayParentB common.Hash +} + +func (e errRelayParentNotInScope) Error() string { + return fmt.Sprintf("relay parent %s not in scope, earliest relay parent allowed %s", + e.relayParentA.String(), e.relayParentB.String()) +} + +type errUnexpectedAncestor struct { + // The block number that this error occurred at + number parachaintypes.BlockNumber + // The previous seen block number, which did not match `number`. 
+ prev parachaintypes.BlockNumber +} + +func (e errUnexpectedAncestor) Error() string { + return fmt.Sprintf("unexpected ancestor %d, expected %d", e.number, e.prev) +} + +type errDisallowedHrmpWatermark struct { + BlockNumber parachaintypes.BlockNumber +} + +func (e *errDisallowedHrmpWatermark) Error() string { + return fmt.Sprintf("DisallowedHrmpWatermark(BlockNumber: %d)", e.BlockNumber) +} + +type errNoSuchHrmpChannel struct { + paraID parachaintypes.ParaID +} + +func (e *errNoSuchHrmpChannel) Error() string { + return fmt.Sprintf("NoSuchHrmpChannel(ParaId: %d)", e.paraID) +} + +type errHrmpMessagesOverflow struct { + paraID parachaintypes.ParaID + messagesRemaining uint32 + messagesSubmitted uint32 +} + +func (e *errHrmpMessagesOverflow) Error() string { + return fmt.Sprintf("HrmpMessagesOverflow(ParaId: %d, MessagesRemaining: %d, MessagesSubmitted: %d)", + e.paraID, e.messagesRemaining, e.messagesSubmitted) +} + +type errHrmpBytesOverflow struct { + paraID parachaintypes.ParaID + bytesRemaining uint32 + bytesSubmitted uint32 +} + +func (e *errHrmpBytesOverflow) Error() string { + return fmt.Sprintf("HrmpBytesOverflow(ParaId: %d, BytesRemaining: %d, BytesSubmitted: %d)", + e.paraID, e.bytesRemaining, e.bytesSubmitted) +} + +type errUmpMessagesOverflow struct { + messagesRemaining uint32 + messagesSubmitted uint32 +} + +func (e *errUmpMessagesOverflow) Error() string { + return fmt.Sprintf("UmpMessagesOverflow(MessagesRemaining: %d, MessagesSubmitted: %d)", + e.messagesRemaining, e.messagesSubmitted) +} + +type errUmpBytesOverflow struct { + bytesRemaining uint32 + bytesSubmitted uint32 +} + +func (e *errUmpBytesOverflow) Error() string { + return fmt.Sprintf("UmpBytesOverflow(BytesRemaining: %d, BytesSubmitted: %d)", e.bytesRemaining, e.bytesSubmitted) +} + +type errDmpMessagesUnderflow struct { + messagesRemaining uint32 + messagesProcessed uint32 +} + +func (e *errDmpMessagesUnderflow) Error() string { + return 
fmt.Sprintf("DmpMessagesUnderflow(MessagesRemaining: %d, MessagesProcessed: %d)", + e.messagesRemaining, e.messagesProcessed) +} + +type errValidationCodeMismatch struct { + expected parachaintypes.ValidationCodeHash + got parachaintypes.ValidationCodeHash +} + +func (e *errValidationCodeMismatch) Error() string { + return fmt.Sprintf("ValidationCodeMismatch(Expected: %v, Got: %v)", e.expected, e.got) +} + +type errOutputsInvalid struct { + ModificationError error +} + +func (e *errOutputsInvalid) Error() string { + return fmt.Sprintf("OutputsInvalid(ModificationError: %v)", e.ModificationError) +} + +type errCodeSizeTooLarge struct { + maxAllowed uint32 + newSize uint32 +} + +func (e *errCodeSizeTooLarge) Error() string { + return fmt.Sprintf("CodeSizeTooLarge(MaxAllowed: %d, NewSize: %d)", e.maxAllowed, e.newSize) +} + +type errRelayParentTooOld struct { + minAllowed parachaintypes.BlockNumber + current parachaintypes.BlockNumber +} + +func (e *errRelayParentTooOld) Error() string { + return fmt.Sprintf("RelayParentTooOld(MinAllowed: %d, Current: %d)", e.minAllowed, e.current) +} + +type errUmpMessagesPerCandidateOverflow struct { + messagesAllowed uint32 + messagesSubmitted uint32 +} + +func (e *errUmpMessagesPerCandidateOverflow) Error() string { + return fmt.Sprintf("UmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", + e.messagesAllowed, e.messagesSubmitted) +} + +type errHrmpMessagesPerCandidateOverflow struct { + messagesAllowed uint32 + messagesSubmitted uint32 +} + +func (e *errHrmpMessagesPerCandidateOverflow) Error() string { + return fmt.Sprintf("HrmpMessagesPerCandidateOverflow(MessagesAllowed: %d, MessagesSubmitted: %d)", + e.messagesAllowed, e.messagesSubmitted) +} + +type errHrmpMessagesDescendingOrDuplicate struct { + index uint +} + +func (e *errHrmpMessagesDescendingOrDuplicate) Error() string { + return fmt.Sprintf("HrmpMessagesDescendingOrDuplicate(Index: %d)", e.index) +} diff --git 
a/dot/parachain/prospective-parachains/fragment_chain.go b/dot/parachain/prospective-parachains/fragment_chain.go new file mode 100644 index 0000000000..407d6b88d7 --- /dev/null +++ b/dot/parachain/prospective-parachains/fragment_chain.go @@ -0,0 +1,1168 @@ +package prospectiveparachains + +import ( + "bytes" + "container/list" + "fmt" + "iter" + "slices" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/tidwall/btree" +) + +type candidateState byte + +const ( + seconded candidateState = iota + backed +) + +// forkSelectionRule does a normal comparison between 2 candidate hashes +// and returns -1 if the first hash is lower than the second one meaning that +// the first hash will be chosen as the best candidate. +func forkSelectionRule(hash1, hash2 parachaintypes.CandidateHash) int { + return bytes.Compare(hash1.Value[:], hash2.Value[:]) +} + +// candidateEntry represents a candidate in the candidateStorage +type candidateEntry struct { + candidateHash parachaintypes.CandidateHash + parentHeadDataHash common.Hash + outputHeadDataHash common.Hash + relayParent common.Hash + candidate *prospectiveCandidate + state candidateState +} + +func newCandidateEntry( + candidateHash parachaintypes.CandidateHash, + candidate parachaintypes.CommittedCandidateReceipt, + persistedValidationData parachaintypes.PersistedValidationData, + state candidateState, +) (*candidateEntry, error) { + pvdHash, err := persistedValidationData.Hash() + if err != nil { + return nil, fmt.Errorf("hashing persisted validation data: %w", err) + } + + if pvdHash != candidate.Descriptor.PersistedValidationDataHash { + return nil, errPersistedValidationDataMismatch + } + + parentHeadDataHash, err := persistedValidationData.ParentHead.Hash() + if err != nil { + return nil, fmt.Errorf("hashing parent head data: %w", err) + } + + outputHeadDataHash, err := candidate.Commitments.HeadData.Hash() + if err != nil { + return nil, 
fmt.Errorf("hashing output head data: %w", err) + } + + if parentHeadDataHash == outputHeadDataHash { + return nil, errZeroLengthCycle + } + + return &candidateEntry{ + candidateHash: candidateHash, + parentHeadDataHash: parentHeadDataHash, + outputHeadDataHash: outputHeadDataHash, + relayParent: candidate.Descriptor.RelayParent, + state: state, + candidate: &prospectiveCandidate{ + Commitments: candidate.Commitments, + PersistedValidationData: persistedValidationData, + PoVHash: candidate.Descriptor.PovHash, + ValidationCodeHash: candidate.Descriptor.ValidationCodeHash, + }, + }, nil +} + +// candidateStorage is an utility for storing candidates and information about them such as +// their relay-parents and their backing states. This does not assume any restriction on whether +// or not candidates form a chain. Useful for storing all kinds of candidates. +type candidateStorage struct { + byParentHead map[common.Hash]map[parachaintypes.CandidateHash]struct{} + byOutputHead map[common.Hash]map[parachaintypes.CandidateHash]struct{} + byCandidateHash map[parachaintypes.CandidateHash]*candidateEntry +} + +func (c *candidateStorage) clone() *candidateStorage { + clone := newCandidateStorage() + + for parentHead, candidates := range c.byParentHead { + clone.byParentHead[parentHead] = make(map[parachaintypes.CandidateHash]struct{}) + for candidateHash := range candidates { + clone.byParentHead[parentHead][candidateHash] = struct{}{} + } + } + + for outputHead, candidates := range c.byOutputHead { + clone.byOutputHead[outputHead] = make(map[parachaintypes.CandidateHash]struct{}) + for candidateHash := range candidates { + clone.byOutputHead[outputHead][candidateHash] = struct{}{} + } + } + + for candidateHash, entry := range c.byCandidateHash { + clone.byCandidateHash[candidateHash] = &candidateEntry{ + candidateHash: entry.candidateHash, + parentHeadDataHash: entry.parentHeadDataHash, + outputHeadDataHash: entry.outputHeadDataHash, + relayParent: entry.relayParent, + 
candidate: entry.candidate, + state: entry.state, + } + } + + return clone +} + +func newCandidateStorage() *candidateStorage { + return &candidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), + } +} + +func (c *candidateStorage) addPendingAvailabilityCandidate( + candidateHash parachaintypes.CandidateHash, + candidate parachaintypes.CommittedCandidateReceipt, + persistedValidationData parachaintypes.PersistedValidationData, +) error { + entry, err := newCandidateEntry(candidateHash, candidate, persistedValidationData, backed) + if err != nil { + return err + } + + if err := c.addCandidateEntry(entry); err != nil { + return fmt.Errorf("adding candidate entry: %w", err) + } + + return nil +} + +// Len return the number of stored candidate +func (c *candidateStorage) len() int { + return len(c.byCandidateHash) +} + +// addCandidateEntry inserts a new entry in the storage map, where the candidate hash +// is the key and the *candidateEntry is the value, also it create other links, the +// parent head hash points to the candidate hash also the output head hash points to the +// candidate hash +func (c *candidateStorage) addCandidateEntry(candidate *candidateEntry) error { + _, ok := c.byCandidateHash[candidate.candidateHash] + if ok { + return errCandidateAlreadyKnown + } + + // updates the reference parent hash -> candidate + // we don't check the `ok` value since the key can + // exists in the map but pointing to a nil map + setOfCandidates := c.byParentHead[candidate.parentHeadDataHash] + if setOfCandidates == nil { + setOfCandidates = make(map[parachaintypes.CandidateHash]struct{}) + } + setOfCandidates[candidate.candidateHash] = struct{}{} + c.byParentHead[candidate.parentHeadDataHash] = setOfCandidates + + // udpates the reference output hash -> candidate + 
setOfCandidates = c.byOutputHead[candidate.outputHeadDataHash] + if setOfCandidates == nil { + setOfCandidates = make(map[parachaintypes.CandidateHash]struct{}) + } + setOfCandidates[candidate.candidateHash] = struct{}{} + c.byOutputHead[candidate.outputHeadDataHash] = setOfCandidates + + c.byCandidateHash[candidate.candidateHash] = candidate + return nil +} + +// removeCandidate removes the candidate entry from the storage based on candidateHash +// it also removes the parent head hash entry that points to candidateHash and +// removes the output head hash entry that points to candidateHash +func (c *candidateStorage) removeCandidate(candidateHash parachaintypes.CandidateHash) { + entry, ok := c.byCandidateHash[candidateHash] + if !ok { + return + } + + delete(c.byCandidateHash, candidateHash) + + if setOfCandidates, ok := c.byParentHead[entry.parentHeadDataHash]; ok { + delete(setOfCandidates, candidateHash) + if len(setOfCandidates) == 0 { + delete(c.byParentHead, entry.parentHeadDataHash) + } + } + + if setOfCandidates, ok := c.byOutputHead[entry.outputHeadDataHash]; ok { + delete(setOfCandidates, candidateHash) + if len(setOfCandidates) == 0 { + delete(c.byOutputHead, entry.outputHeadDataHash) + } + } +} + +func (c *candidateStorage) markBacked(candidateHash parachaintypes.CandidateHash) { + entry, ok := c.byCandidateHash[candidateHash] + if !ok { + logger.Tracef("candidate not found while marking as backed") + } + + entry.state = backed +} + +func (c *candidateStorage) headDataByHash(hash common.Hash) *parachaintypes.HeadData { + // first, search for candidates outputting this head data and extract the head data + // from their commitments if they exist. + // otherwise, search for candidates building upon this head data and extract the + // head data from their persisted validation data if they exist. 
+ + if setOfCandidateHashes, ok := c.byOutputHead[hash]; ok { + for candidateHash := range setOfCandidateHashes { + if candidate, ok := c.byCandidateHash[candidateHash]; ok { + return &candidate.candidate.Commitments.HeadData + } + } + } + + if setOfCandidateHashes, ok := c.byParentHead[hash]; ok { + for candidateHash := range setOfCandidateHashes { + if candidate, ok := c.byCandidateHash[candidateHash]; ok { + return &candidate.candidate.PersistedValidationData.ParentHead + } + } + } + + return nil +} + +func (c *candidateStorage) possibleBackedParaChildren(parentHeadHash common.Hash) iter.Seq[*candidateEntry] { + return func(yield func(*candidateEntry) bool) { + seqOfCandidateHashes, ok := c.byParentHead[parentHeadHash] + if !ok { + return + } + + for candidateHash := range seqOfCandidateHashes { + if entry, ok := c.byCandidateHash[candidateHash]; ok && entry.state == backed { + if !yield(entry) { + return + } + } + } + } +} + +// pendingAvailability is a candidate on-chain but pending availability, for special +// treatment in the `scope` +type pendingAvailability struct { + candidateHash parachaintypes.CandidateHash + relayParent relayChainBlockInfo +} + +// The scope of a fragment chain +type scope struct { + // the relay parent we're currently building on top of + relayParent relayChainBlockInfo + // the other relay parents candidates are allowed to build upon, + // mapped by the block number + ancestors *btree.Map[parachaintypes.BlockNumber, relayChainBlockInfo] + // the other relay parents candidates are allowed to build upon, + // mapped by hash + ancestorsByHash map[common.Hash]relayChainBlockInfo + // candidates pending availability at this block + pendingAvailability []*pendingAvailability + // the base constraints derived from the latest included candidate + baseConstraints *parachaintypes.Constraints + // equal to `max_candidate_depth` + maxDepth uint +} + +// newScopeWithAncestors defines a new scope, all arguments are straightforward +// except 
ancestors. Ancestor should be in reverse order, starting with the parent +// of the relayParent, and proceeding backwards in block number decrements of 1. +// Ancestors not following these conditions will be rejected. +// +// This function will only consume ancestors up to the `MinRelayParentNumber` of the +// `baseConstraints`. +// +// Only ancestor whose children have the same session id as the relay parent's children +// should be provided. It is allowed to provide 0 ancestors. +func newScopeWithAncestors( + relayParent relayChainBlockInfo, + baseConstraints *parachaintypes.Constraints, + pendingAvailability []*pendingAvailability, + maxDepth uint, + ancestors []relayChainBlockInfo, +) (*scope, error) { + ancestorsMap := btree.NewMap[parachaintypes.BlockNumber, relayChainBlockInfo](100) + ancestorsByHash := make(map[common.Hash]relayChainBlockInfo) + + prev := relayParent.Number + for _, ancestor := range ancestors { + if prev == 0 { + return nil, errUnexpectedAncestor{number: ancestor.Number, prev: prev} + } + + if ancestor.Number != prev-1 { + return nil, errUnexpectedAncestor{number: ancestor.Number, prev: prev} + } + + if prev == baseConstraints.MinRelayParentNumber { + break + } + + prev = ancestor.Number + ancestorsByHash[ancestor.Hash] = ancestor + ancestorsMap.Set(ancestor.Number, ancestor) + } + + return &scope{ + relayParent: relayParent, + baseConstraints: baseConstraints, + pendingAvailability: pendingAvailability, + maxDepth: maxDepth, + ancestors: ancestorsMap, + ancestorsByHash: ancestorsByHash, + }, nil +} + +// earliestRelayParent gets the earliest relay-parent allowed in the scope of the fragment chain. +func (s *scope) earliestRelayParent() relayChainBlockInfo { + if iter := s.ancestors.Iter(); iter.Next() { + return iter.Value() + } + return s.relayParent +} + +// Ancestor gets the relay ancestor of the fragment chain by hash. 
+func (s *scope) ancestor(hash common.Hash) *relayChainBlockInfo { + if hash == s.relayParent.Hash { + return &s.relayParent + } + + if blockInfo, ok := s.ancestorsByHash[hash]; ok { + return &blockInfo + } + + return nil +} + +// Whether the candidate in question is one pending availability in this scope. +func (s *scope) getPendingAvailability(candidateHash parachaintypes.CandidateHash) *pendingAvailability { + for _, c := range s.pendingAvailability { + if c.candidateHash == candidateHash { + return c + } + } + return nil +} + +// Fragment node is a node that belongs to a `BackedChain`. It holds constraints based on +// the ancestors in the chain +type fragmentNode struct { + fragment *Fragment + candidateHash parachaintypes.CandidateHash + cumulativeModifications *constraintModifications + parentHeadDataHash common.Hash + outputHeadDataHash common.Hash +} + +func (f *fragmentNode) relayParent() common.Hash { + return f.fragment.RelayParent().Hash +} + +// newCandidateEntryFromFragment creates a candidate entry from a fragment, we dont need +// to perform the checks done in `newCandidateEntry` since a `fragmentNode` always comes +// from a `candidateEntry` +func newCandidateEntryFromFragment(node *fragmentNode) *candidateEntry { + return &candidateEntry{ + candidateHash: node.candidateHash, + parentHeadDataHash: node.parentHeadDataHash, + outputHeadDataHash: node.outputHeadDataHash, + candidate: node.fragment.Candidate(), + relayParent: node.relayParent(), + // a fragment node is always backed + state: backed, + } +} + +// backedChain is a chain of backed/backable candidates +// Includes candidates pending availability and candidates which may be backed on-chain +type backedChain struct { + // holds the candidate chain + chain []*fragmentNode + + // index from parent head data to the candidate that has that head data as parent + // only contains the candidates present in the `chain` + byParentHead map[common.Hash]parachaintypes.CandidateHash + + // index from 
head data hash to the candidate hash outputting that head data + // only contains the candidates present in the `chain` + byOutputHead map[common.Hash]parachaintypes.CandidateHash + + // a set of candidate hashes in the `chain` + candidates map[parachaintypes.CandidateHash]struct{} +} + +func newBackedChain() *backedChain { + return &backedChain{ + chain: make([]*fragmentNode, 0), + byParentHead: make(map[common.Hash]parachaintypes.CandidateHash), + byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash), + candidates: make(map[parachaintypes.CandidateHash]struct{}), + } +} + +func (bc *backedChain) push(candidate *fragmentNode) { + bc.candidates[candidate.candidateHash] = struct{}{} + bc.byParentHead[candidate.parentHeadDataHash] = candidate.candidateHash + bc.byOutputHead[candidate.outputHeadDataHash] = candidate.candidateHash + bc.chain = append(bc.chain, candidate) +} + +func (bc *backedChain) clear() []*fragmentNode { + bc.byParentHead = make(map[common.Hash]parachaintypes.CandidateHash) + bc.byOutputHead = make(map[common.Hash]parachaintypes.CandidateHash) + bc.candidates = make(map[parachaintypes.CandidateHash]struct{}) + + oldChain := bc.chain + bc.chain = nil + return oldChain +} + +func (bc *backedChain) revertToParentHash(parentHeadDataHash common.Hash) []*fragmentNode { + foundIndex := -1 + + for i := 0; i < len(bc.chain); i++ { + node := bc.chain[i] + + if foundIndex != -1 { + delete(bc.byParentHead, node.parentHeadDataHash) + delete(bc.byOutputHead, node.outputHeadDataHash) + delete(bc.candidates, node.candidateHash) + } else if node.outputHeadDataHash == parentHeadDataHash { + foundIndex = i + } + } + + if foundIndex != -1 { + // drain the elements from the found index until + // the end of the slice and return them + removed := make([]*fragmentNode, len(bc.chain)-(foundIndex+1)) + copy(removed, bc.chain[foundIndex+1:]) + bc.chain = slices.Delete(bc.chain, foundIndex+1, len(bc.chain)) + + return removed + } + + return nil +} + +// this is a 
fragment chain specific to an active leaf. It holds the current +// best backable candidate chain, as well as potential candidates which could +// become connected to the chain in the future or which could even overwrite +// the existing chain +type fragmentChain struct { + // the current scope, which dictates the on-chain operating constraints that + // all future candidates must ad-here to. + scope *scope + + // the current best chain of backable candidates. It only contains candidates + // which build on top of each other and which have reached the backing quorum. + // In the presence of potential forks, this chain will pick a fork according to + // the `forkSelectionRule` + bestChain *backedChain + + // the potential candidate storage. Contains candidates which are not yet part of + // the `chain` but may become in the future. These can form any tree shape as well + // as contain unconnected candidates for which we don't know the parent. + unconnected *candidateStorage +} + +// newFragmentChain createa a new fragment chain with the given scope and populates it with +// the candidates pending availability +func newFragmentChain(scope *scope, candidatesPendingAvailability *candidateStorage) *fragmentChain { + fragmentChain := &fragmentChain{ + scope: scope, + bestChain: newBackedChain(), + unconnected: newCandidateStorage(), + } + + // we only need to populate the best backable chain. Candidates pending availability + // must form a chain with the latest included head. 
+ fragmentChain.populateChain(candidatesPendingAvailability) + return fragmentChain +} + +// populateFromPrevious populates the `fragmentChain` given the new candidates pending +// availability and the optional previous fragment chain (of the previous relay parent) +func (f *fragmentChain) populateFromPrevious(prevFragmentChain *fragmentChain) { + prevStorage := prevFragmentChain.unconnected.clone() + for _, candidate := range prevFragmentChain.bestChain.chain { + // if they used to be pending availability, dont add them. This is fine because: + // - if they still are pending availability, they have already been added to + // the new storage + // - if they were included, no point in keeping them + // + // This cannot happen for the candidates in the unconnected storage. The pending + // availability candidates will always be part of the best chain + pending := prevFragmentChain.scope.getPendingAvailability(candidate.candidateHash) + if pending == nil { + _ = prevStorage.addCandidateEntry(newCandidateEntryFromFragment(candidate)) + } + } + + // first populate the best backable chain + f.populateChain(prevStorage) + + // now that we picked the best backable chain, trim the forks generated by candidates + // which are not present in the best chain + f.trimUneligibleForks(prevStorage, nil) + + // finally, keep any candidates which haven't been trimmed but still have potential + f.populateUnconnectedPotentialCandidates(prevStorage) +} + +func (f *fragmentChain) bestChainLen() int { + return len(f.bestChain.chain) +} + +func (f *fragmentChain) containsUnconnectedCandidate(candidateHash parachaintypes.CandidateHash) bool { //nolint:unused + _, ok := f.unconnected.byCandidateHash[candidateHash] + return ok +} + +// bestChainVec returns a vector of the chain's candidate hashes, in-order. 
+func (f *fragmentChain) bestChainVec() (hashes []parachaintypes.CandidateHash) { + hashes = make([]parachaintypes.CandidateHash, len(f.bestChain.chain)) + for idx, node := range f.bestChain.chain { + hashes[idx] = node.candidateHash + } + return hashes +} + +func (f *fragmentChain) isCandidateBacked(hash parachaintypes.CandidateHash) bool { //nolint:unused + if _, ok := f.bestChain.candidates[hash]; ok { + return true + } + + candidate := f.unconnected.byCandidateHash[hash] + return candidate != nil && candidate.state == backed +} + +// candidateBacked marks a candidate as backed. This can trigger a recreation of the best backable chain. +func (f *fragmentChain) candidateBacked(newlyBackedCandidate parachaintypes.CandidateHash) { + // already backed + if _, ok := f.bestChain.candidates[newlyBackedCandidate]; ok { + return + } + + candidateEntry, ok := f.unconnected.byCandidateHash[newlyBackedCandidate] + if !ok { + // candidate is not in unconnected storage + return + } + + parentHeadDataHash := candidateEntry.parentHeadDataHash + + f.unconnected.markBacked(newlyBackedCandidate) + + if !f.revertTo(parentHeadDataHash) { + // if nothing was reverted, there is nothing we can do for now + return + } + + prevStorage := f.unconnected.clone() + f.unconnected = newCandidateStorage() + + f.populateChain(prevStorage) + f.trimUneligibleForks(prevStorage, &parentHeadDataHash) + f.populateUnconnectedPotentialCandidates(prevStorage) +} + +// canAddCandidateAsPotential checks if this candidate could be added in the future +func (f *fragmentChain) canAddCandidateAsPotential(entry *candidateEntry) error { + candidateHash := entry.candidateHash + + _, existsInCandidateStorage := f.unconnected.byCandidateHash[candidateHash] + _, existsInBestChain := f.bestChain.candidates[candidateHash] + if existsInBestChain || existsInCandidateStorage { + return errCandidateAlreadyKnown + } + + return f.checkPotential(entry) +} + +// tryAddingSecondedCandidate tries to add a candidate as a 
seconded candidate, if the +// candidate has potential. It will never be added to the chain directly in the seconded +// state, it will only be part of the unconnected storage +func (f *fragmentChain) tryAddingSecondedCandidate(entry *candidateEntry) error { //nolint:unused + if entry.state == backed { + return errIntroduceBackedCandidate + } + + err := f.canAddCandidateAsPotential(entry) + if err != nil { + return err + } + + return f.unconnected.addCandidateEntry(entry) +} + +// getHeadDataByHash tries to get the full head data associated with this hash +func (f *fragmentChain) getHeadDataByHash(headDataHash common.Hash) (*parachaintypes.HeadData, error) { //nolint:unused + reqParent := f.scope.baseConstraints.RequiredParent + reqParentHash, err := reqParent.Hash() + if err != nil { + return nil, fmt.Errorf("while hashing required parent: %w", err) + } + if reqParentHash == headDataHash { + return &reqParent, nil + } + + hasHeadDataInChain := false + if _, ok := f.bestChain.byParentHead[headDataHash]; ok { + hasHeadDataInChain = true + } else if _, ok := f.bestChain.byOutputHead[headDataHash]; ok { + hasHeadDataInChain = true + } + + if hasHeadDataInChain { + for _, candidate := range f.bestChain.chain { + if candidate.parentHeadDataHash == headDataHash { + headData := candidate. + fragment. + Candidate(). + PersistedValidationData. + ParentHead + return &headData, nil + } else if candidate.outputHeadDataHash == headDataHash { + headData := candidate.fragment.Candidate().Commitments.HeadData + return &headData, nil + } else { + continue + } + } + } + + return f.unconnected.headDataByHash(headDataHash), nil +} + +type candidateAndRelayParent struct { + candidateHash parachaintypes.CandidateHash + realyParentHash common.Hash +} + +// findBackableChain selects `count` candidates after the given `ancestors` which +// can be backed on chain next. 
The intention of the `ancestors` is to allow queries +// on the basis of one or more candidates which were previously pending availability +// becoming available or candidates timing out +func (f *fragmentChain) findBackableChain( + ancestors map[parachaintypes.CandidateHash]struct{}, count uint32) []*candidateAndRelayParent { + if count == 0 { + return nil + } + + basePos := f.findAncestorPath(ancestors) + + actualEndIdx := min(basePos+int(count), len(f.bestChain.chain)) + res := make([]*candidateAndRelayParent, 0, actualEndIdx-basePos) + + for _, elem := range f.bestChain.chain[basePos:actualEndIdx] { + // only supply candidates which are not yet pending availability. + // `ancestors` should have already contained them, but check just in case + if pending := f.scope.getPendingAvailability(elem.candidateHash); pending == nil { + res = append(res, &candidateAndRelayParent{ + candidateHash: elem.candidateHash, + realyParentHash: elem.relayParent(), + }) + } else { + break + } + } + + return res +} + +// findAncestorPath tries to orders the ancestors into a viable path from root to the last one. +// stops when the ancestors are all used or when a node in the chain is not present in the +// ancestors set. Returns the index in the chain were the search stopped +func (f *fragmentChain) findAncestorPath(ancestors map[parachaintypes.CandidateHash]struct{}) int { + if len(f.bestChain.chain) == 0 { + return 0 + } + + for idx, candidate := range f.bestChain.chain { + _, ok := ancestors[candidate.candidateHash] + if !ok { + return idx + } + delete(ancestors, candidate.candidateHash) + } + + // this means that we found the entire chain in the ancestor set. There wont be + // anything left to back. + return len(f.bestChain.chain) +} + +// earliestRelayParent returns the earliest relay parent a new candidate can have in order +// to be added to the chain right now. This is the relay parent of the latest candidate in +// the chain. 
// The value returned may not be valid if we want to add a candidate pending
// availability, which may have a relay parent which is out of scope, special handling
// is needed in that case.
func (f *fragmentChain) earliestRelayParent() *relayChainBlockInfo {
	if len(f.bestChain.chain) > 0 {
		lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1]
		info := f.scope.ancestor(lastCandidate.relayParent())
		if info != nil {
			return info
		}

		// if the relay parent is out of scope AND the candidate is in the chain,
		// it must be a candidate pending availability
		pending := f.scope.getPendingAvailability(lastCandidate.candidateHash)
		if pending == nil {
			return nil
		}

		return &pending.relayParent
	}

	// empty chain: fall back to the earliest relay parent allowed by the scope
	earliest := f.scope.earliestRelayParent()
	return &earliest
}

// earliestRelayParentPendingAvailability returns the earliest relay parent a potential
// candidate may have for it to ever be added to the chain. This is the relay parent of
// the last candidate pending availability or the earliest relay parent in scope.
func (f *fragmentChain) earliestRelayParentPendingAvailability() *relayChainBlockInfo {
	// walk the best chain backwards looking for the most recent candidate
	// that is still pending availability
	for i := len(f.bestChain.chain) - 1; i >= 0; i-- {
		candidate := f.bestChain.chain[i]
		if pending := f.scope.getPendingAvailability(candidate.candidateHash); pending != nil {
			return &pending.relayParent
		}
	}
	earliest := f.scope.earliestRelayParent()
	return &earliest
}

// populateUnconnectedPotentialCandidates populates the unconnected potential candidate storage
// starting from a previous storage
func (f *fragmentChain) populateUnconnectedPotentialCandidates(oldStorage *candidateStorage) {
	for _, candidate := range oldStorage.byCandidateHash {
		// sanity check, all pending availability candidates should be already present
		// in the chain
		if pending := f.scope.getPendingAvailability(candidate.candidateHash); pending != nil {
			continue
		}

		// we can just use the error to check if we can add
		// or not an entry since an error can legitimately
		// happen when pruning stale candidates.
		err := f.canAddCandidateAsPotential(candidate)
		if err == nil {
			// the add error is deliberately discarded: failing to add here only
			// means the candidate is not a potential anymore
			_ = f.unconnected.addCandidateEntry(candidate)
		}
	}
}

// checkPotential checks whether a candidate could ever be added to the best chain:
// it rejects trivial cycles, out-of-scope or backwards-moving relay parents, forks
// with pending availability candidates, and (when the parent is already part of the
// chain or is the latest included candidate) constraint violations.
func (f *fragmentChain) checkPotential(candidate *candidateEntry) error {
	relayParent := candidate.relayParent
	parentHeadHash := candidate.parentHeadDataHash

	// trivial 0-length cycle: the candidate's output equals its own parent head
	if candidate.outputHeadDataHash == parentHeadHash {
		return errZeroLengthCycle
	}

	// Check if the relay parent is in scope
	relayParentInfo := f.scope.ancestor(relayParent)
	if relayParentInfo == nil {
		return errRelayParentNotInScope{
			relayParentA: relayParent,
			relayParentB: f.scope.earliestRelayParent().Hash,
		}
	}

	// Check if the relay parent moved backwards from the latest candidate pending availability
	earliestRPOfPendingAvailability := f.earliestRelayParentPendingAvailability()
	if relayParentInfo.Number < earliestRPOfPendingAvailability.Number {
		return errRelayParentPrecedesCandidatePendingAvailability{
			relayParentA: relayParentInfo.Hash,
			relayParentB: earliestRPOfPendingAvailability.Hash,
		}
	}

	// If it's a fork with a backed candidate in the current chain
	if otherCandidateHash, ok := f.bestChain.byParentHead[parentHeadHash]; ok {
		if f.scope.getPendingAvailability(otherCandidateHash) != nil {
			// Cannot accept a fork with a candidate pending availability
			return errForkWithCandidatePendingAvailability{candidateHash: otherCandidateHash}
		}

		// If the candidate is backed and in the current chain, accept only a candidate
		// according to the fork selection rule
		if forkSelectionRule(otherCandidateHash, candidate.candidateHash) == -1 {
			return errForkChoiceRule{candidateHash: otherCandidateHash}
		}
	}

	// Try seeing if the parent candidate is in the current chain or if it is the latest
	// included candidate. If so, get the constraints the candidate must satisfy
	var constraints *parachaintypes.Constraints
	var maybeMinRelayParentNumber *parachaintypes.BlockNumber

	requiredParentHash, err := f.scope.baseConstraints.RequiredParent.Hash()
	if err != nil {
		return fmt.Errorf("while hashing required parent: %w", err)
	}

	if parentCandidateHash, ok := f.bestChain.byOutputHead[parentHeadHash]; ok {
		// linear scan of the chain for the parent candidate node
		var parentCandidate *fragmentNode

		for _, c := range f.bestChain.chain {
			if c.candidateHash == parentCandidateHash {
				parentCandidate = c
				break
			}
		}

		if parentCandidate == nil {
			return errParentCandidateNotFound
		}

		var err error
		constraints, err = applyModifications(
			f.scope.baseConstraints,
			parentCandidate.cumulativeModifications)
		if err != nil {
			return errComputeConstraints{modificationErr: err}
		}

		if ancestor := f.scope.ancestor(parentCandidate.relayParent()); ancestor != nil {
			maybeMinRelayParentNumber = &ancestor.Number
		}
	} else if requiredParentHash == parentHeadHash {
		// It builds on the latest included candidate
		constraints = f.scope.baseConstraints.Clone()
	} else {
		// If the parent is not yet part of the chain, there's nothing else we can check for now
		return nil
	}

	// Check for cycles or invalid tree transitions
	if err := f.checkCyclesOrInvalidTree(candidate.outputHeadDataHash); err != nil {
		return err
	}

	// Check against constraints if we have a full concrete candidate
	_, err = checkAgainstConstraints(
		relayParentInfo,
		constraints,
		candidate.candidate.Commitments,
		candidate.candidate.ValidationCodeHash,
		candidate.candidate.PersistedValidationData,
	)
	if err != nil {
		return errCheckAgainstConstraints{fragmentValidityErr: err}
	}

	// relay parent must not move backwards from the constraints' minimum
	if relayParentInfo.Number < constraints.MinRelayParentNumber {
		return errRelayParentMovedBackwards
	}

	// nor backwards from the parent candidate's relay parent, when known
	if maybeMinRelayParentNumber != nil && relayParentInfo.Number < *maybeMinRelayParentNumber {
		return errRelayParentMovedBackwards
	}

	return nil
}

// trimUneligibleForks once the backable chain was populated, trim the forks generated by candidate
// hashes which are not present in the best chain. Fan this out into a full breadth-first search. If
// starting point is not nil then start the search from the candidates having this parent head hash.
func (f *fragmentChain) trimUneligibleForks(storage *candidateStorage, startingPoint *common.Hash) {
	// queueItem pairs a parent head data hash with whether that parent's
	// full ancestry was kept as potential
	type queueItem struct {
		hash         common.Hash
		hasPotential bool
	}

	queue := list.New()

	// start out with the candidates in the chain. They are all valid candidates.
	if startingPoint != nil {
		queue.PushBack(queueItem{hash: *startingPoint, hasPotential: true})
	} else {
		if len(f.bestChain.chain) == 0 {
			reqParentHeadHash, err := f.scope.baseConstraints.RequiredParent.Hash()
			if err != nil {
				panic(fmt.Sprintf("while hashing required parent: %s", err.Error()))
			}

			queue.PushBack(queueItem{hash: reqParentHeadHash, hasPotential: true})
		} else {
			for _, candidate := range f.bestChain.chain {
				queue.PushBack(queueItem{hash: candidate.parentHeadDataHash, hasPotential: true})
			}
		}
	}

	// to make sure that cycles don't make us loop forever, keep track
	// of the visited parent head hashes
	visited := map[common.Hash]struct{}{}

	for queue.Len() > 0 {
		// pop the front of the queue (container/list has no PopFront)
		parent := queue.Remove(queue.Front()).(queueItem)
		visited[parent.hash] = struct{}{}

		children, ok := storage.byParentHead[parent.hash]
		if !ok {
			continue
		}

		// cannot remove while iterating so store them here temporarily
		var toRemove []parachaintypes.CandidateHash

		for childHash := range children {
			child, ok := storage.byCandidateHash[childHash]
			if !ok {
				continue
			}

			// already visited this child. either it's a cycle or a multipath that leads
			// to the same candidate. either way, stop this branch to avoid looping
			// forever
			if _, ok = visited[child.outputHeadDataHash]; ok {
				continue
			}

			// only keep a candidate if its full ancestry was already kept as potential
			// and this candidate itself has potential
			if parent.hasPotential && f.checkPotential(child) == nil {
				queue.PushBack(queueItem{hash: child.outputHeadDataHash, hasPotential: true})
			} else {
				// otherwise, remove this candidate and continue looping for its children
				// but mark the parent's potential as false. we only want to remove its children.
				toRemove = append(toRemove, childHash)
				queue.PushBack(queueItem{hash: child.outputHeadDataHash, hasPotential: false})
			}
		}

		for _, hash := range toRemove {
			storage.removeCandidate(hash)
		}
	}
}

// possibleChild is a backed/backable candidate eligible to be appended to the
// best chain next, together with the hashes needed to link it into the chain.
type possibleChild struct {
	fragment           *Fragment
	candidateHash      parachaintypes.CandidateHash
	outputHeadDataHash common.Hash
	parentHeadDataHash common.Hash
}

// populateChain populates the fragment chain with candidates from the supplied `candidateStorage`.
// Can be called by the `newFragmentChain` or when backing a new candidate.
// When this is called
// it may cause the previous chain to be completely erased or it may add more than one candidate
func (f *fragmentChain) populateChain(storage *candidateStorage) {
	// start from the constraint modifications accumulated by the current chain tip,
	// or from the identity when the chain is empty
	var cumulativeModifications *constraintModifications
	if len(f.bestChain.chain) > 0 {
		lastCandidate := f.bestChain.chain[len(f.bestChain.chain)-1]
		cumulativeModifications = lastCandidate.cumulativeModifications.Clone()
	} else {
		cumulativeModifications = NewConstraintModificationsIdentity()
	}

	earliestRelayParent := f.earliestRelayParent()
	if earliestRelayParent == nil {
		return
	}

	// grow the chain one candidate at a time, up to maxDepth+1 entries
	for len(f.bestChain.chain) < int(f.scope.maxDepth)+1 {
		childConstraints, err := applyModifications(
			f.scope.baseConstraints, cumulativeModifications)
		if err != nil {
			logger.Warnf("failed to apply modifications: %s", err.Error())
			break
		}

		requiredHeadHash, err := childConstraints.RequiredParent.Hash()
		if err != nil {
			panic(fmt.Sprintf("failed while hashing required parent: %s", err.Error()))
		}

		possibleChildren := make([]*possibleChild, 0)
		// select the few possible backed/backable children which can be added to the chain right now
		for candidateEntry := range storage.possibleBackedParaChildren(requiredHeadHash) {
			// only select a candidate if:
			// 1. it does not introduce a fork or a cycle
			// 2. parent hash is correct
			// 3. relay parent does not move backwards
			// 4. all non-pending-availability candidates have relay-parent in the scope
			// 5. candidate outputs fulfil constraints

			var relayParent *relayChainBlockInfo
			var minRelayParent parachaintypes.BlockNumber

			pending := f.scope.getPendingAvailability(candidateEntry.candidateHash)
			if pending != nil {
				relayParent = &pending.relayParent
				if len(f.bestChain.chain) == 0 {
					minRelayParent = pending.relayParent.Number
				} else {
					minRelayParent = earliestRelayParent.Number
				}
			} else {
				info := f.scope.ancestor(candidateEntry.relayParent)
				if info == nil {
					continue
				}

				relayParent = info
				minRelayParent = earliestRelayParent.Number
			}

			if err := f.checkCyclesOrInvalidTree(candidateEntry.outputHeadDataHash); err != nil {
				logger.Warnf("failed while checking cycle or invalid tree: %s", err.Error())
				continue
			}

			// require: candidates don't move backwards and only pending availability
			// candidates can be out-of-scope.
			//
			// NOTE(review): the earliest relay parent can be before the relay parent of a
			// pending availability candidate (which may itself be out of scope) — the
			// original comment was truncated here; confirm against the Rust reference
			// implementation.
			if relayParent.Number < minRelayParent {
				// relay parent moved backwards
				continue
			}

			// don't add candidates if they're already present in the chain
			// this can never happen, as candidates can only be duplicated
			// if there's a cycle and we shouldn't have allowed for a cycle
			// to be chained
			if _, ok := f.bestChain.candidates[candidateEntry.candidateHash]; ok {
				continue
			}

			constraints := childConstraints.Clone()
			if pending != nil {
				// overwrite for candidates pending availability as a special-case
				constraints.MinRelayParentNumber = pending.relayParent.Number
			}

			fragment, err := NewFragment(relayParent, constraints, candidateEntry.candidate)
			if err != nil {
				logger.Warnf("failed to create fragment: %s", err.Error())
				continue
			}

			possibleChildren = append(possibleChildren, &possibleChild{
				fragment:           fragment,
				candidateHash:      candidateEntry.candidateHash,
				outputHeadDataHash: candidateEntry.outputHeadDataHash,
				parentHeadDataHash: candidateEntry.parentHeadDataHash,
			})
		}

		if len(possibleChildren) == 0 {
			break
		}

		// choose the best candidate
		// NOTE(review): when both candidates are pending availability this comparator
		// favours the first argument; with MinFunc that keeps the earlier element,
		// which appears intentional but is not a strict ordering — confirm.
		bestCandidate := slices.MinFunc(possibleChildren, func(fst, snd *possibleChild) int {
			// always pick a candidate pending availability as best.
			if f.scope.getPendingAvailability(fst.candidateHash) != nil {
				return -1
			} else if f.scope.getPendingAvailability(snd.candidateHash) != nil {
				return 1
			} else {
				return forkSelectionRule(fst.candidateHash, snd.candidateHash)
			}
		})

		// remove the candidate from storage
		storage.removeCandidate(bestCandidate.candidateHash)

		// update the cumulative constraint modifications
		cumulativeModifications.Stack(bestCandidate.fragment.ConstraintModifications())

		// update the earliest relay parent
		earliestRelayParent = &relayChainBlockInfo{
			Hash:        bestCandidate.fragment.RelayParent().Hash,
			Number:      bestCandidate.fragment.RelayParent().Number,
			StorageRoot: bestCandidate.fragment.RelayParent().StorageRoot,
		}

		node := &fragmentNode{
			fragment:                bestCandidate.fragment,
			candidateHash:           bestCandidate.candidateHash,
			parentHeadDataHash:      bestCandidate.parentHeadDataHash,
			outputHeadDataHash:      bestCandidate.outputHeadDataHash,
			cumulativeModifications: cumulativeModifications.Clone(),
		}

		// add the candidate to the chain now
		f.bestChain.push(node)
	}
}

// checkCyclesOrInvalidTree checks whether a candidate outputting this head data would
// introduce a cycle or multiple paths to the same state. Trivial 0-length cycles are
// checked in `newCandidateEntry`.
func (f *fragmentChain) checkCyclesOrInvalidTree(outputHeadDataHash common.Hash) error {
	// this should catch a cycle where this candidate would point back to the parent
	// of some candidate in the chain
	_, ok := f.bestChain.byParentHead[outputHeadDataHash]
	if ok {
		return errCycle
	}

	// multiple paths to the same state, which cannot happen for a chain
	_, ok = f.bestChain.byOutputHead[outputHeadDataHash]
	if ok {
		return errMultiplePaths
	}

	return nil
}

// revertTo reverts the best backable chain so that the last candidate will be one outputting the given
// `parent_head_hash`. If the `parent_head_hash` is exactly the required parent of the base
// constraints (builds on the latest included candidate), revert the entire chain.
// Return false if we couldn't find the parent head hash
func (f *fragmentChain) revertTo(parentHeadDataHash common.Hash) bool {
	// nil means "no revert point found"; an empty (non-nil) slice still counts as a revert
	var removedItems []*fragmentNode = nil

	requiredParentHash, err := f.scope.baseConstraints.RequiredParent.Hash()
	if err != nil {
		panic(fmt.Sprintf("failed while hashing required parent: %s", err.Error()))
	}

	if requiredParentHash == parentHeadDataHash {
		removedItems = f.bestChain.clear()
	}

	if _, ok := f.bestChain.byOutputHead[parentHeadDataHash]; removedItems == nil && ok {
		removedItems = f.bestChain.revertToParentHash(parentHeadDataHash)
	}

	if removedItems == nil {
		return false
	}

	// Even if it's empty, we need to return true, because we'll be able to add a new candidate
	// to the chain.
	// the removed candidates go back to the unconnected storage; add errors are
	// deliberately ignored (best-effort re-insertion)
	for _, node := range removedItems {
		_ = f.unconnected.addCandidateEntry(newCandidateEntryFromFragment(node))
	}

	return true
}
diff --git a/dot/parachain/prospective-parachains/fragment_chain_test.go b/dot/parachain/prospective-parachains/fragment_chain_test.go
new file mode 100644
index 0000000000..7951565557
--- /dev/null
+++ b/dot/parachain/prospective-parachains/fragment_chain_test.go
@@ -0,0 +1,2197 @@
package prospectiveparachains

import (
	"bytes"
	"errors"
	"maps"
	"math/rand"
	"slices"
	"testing"

	parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types"
	"github.com/ChainSafe/gossamer/lib/common"
	"github.com/ChainSafe/gossamer/lib/crypto/sr25519"
	"github.com/ChainSafe/gossamer/pkg/scale"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/tidwall/btree"
)

// TestCandidateStorage_RemoveCandidate indexes one candidate in all three maps,
// removes it, and checks every index was cleaned up.
func TestCandidateStorage_RemoveCandidate(t *testing.T) {
	storage := &candidateStorage{
		byParentHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
		byOutputHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
		byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry),
	}

	candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}}
	parentHeadHash := common.Hash{4, 5, 6}
	outputHeadHash := common.Hash{7, 8, 9}

	entry := &candidateEntry{
		candidateHash:      candidateHash,
		parentHeadDataHash: parentHeadHash,
		outputHeadDataHash: outputHeadHash,
		state:              backed,
	}

	storage.byCandidateHash[candidateHash] = entry
	storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}
	storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}

	storage.removeCandidate(candidateHash)

	_, exists := storage.byCandidateHash[candidateHash]
	assert.False(t, exists, "candidate should be removed from byCandidateHash")

	_, exists = storage.byParentHead[parentHeadHash]
	assert.False(t, exists, "candidate should be removed from byParentHead")

	_, exists = storage.byOutputHead[outputHeadHash]
	assert.False(t, exists, "candidate should be removed from byOutputHead")
}

// TestCandidateStorage_MarkBacked checks that markBacked promotes a seconded
// entry to the backed state in place.
func TestCandidateStorage_MarkBacked(t *testing.T) {
	storage := &candidateStorage{
		byParentHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
		byOutputHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
		byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry),
	}

	candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}}
	parentHeadHash := common.Hash{4, 5, 6}
	outputHeadHash := common.Hash{7, 8, 9}

	entry := &candidateEntry{
		candidateHash:      candidateHash,
		parentHeadDataHash: parentHeadHash,
		outputHeadDataHash: outputHeadHash,
		state:              seconded,
	}

	storage.byCandidateHash[candidateHash] = entry
	storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}
	storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}

	storage.markBacked(candidateHash)

	// the storage holds a pointer to the entry, so the mutation is visible here
	assert.Equal(t, backed, entry.state, "candidate state should be marked as backed")
}

// TestCandidateStorage_HeadDataByHash exercises headDataByHash lookups by both
// output and parent head data hashes, plus the not-found cases.
func TestCandidateStorage_HeadDataByHash(t *testing.T) {
	tests := map[string]struct {
		setup    func() *candidateStorage
		hash     common.Hash
		expected *parachaintypes.HeadData
	}{
		"find_head_data_of_first_candidate_using_output_head_data_hash": {
			setup: func() *candidateStorage {
				storage := &candidateStorage{
					byParentHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
					byOutputHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
					byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry),
				}

				candidateHash := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}}
				parentHeadHash := common.Hash{4, 5, 6}
				outputHeadHash := common.Hash{7, 8, 9}
				headData := parachaintypes.HeadData{Data: []byte{10, 11, 12}}

				entry := &candidateEntry{
					candidateHash:      candidateHash,
					parentHeadDataHash: parentHeadHash,
					outputHeadDataHash: outputHeadHash,
					candidate: &prospectiveCandidate{
						Commitments: parachaintypes.CandidateCommitments{
							HeadData: headData,
						},
					},
				}

				storage.byCandidateHash[candidateHash] = entry
				storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}
				storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}

				return storage
			},
			hash:     common.Hash{7, 8, 9},
			expected: &parachaintypes.HeadData{Data: []byte{10, 11, 12}},
		},
		"find_head_data_using_parent_head_data_hash_from_second_candidate": {
			setup: func() *candidateStorage {
				storage := &candidateStorage{
					byParentHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
					byOutputHead:    make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}),
					byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry),
				}

				candidateHash := parachaintypes.CandidateHash{Value: common.Hash{13, 14, 15}}
				parentHeadHash := common.Hash{16, 17, 18}
				outputHeadHash := common.Hash{19, 20, 21}
				headData := parachaintypes.HeadData{Data: []byte{22, 23, 24}}

				entry := &candidateEntry{
					candidateHash:      candidateHash,
					parentHeadDataHash: parentHeadHash,
					outputHeadDataHash: outputHeadHash,
					candidate: &prospectiveCandidate{
						PersistedValidationData: parachaintypes.PersistedValidationData{
							ParentHead: headData,
						},
					},
				}

				storage.byCandidateHash[candidateHash] = entry
				storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}
				storage.byOutputHead[outputHeadHash] = map[parachaintypes.CandidateHash]struct{}{candidateHash: {}}

				return storage
			},
			hash:     common.Hash{16, 17, 18},
			expected: &parachaintypes.HeadData{Data: []byte{22, 23, 24}},
		},
"use_nonexistent_hash_and_should_get_nil": { + setup: func() *candidateStorage { + storage := &candidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), + } + return storage + }, + hash: common.Hash{99, 99, 99}, + expected: nil, + }, + "insert_0_candidates_and_try_to_find_but_should_get_nil": { + setup: func() *candidateStorage { + return &candidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), + } + }, + hash: common.Hash{7, 8, 9}, + expected: nil, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + storage := tt.setup() + result := storage.headDataByHash(tt.hash) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestCandidateStorage_PossibleBackedParaChildren(t *testing.T) { + tests := map[string]struct { + setup func() *candidateStorage + hash common.Hash + expected []*candidateEntry + }{ + "insert_2_candidates_for_same_parent_one_seconded_one_backed": { + setup: func() *candidateStorage { + storage := &candidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), + } + + candidateHash1 := parachaintypes.CandidateHash{Value: common.Hash{1, 2, 3}} + parentHeadHash := common.Hash{4, 5, 6} + outputHeadHash1 := common.Hash{7, 8, 9} + + candidateHash2 := parachaintypes.CandidateHash{Value: common.Hash{10, 11, 12}} + outputHeadHash2 := common.Hash{13, 14, 15} + + entry1 := &candidateEntry{ + candidateHash: candidateHash1, + parentHeadDataHash: 
parentHeadHash, + outputHeadDataHash: outputHeadHash1, + state: seconded, + } + + entry2 := &candidateEntry{ + candidateHash: candidateHash2, + parentHeadDataHash: parentHeadHash, + outputHeadDataHash: outputHeadHash2, + state: backed, + } + + storage.byCandidateHash[candidateHash1] = entry1 + storage.byCandidateHash[candidateHash2] = entry2 + storage.byParentHead[parentHeadHash] = map[parachaintypes.CandidateHash]struct{}{ + candidateHash1: {}, + candidateHash2: {}, + } + + return storage + }, + hash: common.Hash{4, 5, 6}, + expected: []*candidateEntry{{candidateHash: parachaintypes.CandidateHash{ + Value: common.Hash{10, 11, 12}}, + parentHeadDataHash: common.Hash{4, 5, 6}, + outputHeadDataHash: common.Hash{13, 14, 15}, state: backed}, + }, + }, + "insert_nothing_and_call_function_should_return_nothing": { + setup: func() *candidateStorage { + return &candidateStorage{ + byParentHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byOutputHead: make(map[common.Hash]map[parachaintypes.CandidateHash]struct{}), + byCandidateHash: make(map[parachaintypes.CandidateHash]*candidateEntry), + } + }, + hash: common.Hash{4, 5, 6}, + expected: nil, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + storage := tt.setup() + var result []*candidateEntry + for entry := range storage.possibleBackedParaChildren(tt.hash) { + result = append(result, entry) + } + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestEarliestRelayParent(t *testing.T) { + tests := map[string]struct { + setup func() *scope + expect relayChainBlockInfo + }{ + "returns_from_ancestors": { + setup: func() *scope { + relayParent := relayChainBlockInfo{ + Hash: common.Hash{0x01}, + Number: 10, + } + baseConstraints := ¶chaintypes.Constraints{ + MinRelayParentNumber: 5, + } + ancestor := relayChainBlockInfo{ + Hash: common.Hash{0x02}, + Number: 9, + } + ancestorsMap := btree.NewMap[parachaintypes.BlockNumber, relayChainBlockInfo](100) + 
				ancestorsMap.Set(ancestor.Number, ancestor)
				return &scope{
					relayParent:     relayParent,
					baseConstraints: baseConstraints,
					ancestors:       ancestorsMap,
				}
			},
			expect: relayChainBlockInfo{
				Hash:   common.Hash{0x02},
				Number: 9,
			},
		},
		"returns_relayParent": {
			setup: func() *scope {
				relayParent := relayChainBlockInfo{
					Hash:   common.Hash{0x01},
					Number: 10,
				}
				baseConstraints := &parachaintypes.Constraints{
					MinRelayParentNumber: 5,
				}
				return &scope{
					relayParent:     relayParent,
					baseConstraints: baseConstraints,
					ancestors:       btree.NewMap[parachaintypes.BlockNumber, relayChainBlockInfo](100),
				}
			},
			expect: relayChainBlockInfo{
				Hash:   common.Hash{0x01},
				Number: 10,
			},
		},
	}

	for name, tt := range tests {
		tt := tt
		t.Run(name, func(t *testing.T) {
			scope := tt.setup()
			result := scope.earliestRelayParent()
			assert.Equal(t, tt.expect, result)
		})
	}
}

// TestBackedChain_RevertToParentHash builds linear chains (candidate i outputs
// head i+1) and reverts to various parent head hashes, checking the chain and
// all of its indexes shrink consistently.
func TestBackedChain_RevertToParentHash(t *testing.T) {
	tests := map[string]struct {
		setup                    func() *backedChain
		hash                     common.Hash
		expectedChainSize        int
		expectedRemovedFragments int
	}{
		"revert_to_parent_at_pos_2": {
			setup: func() *backedChain {
				chain := &backedChain{
					chain:        make([]*fragmentNode, 0),
					byParentHead: make(map[common.Hash]parachaintypes.CandidateHash),
					byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash),
					candidates:   make(map[parachaintypes.CandidateHash]struct{}),
				}

				for i := 0; i < 5; i++ {
					node := &fragmentNode{
						candidateHash:           parachaintypes.CandidateHash{Value: common.Hash{byte(i)}},
						parentHeadDataHash:      common.Hash{byte(i)},
						outputHeadDataHash:      common.Hash{byte(i + 1)},
						cumulativeModifications: &constraintModifications{},
					}
					chain.push(node)
				}
				return chain
			},
			hash:                     common.Hash{3},
			expectedChainSize:        3,
			expectedRemovedFragments: 2,
		},
		"revert_to_parent_at_pos_0": {
			setup: func() *backedChain {
				chain := &backedChain{
					chain:        make([]*fragmentNode, 0),
					byParentHead: make(map[common.Hash]parachaintypes.CandidateHash),
					byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash),
					candidates:   make(map[parachaintypes.CandidateHash]struct{}),
				}

				for i := 0; i < 2; i++ {
					node := &fragmentNode{
						candidateHash:           parachaintypes.CandidateHash{Value: common.Hash{byte(i)}},
						parentHeadDataHash:      common.Hash{byte(i)},
						outputHeadDataHash:      common.Hash{byte(i + 1)},
						cumulativeModifications: &constraintModifications{},
					}
					chain.push(node)
				}
				return chain
			},
			hash:                     common.Hash{1},
			expectedChainSize:        1,
			expectedRemovedFragments: 1,
		},
		"no_node_removed": {
			setup: func() *backedChain {
				chain := &backedChain{
					chain:        make([]*fragmentNode, 0),
					byParentHead: make(map[common.Hash]parachaintypes.CandidateHash),
					byOutputHead: make(map[common.Hash]parachaintypes.CandidateHash),
					candidates:   make(map[parachaintypes.CandidateHash]struct{}),
				}

				for i := 0; i < 3; i++ {
					node := &fragmentNode{
						candidateHash:           parachaintypes.CandidateHash{Value: common.Hash{byte(i)}},
						parentHeadDataHash:      common.Hash{byte(i)},
						outputHeadDataHash:      common.Hash{byte(i + 1)},
						cumulativeModifications: &constraintModifications{},
					}
					chain.push(node)
				}
				return chain
			},
			hash:                     common.Hash{99}, // Non-existent hash
			expectedChainSize:        3,
			expectedRemovedFragments: 0,
		},
	}

	for name, tt := range tests {
		tt := tt
		t.Run(name, func(t *testing.T) {
			backedChain := tt.setup()
			removedNodes := backedChain.revertToParentHash(tt.hash)

			// Check the number of removed nodes
			assert.Equal(t, tt.expectedRemovedFragments, len(removedNodes))

			// Check the properties of the chain
			assert.Equal(t, tt.expectedChainSize, len(backedChain.chain))
			assert.Equal(t, tt.expectedChainSize, len(backedChain.byParentHead))
			assert.Equal(t, tt.expectedChainSize, len(backedChain.byOutputHead))
			assert.Equal(t, tt.expectedChainSize, len(backedChain.candidates))

			// Check that the remaining nodes are correct
			for i := 0; i < len(backedChain.chain); i++ {
				assert.Contains(t, backedChain.byParentHead, common.Hash{byte(i)})
				assert.Contains(t, backedChain.byOutputHead, common.Hash{byte(i + 1)})
				assert.Contains(t, backedChain.candidates, parachaintypes.CandidateHash{Value: common.Hash{byte(i)}})
			}
		})
	}
}

// TestFragmentChainWithFreshScope seeds the storage with 3 chained pending
// availability candidates and checks newFragmentChain links them all.
func TestFragmentChainWithFreshScope(t *testing.T) {
	relayParent := relayChainBlockInfo{
		Hash:        common.Hash{0x00},
		Number:      0,
		StorageRoot: common.Hash{0x00},
	}

	baseConstraints := &parachaintypes.Constraints{
		RequiredParent:       parachaintypes.HeadData{Data: []byte{byte(0)}},
		MinRelayParentNumber: 0,
		ValidationCodeHash:   parachaintypes.ValidationCodeHash(common.Hash{0x03}),
	}

	scope, err := newScopeWithAncestors(relayParent, baseConstraints, nil, 10, nil)
	assert.NoError(t, err)

	candidateStorage := newCandidateStorage()

	// Create 3 candidate entries forming a chain: candidate i builds on head
	// data {i} and outputs head data {i+1}
	for i := 0; i < 3; i++ {
		candidateHash := parachaintypes.CandidateHash{Value: [32]byte{byte(i + 1)}}
		parentHead := parachaintypes.HeadData{Data: []byte{byte(i)}}
		outputHead := parachaintypes.HeadData{Data: []byte{byte(i + 1)}}

		persistedValidationData := parachaintypes.PersistedValidationData{
			ParentHead: parentHead,
		}

		// Marshal and hash the persisted validation data
		pvdBytes, err := scale.Marshal(persistedValidationData)
		assert.NoError(t, err)
		pvdHash, err := common.Blake2bHash(pvdBytes)
		assert.NoError(t, err)

		committedCandidate := parachaintypes.CommittedCandidateReceipt{
			Descriptor: parachaintypes.CandidateDescriptor{
				RelayParent:                 common.Hash{0x00},
				PersistedValidationDataHash: pvdHash,
				PovHash:                     common.Hash{0x02},
				ValidationCodeHash:          parachaintypes.ValidationCodeHash(common.Hash{0x03}),
			},
			Commitments: parachaintypes.CandidateCommitments{
				HeadData: outputHead,
			},
		}

		err = candidateStorage.addPendingAvailabilityCandidate(candidateHash, committedCandidate, persistedValidationData)
		assert.NoError(t, err)
	}

	fragmentChain :=
		newFragmentChain(scope, candidateStorage)

	// Check that the best chain contains 3 candidates
	assert.Equal(t, 3, len(fragmentChain.bestChain.chain))
}

// makeConstraints builds a Constraints value with sensible defaults for tests,
// parameterized by the fields the tests actually vary.
func makeConstraints(
	minRelayParentNumber parachaintypes.BlockNumber,
	validWatermarks []parachaintypes.BlockNumber,
	requiredParent parachaintypes.HeadData,
) *parachaintypes.Constraints {
	return &parachaintypes.Constraints{
		MinRelayParentNumber:  minRelayParentNumber,
		MaxPoVSize:            1_000_000,
		MaxCodeSize:           1_000_000,
		UMPRemaining:          10,
		UMPRemainingBytes:     1_000,
		MaxNumUMPPerCandidate: 10,
		DMPRemainingMessages:  make([]parachaintypes.BlockNumber, 10),
		HRMPInbound: parachaintypes.InboundHRMPLimitations{
			ValidWatermarks: validWatermarks,
		},
		HRMPChannelsOut:        make(map[parachaintypes.ParaID]parachaintypes.OutboundHRMPChannelLimitations),
		MaxNumHRMPPerCandidate: 0,
		RequiredParent:         requiredParent,
		ValidationCodeHash:     parachaintypes.ValidationCodeHash(common.BytesToHash(bytes.Repeat([]byte{42}, 32))),
		UpgradeRestriction:     nil,
		FutureValidationCode:   nil,
	}
}

// makeCommittedCandidate builds a committed candidate receipt (and its
// persisted validation data) whose PVD hash and para head hash are consistent,
// for use as test fixtures.
func makeCommittedCandidate(
	t *testing.T,
	paraID parachaintypes.ParaID,
	relayParent common.Hash,
	relayParentNumber uint32,
	parentHead parachaintypes.HeadData,
	paraHead parachaintypes.HeadData,
	hrmpWatermark uint32,
) (parachaintypes.PersistedValidationData, parachaintypes.CommittedCandidateReceipt) {
	persistedValidationData := parachaintypes.PersistedValidationData{
		ParentHead:             parentHead,
		RelayParentNumber:      relayParentNumber,
		RelayParentStorageRoot: common.Hash{},
		MaxPovSize:             1_000_000,
	}

	pvdBytes, err := scale.Marshal(persistedValidationData)
	require.NoError(t, err)

	pvdHash, err := common.Blake2bHash(pvdBytes)
	require.NoError(t, err)

	paraHeadHash, err := paraHead.Hash()
	require.NoError(t, err)

	candidate := parachaintypes.CommittedCandidateReceipt{
		Descriptor: parachaintypes.CandidateDescriptor{
			ParaID:                      paraID,
			RelayParent:                 relayParent,
			Collator:                    parachaintypes.CollatorID([sr25519.PublicKeyLength]byte{}),
			PersistedValidationDataHash: pvdHash,
			PovHash:                     common.BytesToHash(bytes.Repeat([]byte{1}, 32)),
			ErasureRoot:                 common.BytesToHash(bytes.Repeat([]byte{1}, 32)),
			Signature:                   parachaintypes.CollatorSignature([sr25519.SignatureLength]byte{}),
			ParaHead:                    paraHeadHash,
			ValidationCodeHash:          parachaintypes.ValidationCodeHash(common.BytesToHash(bytes.Repeat([]byte{42}, 32))),
		},
		Commitments: parachaintypes.CandidateCommitments{
			UpwardMessages:            []parachaintypes.UpwardMessage{},
			HorizontalMessages:        []parachaintypes.OutboundHrmpMessage{},
			NewValidationCode:         nil,
			HeadData:                  paraHead,
			ProcessedDownwardMessages: 1,
			HrmpWatermark:             hrmpWatermark,
		},
	}

	return persistedValidationData, candidate
}

// TestScopeRejectsAncestors checks that newScopeWithAncestors rejects ancestor
// lists that are not contiguous descending from the relay parent.
func TestScopeRejectsAncestors(t *testing.T) {
	tests := map[string]struct {
		relayParent         *relayChainBlockInfo
		ancestors           []relayChainBlockInfo
		maxDepth            uint
		baseConstraints     *parachaintypes.Constraints
		pendingAvailability []*pendingAvailability
		expectedError       error
	}{
		"rejects_ancestor_that_skips_blocks": {
			relayParent: &relayChainBlockInfo{
				Number:      10,
				Hash:        common.BytesToHash(bytes.Repeat([]byte{0x10}, 32)),
				StorageRoot: common.BytesToHash(bytes.Repeat([]byte{0x69}, 32)),
			},
			ancestors: []relayChainBlockInfo{
				{
					Number: 8,
					Hash:   common.BytesToHash(bytes.Repeat([]byte{0x08}, 32)),
					// NOTE(review): the 69-byte repeat looks unintended (hashes are
					// 32 bytes; the sibling entries use 32) — confirm.
					StorageRoot: common.BytesToHash(bytes.Repeat([]byte{0x69}, 69)),
				},
			},
			maxDepth: 2,
			baseConstraints: makeConstraints(8, []parachaintypes.BlockNumber{8, 9},
				parachaintypes.HeadData{Data: []byte{0x01, 0x02, 0x03}}),
			pendingAvailability: make([]*pendingAvailability, 0),
			expectedError:       errUnexpectedAncestor{number: 8, prev: 10},
		},
		"rejects_ancestor_for_zero_block": {
			relayParent: &relayChainBlockInfo{
				Number:      0,
				Hash:        common.BytesToHash(bytes.Repeat([]byte{0}, 32)),
				StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)),
			},
			ancestors: []relayChainBlockInfo{
+ { + Number: 99999, + Hash: common.BytesToHash(bytes.Repeat([]byte{99}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + }, + maxDepth: 2, + baseConstraints: makeConstraints(0, []parachaintypes.BlockNumber{}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}), + pendingAvailability: make([]*pendingAvailability, 0), + expectedError: errUnexpectedAncestor{number: 99999, prev: 0}, + }, + "rejects_unordered_ancestors": { + relayParent: &relayChainBlockInfo{ + Number: 5, + Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + ancestors: []relayChainBlockInfo{ + { + Number: 4, + Hash: common.BytesToHash(bytes.Repeat([]byte{4}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 2, + Hash: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 3, + Hash: common.BytesToHash(bytes.Repeat([]byte{3}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + }, + maxDepth: 2, + baseConstraints: makeConstraints(0, []parachaintypes.BlockNumber{2}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}), + pendingAvailability: make([]*pendingAvailability, 0), + expectedError: errUnexpectedAncestor{number: 2, prev: 4}, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + scope, err := newScopeWithAncestors( + *tt.relayParent, + tt.baseConstraints, + tt.pendingAvailability, + tt.maxDepth, + tt.ancestors) + require.ErrorIs(t, err, tt.expectedError) + require.Nil(t, scope) + }) + } +} + +func TestScopeOnlyTakesAncestorsUpToMin(t *testing.T) { + relayParent := relayChainBlockInfo{ + Number: 5, + Hash: common.BytesToHash(bytes.Repeat([]byte{0}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + } + + ancestors := []relayChainBlockInfo{ + { + Number: 4, + Hash: 
common.BytesToHash(bytes.Repeat([]byte{4}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 3, + Hash: common.BytesToHash(bytes.Repeat([]byte{3}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + { + Number: 2, + Hash: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{69}, 32)), + }, + } + + maxDepth := uint(2) + baseConstraints := makeConstraints(3, []parachaintypes.BlockNumber{2}, parachaintypes.HeadData{Data: []byte{1, 2, 3}}) + pendingAvailability := make([]*pendingAvailability, 0) + + scope, err := newScopeWithAncestors(relayParent, baseConstraints, pendingAvailability, maxDepth, ancestors) + require.NoError(t, err) + + assert.Equal(t, 2, scope.ancestors.Len()) + assert.Equal(t, 2, len(scope.ancestorsByHash)) +} + +func TestCandidateStorageMethods(t *testing.T) { + tests := map[string]struct { + runTest func(*testing.T) + }{ + "persistedValidationDataMismatch": { + runTest: func(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + wrongPvd := pvd + wrongPvd.MaxPovSize = 0 + + candidateHash, err := candidate.Hash() + require.NoError(t, err) + + entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, + candidate, wrongPvd, seconded) + require.ErrorIs(t, err, errPersistedValidationDataMismatch) + require.Nil(t, entry) + }, + }, + + "zero_length_cycle": { + runTest: func(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + 
candidate.Commitments.HeadData = parachaintypes.HeadData{Data: bytes.Repeat([]byte{1}, 10)} + pvd.ParentHead = parachaintypes.HeadData{Data: bytes.Repeat([]byte{1}, 10)} + wrongPvdHash, err := pvd.Hash() + require.NoError(t, err) + + candidate.Descriptor.PersistedValidationDataHash = wrongPvdHash + + candidateHash, err := candidate.Hash() + require.NoError(t, err) + + entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, + candidate, pvd, seconded) + require.Nil(t, entry) + require.ErrorIs(t, err, errZeroLengthCycle) + }, + }, + + "add_valid_candidate": { + runTest: func(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + + parentHeadHash, err := pvd.ParentHead.Hash() + require.NoError(t, err) + + entry, err := newCandidateEntry(candidateHash, candidate, pvd, seconded) + require.NoError(t, err) + + storage := newCandidateStorage() + + t.Run("add_candidate_entry_as_seconded", func(t *testing.T) { + err = storage.addCandidateEntry(entry) + require.NoError(t, err) + _, ok := storage.byCandidateHash[candidateHash] + require.True(t, ok) + + // should not have any possible backed candidate yet + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + assert.Fail(t, "expected no entries, but found one", entry) + } + + require.Equal(t, storage.headDataByHash(candidate.Descriptor.ParaHead), + &candidate.Commitments.HeadData) + require.Equal(t, storage.headDataByHash(parentHeadHash), &pvd.ParentHead) + + // re-add the candidate should fail + err = storage.addCandidateEntry(entry) + require.ErrorIs(t, err, errCandidateAlreadyKnown) + }) + + 
t.Run("mark_candidate_entry_as_backed", func(t *testing.T) { + storage.markBacked(candidateHash) + // marking twice is fine + storage.markBacked(candidateHash) + + // here we should have 1 possible backed candidate when we + // use the parentHeadHash (parent of our current candidate) to query + possibleBackedCandidateHashes := make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{candidateHash}, possibleBackedCandidateHashes) + + // here we should have 0 possible backed candidate because we are + // using the candidate hash paraHead as base to query + possibleBackedCandidateHashes = make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(candidate.Descriptor.ParaHead) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Empty(t, possibleBackedCandidateHashes) + }) + + t.Run("remove_candidate_entry", func(t *testing.T) { + storage.removeCandidate(candidateHash) + // remove it twice should be fine + storage.removeCandidate(candidateHash) + + _, ok := storage.byCandidateHash[candidateHash] + require.False(t, ok) + + // should not have any possible backed candidate anymore + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + assert.Fail(t, "expected no entries, but found one", entry) + } + + require.Nil(t, storage.headDataByHash(candidate.Descriptor.ParaHead)) + require.Nil(t, storage.headDataByHash(parentHeadHash)) + }) + }, + }, + + "add_pending_availability_candidate": { + runTest: func(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{69}, 32)) + + pvd, candidate := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + 
parachaintypes.HeadData{Data: []byte{1, 2, 3}}, + 7, + ) + + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + + parentHeadHash, err := pvd.ParentHead.Hash() + require.NoError(t, err) + + storage := newCandidateStorage() + err = storage.addPendingAvailabilityCandidate(candidateHash, candidate, pvd) + require.NoError(t, err) + + _, ok := storage.byCandidateHash[candidateHash] + require.True(t, ok) + + // here we should have 1 possible backed candidate when we + // use the parentHeadHash (parent of our current candidate) to query + possibleBackedCandidateHashes := make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{candidateHash}, possibleBackedCandidateHashes) + + // here we should have 0 possible backed candidate because we are + // using the candidate hash paraHead as base to query + possibleBackedCandidateHashes = make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(candidate.Descriptor.ParaHead) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Empty(t, possibleBackedCandidateHashes) + + t.Run("add_seconded_candidate_to_create_fork", func(t *testing.T) { + pvd2, candidate2 := makeCommittedCandidate( + t, + parachaintypes.ParaID(5), + relayParent, + 8, + parachaintypes.HeadData{Data: []byte{4, 5, 6}}, + parachaintypes.HeadData{Data: []byte{2, 3, 4}}, + 7, + ) + + hash2, err := candidate2.Hash() + require.NoError(t, err) + candidateHash2 := parachaintypes.CandidateHash{Value: hash2} + + candidateEntry2, err := newCandidateEntry(candidateHash2, candidate2, pvd2, seconded) + require.NoError(t, err) + + err = storage.addCandidateEntry(candidateEntry2) + require.NoError(t, err) + + // here 
we should have 1 possible backed candidate since + // the other candidate is seconded + possibleBackedCandidateHashes := make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{candidateHash}, possibleBackedCandidateHashes) + + // now mark it as backed + storage.markBacked(candidateHash2) + + // here we should have 1 possible backed candidate since + // the other candidate is seconded + possibleBackedCandidateHashes = make([]parachaintypes.CandidateHash, 0) + for entry := range storage.possibleBackedParaChildren(parentHeadHash) { + possibleBackedCandidateHashes = append(possibleBackedCandidateHashes, entry.candidateHash) + } + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateHash, candidateHash2}, possibleBackedCandidateHashes) + + }) + }, + }, + } + + for name, tt := range tests { + tt := tt + t.Run(name, func(t *testing.T) { + tt.runTest(t) + }) + } +} + +func TestInitAndPopulateFromEmpty(t *testing.T) { + baseConstraints := makeConstraints(0, []parachaintypes.BlockNumber{0}, parachaintypes.HeadData{Data: []byte{0x0a}}) + + scope, err := newScopeWithAncestors( + relayChainBlockInfo{ + Number: 1, + Hash: common.BytesToHash(bytes.Repeat([]byte{1}, 32)), + StorageRoot: common.BytesToHash(bytes.Repeat([]byte{2}, 32)), + }, + baseConstraints, + nil, + 4, + nil, + ) + require.NoError(t, err) + + chain := newFragmentChain(scope, newCandidateStorage()) + assert.Equal(t, 0, chain.bestChainLen()) + assert.Equal(t, 0, chain.unconnected.len()) + + newChain := newFragmentChain(scope, newCandidateStorage()) + newChain.populateFromPrevious(chain) + assert.Equal(t, 0, newChain.bestChainLen()) + assert.Equal(t, 0, newChain.unconnected.len()) +} + +func populateFromPreviousStorage(scope *scope, storage *candidateStorage) *fragmentChain { + chain := 
newFragmentChain(scope, newCandidateStorage()) + + // clone the value + prevChain := *chain + (&prevChain).unconnected = storage.clone() + chain.populateFromPrevious(&prevChain) + return chain +} + +func TestPopulateAndCheckPotential(t *testing.T) { + storage := newCandidateStorage() + paraID := parachaintypes.ParaID(5) + + relayParentAHash := common.BytesToHash(bytes.Repeat([]byte{1}, 32)) + relayParentBHash := common.BytesToHash(bytes.Repeat([]byte{2}, 32)) + relayParentCHash := common.BytesToHash(bytes.Repeat([]byte{3}, 32)) + + relayParentAInfo := &relayChainBlockInfo{ + Number: 0, Hash: relayParentAHash, StorageRoot: common.Hash{}, + } + + relayParentBInfo := &relayChainBlockInfo{ + Number: 1, Hash: relayParentBHash, StorageRoot: common.Hash{}, + } + + relayParentCInfo := &relayChainBlockInfo{ + Number: 2, Hash: relayParentCHash, StorageRoot: common.Hash{}, + } + + // the ancestors must be in the reverse order + ancestors := []relayChainBlockInfo{ + *relayParentBInfo, + *relayParentAInfo, + } + + firstParachainHead := parachaintypes.HeadData{Data: []byte{0x0a}} + baseConstraints := makeConstraints(0, []parachaintypes.BlockNumber{0}, firstParachainHead) + + // helper function to hash the candidate and add its entry + // into the candidate storage + hashAndInsertCandididate := func(t *testing.T, storage *candidateStorage, + candidate parachaintypes.CommittedCandidateReceipt, + pvd parachaintypes.PersistedValidationData, state candidateState) ( + parachaintypes.CandidateHash, *candidateEntry) { + + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + entry, err := newCandidateEntry(candidateHash, candidate, pvd, state) + require.NoError(t, err) + err = storage.addCandidateEntry(entry) + require.NoError(t, err) + + return candidateHash, entry + } + + hashAndGetEntry := func(t *testing.T, candidate parachaintypes.CommittedCandidateReceipt, + pvd parachaintypes.PersistedValidationData, state 
candidateState) (parachaintypes.CandidateHash, *candidateEntry) { + hash, err := candidate.Hash() + require.NoError(t, err) + candidateHash := parachaintypes.CandidateHash{Value: hash} + entry, err := newCandidateEntry(candidateHash, candidate, pvd, state) + require.NoError(t, err) + return candidateHash, entry + } + + // candidates A -> B -> C are all backed + candidateAParaHead := parachaintypes.HeadData{Data: []byte{0x0b}} + pvdA, candidateA := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + firstParachainHead, + candidateAParaHead, + uint32(relayParentAInfo.Number), + ) + + candidateAHash, candidateAEntry := hashAndInsertCandididate(t, storage, candidateA, pvdA, backed) + + candidateBParaHead := parachaintypes.HeadData{Data: []byte{0x0c}} + pvdB, candidateB := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + candidateAParaHead, // defines candidate A as parent of candidate B + candidateBParaHead, + uint32(relayParentBInfo.Number), + ) + + candidateBHash, candidateBEntry := hashAndInsertCandididate(t, storage, candidateB, pvdB, backed) + + candidateCParaHead := parachaintypes.HeadData{Data: []byte{0x0d}} + pvdC, candidateC := makeCommittedCandidate(t, paraID, + relayParentCInfo.Hash, uint32(relayParentCInfo.Number), + candidateBParaHead, + candidateCParaHead, + uint32(relayParentCInfo.Number), + ) + + candidateCHash, candidateCEntry := hashAndInsertCandididate(t, storage, candidateC, pvdC, backed) + + t.Run("candidate_A_doesnt_adhere_to_base_constraints", func(t *testing.T) { + wrongConstraints := []parachaintypes.Constraints{ + // define a constraint that requires a parent head data + // that is different from candidate A parent head + *makeConstraints(relayParentAInfo.Number, + []parachaintypes.BlockNumber{relayParentAInfo.Number}, parachaintypes.HeadData{Data: []byte{0x0e}}), + + // the min relay parent for candidate A is wrong + 
*makeConstraints(relayParentBInfo.Number, []parachaintypes.BlockNumber{0}, firstParachainHead), + } + + for _, wrongConstraint := range wrongConstraints { + scope, err := newScopeWithAncestors( + *relayParentCInfo, + &wrongConstraint, + nil, + 4, + ancestors, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + require.Empty(t, chain.bestChainVec()) + + // if the min relay parent is wrong, candidate A can never become valid, otherwise + // if only the required parent doesnt match, candidate A still a potential candidate + if wrongConstraint.MinRelayParentNumber == relayParentBInfo.Number { + // if A is not a potential candidate, its descendants will also not be added. + require.Equal(t, chain.unconnected.len(), 0) + err := chain.canAddCandidateAsPotential(candidateAEntry) + require.ErrorIs(t, err, errRelayParentNotInScope{ + relayParentA: relayParentAHash, // candidate A has relay parent A + relayParentB: relayParentBHash, // while the constraint is expecting at least relay parent B + }) + + // however if taken independently, both B and C still have potential + err = chain.canAddCandidateAsPotential(candidateBEntry) + require.NoError(t, err) + err = chain.canAddCandidateAsPotential(candidateCEntry) + require.NoError(t, err) + } else { + potentials := make([]parachaintypes.CandidateHash, 0) + for _, unconnected := range chain.unconnected.byCandidateHash { + potentials = append(potentials, unconnected.candidateHash) + } + + slices.SortStableFunc(potentials, func(i, j parachaintypes.CandidateHash) int { + return bytes.Compare(i.Value[:], j.Value[:]) + }) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateAHash, + candidateCHash, + candidateBHash, + }, potentials) + } + } + }) + + t.Run("depth_cases", func(t *testing.T) { + depthCases := map[string]struct { + depth []uint + expectedBestChain []parachaintypes.CandidateHash + expectedUnconnected map[parachaintypes.CandidateHash]struct{} + }{ + 
"0_depth_only_allows_one_candidate_but_keep_the_rest_as_potential": { + depth: []uint{0}, + expectedBestChain: []parachaintypes.CandidateHash{candidateAHash}, + expectedUnconnected: map[parachaintypes.CandidateHash]struct{}{ + candidateBHash: {}, + candidateCHash: {}, + }, + }, + "1_depth_allow_two_candidates": { + depth: []uint{1}, + expectedBestChain: []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, + expectedUnconnected: map[parachaintypes.CandidateHash]struct{}{ + candidateCHash: {}, + }, + }, + "2_more_depth_allow_all_candidates": { + depth: []uint{2, 3, 4, 5}, + expectedBestChain: []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, + expectedUnconnected: map[parachaintypes.CandidateHash]struct{}{}, + }, + } + + for tname, tt := range depthCases { + tt := tt + t.Run(tname, func(t *testing.T) { + // iterate over all the depth values + for _, depth := range tt.depth { + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + depth, + ancestors, + ) + require.NoError(t, err) + + chain := newFragmentChain(scope, newCandidateStorage()) + // individually each candidate is a potential candidate + require.NoError(t, chain.canAddCandidateAsPotential(candidateAEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateBEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateCEntry)) + + chain = populateFromPreviousStorage(scope, storage) + require.Equal(t, tt.expectedBestChain, chain.bestChainVec()) + + // Check that the unconnected candidates are as expected + unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) + for _, unconnected := range chain.unconnected.byCandidateHash { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + assert.Equal(t, tt.expectedUnconnected, unconnectedHashes) + } + }) + } + }) + + t.Run("relay_parent_out_of_scope", func(t *testing.T) { + // candidate A has a relay parent out of scope. 
Candidates B and C + // will also be deleted since they form a chain with A + t.Run("candidate_A_relay_parent_out_of_scope", func(t *testing.T) { + newAncestors := []relayChainBlockInfo{ + *relayParentBInfo, + } + + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + 4, + newAncestors, + ) + require.NoError(t, err) + chain := populateFromPreviousStorage(scope, storage) + require.Empty(t, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) + + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), + errRelayParentNotInScope{ + relayParentA: relayParentAHash, + relayParentB: relayParentBHash, + }) + + // however if taken indepently, both B and C still have potential + require.NoError(t, chain.canAddCandidateAsPotential(candidateBEntry)) + require.NoError(t, chain.canAddCandidateAsPotential(candidateCEntry)) + }) + + t.Run("candidate_A_and_B_out_of_scope_C_still_potential", func(t *testing.T) { + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + 4, + nil, + ) + require.NoError(t, err) + chain := populateFromPreviousStorage(scope, storage) + require.Empty(t, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) + + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), + errRelayParentNotInScope{ + relayParentA: relayParentAHash, + relayParentB: relayParentCHash, + }) + + // however if taken indepently, both B and C still have potential + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateBEntry), + errRelayParentNotInScope{ + relayParentA: relayParentBHash, + relayParentB: relayParentCHash, + }) + + require.NoError(t, chain.canAddCandidateAsPotential(candidateCEntry)) + }) + }) + + t.Run("parachain_cycle_not_allowed", func(t *testing.T) { + // make C parent of parachain block A + modifiedStorage := storage.clone() + modifiedStorage.removeCandidate(candidateCHash) + + wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, 
paraID, + relayParentCInfo.Hash, uint32(relayParentCInfo.Number), + candidateBParaHead, // defines candidate B as parent of candidate C + firstParachainHead, // defines this candidate para head output as the parent of candidate A + uint32(relayParentCInfo.Number), + ) + + _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, backed) + + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + 4, + ancestors, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, modifiedStorage) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) + + err = chain.canAddCandidateAsPotential(wrongCandidateCEntry) + require.ErrorIs(t, err, errCycle) + + // However, if taken independently, C still has potential, since we don't know A and B. + chain = newFragmentChain(scope, newCandidateStorage()) + require.NoError(t, chain.canAddCandidateAsPotential(wrongCandidateCEntry)) + }) + + t.Run("relay_parent_move_backwards_not_allowed", func(t *testing.T) { + // each candidate was build using a different, and contigous, relay parent + // in this test we are going to change candidate C to have the same relay + // parent of candidate A, given that candidate B is one block ahead. 
+ modifiedStorage := storage.clone() + modifiedStorage.removeCandidate(candidateCHash) + + wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + candidateBParaHead, + candidateCParaHead, + 0, + ) + + _, wrongCandidateCEntry := hashAndInsertCandididate(t, modifiedStorage, wrongCandidateC, wrongPvdC, backed) + + scope, err := newScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 4, ancestors) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, modifiedStorage) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) + + require.ErrorIs(t, chain.canAddCandidateAsPotential(wrongCandidateCEntry), errRelayParentMovedBackwards) + }) + + t.Run("unconnected_candidate_C", func(t *testing.T) { + // candidate C is an unconnected candidate, C's relay parent is allowed to move + // backwards from B's relay parent, because C may latter on trigger a reorg and + // B may get removed + + modifiedStorage := storage.clone() + modifiedStorage.removeCandidate(candidateCHash) + + parenteHead := parachaintypes.HeadData{Data: []byte{0x0d}} + unconnectedCandidateCHead := parachaintypes.HeadData{Data: []byte{0x0e}} + + unconnectedCPvd, unconnectedCandidateC := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + parenteHead, + unconnectedCandidateCHead, + 0, + ) + + unconnectedCandidateCHash, unconnectedCandidateCEntry := hashAndInsertCandididate(t, + modifiedStorage, unconnectedCandidateC, unconnectedCPvd, backed) + + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + 4, + ancestors, + ) + require.NoError(t, err) + + chain := newFragmentChain(scope, newCandidateStorage()) + require.NoError(t, chain.canAddCandidateAsPotential(unconnectedCandidateCEntry)) + + chain = populateFromPreviousStorage(scope, modifiedStorage) + 
require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash}, chain.bestChainVec()) + + unconnected := make(map[parachaintypes.CandidateHash]struct{}) + for _, entry := range chain.unconnected.byCandidateHash { + unconnected[entry.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + unconnectedCandidateCHash: {}, + }, unconnected) + + t.Run("candidate_A_is_pending_availability_candidate_C_should_not_move_backwards", func(t *testing.T) { + // candidate A is pending availability and candidate C is an unconnected candidate, C's relay parent + // is not allowed to move backwards from A's relay parent because we're sure A will not get remove + // in the future, as it's already on-chain (unless it times out availability, a case for which we + // don't care to optmise for) + modifiedStorage.removeCandidate(candidateAHash) + modifiedAPvd, modifiedCandidateA := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + firstParachainHead, + candidateAParaHead, + uint32(relayParentBInfo.Number), + ) + + modifiedCandidateAHash, _ := hashAndInsertCandididate(t, + modifiedStorage, modifiedCandidateA, modifiedAPvd, backed) + + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + []*pendingAvailability{ + {candidateHash: modifiedCandidateAHash, relayParent: *relayParentBInfo}, + }, + 4, + ancestors, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, modifiedStorage) + require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) + + require.ErrorIs(t, + chain.canAddCandidateAsPotential(unconnectedCandidateCEntry), + errRelayParentPrecedesCandidatePendingAvailability{ + relayParentA: relayParentAHash, + relayParentB: relayParentBHash, + }) + }) + }) + + t.Run("cannot_fork_from_a_candidate_pending_availability", func(t *testing.T) { + 
modifiedStorage := storage.clone() + modifiedStorage.removeCandidate(candidateCHash) + + modifiedStorage.removeCandidate(candidateAHash) + modifiedAPvd, modifiedCandidateA := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + firstParachainHead, + candidateAParaHead, + uint32(relayParentBInfo.Number), + ) + + modifiedCandidateAHash, _ := hashAndInsertCandididate(t, + modifiedStorage, modifiedCandidateA, modifiedAPvd, backed) + + wrongCandidateCHead := parachaintypes.HeadData{Data: []byte{0x01}} + wrongPvdC, wrongCandidateC := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + firstParachainHead, + wrongCandidateCHead, + uint32(relayParentBInfo.Number), + ) + + wrongCandidateCHash, wrongCandidateCEntry := hashAndInsertCandididate(t, + modifiedStorage, wrongCandidateC, wrongPvdC, backed) + + // does not matter if the fork selection rule picks the new candidate + // as the modified candidate A is pending availability + require.Equal(t, -1, forkSelectionRule(wrongCandidateCHash, modifiedCandidateAHash)) + + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + []*pendingAvailability{ + { + candidateHash: modifiedCandidateAHash, + relayParent: *relayParentBInfo, + }, + }, + 4, + ancestors, + ) + require.NoError(t, err) + chain := populateFromPreviousStorage(scope, modifiedStorage) + require.Equal(t, []parachaintypes.CandidateHash{modifiedCandidateAHash, candidateBHash}, chain.bestChainVec()) + require.Equal(t, 0, chain.unconnected.len()) + require.ErrorIs(t, chain.canAddCandidateAsPotential(wrongCandidateCEntry), errForkWithCandidatePendingAvailability{ + candidateHash: modifiedCandidateAHash, + }) + }) + + t.Run("multiple_pending_availability_candidates", func(t *testing.T) { + validOptions := [][]*pendingAvailability{ + { + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, + }, + { + {candidateHash: candidateAHash, relayParent: 
*relayParentAInfo}, + {candidateHash: candidateBHash, relayParent: *relayParentBInfo}, + }, + { + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, + {candidateHash: candidateBHash, relayParent: *relayParentBInfo}, + {candidateHash: candidateCHash, relayParent: *relayParentCInfo}, + }, + } + + for _, pending := range validOptions { + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + pending, + 3, + ancestors, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) + assert.Equal(t, 0, chain.unconnected.len()) + } + }) + + t.Run("relay_parents_of_pending_availability_candidates_can_be_out_of_scope", func(t *testing.T) { + ancestorsWithoutA := []relayChainBlockInfo{ + *relayParentBInfo, + } + + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + []*pendingAvailability{ + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, + }, + 4, + ancestorsWithoutA, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) + assert.Equal(t, 0, chain.unconnected.len()) + }) + + t.Run("relay_parents_of_pending_availability_candidates_cannot_move_backwards", func(t *testing.T) { + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + []*pendingAvailability{ + { + candidateHash: candidateAHash, + relayParent: relayChainBlockInfo{ + Hash: relayParentAInfo.Hash, + Number: 1, + StorageRoot: relayParentAInfo.StorageRoot, + }, + }, + { + candidateHash: candidateBHash, + relayParent: relayChainBlockInfo{ + Hash: relayParentBInfo.Hash, + Number: 0, + StorageRoot: relayParentBInfo.StorageRoot, + }, + }, + }, + 4, + []relayChainBlockInfo{}, + ) + require.NoError(t, err) + + chain := 
populateFromPreviousStorage(scope, storage) + assert.Empty(t, chain.bestChainVec()) + assert.Equal(t, 0, chain.unconnected.len()) + }) + + t.Run("more_complex_case_with_multiple_candidates_and_constraints", func(t *testing.T) { + scope, err := newScopeWithAncestors( + *relayParentCInfo, + baseConstraints, + nil, + 2, + ancestors, + ) + require.NoError(t, err) + + // Candidate D + candidateDParaHead := parachaintypes.HeadData{Data: []byte{0x0e}} + pvdD, candidateD := makeCommittedCandidate(t, paraID, + relayParentCInfo.Hash, uint32(relayParentCInfo.Number), + candidateCParaHead, + candidateDParaHead, + uint32(relayParentCInfo.Number), + ) + candidateDHash, candidateDEntry := hashAndGetEntry(t, candidateD, pvdD, backed) + require.NoError(t, populateFromPreviousStorage(scope, storage). + canAddCandidateAsPotential(candidateDEntry)) + require.NoError(t, storage.addCandidateEntry(candidateDEntry)) + + // Candidate F + candidateEParaHead := parachaintypes.HeadData{Data: []byte{0x0f}} + candidateFParaHead := parachaintypes.HeadData{Data: []byte{0xf1}} + pvdF, candidateF := makeCommittedCandidate(t, paraID, + relayParentCInfo.Hash, uint32(relayParentCInfo.Number), + candidateEParaHead, + candidateFParaHead, + 1000, + ) + candidateFHash, candidateFEntry := hashAndGetEntry(t, candidateF, pvdF, seconded) + require.NoError(t, populateFromPreviousStorage(scope, storage). + canAddCandidateAsPotential(candidateFEntry)) + require.NoError(t, storage.addCandidateEntry(candidateFEntry)) + + // Candidate A1 + pvdA1, candidateA1 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + firstParachainHead, + parachaintypes.HeadData{Data: []byte{0xb1}}, + uint32(relayParentAInfo.Number), + ) + candidateA1Hash, candidateA1Entry := hashAndGetEntry(t, candidateA1, pvdA1, backed) + + // candidate A1 is created so that its hash is greater than the candidate A hash. 
+ require.Equal(t, -1, forkSelectionRule(candidateAHash, candidateA1Hash)) + require.ErrorIs(t, populateFromPreviousStorage(scope, storage). + canAddCandidateAsPotential(candidateA1Entry), + errForkChoiceRule{candidateHash: candidateAHash}) + + require.NoError(t, storage.addCandidateEntry(candidateA1Entry)) + + // Candidate B1 + pvdB1, candidateB1 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + parachaintypes.HeadData{Data: []byte{0xb1}}, + parachaintypes.HeadData{Data: []byte{0xc1}}, + uint32(relayParentAInfo.Number), + ) + _, candidateB1Entry := hashAndGetEntry(t, candidateB1, pvdB1, seconded) + require.NoError(t, populateFromPreviousStorage(scope, storage). + canAddCandidateAsPotential(candidateB1Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateB1Entry)) + + // Candidate C1 + pvdC1, candidateC1 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + parachaintypes.HeadData{Data: []byte{0xc1}}, + parachaintypes.HeadData{Data: []byte{0xd1}}, + uint32(relayParentAInfo.Number), + ) + _, candidateC1Entry := hashAndGetEntry(t, candidateC1, pvdC1, backed) + require.NoError(t, populateFromPreviousStorage(scope, storage). + canAddCandidateAsPotential(candidateC1Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateC1Entry)) + + // Candidate C2 + pvdC2, candidateC2 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + parachaintypes.HeadData{Data: []byte{0xc1}}, + parachaintypes.HeadData{Data: []byte{0xd2}}, + uint32(relayParentAInfo.Number), + ) + + _, candidateC2Entry := hashAndGetEntry(t, candidateC2, pvdC2, seconded) + require.NoError(t, populateFromPreviousStorage(scope, storage). 
+ canAddCandidateAsPotential(candidateC2Entry)) + require.NoError(t, storage.addCandidateEntry(candidateC2Entry)) + + // Candidate A2 + candidateA2HeadData := parachaintypes.HeadData{Data: []byte{0x0c9}} + pvdA2, candidateA2 := makeCommittedCandidate(t, paraID, + relayParentAInfo.Hash, uint32(relayParentAInfo.Number), + firstParachainHead, + candidateA2HeadData, + uint32(relayParentAInfo.Number), + ) + candidateA2Hash, candidateA2Entry := hashAndGetEntry(t, candidateA2, pvdA2, seconded) + + require.Equal(t, -1, forkSelectionRule(candidateA2Hash, candidateAHash)) + require.NoError(t, populateFromPreviousStorage(scope, storage). + canAddCandidateAsPotential(candidateA2Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateA2Entry)) + + // Candidate B2 + candidateB2HeadData := parachaintypes.HeadData{Data: []byte{0xb4}} + pvdB2, candidateB2 := makeCommittedCandidate(t, paraID, + relayParentBInfo.Hash, uint32(relayParentBInfo.Number), + candidateA2HeadData, + candidateB2HeadData, + uint32(relayParentBInfo.Number), + ) + candidateB2Hash, candidateB2Entry := hashAndGetEntry(t, candidateB2, pvdB2, backed) + require.NoError(t, populateFromPreviousStorage(scope, storage). 
+ canAddCandidateAsPotential(candidateB2Entry)) + + require.NoError(t, storage.addCandidateEntry(candidateB2Entry)) + + chain := populateFromPreviousStorage(scope, storage) + assert.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) + + unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) + for _, unconnected := range chain.unconnected.byCandidateHash { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + expectedUnconnected := map[parachaintypes.CandidateHash]struct{}{ + candidateDHash: {}, + candidateFHash: {}, + candidateA2Hash: {}, + candidateB2Hash: {}, + } + assert.Equal(t, expectedUnconnected, unconnectedHashes) + + // Cannot add as potential an already present candidate (whether it's in the best chain or in unconnected storage) + assert.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) + assert.ErrorIs(t, chain.canAddCandidateAsPotential(candidateFEntry), errCandidateAlreadyKnown) + + t.Run("simulate_best_chain_reorg", func(t *testing.T) { + // back a2, the reversion should happen at the root. + chain := cloneFragmentChain(chain) + chain.candidateBacked(candidateA2Hash) + + require.Equal(t, []parachaintypes.CandidateHash{candidateA2Hash, candidateB2Hash}, chain.bestChainVec()) + + // candidate F is kept as it was truly unconnected. 
The rest will be trimmed + unconnected := map[parachaintypes.CandidateHash]struct{}{} + for _, entry := range chain.unconnected.byCandidateHash { + unconnected[entry.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateFHash: {}, + }, unconnected) + + // candidates A1 and A will never have potential again + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateA1Entry), errForkChoiceRule{ + candidateHash: candidateA2Hash, + }) + require.ErrorIs(t, chain.canAddCandidateAsPotential(candidateAEntry), errForkChoiceRule{ + candidateHash: candidateA2Hash, + }) + }) + + t.Run("simulate_more_complex_reorg", func(t *testing.T) { + // a2 points to b2, which is backed + // a2 has underneath a subtree a2 -> b2 -> c3 and a2 -> b2 -> c4 + // b2 and c3 are backed, c4 is kept because it has a lower candidate hash than c3 + // backing c4 will cause a chain reorg + + // candidate c3 + candidateC3HeadData := parachaintypes.HeadData{Data: []byte{0xc2}} + candidateC3Pvd, candidateC3 := makeCommittedCandidate(t, paraID, + relayParentBHash, uint32(relayParentBInfo.Number), + candidateB2HeadData, + candidateC3HeadData, + uint32(relayParentBInfo.Number), + ) + + candidateC3Hash, candidateC3Entry := hashAndGetEntry(t, candidateC3, candidateC3Pvd, seconded) + + // candidate c4 + candidateC4HeadData := parachaintypes.HeadData{Data: []byte{0xc3}} + candidateC4Pvd, candidateC4 := makeCommittedCandidate(t, paraID, + relayParentBHash, uint32(relayParentBInfo.Number), + candidateB2HeadData, + candidateC4HeadData, + uint32(relayParentBInfo.Number), + ) + + candidateC4Hash, candidateC4Entry := hashAndGetEntry(t, candidateC4, candidateC4Pvd, seconded) + + // c4 should have a lower candidate hash than c3 + require.Equal(t, -1, forkSelectionRule(candidateC4Hash, candidateC3Hash)) + + storage := storage.clone() + + require.NoError(t, storage.addCandidateEntry(candidateC3Entry)) + require.NoError(t, storage.addCandidateEntry(candidateC4Entry)) 
+ + chain := populateFromPreviousStorage(scope, storage) + + // current best chain + // so we will cause a reorg when backing a2 and c3 + // and trigger another reorg when backing c4 + require.Equal(t, []parachaintypes.CandidateHash{ + candidateAHash, candidateBHash, candidateCHash, + }, chain.bestChainVec()) + + chain.candidateBacked(candidateA2Hash) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateA2Hash, candidateB2Hash, + }, chain.bestChainVec()) + + chain.candidateBacked(candidateC3Hash) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateA2Hash, candidateB2Hash, candidateC3Hash, + }, chain.bestChainVec()) + + // backing c4 will cause a reorg + chain.candidateBacked(candidateC4Hash) + + require.Equal(t, []parachaintypes.CandidateHash{ + candidateA2Hash, candidateB2Hash, candidateC4Hash, + }, chain.bestChainVec()) + + unconnected := make(map[parachaintypes.CandidateHash]struct{}) + for _, entry := range chain.unconnected.byCandidateHash { + unconnected[entry.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateFHash: {}, + }, unconnected) + }) + + // candidate F has an invalid hrmp watermark, however it was not checked beforehand + // as we don't have its parent yet. Add its parent now (candidate E), this will not impact anything + // as E is not yet part of the best chain. 
+ candidateEPvd, candidateE := makeCommittedCandidate(t, paraID, + relayParentCHash, uint32(relayParentCInfo.Number), + candidateDParaHead, + candidateEParaHead, + uint32(relayParentCInfo.Number), + ) + + candidateEHash, _ := hashAndInsertCandididate(t, storage, candidateE, candidateEPvd, seconded) + chain = populateFromPreviousStorage(scope, storage) + require.Equal(t, []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, chain.bestChainVec()) + + unconnected := make(map[parachaintypes.CandidateHash]struct{}) + for _, entry := range chain.unconnected.byCandidateHash { + unconnected[entry.candidateHash] = struct{}{} + } + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateDHash: {}, + candidateFHash: {}, + candidateA2Hash: {}, + candidateB2Hash: {}, + candidateEHash: {}, + }, unconnected) + + t.Run("simulate_candidates_A_B_C_are_pending_availability", func(t *testing.T) { + scope, err := newScopeWithAncestors( + *relayParentCInfo, baseConstraints.Clone(), + []*pendingAvailability{ + {candidateHash: candidateAHash, relayParent: *relayParentAInfo}, + {candidateHash: candidateBHash, relayParent: *relayParentBInfo}, + {candidateHash: candidateCHash, relayParent: *relayParentCInfo}, + }, + 2, + ancestors, + ) + require.NoError(t, err) + + // candidates A2, B2 will now be trimmed + chain := populateFromPreviousStorage(scope, storage) + require.Equal(t, + []parachaintypes.CandidateHash{candidateAHash, candidateBHash, candidateCHash}, + chain.bestChainVec()) + + unconnectedHashes := make(map[parachaintypes.CandidateHash]struct{}) + for _, unconnected := range chain.unconnected.byCandidateHash { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateDHash: {}, + candidateFHash: {}, + candidateEHash: {}, + }, unconnectedHashes) + + // cannot add as potential an already pending availability candidate + require.ErrorIs(t, 
chain.canAddCandidateAsPotential(candidateAEntry), errCandidateAlreadyKnown) + + // simulate the fact that candidate A, B and C have been included + baseConstraints := makeConstraints(0, []parachaintypes.BlockNumber{0}, parachaintypes.HeadData{Data: []byte{0x0d}}) + scope, err = newScopeWithAncestors(*relayParentCInfo, baseConstraints, nil, 2, ancestors) + require.NoError(t, err) + + prevChain := chain + chain = newFragmentChain(scope, newCandidateStorage()) + chain.populateFromPrevious(prevChain) + require.Equal(t, []parachaintypes.CandidateHash{candidateDHash}, chain.bestChainVec()) + + unconnectedHashes = make(map[parachaintypes.CandidateHash]struct{}) + for _, unconnected := range chain.unconnected.byCandidateHash { + unconnectedHashes[unconnected.candidateHash] = struct{}{} + } + + require.Equal(t, map[parachaintypes.CandidateHash]struct{}{ + candidateEHash: {}, + candidateFHash: {}, + }, unconnectedHashes) + + // mark E as backed, F will be dropped for invalid watermark. + // empty unconnected candidates + chain.candidateBacked(candidateEHash) + require.Equal(t, []parachaintypes.CandidateHash{candidateDHash, candidateEHash}, chain.bestChainVec()) + require.Zero(t, chain.unconnected.len()) + + var expectedErr error = &errCheckAgainstConstraints{ + fragmentValidityErr: &errOutputsInvalid{ + ModificationError: &errDisallowedHrmpWatermark{ + BlockNumber: 1000, + }, + }, + } + + errCheckAgainstConstraints := new(errCheckAgainstConstraints) + err = chain.canAddCandidateAsPotential(candidateFEntry) + + require.True(t, errors.As(err, errCheckAgainstConstraints)) + require.Equal(t, errCheckAgainstConstraints, expectedErr) + }) + }) +} + +func cloneFragmentChain(original *fragmentChain) *fragmentChain { + // Clone the scope + clonedScope := &scope{ + relayParent: original.scope.relayParent, + baseConstraints: original.scope.baseConstraints.Clone(), + pendingAvailability: append([]*pendingAvailability(nil), original.scope.pendingAvailability...), + maxDepth: 
original.scope.maxDepth, + ancestors: original.scope.ancestors.Copy(), + ancestorsByHash: make(map[common.Hash]relayChainBlockInfo), + } + + for k, v := range original.scope.ancestorsByHash { + clonedScope.ancestorsByHash[k] = v + } + + // Clone the best chain + clonedBestChain := newBackedChain() + for _, node := range original.bestChain.chain { + clonedNode := &fragmentNode{ + fragment: node.fragment, + candidateHash: node.candidateHash, + parentHeadDataHash: node.parentHeadDataHash, + outputHeadDataHash: node.outputHeadDataHash, + cumulativeModifications: node.cumulativeModifications.Clone(), + } + clonedBestChain.push(clonedNode) + } + + // Clone the unconnected storage + clonedUnconnected := original.unconnected.clone() + + // Create the cloned fragment chain + clonedFragmentChain := &fragmentChain{ + scope: clonedScope, + bestChain: clonedBestChain, + unconnected: clonedUnconnected, + } + + return clonedFragmentChain +} + +func TestFindAncestorPathAndFindBackableChainEmptyBestChain(t *testing.T) { + relayParent := common.BytesToHash(bytes.Repeat([]byte{1}, 32)) + requiredParent := parachaintypes.HeadData{Data: []byte{0xff}} + maxDepth := uint(10) + + // Empty chain + baseConstraints := makeConstraints(0, []parachaintypes.BlockNumber{0}, requiredParent) + + relayParentInfo := relayChainBlockInfo{ + Number: 0, + Hash: relayParent, + StorageRoot: common.Hash{}, + } + + scope, err := newScopeWithAncestors(relayParentInfo, baseConstraints, nil, maxDepth, nil) + require.NoError(t, err) + + chain := newFragmentChain(scope, newCandidateStorage()) + assert.Equal(t, 0, chain.bestChainLen()) + + assert.Equal(t, 0, chain.findAncestorPath(map[parachaintypes.CandidateHash]struct{}{})) + assert.Equal(t, []*candidateAndRelayParent{}, chain.findBackableChain(map[parachaintypes.CandidateHash]struct{}{}, 2)) + + // Invalid candidate + ancestors := map[parachaintypes.CandidateHash]struct{}{ + {Value: common.Hash{}}: {}, + } + assert.Equal(t, 0, chain.findAncestorPath(ancestors)) 
+ assert.Equal(t, []*candidateAndRelayParent{}, chain.findBackableChain(ancestors, 2)) +} + +func TestFindAncestorPathAndFindBackableChain(t *testing.T) { + paraID := parachaintypes.ParaID(5) + relayParent := common.BytesToHash(bytes.Repeat([]byte{1}, 32)) + requiredParent := parachaintypes.HeadData{Data: []byte{0xff}} + maxDepth := uint(5) + relayParentNumber := uint32(0) + relayParentStorageRoot := common.Hash{} + + type CandidateAndPVD struct { + candidate parachaintypes.CommittedCandidateReceipt + pvd parachaintypes.PersistedValidationData + } + + candidates := make([]*CandidateAndPVD, 0) + + // candidate 0 + candidate0Pvd, candidate0 := makeCommittedCandidate(t, paraID, + relayParent, 0, requiredParent, parachaintypes.HeadData{Data: []byte{0x00}}, relayParentNumber) + candidates = append(candidates, &CandidateAndPVD{candidate: candidate0, pvd: candidate0Pvd}) + + // candidate 1 to 5 + for i := 1; i <= 5; i++ { + candidatePvd, candidate := makeCommittedCandidate(t, paraID, + relayParent, 0, + parachaintypes.HeadData{Data: []byte{byte(i - 1)}}, + parachaintypes.HeadData{Data: []byte{byte(i)}}, + relayParentNumber) + candidates = append(candidates, &CandidateAndPVD{candidate: candidate, pvd: candidatePvd}) + } + + storage := newCandidateStorage() + + for _, c := range candidates { + candidateHash, err := c.candidate.Hash() + require.NoError(t, err) + + entry, err := newCandidateEntry(parachaintypes.CandidateHash{Value: candidateHash}, c.candidate, c.pvd, seconded) + require.NoError(t, err) + + err = storage.addCandidateEntry(entry) + require.NoError(t, err) + } + + candidateHashes := make([]parachaintypes.CandidateHash, 0) + for _, c := range candidates { + candidateHash, err := c.candidate.Hash() + require.NoError(t, err) + candidateHashes = append(candidateHashes, parachaintypes.CandidateHash{Value: candidateHash}) + } + + type Ancestors = map[parachaintypes.CandidateHash]struct{} + + hashes := func(from, to uint) []*candidateAndRelayParent { + var output 
[]*candidateAndRelayParent + + for i := from; i < to; i++ { + output = append(output, &candidateAndRelayParent{ + candidateHash: candidateHashes[i], + realyParentHash: relayParent, + }) + } + + return output + } + + relayParentInfo := relayChainBlockInfo{ + Number: parachaintypes.BlockNumber(relayParentNumber), + Hash: relayParent, + StorageRoot: relayParentStorageRoot, + } + + baseConstraints := makeConstraints(0, []parachaintypes.BlockNumber{0}, requiredParent) + scope, err := newScopeWithAncestors( + relayParentInfo, + baseConstraints, + nil, + maxDepth, + nil, + ) + require.NoError(t, err) + + chain := populateFromPreviousStorage(scope, storage) + + // for now candidates are only seconded, not backed, the best chain is empty + // and no candidate will be returned + + require.Equal(t, 6, len(candidateHashes)) + require.Equal(t, 0, chain.bestChainLen()) + require.Equal(t, 6, chain.unconnected.len()) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) + } + + t.Run("couple_candidates_backed", func(t *testing.T) { + chain := cloneFragmentChain(chain) + chain.candidateBacked(candidateHashes[5]) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) + } + + chain.candidateBacked(candidateHashes[3]) + chain.candidateBacked(candidateHashes[4]) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) + } + + chain.candidateBacked(candidateHashes[1]) + + for count := 0; count < 10; count++ { + require.Equal(t, 0, len(chain.findBackableChain(make(Ancestors), uint32(count)))) + } + + chain.candidateBacked(candidateHashes[0]) + require.Equal(t, hashes(0, 1), chain.findBackableChain(make(Ancestors), 1)) + + for c := 2; c < 10; c++ { + require.Equal(t, hashes(0, 2), chain.findBackableChain(make(Ancestors), uint32(c))) + } + + // now back the missing piece + 
chain.candidateBacked(candidateHashes[2]) + require.Equal(t, 6, chain.bestChainLen()) + + for count := 0; count < 10; count++ { + var result []*candidateAndRelayParent + if count > 6 { + result = hashes(0, 6) + } else { + for i := 0; i < count && i < 6; i++ { + result = append(result, &candidateAndRelayParent{ + candidateHash: candidateHashes[i], + realyParentHash: relayParent, + }) + } + } + require.Equal(t, result, chain.findBackableChain(make(Ancestors), uint32(count))) + } + }) + + t.Run("back_all_candidates_in_random_order", func(t *testing.T) { + candidatesShuffled := make([]parachaintypes.CandidateHash, len(candidateHashes)) + for i := range candidateHashes { + candidatesShuffled[i] = parachaintypes.CandidateHash{ + Value: common.NewHash(candidateHashes[i].Value.ToBytes()), + } + } + + rand.Shuffle(len(candidatesShuffled), func(i, j int) { + candidatesShuffled[i], candidatesShuffled[j] = candidatesShuffled[j], candidatesShuffled[i] + }) + + for _, c := range candidatesShuffled { + chain.candidateBacked(c) + storage.markBacked(c) + } + + // no ancestors supplied + require.Equal(t, 0, chain.findAncestorPath(make(Ancestors))) + require.Equal(t, []*candidateAndRelayParent(nil), chain.findBackableChain(make(Ancestors), 0)) + require.Equal(t, hashes(0, 1), chain.findBackableChain(make(Ancestors), 1)) + require.Equal(t, hashes(0, 2), chain.findBackableChain(make(Ancestors), 2)) + require.Equal(t, hashes(0, 5), chain.findBackableChain(make(Ancestors), 5)) + + for count := 6; count < 10; count++ { + backableChain := chain.findBackableChain(make(Ancestors), uint32(count)) + require.Equal(t, hashes(0, 6), backableChain) + } + + // ancestors which is not part of the chain will be ignored + ancestors := make(Ancestors) + ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} + require.Equal(t, 0, chain.findAncestorPath(ancestors)) + require.Equal(t, hashes(0, 4), chain.findBackableChain(ancestors, 4)) + + ancestors = make(Ancestors) + 
ancestors[candidateHashes[1]] = struct{}{} + ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} + require.Equal(t, 0, chain.findAncestorPath(ancestors)) + require.Equal(t, hashes(0, 4), chain.findBackableChain(ancestors, 4)) + + ancestors = make(Ancestors) + ancestors[candidateHashes[0]] = struct{}{} + ancestors[parachaintypes.CandidateHash{Value: common.Hash{}}] = struct{}{} + require.Equal(t, 1, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(1, 5), chain.findBackableChain(ancestors, 4)) + + // ancestors which are part of the chain but don't form a path from root, will be ignored + ancestors = make(Ancestors) + ancestors[candidateHashes[1]] = struct{}{} + ancestors[candidateHashes[2]] = struct{}{} + require.Equal(t, 0, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(0, 4), chain.findBackableChain(ancestors, 4)) + + // valid ancestors + ancestors = make(Ancestors) + ancestors[candidateHashes[2]] = struct{}{} + ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[1]] = struct{}{} + require.Equal(t, 3, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(3, 5), chain.findBackableChain(maps.Clone(ancestors), 2)) + + for count := 3; count < 10; count++ { + require.Equal(t, hashes(3, 6), chain.findBackableChain(maps.Clone(ancestors), uint32(count))) + } + + // valid ancestors with candidates which have been omitted due to timeouts + ancestors = make(Ancestors) + ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[2]] = struct{}{} + require.Equal(t, 1, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(1, 4), chain.findBackableChain(maps.Clone(ancestors), 3)) + require.Equal(t, hashes(1, 5), chain.findBackableChain(maps.Clone(ancestors), 4)) + + for count := 5; count < 10; count++ { + require.Equal(t, hashes(1, 6), chain.findBackableChain(maps.Clone(ancestors), uint32(count))) + } + + ancestors = make(Ancestors) + 
ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[1]] = struct{}{} + ancestors[candidateHashes[3]] = struct{}{} + require.Equal(t, 2, chain.findAncestorPath(maps.Clone(ancestors))) + require.Equal(t, hashes(2, 6), chain.findBackableChain(maps.Clone(ancestors), 4)) + + require.Equal(t, hashes(0, 0), chain.findBackableChain(maps.Clone(ancestors), 0)) + + // stop when we've found a candidate which is pending availability + scope, err := newScopeWithAncestors(relayParentInfo, baseConstraints, + []*pendingAvailability{ + {candidateHash: candidateHashes[3], relayParent: relayParentInfo}, + }, + maxDepth, + nil, + ) + require.NoError(t, err) + chain = populateFromPreviousStorage(scope, storage) + ancestors = make(Ancestors) + ancestors[candidateHashes[0]] = struct{}{} + ancestors[candidateHashes[1]] = struct{}{} + require.Equal(t, hashes(2, 3), chain.findBackableChain(maps.Clone(ancestors), 3)) + }) +} diff --git a/dot/parachain/prospective-parachains/inclusion_emulator.go b/dot/parachain/prospective-parachains/inclusion_emulator.go new file mode 100644 index 0000000000..1143f69544 --- /dev/null +++ b/dot/parachain/prospective-parachains/inclusion_emulator.go @@ -0,0 +1,504 @@ +package prospectiveparachains + +import ( + "bytes" + "fmt" + "iter" + "maps" + "slices" + + parachaintypes "github.com/ChainSafe/gossamer/dot/parachain/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ethereum/go-ethereum/common/math" +) + +// prospectiveCandidate includes key informations that represents a candidate +// without pinning it to a particular session. For example, commitments are +// represented here, but the erasure-root is not. This means that, prospective +// candidates are not correlated to any session in particular. 
+type prospectiveCandidate struct { + Commitments parachaintypes.CandidateCommitments + PersistedValidationData parachaintypes.PersistedValidationData + PoVHash common.Hash + ValidationCodeHash parachaintypes.ValidationCodeHash +} + +// relayChainBlockInfo contains minimum information about a relay-chain block. +type relayChainBlockInfo struct { + Hash common.Hash + StorageRoot common.Hash + Number parachaintypes.BlockNumber +} + +func checkModifications(c *parachaintypes.Constraints, modifications *constraintModifications) error { + if modifications.HrmpWatermark != nil && modifications.HrmpWatermark.Type == Trunk { + if !slices.Contains(c.HRMPInbound.ValidWatermarks, modifications.HrmpWatermark.Watermark()) { + return &errDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Watermark()} + } + } + + for id, outboundHrmpMod := range modifications.OutboundHrmp { + outbound, ok := c.HRMPChannelsOut[id] + if !ok { + return &errNoSuchHrmpChannel{paraID: id} + } + + _, overflow := math.SafeSub(uint64(outbound.BytesRemaining), uint64(outboundHrmpMod.BytesSubmitted)) + if overflow { + return &errHrmpBytesOverflow{ + paraID: id, + bytesRemaining: outbound.BytesRemaining, + bytesSubmitted: outboundHrmpMod.BytesSubmitted, + } + } + + _, overflow = math.SafeSub(uint64(outbound.MessagesRemaining), uint64(outboundHrmpMod.MessagesSubmitted)) + if overflow { + return &errHrmpMessagesOverflow{ + paraID: id, + messagesRemaining: outbound.MessagesRemaining, + messagesSubmitted: outboundHrmpMod.MessagesSubmitted, + } + } + } + + _, overflow := math.SafeSub(uint64(c.UMPRemaining), uint64(modifications.UmpMessagesSent)) + if overflow { + return &errUmpMessagesOverflow{ + messagesRemaining: c.UMPRemaining, + messagesSubmitted: modifications.UmpMessagesSent, + } + } + + _, overflow = math.SafeSub(uint64(c.UMPRemainingBytes), uint64(modifications.UmpBytesSent)) + if overflow { + return &errUmpBytesOverflow{ + bytesRemaining: c.UMPRemainingBytes, + bytesSubmitted: 
modifications.UmpBytesSent, + } + } + + _, overflow = math.SafeSub(uint64(len(c.DMPRemainingMessages)), uint64(modifications.DmpMessagesProcessed)) + if overflow { + return &errDmpMessagesUnderflow{ + messagesRemaining: uint32(len(c.DMPRemainingMessages)), + messagesProcessed: modifications.DmpMessagesProcessed, + } + } + + if c.FutureValidationCode == nil && modifications.CodeUpgradeApplied { + return errAppliedNonexistentCodeUpgrade + } + + return nil +} + +func applyModifications(c *parachaintypes.Constraints, modifications *constraintModifications) ( + *parachaintypes.Constraints, error) { + newConstraints := c.Clone() + + if modifications.RequiredParent != nil { + newConstraints.RequiredParent = *modifications.RequiredParent + } + + if modifications.HrmpWatermark != nil { + pos, found := slices.BinarySearch( + newConstraints.HRMPInbound.ValidWatermarks, + modifications.HrmpWatermark.Watermark()) + + if found { + // Exact match, so this is OK in all cases. + newConstraints.HRMPInbound.ValidWatermarks = newConstraints.HRMPInbound.ValidWatermarks[pos+1:] + } else { + switch modifications.HrmpWatermark.Type { + case Head: + // Updates to Head are always OK. + newConstraints.HRMPInbound.ValidWatermarks = newConstraints.HRMPInbound.ValidWatermarks[pos:] + case Trunk: + // Trunk update landing on disallowed watermark is not OK. 
+ return nil, &errDisallowedHrmpWatermark{BlockNumber: modifications.HrmpWatermark.Block} + } + } + } + + for id, outboundHrmpMod := range modifications.OutboundHrmp { + outbound, ok := newConstraints.HRMPChannelsOut[id] + if !ok { + return nil, &errNoSuchHrmpChannel{id} + } + + if outboundHrmpMod.BytesSubmitted > outbound.BytesRemaining { + return nil, &errHrmpBytesOverflow{ + paraID: id, + bytesRemaining: outbound.BytesRemaining, + bytesSubmitted: outboundHrmpMod.BytesSubmitted, + } + } + + if outboundHrmpMod.MessagesSubmitted > outbound.MessagesRemaining { + return nil, &errHrmpMessagesOverflow{ + paraID: id, + messagesRemaining: outbound.MessagesRemaining, + messagesSubmitted: outboundHrmpMod.MessagesSubmitted, + } + } + + outbound.BytesRemaining -= outboundHrmpMod.BytesSubmitted + outbound.MessagesRemaining -= outboundHrmpMod.MessagesSubmitted + } + + if modifications.UmpMessagesSent > newConstraints.UMPRemaining { + return nil, &errUmpMessagesOverflow{ + messagesRemaining: newConstraints.UMPRemaining, + messagesSubmitted: modifications.UmpMessagesSent, + } + } + newConstraints.UMPRemaining -= modifications.UmpMessagesSent + + if modifications.UmpBytesSent > newConstraints.UMPRemainingBytes { + return nil, &errUmpBytesOverflow{ + bytesRemaining: newConstraints.UMPRemainingBytes, + bytesSubmitted: modifications.UmpBytesSent, + } + } + newConstraints.UMPRemainingBytes -= modifications.UmpBytesSent + + if modifications.DmpMessagesProcessed > uint32(len(newConstraints.DMPRemainingMessages)) { + return nil, &errDmpMessagesUnderflow{ + messagesRemaining: uint32(len(newConstraints.DMPRemainingMessages)), + messagesProcessed: modifications.DmpMessagesProcessed, + } + } else { + newConstraints.DMPRemainingMessages = newConstraints.DMPRemainingMessages[modifications.DmpMessagesProcessed:] + } + + if modifications.CodeUpgradeApplied { + if newConstraints.FutureValidationCode == nil { + return nil, errAppliedNonexistentCodeUpgrade + } + + newConstraints.ValidationCodeHash 
= newConstraints.FutureValidationCode.ValidationCodeHash + } + + return newConstraints, nil +} + +// outboundHrmpChannelModification represents modifications to outbound HRMP channels. +type outboundHrmpChannelModification struct { + BytesSubmitted uint32 + MessagesSubmitted uint32 +} + +// hrmpWatermarkUpdate represents an update to the HRMP Watermark. +type hrmpWatermarkUpdate struct { + Type hrmpWatermarkUpdateType + Block parachaintypes.BlockNumber +} + +// hrmpWatermarkUpdateType defines the type of HrmpWatermarkUpdate. +type hrmpWatermarkUpdateType int + +const ( + Head hrmpWatermarkUpdateType = iota + Trunk +) + +// Watermark returns the block number of the HRMP Watermark update. +func (h hrmpWatermarkUpdate) Watermark() parachaintypes.BlockNumber { + return h.Block +} + +// constraintModifications represents modifications to constraints as a result of prospective candidates. +type constraintModifications struct { + // The required parent head to build upon. + RequiredParent *parachaintypes.HeadData + // The new HRMP watermark. + HrmpWatermark *hrmpWatermarkUpdate + // Outbound HRMP channel modifications. + OutboundHrmp map[parachaintypes.ParaID]outboundHrmpChannelModification + // The amount of UMP XCM messages sent. `UMPSignal` and separator are excluded. + UmpMessagesSent uint32 + // The amount of UMP XCM bytes sent. `UMPSignal` and separator are excluded. + UmpBytesSent uint32 + // The amount of DMP messages processed. + DmpMessagesProcessed uint32 + // Whether a pending code upgrade has been applied. 
+ CodeUpgradeApplied bool +} + +func (cm *constraintModifications) Clone() *constraintModifications { + return &constraintModifications{ + RequiredParent: cm.RequiredParent, + HrmpWatermark: cm.HrmpWatermark, + OutboundHrmp: maps.Clone(cm.OutboundHrmp), + UmpMessagesSent: cm.UmpMessagesSent, + UmpBytesSent: cm.UmpBytesSent, + DmpMessagesProcessed: cm.DmpMessagesProcessed, + CodeUpgradeApplied: cm.CodeUpgradeApplied, + } +} + +// Identity returns the 'identity' modifications: these can be applied to +// any constraints and yield the exact same result. +func NewConstraintModificationsIdentity() *constraintModifications { + return &constraintModifications{ + OutboundHrmp: make(map[parachaintypes.ParaID]outboundHrmpChannelModification), + } +} + +// Stack stacks other modifications on top of these. This does no sanity-checking, so if +// `other` is garbage relative to `self`, then the new value will be garbage as well. +// This is an addition which is not commutative. +func (cm *constraintModifications) Stack(other *constraintModifications) { + if other.RequiredParent != nil { + cm.RequiredParent = other.RequiredParent + } + + if other.HrmpWatermark != nil { + cm.HrmpWatermark = other.HrmpWatermark + } + + for id, mods := range other.OutboundHrmp { + record, ok := cm.OutboundHrmp[id] + if !ok { + record = outboundHrmpChannelModification{} + } + + record.BytesSubmitted += mods.BytesSubmitted + record.MessagesSubmitted += mods.MessagesSubmitted + cm.OutboundHrmp[id] = record + } + + cm.UmpMessagesSent += other.UmpMessagesSent + cm.UmpBytesSent += other.UmpBytesSent + cm.DmpMessagesProcessed += other.DmpMessagesProcessed + cm.CodeUpgradeApplied = cm.CodeUpgradeApplied || other.CodeUpgradeApplied +} + +// Fragment represents another prospective parachain block +// This is a type which guarantees that the candidate is valid under the operating constraints +type Fragment struct { + relayParent *relayChainBlockInfo + operatingConstraints *parachaintypes.Constraints + 
candidate *prospectiveCandidate + modifications *constraintModifications +} + +func (f *Fragment) RelayParent() *relayChainBlockInfo { + return f.relayParent +} + +func (f *Fragment) Candidate() *prospectiveCandidate { + return f.candidate +} + +func (f *Fragment) ConstraintModifications() *constraintModifications { + return f.modifications +} + +// NewFragment creates a new Fragment. This fails if the fragment isnt in line +// with the operating constraints. That is, either its inputs or outputs fail +// checks against the constraints. +// This does not check that the collator signature is valid or whether the PoV is +// small enough. +func NewFragment( + relayParent *relayChainBlockInfo, + operatingConstraints *parachaintypes.Constraints, + candidate *prospectiveCandidate) (*Fragment, error) { + + modifications, err := checkAgainstConstraints( + relayParent, + operatingConstraints, + candidate.Commitments, + candidate.ValidationCodeHash, + candidate.PersistedValidationData, + ) + if err != nil { + return nil, err + } + + return &Fragment{ + relayParent: relayParent, + operatingConstraints: operatingConstraints, + candidate: candidate, + modifications: modifications, + }, nil +} + +func checkAgainstConstraints( + relayParent *relayChainBlockInfo, + operatingConstraints *parachaintypes.Constraints, + commitments parachaintypes.CandidateCommitments, + validationCodeHash parachaintypes.ValidationCodeHash, + persistedValidationData parachaintypes.PersistedValidationData, +) (*constraintModifications, error) { + upwardMessages := make([]parachaintypes.UpwardMessage, 0) + // filter UMP signals + for upwardMessage := range skipUmpSignals(commitments.UpwardMessages) { + upwardMessages = append(upwardMessages, upwardMessage) + } + + umpMessagesSent := len(upwardMessages) + umpBytesSent := 0 + for _, message := range upwardMessages { + umpBytesSent += len(message) + } + + hrmpWatermark := hrmpWatermarkUpdate{ + Type: Trunk, + Block: 
parachaintypes.BlockNumber(commitments.HrmpWatermark), + } + + if parachaintypes.BlockNumber(commitments.HrmpWatermark) == relayParent.Number { + hrmpWatermark.Type = Head + } + + outboundHrmp := make(map[parachaintypes.ParaID]outboundHrmpChannelModification) + var lastRecipient *parachaintypes.ParaID + + for i, message := range commitments.HorizontalMessages { + if lastRecipient != nil && *lastRecipient >= parachaintypes.ParaID(message.Recipient) { + return nil, &errHrmpMessagesDescendingOrDuplicate{index: uint(i)} + } + + recipientParaID := parachaintypes.ParaID(message.Recipient) + lastRecipient = &recipientParaID + record, ok := outboundHrmp[recipientParaID] + if !ok { + record = outboundHrmpChannelModification{} + } + + record.BytesSubmitted += uint32(len(message.Data)) + record.MessagesSubmitted++ + outboundHrmp[recipientParaID] = record + } + + codeUpgradeApplied := false + if operatingConstraints.FutureValidationCode != nil { + codeUpgradeApplied = relayParent.Number >= operatingConstraints.FutureValidationCode.BlockNumber + } + + modifications := &constraintModifications{ + RequiredParent: &commitments.HeadData, + HrmpWatermark: &hrmpWatermark, + OutboundHrmp: outboundHrmp, + UmpMessagesSent: uint32(umpMessagesSent), + UmpBytesSent: uint32(umpBytesSent), + DmpMessagesProcessed: commitments.ProcessedDownwardMessages, + CodeUpgradeApplied: codeUpgradeApplied, + } + + err := validateAgainstConstraints( + operatingConstraints, + relayParent, + commitments, + persistedValidationData, + validationCodeHash, + modifications, + ) + if err != nil { + return nil, err + } + + return modifications, nil +} + +// skipUmpSignals is a utility function for skipping the UMP signals. 
+func skipUmpSignals(upwardMessages []parachaintypes.UpwardMessage) iter.Seq[parachaintypes.UpwardMessage] { + var UmpSeparator = []byte{} + return func(yield func(parachaintypes.UpwardMessage) bool) { + for _, message := range upwardMessages { + if !bytes.Equal([]byte(message), UmpSeparator) { + if !yield([]byte(message)) { + return + } + } + + return //nolint:staticcheck + } + } +} + +func validateAgainstConstraints( + constraints *parachaintypes.Constraints, + relayParent *relayChainBlockInfo, + commitments parachaintypes.CandidateCommitments, + persistedValidationData parachaintypes.PersistedValidationData, + validationCodeHash parachaintypes.ValidationCodeHash, + modifications *constraintModifications, +) error { + expectedPVD := parachaintypes.PersistedValidationData{ + ParentHead: constraints.RequiredParent, + RelayParentNumber: uint32(relayParent.Number), + RelayParentStorageRoot: relayParent.StorageRoot, + MaxPovSize: constraints.MaxPoVSize, + } + + if !expectedPVD.Equal(persistedValidationData) { + return fmt.Errorf("%w, expected: %v, got: %v", + errPersistedValidationDataMismatch, expectedPVD, persistedValidationData) + } + + if constraints.ValidationCodeHash != validationCodeHash { + return &errValidationCodeMismatch{ + expected: constraints.ValidationCodeHash, + got: validationCodeHash, + } + } + + if relayParent.Number < constraints.MinRelayParentNumber { + return &errRelayParentTooOld{ + minAllowed: constraints.MinRelayParentNumber, + current: relayParent.Number, + } + } + + if commitments.NewValidationCode != nil { + restriction, err := constraints.UpgradeRestriction.Value() + if err != nil { + return fmt.Errorf("while retrieving value: %w", err) + } + switch restriction.(type) { + case *parachaintypes.Present: + return errCodeUpgradeRestricted + } + } + + announcedCodeSize := 0 + if commitments.NewValidationCode != nil { + announcedCodeSize = len(*commitments.NewValidationCode) + } + + if uint32(announcedCodeSize) > constraints.MaxCodeSize { + 
return &errCodeSizeTooLarge{ + maxAllowed: constraints.MaxCodeSize, + newSize: uint32(announcedCodeSize), + } + } + + if modifications.DmpMessagesProcessed == 0 { + if len(constraints.DMPRemainingMessages) > 0 && constraints.DMPRemainingMessages[0] <= relayParent.Number { + return errDmpAdvancementRule + } + } + + if len(commitments.HorizontalMessages) > int(constraints.MaxNumHRMPPerCandidate) { + return &errHrmpMessagesPerCandidateOverflow{ + messagesAllowed: constraints.MaxNumHRMPPerCandidate, + messagesSubmitted: uint32(len(commitments.HorizontalMessages)), + } + } + + if modifications.UmpMessagesSent > constraints.MaxNumUMPPerCandidate { + return &errUmpMessagesPerCandidateOverflow{ + messagesAllowed: constraints.MaxNumUMPPerCandidate, + messagesSubmitted: modifications.UmpMessagesSent, + } + } + + if err := checkModifications(constraints, modifications); err != nil { + return &errOutputsInvalid{ModificationError: err} + } + + return nil +} diff --git a/dot/parachain/statement-distribution/statement_distribution.go b/dot/parachain/statement-distribution/statement_distribution.go index e9d01812db..ff4ae40d8c 100644 --- a/dot/parachain/statement-distribution/statement_distribution.go +++ b/dot/parachain/statement-distribution/statement_distribution.go @@ -34,7 +34,6 @@ func (s StatementDistribution) Run(ctx context.Context, overseerToSubSystem <-ch } func (s StatementDistribution) processMessage(msg any) error { - switch msg := msg.(type) { case statementedistributionmessages.Backed: // TODO #4171 diff --git a/dot/parachain/types/async_backing.go b/dot/parachain/types/async_backing.go index f41f61bac8..41b2cb4251 100644 --- a/dot/parachain/types/async_backing.go +++ b/dot/parachain/types/async_backing.go @@ -3,6 +3,11 @@ package parachaintypes +import ( + "maps" + "slices" +) + // AsyncBackingParams contains the parameters for the async backing. 
type AsyncBackingParams struct { // The maximum number of para blocks between the para head in a relay parent @@ -52,7 +57,7 @@ type Constraints struct { // The maximum number of UMP messages allowed per candidate. MaxNumUMPPerCandidate uint32 // Remaining DMP queue. Only includes sent-at block numbers. - DMPRemainingMessages []uint32 + DMPRemainingMessages []BlockNumber // The limitations of all registered inbound HRMP channels. HRMPInbound InboundHRMPLimitations // The limitations of all registered outbound HRMP channels. @@ -91,3 +96,43 @@ type BackingState struct { Constraints Constraints PendingAvailability []CandidatePendingAvailability } + +func (c *Constraints) Clone() *Constraints { + requiredParent := HeadData{ + Data: make([]byte, len(c.RequiredParent.Data)), + } + copy(requiredParent.Data, c.RequiredParent.Data) + + var upgradeRestriction *UpgradeRestriction + if c.UpgradeRestriction != nil { + restriction := *c.UpgradeRestriction + upgradeRestriction = &restriction + } + + var futureValidationCode *FutureValidationCode + if c.FutureValidationCode != nil { + futureValidationCode = &FutureValidationCode{ + BlockNumber: c.FutureValidationCode.BlockNumber, + ValidationCodeHash: c.FutureValidationCode.ValidationCodeHash, + } + } + + return &Constraints{ + MinRelayParentNumber: c.MinRelayParentNumber, + MaxPoVSize: c.MaxPoVSize, + MaxCodeSize: c.MaxCodeSize, + UMPRemaining: c.UMPRemaining, + UMPRemainingBytes: c.UMPRemainingBytes, + MaxNumUMPPerCandidate: c.MaxNumUMPPerCandidate, + DMPRemainingMessages: slices.Clone(c.DMPRemainingMessages), + HRMPInbound: InboundHRMPLimitations{ + ValidWatermarks: slices.Clone(c.HRMPInbound.ValidWatermarks), + }, + HRMPChannelsOut: maps.Clone(c.HRMPChannelsOut), + MaxNumHRMPPerCandidate: c.MaxNumHRMPPerCandidate, + RequiredParent: requiredParent, + ValidationCodeHash: c.ValidationCodeHash, + UpgradeRestriction: upgradeRestriction, + FutureValidationCode: futureValidationCode, + } +} diff --git a/dot/parachain/types/types.go 
b/dot/parachain/types/types.go index 64e40bae2f..520d0d2650 100644 --- a/dot/parachain/types/types.go +++ b/dot/parachain/types/types.go @@ -567,6 +567,13 @@ type PersistedValidationData struct { MaxPovSize uint32 `scale:"4"` } +func (pvd PersistedValidationData) Equal(other PersistedValidationData) bool { + return bytes.Equal(pvd.ParentHead.Data, other.ParentHead.Data) && + pvd.RelayParentNumber == other.RelayParentNumber && + pvd.RelayParentStorageRoot == other.RelayParentStorageRoot && + pvd.MaxPovSize == other.MaxPovSize +} + func (pvd PersistedValidationData) Hash() (common.Hash, error) { bytes, err := scale.Marshal(pvd) if err != nil { diff --git a/go.mod b/go.mod index 1ab95fa04d..283758230f 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,6 @@ require ( github.com/fatih/color v1.17.0 github.com/gammazero/deque v0.2.1 github.com/go-playground/validator/v10 v10.21.0 - github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 diff --git a/go.sum b/go.sum index 990d74d772..03ec8f0ac3 100644 --- a/go.sum +++ b/go.sum @@ -206,8 +206,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -799,7 +797,6 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=