From b2f035da0aec347d452fa175e304776b80278647 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Wed, 23 Dec 2020 15:44:32 -0600 Subject: [PATCH 1/7] chaingen: Support querying block test name by hash. --- blockchain/chaingen/generator.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/blockchain/chaingen/generator.go b/blockchain/chaingen/generator.go index 9f6d6a674d..f37076492d 100644 --- a/blockchain/chaingen/generator.go +++ b/blockchain/chaingen/generator.go @@ -164,6 +164,7 @@ type Generator struct { blocks map[chainhash.Hash]*wire.MsgBlock blockHeights map[chainhash.Hash]uint32 blocksByName map[string]*wire.MsgBlock + blockNames map[chainhash.Hash]string p2shOpTrueAddr dcrutil.Address p2shOpTrueScript []byte @@ -203,8 +204,9 @@ func MakeGenerator(params *chaincfg.Params) (Generator, error) { tip: genesis, tipName: "genesis", blocks: map[chainhash.Hash]*wire.MsgBlock{genesisHash: genesis}, - blockHeights: map[chainhash.Hash]uint32{genesis.BlockHash(): 0}, + blockHeights: map[chainhash.Hash]uint32{genesisHash: 0}, blocksByName: map[string]*wire.MsgBlock{"genesis": genesis}, + blockNames: map[chainhash.Hash]string{genesisHash: "genesis"}, p2shOpTrueAddr: p2shOpTrueAddr, p2shOpTrueScript: p2shOpTrueScript, originalParents: make(map[chainhash.Hash]chainhash.Hash), @@ -245,6 +247,16 @@ func (g *Generator) BlockByName(blockName string) *wire.MsgBlock { return block } +// BlockName returns the name associated with the provided block hash. It will +// panic if the specified block hash does not exist. +func (g *Generator) BlockName(hash *chainhash.Hash) string { + name, ok := g.blockNames[*hash] + if !ok { + panic(fmt.Sprintf("block name for hash %s does not exist", hash)) + } + return name +} + // BlockByHash returns the block associated with the provided block hash. It // will panic if the specified block hash does not exist. func (g *Generator) BlockByHash(hash *chainhash.Hash) *wire.MsgBlock { @@ -2045,7 +2057,7 @@ func (g *Generator) disconnectBlockTickets(b *wire.MsgBlock) { // originalParent returns the original block the passed block was built from. // This is necessary because callers might change the previous block hash in a -// munger which would cause the like ticket pool to be reconstructed improperly. +// munger which would cause the live ticket pool to be reconstructed improperly. func (g *Generator) originalParent(b *wire.MsgBlock) *wire.MsgBlock { parentHash, ok := g.originalParents[b.BlockHash()] if !ok { @@ -2402,6 +2414,7 @@ func (g *Generator) NextBlock(blockName string, spend *SpendableOut, ticketSpend g.blocks[blockHash] = &block g.blockHeights[blockHash] = nextHeight g.blocksByName[blockName] = &block + g.blockNames[blockHash] = blockName g.tip = &block g.tipName = blockName return &block @@ -2456,6 +2469,7 @@ func (g *Generator) UpdateBlockState(oldBlockName string, oldBlockHash chainhash delete(g.blocks, oldBlockHash) delete(g.blockHeights, oldBlockHash) delete(g.blocksByName, oldBlockName) + delete(g.blockNames, oldBlockHash) delete(g.wonTickets, oldBlockHash) // Add new entries. 
@@ -2463,6 +2477,7 @@ func (g *Generator) UpdateBlockState(oldBlockName string, oldBlockHash chainhash g.blocks[newBlockHash] = newBlock g.blockHeights[newBlockHash] = existingHeight g.blocksByName[newBlockName] = newBlock + g.blockNames[newBlockHash] = newBlockName g.wonTickets[newBlockHash] = wonTickets } From f7f5aa783b5131991f88885fa7223066c9010e9b Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Wed, 23 Dec 2020 15:44:32 -0600 Subject: [PATCH 2/7] blockchain: Improve test harness logging. This makes the logging in the test harness more consistent and improves it to make use of the ability to look up the test name for blocks when logging issues for easier identification. --- blockchain/common_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/blockchain/common_test.go b/blockchain/common_test.go index b3d9ed83b2..556b32e18b 100644 --- a/blockchain/common_test.go +++ b/blockchain/common_test.go @@ -505,7 +505,7 @@ func (g *chaingenHarness) AcceptBlock(blockName string) { msgBlock := g.BlockByName(blockName) blockHeight := msgBlock.Header.Height block := dcrutil.NewBlock(msgBlock) - g.t.Logf("Testing block %s (hash %s, height %d)", blockName, block.Hash(), + g.t.Logf("Testing block %q (hash %s, height %d)", blockName, block.Hash(), blockHeight) forkLen, err := g.chain.ProcessBlock(block, BFNone) @@ -540,8 +540,8 @@ func (g *chaingenHarness) RejectBlock(blockName string, kind ErrorKind) { msgBlock := g.BlockByName(blockName) blockHeight := msgBlock.Header.Height block := dcrutil.NewBlock(msgBlock) - g.t.Logf("Testing block %s (hash %s, height %d)", blockName, block.Hash(), - blockHeight) + g.t.Logf("Testing reject block %q (hash %s, height %d, reason %v)", + blockName, block.Hash(), blockHeight, kind) _, err := g.chain.ProcessBlock(block, BFNone) if err == nil { @@ -549,8 +549,7 @@ func (g *chaingenHarness) RejectBlock(blockName string, kind ErrorKind) { blockName, block.Hash(), blockHeight) } - // Ensure the error kind is of the expected type and matches - // the value specified in the test instance. + // Ensure the error matches the value specified in the test instance. 
if !errors.Is(err, kind) { g.t.Fatalf("block %q (hash %s, height %d) does not have expected reject "+ "code -- got %v, want %v", blockName, block.Hash(), blockHeight, @@ -577,8 +576,9 @@ func (g *chaingenHarness) ExpectTip(tipName string) { if best.Hash != wantTip.BlockHash() || best.Height != int64(wantTip.Header.Height) { g.t.Fatalf("block %q (hash %s, height %d) should be the current tip "+ - "-- got (hash %s, height %d)", tipName, wantTip.BlockHash(), - wantTip.Header.Height, best.Hash, best.Height) + "-- got %q (hash %s, height %d)", tipName, wantTip.BlockHash(), + wantTip.Header.Height, g.BlockName(&best.Hash), best.Hash, + best.Height) } } @@ -591,7 +591,7 @@ func (g *chaingenHarness) AcceptedToSideChainWithExpectedTip(tipName string) { msgBlock := g.Tip() blockHeight := msgBlock.Header.Height block := dcrutil.NewBlock(msgBlock) - g.t.Logf("Testing block %s (hash %s, height %d)", g.TipName(), block.Hash(), + g.t.Logf("Testing block %q (hash %s, height %d)", g.TipName(), block.Hash(), blockHeight) forkLen, err := g.chain.ProcessBlock(block, BFNone) @@ -701,7 +701,7 @@ func (g *chaingenHarness) ForceTipReorg(fromTipName, toTipName string) { from := g.BlockByName(fromTipName) to := g.BlockByName(toTipName) - g.t.Logf("Testing forced reorg from %s (hash %s, height %d) to %s (hash "+ + g.t.Logf("Testing forced reorg from %q (hash %s, height %d) to %q (hash "+ "%s, height %d)", fromTipName, from.BlockHash(), from.Header.Height, toTipName, to.BlockHash(), to.Header.Height) From a2742f286ad4fc6bcd539a4c8381904c28252b89 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Wed, 23 Dec 2020 15:44:33 -0600 Subject: [PATCH 3/7] blockchain: Support separate test block generation. This modifies the chain test harness to allow the initial blocks that are generated to specified heights, such as stake validation height, to be independently generated without also accepting them to the chain. It also allows a custom underlying chain generator which houses those blocks to be provided when creating a test harness. These two combined provide more flexibility to the tests since they can choose when (and if) to make the generated blocks available to the chain. --- blockchain/common_test.go | 163 ++++++++++++++++++++++++++++---------- 1 file changed, 123 insertions(+), 40 deletions(-) diff --git a/blockchain/common_test.go b/blockchain/common_test.go index 556b32e18b..7a7f11bb9d 100644 --- a/blockchain/common_test.go +++ b/blockchain/common_test.go @@ -467,29 +467,26 @@ type chaingenHarness struct { deploymentVersions map[string]uint32 } -// newChaingenHarness creates and returns a new instance of a chaingen harness -// that encapsulates the provided test instance along with a teardown function -// the caller should invoke when done testing to clean up. +// newChaingenHarnessWithGen creates and returns a new instance of a chaingen +// harness that encapsulates the provided test instance and existing chaingen +// generator along with a teardown function the caller should invoke when done +// testing to clean up. +// +// This differs from newChaingenHarness in that it allows the caller to provide +// a chaingen generator to use instead of creating a new one. // // See the documentation for the chaingenHarness type for more details. 
-func newChaingenHarness(t *testing.T, params *chaincfg.Params, dbName string) (*chaingenHarness, func()) { +func newChaingenHarnessWithGen(t *testing.T, dbName string, g *chaingen.Generator) (*chaingenHarness, func()) { t.Helper() - // Create a test generator instance initialized with the genesis block as - // the tip. - g, err := chaingen.MakeGenerator(params) - if err != nil { - t.Fatalf("Failed to create generator: %v", err) - } - // Create a new database and chain instance to run tests against. - chain, teardownFunc, err := chainSetup(dbName, params) + chain, teardownFunc, err := chainSetup(dbName, g.Params()) if err != nil { t.Fatalf("Failed to setup chain instance: %v", err) } harness := chaingenHarness{ - Generator: &g, + Generator: g, t: t, chain: chain, deploymentVersions: make(map[string]uint32), @@ -497,6 +494,27 @@ func newChaingenHarness(t *testing.T, params *chaincfg.Params, dbName string) (* return &harness, teardownFunc } +// newChaingenHarness creates and returns a new instance of a chaingen harness +// that encapsulates the provided test instance along with a teardown function +// the caller should invoke when done testing to clean up. +// +// This differs from newChaingenHarnessWithGen in that it creates a new chaingen +// generator to use instead of allowing the caller to provide one. +// +// See the documentation for the chaingenHarness type for more details. +func newChaingenHarness(t *testing.T, params *chaincfg.Params, dbName string) (*chaingenHarness, func()) { + t.Helper() + + // Create a test generator instance initialized with the genesis block as + // the tip. + g, err := chaingen.MakeGenerator(params) + if err != nil { + t.Fatalf("Failed to create generator: %v", err) + } + + return newChaingenHarnessWithGen(t, dbName, &g) +} + // AcceptBlock processes the block associated with the given name in the // harness generator and expects it to be accepted to the main chain. func (g *chaingenHarness) AcceptBlock(blockName string) { @@ -723,17 +741,25 @@ func minUint32(a, b uint32) uint32 { return b } -// AdvanceToHeight generates and accepts enough blocks to the chain instance -// associated with the harness to reach the provided height while purchasing the -// provided tickets per block after coinbase maturity. -func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint32) { +// generateToHeight generates enough blocks in the generator associated with the +// harness to reach the provided height assuming the blocks up to and including +// the provided from height have already been generated. It also purchases the +// provided number of tickets per block after coinbase maturity. +// +// The accept flag specifies whether or not to accept the blocks to the chain +// instance associated with the harness as well. +// +// The function will fail with a fatal test error if it is called with a from +// height that is greater than or equal to the to height or a number of tickets +// that exceeds the max allowed for a block. +func (g *chaingenHarness) generateToHeight(fromHeight, toHeight uint32, buyTicketsPerBlock uint32, accept bool) { g.t.Helper() - // Only allow this to be called with a sane height. - tipHeight := g.Tip().Header.Height - if height <= tipHeight { - g.t.Fatalf("not possible to advanced to height %d when the current "+ - "height is already %d", height, tipHeight) + // Only allow this to be called with sane heights.
+ tipHeight := fromHeight + if toHeight <= tipHeight { + g.t.Fatalf("not possible to generate to height %d when the current "+ + "height is already %d", toHeight, tipHeight) } // Only allow this to be called with a sane number of tickets to buy per @@ -741,8 +767,8 @@ func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint params := g.Params() maxOutsForTickets := uint32(params.TicketsPerBlock) if buyTicketsPerBlock > maxOutsForTickets { - g.t.Fatalf("a max of %v outputs are available for ticket "+ - "purchases per block", maxOutsForTickets) + g.t.Fatalf("a max of %v outputs are available for ticket purchases "+ + "per block", maxOutsForTickets) } // Shorter versions of useful params for convenience. @@ -755,7 +781,9 @@ func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint if tipHeight == 0 { g.CreateBlockOne("bfb", 0) g.AssertTipHeight(1) - g.AcceptTipBlock() + if accept { + g.AcceptTipBlock() + } tipHeight++ } intermediateHeight := uint32(1) @@ -765,12 +793,14 @@ func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint // // genesis -> bfb -> bm2 -> bm3 -> ... -> bm# alreadyAsserted := tipHeight >= coinbaseMaturity+1 - targetHeight := minUint32(coinbaseMaturity+1, height) + targetHeight := minUint32(coinbaseMaturity+1, toHeight) for ; tipHeight < targetHeight; tipHeight++ { blockName := fmt.Sprintf("bm%d", tipHeight-intermediateHeight) g.NextBlock(blockName, nil, nil) g.SaveTipCoinbaseOuts() - g.AcceptTipBlock() + if accept { + g.AcceptTipBlock() + } } intermediateHeight = targetHeight if !alreadyAsserted { @@ -784,7 +814,7 @@ func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint // ... -> bm# ... -> bse18 -> bse19 -> ... -> bse# var ticketsPurchased uint32 alreadyAsserted = tipHeight >= stakeEnabledHeight - targetHeight = minUint32(stakeEnabledHeight, height) + targetHeight = minUint32(stakeEnabledHeight, toHeight) for ; tipHeight < targetHeight; tipHeight++ { var ticketOuts []chaingen.SpendableOut if buyTicketsPerBlock > 0 { @@ -797,7 +827,9 @@ func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint blockName := fmt.Sprintf("bse%d", tipHeight-intermediateHeight) g.NextBlock(blockName, nil, ticketOuts) g.SaveTipCoinbaseOuts() - g.AcceptTipBlock() + if accept { + g.AcceptTipBlock() + } } intermediateHeight = targetHeight if !alreadyAsserted { @@ -805,10 +837,9 @@ func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint } targetPoolSize := uint32(g.Params().TicketPoolSize) * buyTicketsPerBlock - for ; tipHeight < height; tipHeight++ { + for ; tipHeight < toHeight; tipHeight++ { var ticketOuts []chaingen.SpendableOut - // Only purchase tickets until the target ticket pool size is - // reached. + // Only purchase tickets until the target ticket pool size is reached. ticketsNeeded := targetPoolSize - ticketsPurchased ticketsNeeded = minUint32(ticketsNeeded, buyTicketsPerBlock) if ticketsNeeded > 0 { @@ -820,28 +851,80 @@ func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint blockName := fmt.Sprintf("bsv%d", tipHeight-intermediateHeight) g.NextBlock(blockName, nil, ticketOuts) g.SaveTipCoinbaseOuts() - g.AcceptTipBlock() + if accept { + g.AcceptTipBlock() + } } - g.AssertTipHeight(height) + g.AssertTipHeight(toHeight) } -// AdvanceToStakeValidationHeight generates and accepts enough blocks to the -// chain instance associated with the harness to reach stake validation height. 
+// AdvanceToHeight generates and accepts enough blocks to the chain instance +// associated with the harness to reach the provided height while purchasing the +// provided tickets per block after coinbase maturity. +func (g *chaingenHarness) AdvanceToHeight(height uint32, buyTicketsPerBlock uint32) { + g.t.Helper() + + // Only allow this to be called with a sane height. + tipHeight := g.Tip().Header.Height + if height <= tipHeight { + g.t.Fatalf("not possible to advance to height %d when the current "+ + "height is already %d", height, tipHeight) + } + + const accept = true + g.generateToHeight(tipHeight, height, buyTicketsPerBlock, accept) +} + +// generateToStakeValidationHeight generates enough blocks in the generator +// associated with the harness to reach stake validation height. +// +// The accept flag specifies whether or not to accept the blocks to the chain +// instance associated with the harness as well. // // The function will fail with a fatal test error if it is not called with the // harness at the genesis block which is the case when it is first created. -func (g *chaingenHarness) AdvanceToStakeValidationHeight() { +func (g *chaingenHarness) generateToStakeValidationHeight(accept bool) { + g.t.Helper() + // Only allow this to be called on a newly created harness. if g.Tip().Header.Height != 0 { g.t.Fatalf("chaingen harness instance must be at the genesis block " + - "to advance to stake validation height") + "to generate to stake validation height") } + // Shorter versions of useful params for convenience. params := g.Params() ticketsPerBlock := uint32(params.TicketsPerBlock) stakeValidationHeight := uint32(params.StakeValidationHeight) - g.AdvanceToHeight(stakeValidationHeight, ticketsPerBlock) - g.AssertTipHeight(stakeValidationHeight) + + // Generate enough blocks in the associated harness generator to reach stake + // validation height. + g.generateToHeight(0, stakeValidationHeight, ticketsPerBlock, accept) +} + +// GenerateToStakeValidationHeight generates enough blocks in the generator +// associated with the harness to reach stake validation height without +// accepting them to the chain instance associated with the harness. +// +// The function will fail with a fatal test error if it is not called with the +// harness at the genesis block which is the case when it is first created. +func (g *chaingenHarness) GenerateToStakeValidationHeight() { + g.t.Helper() + + const accept = false + g.generateToStakeValidationHeight(accept) +} + +// AdvanceToStakeValidationHeight generates and accepts enough blocks to the +// chain instance associated with the harness to reach stake validation height. +// +// The function will fail with a fatal test error if it is not called with the +// harness at the genesis block which is the case when it is first created. +func (g *chaingenHarness) AdvanceToStakeValidationHeight() { + g.t.Helper() + + const accept = true + g.generateToStakeValidationHeight(accept) } // AdvanceFromSVHToActiveAgenda generates and accepts enough blocks with the From 57b59b4e266748b1ddfb94a579988bdeb5b6cb9c Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Wed, 23 Dec 2020 15:44:34 -0600 Subject: [PATCH 4/7] blockchain: Decouple processing and download logic. This completely reworks the way block index and processing works to use headers-first semantics and support out of order processing of the associated block data. 
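As a rough caller-side sketch of the intended two-phase flow (illustrative
only and not part of the patch; the syncFromPeer helper, its variable names,
and the ProcessBlockHeader name/signature are assumptions for this example,
while the ProcessBlock call mirrors the test harness usage shown elsewhere in
this series):

	// syncFromPeer illustrates processing headers independently of their
	// block data; names other than ProcessBlock are hypothetical.
	func syncFromPeer(chain *blockchain.BlockChain, headers []*wire.BlockHeader, blocks []*dcrutil.Block) error {
		for _, header := range headers {
			// Headers are accepted on their own, so the full shape of the
			// advertised chain is known before any block data arrives.
			// (ProcessBlockHeader is an assumed method name here.)
			if err := chain.ProcessBlockHeader(header); err != nil {
				return err
			}
		}
		for _, block := range blocks {
			// Block data may then be processed in any order so long as its
			// header is already known valid; blocks whose ancestor data is
			// missing are stored and linked in once that data shows up.
			if _, err := chain.ProcessBlock(block, blockchain.BFNone); err != nil {
				return err
			}
		}
		return nil
	}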
This will ultimately provide a wide variety of benefits as it means the entire
shape of the block tree can be determined from the headers alone, which in turn
allows much more informed decisions to be made, provides better information
regarding sync status and warning conditions, and allows future work to add
invalidation and reconsideration of arbitrary blocks.

It must be noted that this is a major change to the way the block index and
block handling is done and is deeply integrated with the consensus rules, so it
will need a significant amount of testing and extremely careful review.

Also of note is that there are a lot of assumptions made in the calling code in
regards to expected error attribution and the state the chain has at the moment
notifications are processed, so all of those semantics have intentionally been
retained, even though they might seem a bit out of place now, in order to limit
the number of changes introduced at a time and thus better ensure correctness.
For example, in the future it would probably make more sense to make the
notifications entirely asynchronous and for validation failures to be reported
via those asynchronous notifications.  However, before that can happen, all
callers would first need to be updated to ensure they do not rely on the chain
being in the same state it was at the moment the notification was generated.

In addition to all of the existing full block tests, this introduces a
comprehensive set of processing order tests which exercise all of the new
logic.

High level overview of the changes:

- Introduce tracking for whether a block is fully linked, meaning it builds on
  a branch that has block data for all of its ancestors
- Add received order tracking to ensure miners are not able to gain an
  advantage in terms of chain selection by only advertising a header
- Add several new pieces of information to the block index:
  - Header with the most cumulative work that is not known to be invalid
  - Header with the most cumulative work that is known to be invalid
  - Map of best chain candidates to aid in efficient selection
  - Map of unlinked children for more efficient linking
  - Prunable cached chain tips to significantly reduce potential search space
    during invalidation
- Introduce a compare function which determines which of two nodes should be
  considered better for the purposes of best chain selection
- Add chain tip iteration capabilities with potential filtering
- Mark all descendants invalid due to known invalid ancestor when a block is
  marked invalid
- Add ability to determine if a block can currently be validated
- Rework the chain reorganization func to work with an arbitrary target
- Modify the overall chain reorg func to reorg to the block with the most
  cumulative work that is valid in the case it is not possible to reorg to the
  target block
- Add a new MultiError type for keeping track of multiple errors in the same
  call
- Add a new ErrNoBlockData error kind
- Update all cases that only require all of the ancestor block data to be
  available to check for that condition instead of the more strict condition of
  already being fully validated
- Introduce a new processing lock separate from the overall chain lock
- Add new method and ability to independently accept block headers
- Rework the block processing func to accept blocks out of order so long as
  their header is already known valid
- Keep a cache of blocks that recently passed contextual validation checks
- Cleanup and correct various comments to match reality
- Add comprehensive tests to
exercise the new processing logic and best header tracking (for both invalid and not known invalid) --- blockchain/accept.go | 157 ---- blockchain/blockindex.go | 709 ++++++++++++++++- blockchain/blockindex_test.go | 198 +++++ blockchain/chain.go | 601 +++++++------- blockchain/chain_test.go | 18 +- blockchain/chainio.go | 13 +- blockchain/chainquery.go | 22 + blockchain/checkpoints.go | 14 + blockchain/common_test.go | 204 ++++- blockchain/difficulty.go | 1 - blockchain/error.go | 86 +- blockchain/error_test.go | 2 + blockchain/example_test.go | 5 +- blockchain/fullblocktests/generate.go | 2 +- blockchain/notifications.go | 4 +- blockchain/process.go | 544 ++++++++++++- blockchain/process_test.go | 1034 ++++++++++++++++++++++++- blockchain/prune.go | 6 +- blockchain/stakenode.go | 5 +- blockchain/stakeversion.go | 2 +- blockchain/thresholdstate.go | 25 +- blockchain/treasury.go | 2 +- blockchain/validate.go | 139 ++-- cmd/addblock/import.go | 6 +- server.go | 15 + 25 files changed, 3177 insertions(+), 637 deletions(-) delete mode 100644 blockchain/accept.go diff --git a/blockchain/accept.go b/blockchain/accept.go deleted file mode 100644 index f415068e53..0000000000 --- a/blockchain/accept.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) 2013-2016 The btcsuite developers -// Copyright (c) 2015-2020 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package blockchain - -import ( - "fmt" - - "github.com/decred/dcrd/blockchain/stake/v4" - "github.com/decred/dcrd/database/v2" - "github.com/decred/dcrd/dcrutil/v4" -) - -// maybeAcceptBlock potentially accepts a block into the block chain and, if -// accepted, returns the length of the fork the block extended. It performs -// several validation checks which depend on its position within the block chain -// before adding it. The block is expected to have already gone through -// ProcessBlock before calling this function with it. In the case the block -// extends the best chain or is now the tip of the best chain due to causing a -// reorganize, the fork length will be 0. -// -// The flags are also passed to checkBlockPositional, checkBlockContext and -// connectBestChain. See their documentation for how the flags modify their -// behavior. -// -// This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) maybeAcceptBlock(block *dcrutil.Block, flags BehaviorFlags) (int64, error) { - // This function should never be called with orphan blocks or the - // genesis block. - prevHash := &block.MsgBlock().Header.PrevBlock - prevNode := b.index.LookupNode(prevHash) - if prevNode == nil { - str := fmt.Sprintf("previous block %s is not known", prevHash) - return 0, ruleError(ErrMissingParent, str) - } - - // There is no need to validate the block if an ancestor is already - // known to be invalid. - if b.index.NodeStatus(prevNode).KnownInvalid() { - str := fmt.Sprintf("previous block %s is known to be invalid", - prevHash) - return 0, ruleError(ErrInvalidAncestorBlock, str) - } - - // The block must pass all of the validation rules which depend on having - // the headers of all ancestors available, but do not rely on having the - // full block data of all ancestors available. - err := b.checkBlockPositional(block, prevNode, flags) - if err != nil { - return 0, err - } - - // The block must pass all of the validation rules which depend on having - // the full block data for all of its ancestors available. 
- err = b.checkBlockContext(block, prevNode, flags) - if err != nil { - return 0, err - } - - // Prune stake nodes which are no longer needed before creating a new - // node. - b.pruner.pruneChainIfNeeded() - - // Insert the block into the database if it's not already there. Even - // though it is possible the block will ultimately fail to connect, it - // has already passed all proof-of-work and validity tests which means - // it would be prohibitively expensive for an attacker to fill up the - // disk with a bunch of blocks that fail to connect. This is necessary - // since it allows block download to be decoupled from the much more - // expensive connection logic. It also has some other nice properties - // such as making blocks that never become part of the main chain or - // blocks that fail to connect available for further analysis. - err = b.db.Update(func(dbTx database.Tx) error { - return dbMaybeStoreBlock(dbTx, block) - }) - if err != nil { - return 0, err - } - - // Create a new block node for the block and add it to the block index. - // The block could either be on a side chain or the main chain, but it - // starts off as a side chain regardless. - blockHeader := &block.MsgBlock().Header - newNode := newBlockNode(blockHeader, prevNode) - newNode.populateTicketInfo(stake.FindSpentTicketsInBlock(block.MsgBlock())) - newNode.status = statusDataStored - b.index.AddNode(newNode) - - // Ensure the new block index entry is written to the database. - err = b.flushBlockIndex() - if err != nil { - return 0, err - } - - // Notify the caller when the block intends to extend the main chain, - // the chain believes it is current, and the block has passed all of the - // sanity and contextual checks, such as having valid proof of work, - // valid merkle and stake roots, and only containing allowed votes and - // revocations. - // - // This allows the block to be relayed before doing the more expensive - // connection checks, because even though the block might still fail - // to connect and becomes the new main chain tip, that is quite rare in - // practice since a lot of work was expended to create a block that - // satisfies the proof of work requirement. - // - // Notice that the chain lock is not released before sending the - // notification. This is intentional and must not be changed without - // understanding why! - if b.isCurrent() && b.bestChain.Tip() == prevNode { - b.sendNotification(NTNewTipBlockChecked, block) - } - - // Fetching a stake node could enable a new DoS vector, so restrict - // this only to blocks that are recent in history. - if newNode.height < b.bestChain.Tip().height-minMemoryNodes { - newNode.stakeNode, err = b.fetchStakeNode(newNode) - if err != nil { - return 0, err - } - } - - // Grab the parent block since it is required throughout the block - // connection process. - parent, err := b.fetchBlockByNode(newNode.parent) - if err != nil { - return 0, err - } - - // Connect the passed block to the chain while respecting proper chain - // selection according to the chain with the most proof of work. This - // also handles validation of the transaction scripts. - forkLen, err := b.connectBestChain(newNode, block, parent, flags) - if err != nil { - return 0, err - } - - // Potentially update the most recently known checkpoint to this block. - b.maybeUpdateMostRecentCheckpoint(newNode) - - // Notify the caller that the new block was accepted into the block - // chain. 
The caller would typically want to react by relaying the - // inventory to other peers unless it was already relayed above - // via NTNewTipBlockChecked. - bestHeight := b.bestChain.Tip().height - b.chainLock.Unlock() - b.sendNotification(NTBlockAccepted, &BlockAcceptedNtfnsData{ - BestHeight: bestHeight, - ForkLen: forkLen, - Block: block, - }) - b.chainLock.Lock() - - return forkLen, nil -} diff --git a/blockchain/blockindex.go b/blockchain/blockindex.go index 34ddd79d06..5e2b638e76 100644 --- a/blockchain/blockindex.go +++ b/blockchain/blockindex.go @@ -45,6 +45,18 @@ const ( statusInvalidAncestor blockStatus = 1 << 3 ) +const ( + // cachedTipsPruneInterval is the amount of time to wait in between pruning + // the cache that tracks the most recent chain tips. + cachedTipsPruneInterval = time.Minute * 5 + + // cachedTipsPruneDepth is the number of blocks before the provided best + // block hint to prune cached chain tips. This value is set based on the + // target block time for the main network such that there is approximately + // one hour of chain tips cached. + cachedTipsPruneDepth = 12 +) + // HaveData returns whether the full block data is stored in the database. This // will return false for a block node where only the header is downloaded or // stored. @@ -76,6 +88,22 @@ func (status blockStatus) KnownInvalid() bool { return status&(statusValidateFailed|statusInvalidAncestor) != 0 } +// KnownInvalidAncestor returns whether the block is known to have an invalid +// ancestor. A return value of false in no way implies the block only has valid +// ancestors. Thus, this will return false for blocks with invalid ancestors +// that have not been proven invalid yet. +func (status blockStatus) KnownInvalidAncestor() bool { + return status&(statusInvalidAncestor) != 0 +} + +// KnownValidateFailed returns whether the block is known to have failed +// validation. A return value of false in no way implies the block is valid. +// Thus, this will return false for blocks that have not been proven to fail +// validation yet. +func (status blockStatus) KnownValidateFailed() bool { + return status&(statusValidateFailed) != 0 +} + // blockNode represents a block within the block chain and is primarily used to // aid in selecting the best chain to be the main chain. The main chain is // stored into the block database. @@ -130,9 +158,17 @@ type blockNode struct { // methods on blockIndex once the node has been added to the index. status blockStatus + // isFullyLinked indicates whether or not this block builds on a branch + // that has the block data for all of its ancestors and is therefore + // eligible for validation. + // + // It is protected by the block index mutex and is not stored in the + // database. + isFullyLinked bool + // stakeNode contains all the consensus information required for the // staking system. The node also caches information required to add or - // remove stake nodes, so that the stake node itself may be pruneable + // remove stake nodes, so that the stake node itself may be prunable // to save memory while maintaining high throughput efficiency for the // evaluation of sidechains. stakeNode *stake.Node @@ -142,6 +178,16 @@ type blockNode struct { // Keep track of all vote version and bits in this block. votes []stake.VoteVersionTuple + + // receivedOrderID tracks the order block data was received for the node and + // is only stored in memory. 
It is set when the block data is received, and + // the block data for all parents is also already known, as opposed to when + // the header was received in order to ensure that no additional priority in + // terms of chain selection between competing branches can be gained by + // submitting the header first. + // + // It is protected by the block index mutex. + receivedOrderID uint32 } // clearLowestOneBit clears the lowest set bit in the passed value. @@ -272,11 +318,11 @@ func (node *blockNode) lotteryIV() chainhash.Hash { return stake.CalcHash256PRNGIV(buf.Bytes()) } -// populateTicketInfo sets pruneable ticket information in the provided block +// populateTicketInfo sets prunable ticket information in the provided block // node. // // This function is NOT safe for concurrent access. It must only be called when -// initially creating a node or when protected by the chain lock. +// initially creating a node or when protected by the block index lock. func (node *blockNode) populateTicketInfo(spentTickets *stake.SpentTicketsInBlock) { node.ticketsVoted = spentTickets.VotedTickets node.ticketsRevoked = spentTickets.RevokedTickets @@ -357,6 +403,79 @@ func (node *blockNode) CalcPastMedianTime() time.Time { return time.Unix(medianTimestamp, 0) } +// compareHashesAsUint256LE compares two raw hashes treated as if they were +// little-endian uint256s in a way that is more efficient than converting them +// to big integers first. It returns 1 when a > b, -1 when a < b, and 0 when a +// == b. +func compareHashesAsUint256LE(a, b *chainhash.Hash) int { + // Find the index of the first byte that differs. + index := len(a) - 1 + for ; index >= 0 && a[index] == b[index]; index-- { + // Nothing to do. + } + if index < 0 { + return 0 + } + if a[index] > b[index] { + return 1 + } + return -1 +} + +// workSorterLess returns whether node 'a' is a worse candidate than 'b' for the +// purposes of best chain selection. +// +// The criteria for determining what constitutes a worse candidate, in order of +// priority, is as follows: +// +// 1. Less total cumulative work +// 2. Not having block data available +// 3. Receiving data later +// 4. Hash that represents less work (larger value as a little-endian uint256) +// +// This function MUST be called with the block index lock held (for reads). +func workSorterLess(a, b *blockNode) bool { + // First, sort by the total cumulative work. + // + // Blocks with less cumulative work are worse candidates for best chain + // selection. + if workCmp := a.workSum.Cmp(b.workSum); workCmp != 0 { + return workCmp < 0 + } + + // Then sort according to block data availability. + // + // Blocks that do not have all of their data available yet are worse + // candidates than those that do. They have the same priority if either + // both have their data available or neither do. + if aHasData := a.status.HaveData(); aHasData != b.status.HaveData() { + return !aHasData + } + + // Then sort according to blocks that received their data first. Note that + // the received order will be 0 for both in the case neither block has its + // data available. + // + // Blocks that receive their data later are worse candidates. + if a.receivedOrderID != b.receivedOrderID { + // Using greater than here because data that was received later will + // have a higher id. + return a.receivedOrderID > b.receivedOrderID + } + + // Finally, fall back to sorting based on the hash in the case the work, + // block data availability, and received order are all the same. 
In + // practice, the order will typically only be the same for blocks loaded + // from disk since the received order is only stored in memory, however it + // can be the same when the block data for a given header is not yet known + // as well. + // + // Note that it is more difficult to find hashes with more leading zeros + // when treated as a little-endian uint256, so larger values represent less + // work and are therefore worse candidates. + return compareHashesAsUint256LE(&a.hash, &b.hash) > 0 +} + // chainTipEntry defines an entry used to track the chain tips and is structured // such that there is a single statically-allocated field to house a tip, and a // dynamically-allocated slice for the rare case when there are multiple @@ -396,17 +515,74 @@ type blockIndex struct { modified map[*blockNode]struct{} chainTips map[int64]chainTipEntry totalTips uint64 + + // These fields are related to selecting the best chain. They are protected + // by the embedded mutex. + // + // bestHeader tracks the highest work block node in the index that is not + // known to be invalid. This is not necessarily the same as the active best + // chain, especially when block data is not yet known. However, since block + // nodes are only added to the index for block headers that pass all sanity + // and positional checks, which include checking proof of work, it does + // represent the tip of the header chain with the highest known work that + // has a reasonably high chance of becoming the best chain tip and is useful + // for things such as reporting progress and discovering the most suitable + // blocks to download. + // + // bestInvalid tracks the highest work block node that was found to be + // invalid. + // + // bestChainCandidates tracks a set of block nodes in the block index that + // are potential candidates to become the best chain. + // + // unlinkedChildrenOf maps blocks that do not yet have the full block data + // available to any immediate children that do have the full block data + // available. It is used to efficiently discover all child blocks which + // might be eligible for connection when the full block data for a block + // becomes available. + // + // nextReceivedOrderID is assigned to block nodes and incremented each time + // block data is received in order to aid in chain selection. In + // particular, it helps ensure that no additional priority in terms of chain + // selection between competing branches can be gained by submitting the + // header first. + bestHeader *blockNode + bestInvalid *blockNode + bestChainCandidates map[*blockNode]struct{} + unlinkedChildrenOf map[*blockNode][]*blockNode + nextReceivedOrderID uint32 + + // These fields are related to caching the most recent chain tips. They are + // protected by the embedded mutex. + // + // cachedTips is similar to chainTips except that it only tracks chain tips + // starting at the height specified by cachedTipsStart. It is primarily + // used to optimize the block invalidation logic. + // + // cachedTipsStart is the starting height (inclusive) for which the cached + // chain tips are tracked. + // + // cachedTipsLastPruned is the last time the cached chain tips were pruned. + cachedTips map[chainhash.Hash]*blockNode + cachedTipsStart int64 + cachedTipsLastPruned time.Time } // newBlockIndex returns a new empty instance of a block index. The index will // be dynamically populated as block nodes are loaded from the database and // manually added. 
func newBlockIndex(db database.DB) *blockIndex { + // Notice the next received ID starts at one since all entries loaded from + // disk will be zero. return &blockIndex{ - db: db, - index: make(map[chainhash.Hash]*blockNode), - modified: make(map[*blockNode]struct{}), - chainTips: make(map[int64]chainTipEntry), + db: db, + index: make(map[chainhash.Hash]*blockNode), + modified: make(map[*blockNode]struct{}), + chainTips: make(map[int64]chainTipEntry), + cachedTips: make(map[chainhash.Hash]*blockNode), + bestChainCandidates: make(map[*blockNode]struct{}), + unlinkedChildrenOf: make(map[*blockNode][]*blockNode), + nextReceivedOrderID: 1, } } @@ -438,6 +614,41 @@ func (bi *blockIndex) addNode(node *blockNode) { if node.parent != nil { bi.removeChainTip(node.parent) } + + // Update the header with most known work that is also not known to be + // invalid to this node if needed. + if !node.status.KnownInvalid() && workSorterLess(bi.bestHeader, node) { + bi.bestHeader = node + } +} + +// addNodeFromDB adds the provided node, which is expected to have come from +// storage, to the block index and also updates the unlinked block dependencies +// and best known invalid block as needed. +// +// This differs from addNode in that it performs the additional updates to the +// block index which only apply when nodes are first loaded from storage. +// +// This function is NOT safe for concurrent access and therefore must only be +// called during block index initialization. +func (bi *blockIndex) addNodeFromDB(node *blockNode) { + bi.addNode(node) + + // Add this node to the map of unlinked blocks that are potentially eligible + // for connection when it is not already fully linked, but the data for it + // is already known and its parent is not already known to be invalid. + if !node.isFullyLinked && node.status.HaveData() && node.parent != nil && + !node.parent.status.KnownInvalid() { + + unlinkedChildren := bi.unlinkedChildrenOf[node.parent] + bi.unlinkedChildrenOf[node.parent] = append(unlinkedChildren, node) + } + + // Set this node as the best known invalid block when it is invalid and has + // more work than the current one. + if node.status.KnownInvalid() { + bi.maybeUpdateBestInvalid(node) + } } // AddNode adds the provided node to the block index and marks it as modified. @@ -456,6 +667,7 @@ func (bi *blockIndex) AddNode(node *blockNode) { // This function MUST be called with the block index lock held (for writes). func (bi *blockIndex) addChainTip(tip *blockNode) { bi.totalTips++ + bi.cachedTips[tip.hash] = tip // When an entry does not already exist for the given tip height, add an // entry to the map with the tip stored in the statically-allocated field. @@ -475,6 +687,9 @@ func (bi *blockIndex) addChainTip(tip *blockNode) { // // This function MUST be called with the block index lock held (for writes). func (bi *blockIndex) removeChainTip(tip *blockNode) { + // Remove it from the cached tips as needed. + delete(bi.cachedTips, tip.hash) + // Nothing to do if no tips exist at the given height. entry, ok := bi.chainTips[tip.height] if !ok { @@ -525,6 +740,76 @@ func (bi *blockIndex) removeChainTip(tip *blockNode) { } } +// forEachChainTip calls the provided function with each chain tip known to the +// block index. Returning an error from the provided function will stop the +// iteration early and return said error from this function. +// +// This function MUST be called with the block index lock held (for reads). 
+func (bi *blockIndex) forEachChainTip(f func(tip *blockNode) error) error { + for _, tipEntry := range bi.chainTips { + if err := f(tipEntry.tip); err != nil { + return err + } + for _, tip := range tipEntry.otherTips { + if err := f(tip); err != nil { + return err + } + } + } + return nil +} + +// forEachChainTipAfterHeight calls the provided function with each chain tip +// known to the block index that has a height which is greater than the provided +// filter node. +// +// Providing a filter node also makes use of the recent chain tip cache when +// possible which typically further reduces the number of chain tips that need +// to be iterated since all old chain tips are pruned from the cache. +// +// Returning an error from the provided function will stop the iteration early +// and return said error from this function. +// +// This function MUST be called with the block index lock held (for reads). +func (bi *blockIndex) forEachChainTipAfterHeight(filter *blockNode, f func(tip *blockNode) error) error { + // Use the cached recent chain tips when the filter height permits it. + if filter.height >= bi.cachedTipsStart-1 { + for _, tip := range bi.cachedTips { + // Ignore any chain tips at the same or lower heights than the + // provided filter. + if tip.height <= filter.height { + continue + } + + if err := f(tip); err != nil { + return err + } + } + return nil + } + + // Fall back to iterating through all chain tips when the filter height is + // prior to the point the cached recent chain tips are tracking. + for tipHeight, tipEntry := range bi.chainTips { + // Ignore any chain tips at the same or lower heights than the provided + // filter. + if tipHeight <= filter.height { + continue + } + + if err := f(tipEntry.tip); err != nil { + return err + } + for _, tip := range tipEntry.otherTips { + if err := f(tip); err != nil { + return err + } + } + } + + return nil +} + // lookupNode returns the block node identified by the provided hash. It will // return nil if there is no entry for the hash. // @@ -544,6 +829,17 @@ func (bi *blockIndex) LookupNode(hash *chainhash.Hash) *blockNode { return node } +// PopulateTicketInfo sets prunable ticket information in the provided block +// node. +// +// This function is safe for concurrent access. +func (bi *blockIndex) PopulateTicketInfo(node *blockNode, spentTickets *stake.SpentTicketsInBlock) { + bi.Lock() + node.populateTicketInfo(spentTickets) + bi.modified[node] = struct{}{} + bi.Unlock() +} + // NodeStatus returns the status associated with the provided node. // // This function is safe for concurrent access. @@ -554,18 +850,38 @@ func (bi *blockIndex) NodeStatus(node *blockNode) blockStatus { return status } +// setStatusFlags sets the provided status flags for the given block node +// regardless of their previous state. It does not unset any flags. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) setStatusFlags(node *blockNode, flags blockStatus) { + origStatus := node.status + node.status |= flags + if node.status != origStatus { + bi.modified[node] = struct{}{} + } +} + // SetStatusFlags sets the provided status flags for the given block node // regardless of their previous state. It does not unset any flags. // // This function is safe for concurrent access. 
func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) { bi.Lock() + bi.setStatusFlags(node, flags) + bi.Unlock() +} + +// unsetStatusFlags unsets the provided status flags for the given block node +// regardless of their previous state. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) unsetStatusFlags(node *blockNode, flags blockStatus) { origStatus := node.status - node.status |= flags + node.status &^= flags if node.status != origStatus { bi.modified[node] = struct{}{} } - bi.Unlock() } // UnsetStatusFlags unsets the provided status flags for the given block node @@ -574,12 +890,379 @@ func (bi *blockIndex) SetStatusFlags(node *blockNode, flags blockStatus) { // This function is safe for concurrent access. func (bi *blockIndex) UnsetStatusFlags(node *blockNode, flags blockStatus) { bi.Lock() - origStatus := node.status - node.status &^= flags - if node.status != origStatus { - bi.modified[node] = struct{}{} + bi.unsetStatusFlags(node, flags) + bi.Unlock() +} + +// addBestChainCandidate adds the passed block node as a potential candidate +// for becoming the tip of the best chain. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) addBestChainCandidate(node *blockNode) { + bi.bestChainCandidates[node] = struct{}{} +} + +// pruneCachedTips removes old cached chain tips used to optimize block +// invalidation by treating the passed best known block as a reference point. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) pruneCachedTips(bestNode *blockNode) { + // No blocks exist before height 0. + height := bestNode.height - cachedTipsPruneDepth + if height <= 0 { + bi.cachedTipsLastPruned = time.Now() + return + } + + for hash, n := range bi.cachedTips { + if n.height < height { + delete(bi.cachedTips, hash) + } + } + bi.cachedTipsStart = height + bi.cachedTipsLastPruned = time.Now() +} + +// MaybePruneCachedTips periodically removes old cached chain tips used to +// optimize block invalidation by treating the passed best known block as a +// reference point. +// +// This function is safe for concurrent access. +func (bi *blockIndex) MaybePruneCachedTips(bestNode *blockNode) { + bi.Lock() + if time.Since(bi.cachedTipsLastPruned) >= cachedTipsPruneInterval { + bi.pruneCachedTips(bestNode) + } + bi.Unlock() +} + +// removeBestChainCandidate removes the passed block node from the potential +// candidates for becoming the tip of the best chain. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) removeBestChainCandidate(node *blockNode) { + delete(bi.bestChainCandidates, node) +} + +// maybeUpdateBestInvalid potentially updates the best known invalid block, as +// determined by having the most cumulative work, by comparing the passed block +// node, which must have already been determined to be invalid, against the +// current one. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) maybeUpdateBestInvalid(invalidNode *blockNode) { + if bi.bestInvalid == nil || workSorterLess(bi.bestInvalid, invalidNode) { + bi.bestInvalid = invalidNode + } +} + +// maybeUpdateBestHeaderForTip potentially updates the best known header that is +// not known to be invalid, as determined by having the most cumulative work. 
+// It works by walking backwards from the provided tip so long as those headers +// have more work than the current best header and selecting the first one that +// is not known to be invalid. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) maybeUpdateBestHeaderForTip(tip *blockNode) { + for n := tip; n != nil && workSorterLess(bi.bestHeader, n); n = n.parent { + if !n.status.KnownInvalid() { + bi.bestHeader = n + return + } + } +} + +// MarkBlockFailedValidation marks the passed node as having failed validation +// and then marks all of its descendants (if any) as having a failed ancestor. +// +// This function is safe for concurrent access. +func (bi *blockIndex) MarkBlockFailedValidation(node *blockNode) { + bi.Lock() + bi.setStatusFlags(node, statusValidateFailed) + bi.unsetStatusFlags(node, statusValidated) + bi.removeBestChainCandidate(node) + bi.maybeUpdateBestInvalid(node) + delete(bi.unlinkedChildrenOf, node) + + // Mark all descendants of the failed block as having a failed ancestor. + // + // In order to fairly efficiently determine all of the descendants of the + // block without having to iterate the entire block index, walk through all + // of the known chain tips and check if the block being invalidated is an + // ancestor of the tip. In the case it is, then all blocks between that tip + // and the failed block are descendants. As an additional optimization, a + // cache of recent tips (those after a recent height) is maintained and used + // when possible to reduce the number of potential affected chain tips that + // need to be iterated. + // + // In order to help visualize the logic, consider the following block tree + // with several branches: + // + // 100 -> 101 -> 102 -> 103 -> 104 -> 105 -> 106 -> 107 -> 108 + // \-> 101a -> 102a -> 103a -> 104a -> 105a \-> 107a + // \-> 101b ---- | \-> 105b -> 106b + // ^^ \-> 104c -> 105c -> 106c + // Failed \-> 104d -> 105d + // + // Further, assume block 102a failed validation. As can be seen, its + // descendants are 103a, 104a, 105a, 105b, 106b, 104c, 105c, 106c, 104d, and + // 105d, and the chain tips of this hypothetical block tree would be 101b, + // 105a, 105d, 106b, 106c, 107a, and 108. + // + // Since the failed block, 102a, is not an ancestor of tips 101b, 107a, or + // 108, those tips are ignored. Also notice that, of the remaining tips, + // 103a is a common ancestor to all of them, and 104a is a common ancestor + // to tips 105a and 106b. + // + // Given all of the above, the blocks would semantically be marked as having + // an invalid ancestor as follows: + // + // Tip 105a: 105a, 104a, 103a (102a is failed block, next) + // Tip 106b: 106b, 105b, 104a, 103a (102a is failed block, next) + // Tip 106c: 106c, 105c, 104c, 103a (102a is failed block, next) + // Tip 105d: 105d, 104d, 103a (102a is failed block, next) + // + // Note that it might be tempting to consider trying to optimize this to + // skip directly to the next tip once a node with a common invalid ancestor + // is found. However, that would result in incorrect behavior if a block is + // marked invalid deeper in a branch first and then an earlier block is + // later marked invalid as it would result in skipping the intermediate + // blocks thereby NOT marking them as invalid as they should be. + // + // For example, consider what happens if 105c is marked invalid prior to the + // block data for 102a becoming available and found to be invalid. 
Blocks + // 106c and 105c would already be marked invalid, and blocks 104c and 103a + // need to be marked invalid. + markDescendantsInvalid := func(node, tip *blockNode) { + // Nothing to do if the node is not an ancestor of the given chain tip. + if tip.Ancestor(node.height) != node { + return + } + + // Set this chain tip as the best known invalid block when it has more + // work than the current one. + bi.maybeUpdateBestInvalid(tip) + + // Mark everything that descends from the failed block as having an + // invalid ancestor. + for n := tip; n != node; n = n.parent { + // Skip blocks that are already known to have an invalid ancestor. + if n.status.KnownInvalidAncestor() { + continue + } + + bi.setStatusFlags(n, statusInvalidAncestor) + bi.unsetStatusFlags(n, statusValidated) + bi.removeBestChainCandidate(n) + + // Remove any children that depend on the failed block from the set + // of unlinked blocks accordingly since they are no longer eligible + // for connection even if the full block data for a block becomes + // available. + delete(bi.unlinkedChildrenOf, n) + } + } + + // Chain tips at the same or lower heights than the failed block can't + // possibly be descendants of it, so use it as the lower height bound filter + // when iterating chain tips. Note that this will make use of the cache of + // recent tips when possible. + bi.forEachChainTipAfterHeight(node, func(tip *blockNode) error { + markDescendantsInvalid(node, tip) + return nil + }) + + // Update the best header if the current one is now invalid which will be + // the case when the best header is a descendant of the failed block. + if bi.bestHeader.status.KnownInvalid() { + // Use the first ancestor of the failed block that is not known to be + // invalid as the lower bound for the best header. This will typically + // be the parent of the failed block, but it might be some more distant + // ancestor when performing manual invalidation. + n := node.parent + for n != nil && n.status.KnownInvalid() { + n = n.parent + } + bi.bestHeader = n + + // Scour the block tree to find a new best header. + // + // Note that all chain tips must be iterated versus filtering based on + // the current best header height because, while uncommon, it is + // possible for lower heights to have more work. + bi.forEachChainTip(func(tip *blockNode) error { + // Skip chain tips that are descendants of the failed block since + // none of the intermediate headers are eligible to become the best + // header given they all have an invalid ancestor. + if tip.Ancestor(node.height) == node { + return nil + } + + bi.maybeUpdateBestHeaderForTip(tip) + return nil + }) + } + bi.Unlock() +} + +// canValidate returns whether or not the block associated with the provided +// node can be validated. In order for a block to be validated, both it, and +// all of its ancestors, must have the block data available. +// +// This function MUST be called with the block index lock held (for reads). +func (bi *blockIndex) canValidate(node *blockNode) bool { + return node.isFullyLinked && node.status.HaveData() +} + +// CanValidate returns whether or not the block associated with the provided +// node can be validated. In order for a block to be validated, both it, and +// all of its ancestors, must have the block data available. +// +// This function is safe for concurrent access. 
+func (bi *blockIndex) CanValidate(node *blockNode) bool { + bi.RLock() + canValidate := bi.canValidate(node) + bi.RUnlock() + return canValidate +} + +// removeLessWorkCandidates removes all potential best chain candidates that +// have less work than the provided node, which is typically a newly connected +// best chain tip. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) removeLessWorkCandidates(node *blockNode) { + // Remove all best chain candidates that have less work than the passed + // node. + for n := range bi.bestChainCandidates { + if n.workSum.Cmp(node.workSum) < 0 { + bi.removeBestChainCandidate(n) + } + } + + // The best chain candidates must always contain at least the current best + // chain tip. Assert this assumption is true. + if len(bi.bestChainCandidates) == 0 { + panicf("best chain candidates list is empty after removing less work " + + "candidates") + } +} + +// RemoveLessWorkCandidates removes all potential best chain candidates that +// have less work than the provided node, which is typically a newly connected +// best chain tip. +// +// This function is safe for concurrent access. +func (bi *blockIndex) RemoveLessWorkCandidates(node *blockNode) { + bi.Lock() + bi.removeLessWorkCandidates(node) + bi.Unlock() +} + +// linkBlockData marks the provided block as fully linked to indicate that both +// it and all of its ancestors have their data available and then determines if +// there are any unlinked blocks which depend on the passed block and links +// those as well until there are no more. It returns a list of blocks that were +// linked. +// +// It also accounts for the order that the blocks are linked and potentially +// adds the newly-linked blocks as best chain candidates if they have more +// cumulative work than the current best chain tip. +// +// This function MUST be called with the block index lock held (for writes). +func (bi *blockIndex) linkBlockData(node, tip *blockNode) []*blockNode { + // Start with processing at least the passed node. + // + // Note that no additional space is preallocated here because it is fairly + // rare (after the initial sync) for there to be more than the single block + // being linked and thus it will typically remain on the stack and avoid an + // allocation. + linkedNodes := []*blockNode{node} + for nodeIndex := 0; nodeIndex < len(linkedNodes); nodeIndex++ { + linkedNode := linkedNodes[nodeIndex] + + // Mark the block as fully linked to indicate that both it and all of + // its ancestors have their data available. + linkedNode.isFullyLinked = true + + // Keep track of the order in which the block data was received to + // ensure miners gain no advantage by advertising the header first. + linkedNode.receivedOrderID = bi.nextReceivedOrderID + bi.nextReceivedOrderID++ + + // The block is now a candidate to potentially become the best chain if + // it has the same or more work than the current best chain tip. + if linkedNode.workSum.Cmp(tip.workSum) >= 0 { + bi.addBestChainCandidate(linkedNode) + } + + // Add any children of the block that was just linked to the list to be + // linked and remove them from the set of unlinked blocks accordingly. + // There will typically only be zero or one, but it could be more if + // multiple solutions are mined and broadcast around the same time. + unlinkedChildren := bi.unlinkedChildrenOf[linkedNode] + if len(unlinkedChildren) > 0 { + linkedNodes = append(linkedNodes, unlinkedChildren...) 
+			delete(bi.unlinkedChildrenOf, linkedNode)
+		}
+	}
+
+	return linkedNodes
+}
+
+// AcceptBlockData updates the block index state to account for the full data
+// for a block becoming available.  For example, blocks that are currently not
+// eligible for validation due to either not having the block data itself or not
+// having all ancestor data available might become eligible for validation.  It
+// returns a list of all blocks that were linked, if any.
+//
+// NOTE: It is up to the caller to only call this function when the data was not
+// previously available.
+//
+// This function is safe for concurrent access.
+func (bi *blockIndex) AcceptBlockData(node, tip *blockNode) []*blockNode {
+	// The passed block, and any blocks that also have their data available, are
+	// now eligible for validation when the parent of the passed block is also
+	// eligible (or has already been validated).
+	var linkedBlocks []*blockNode
+	bi.Lock()
+	if bi.canValidate(node.parent) {
+		linkedBlocks = bi.linkBlockData(node, tip)
+	} else if !node.parent.status.KnownInvalid() {
+		unlinkedChildren := bi.unlinkedChildrenOf[node.parent]
+		bi.unlinkedChildrenOf[node.parent] = append(unlinkedChildren, node)
 	}
 	bi.Unlock()
+	return linkedBlocks
+}
+
+// FindBestChainCandidate searches the block index for the best potentially
+// valid chain that contains the most cumulative work and returns its tip.  In
+// order to be potentially valid, all of the block data leading up to a block
+// must have already been received and must not be part of a chain that is
+// already known to be invalid.  A chain that has not yet been fully validated,
+// such as a side chain that has never been the main chain, is neither known to
+// be valid nor invalid, so it is possible that the returned candidate will form
+// a chain that is invalid.
+//
+// This function is safe for concurrent access.
+func (bi *blockIndex) FindBestChainCandidate() *blockNode {
+	bi.RLock()
+	defer bi.RUnlock()
+
+	// Find the best candidate among the potential candidates as determined by
+	// having the highest cumulative work with fallback to the criteria
+	// described by the invoked function in the case of equal work.
+	//
+	// Note that the best candidate should never actually be nil in practice
+	// since the current best tip is always a candidate.
+	var bestCandidate *blockNode
+	for node := range bi.bestChainCandidates {
+		if bestCandidate == nil || workSorterLess(bestCandidate, node) {
+			bestCandidate = node
+		}
+	}
+	return bestCandidate
 }
 
 // flush writes all of the modified block nodes to the database and clears the
diff --git a/blockchain/blockindex_test.go b/blockchain/blockindex_test.go
index 38d0f7b634..26ffaf9d09 100644
--- a/blockchain/blockindex_test.go
+++ b/blockchain/blockindex_test.go
@@ -5,6 +5,7 @@ package blockchain
 
 import (
+	"math/big"
 	"math/rand"
 	"reflect"
 	"testing"
@@ -296,3 +297,200 @@ func TestAncestorSkipList(t *testing.T) {
 		}
 	}
 }
+
+// TestWorkSorterCompare ensures the work sorter less and hash comparison
+// functions work as intended including multiple keys.
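
For orientation while reading the table-driven cases below, the expected ordering behaves like a multi-key comparison: more cumulative work wins, then having the block data, then an earlier received order, and finally the lower hash (compared as a little-endian uint256) wins ties. The sketch below is inferred from the test expectations rather than copied from the implementation, and the relative priority of the data-availability and received-order keys is an assumption since no case varies both at once.

    package main

    import (
        "fmt"
        "math/big"
    )

    // sketchNode is a stand-in for blockNode with only the fields the
    // comparison needs.
    type sketchNode struct {
        workSum  *big.Int
        haveData bool   // statusDataStored in the real block index
        orderID  uint32 // order in which the block data was received
    }

    // sketchLess mirrors the ordering the test cases expect.  hashCmp is the
    // result of comparing a's hash to b's hash as little-endian uint256s.
    func sketchLess(a, b sketchNode, hashCmp int) bool {
        if c := a.workSum.Cmp(b.workSum); c != 0 {
            return c < 0
        }
        if a.haveData != b.haveData {
            return !a.haveData // the node with data sorts higher
        }
        if a.orderID != b.orderID {
            return a.orderID > b.orderID // earlier received sorts higher
        }
        return hashCmp > 0 // the lower hash sorts higher
    }

    func main() {
        a := sketchNode{workSum: big.NewInt(2), haveData: true}
        b := sketchNode{workSum: big.NewInt(2), haveData: false}
        fmt.Println(sketchLess(a, b, -1)) // false: a has its data, so it wins
    }
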
+func TestWorkSorterCompare(t *testing.T) { + lowerHash := mustParseHash("000000000000c41019872ff7db8fd2e9bfa05f42d3f8fee8e895e8c1e5b8dcba") + higherHash := mustParseHash("000000000000d41019872ff7db8fd2e9bfa05f42d3f8fee8e895e8c1e5b8dcba") + tests := []struct { + name string // test description + nodeA *blockNode // first node to compare + nodeB *blockNode // second node to compare + wantCmp int // expected result of the hash comparison + wantLess bool // expected result of the less comparison + + }{{ + name: "exactly equal, both data", + nodeA: &blockNode{ + hash: *mustParseHash("0000000000000000000000000000000000000000000000000000000000000000"), + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *mustParseHash("0000000000000000000000000000000000000000000000000000000000000000"), + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + wantCmp: 0, + wantLess: false, + }, { + name: "exactly equal, no data", + nodeA: &blockNode{ + hash: *mustParseHash("0000000000000000000000000000000000000000000000000000000000000000"), + workSum: big.NewInt(2), + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *mustParseHash("0000000000000000000000000000000000000000000000000000000000000000"), + workSum: big.NewInt(2), + receivedOrderID: 0, + }, + wantCmp: 0, + wantLess: false, + }, { + name: "a has more cumulative work, same order, higher hash, a has data", + nodeA: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(4), + status: statusDataStored, + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + receivedOrderID: 0, + }, + wantCmp: 1, + wantLess: false, + }, { + name: "a has less cumulative work, same order, lower hash, b has data", + nodeA: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(4), + status: statusDataStored, + receivedOrderID: 0, + }, + wantCmp: -1, + wantLess: true, + }, { + name: "a has same cumulative work, same order, lower hash, a has data", + nodeA: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(2), + receivedOrderID: 0, + }, + wantCmp: -1, + wantLess: false, + }, { + name: "a has same cumulative work, same order, higher hash, b has data", + nodeA: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(2), + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + wantCmp: 1, + wantLess: true, + }, { + name: "a has same cumulative work, higher order, lower hash, both data", + nodeA: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 1, + }, + nodeB: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + wantCmp: -1, + wantLess: true, + }, { + name: "a has same cumulative work, lower order, lower hash, both data", + nodeA: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 1, + }, + nodeB: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 2, + }, + wantCmp: -1, + wantLess: false, + }, { + name: "a has same cumulative work, same order, lower hash, no data", + nodeA: &blockNode{ + hash: *lowerHash, + workSum: 
big.NewInt(2), + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(2), + receivedOrderID: 0, + }, + wantCmp: -1, + wantLess: false, + }, { + name: "a has same cumulative work, same order, lower hash, both data", + nodeA: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + wantCmp: -1, + wantLess: false, + }, { + name: "a has same cumulative work, same order, higher hash, both data", + nodeA: &blockNode{ + hash: *higherHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + nodeB: &blockNode{ + hash: *lowerHash, + workSum: big.NewInt(2), + status: statusDataStored, + receivedOrderID: 0, + }, + wantCmp: 1, + wantLess: true, + }} + + for _, test := range tests { + gotLess := workSorterLess(test.nodeA, test.nodeB) + if gotLess != test.wantLess { + t.Fatalf("%q: unexpected result -- got %v, want %v", test.name, + gotLess, test.wantLess) + } + + gotCmp := compareHashesAsUint256LE(&test.nodeA.hash, &test.nodeB.hash) + if gotCmp != test.wantCmp { + t.Fatalf("%q: unexpected result -- got %v, want %v", test.name, + gotCmp, test.wantCmp) + } + } +} diff --git a/blockchain/chain.go b/blockchain/chain.go index b418b03606..1fed6ca650 100644 --- a/blockchain/chain.go +++ b/blockchain/chain.go @@ -28,13 +28,6 @@ import ( ) const ( - // minMemoryNodes is the minimum number of consecutive nodes needed - // in memory in order to perform all necessary validation. It is used - // to determine when it's safe to prune nodes from memory without - // causing constant dynamic reloading. This value should be larger than - // that for minMemoryStakeNodes. - minMemoryNodes = 2880 - // minMemoryStakeNodes is the maximum height to keep stake nodes // in memory for in their respective nodes. Beyond this height, // they will need to be manually recalculated. This value should @@ -47,6 +40,10 @@ const ( // be made network independent and calculated based on the parameters, but // that would result in larger caches than desired for other networks. recentBlockCacheSize = 12 + + // contextCheckCacheSize is the number of recent successful contextual block + // check results to keep in memory. + contextCheckCacheSize = 25 ) // panicf is a convenience function that formats according to the given format @@ -145,11 +142,17 @@ type BlockChain struct { notifications NotificationCallback sigCache *txscript.SigCache indexManager indexers.IndexManager + interrupt <-chan struct{} // subsidyCache is the cache that provides quick lookup of subsidy // values. subsidyCache *standalone.SubsidyCache + // processLock protects concurrent access to overall chain processing + // independent from the chain lock which is periodically released to + // send notifications. + processLock sync.Mutex + // chainLock protects concurrent access to the vast majority of the // fields in this struct below this point. chainLock sync.RWMutex @@ -170,9 +173,17 @@ type BlockChain struct { index *blockIndex bestChain *chainView - // recentBlocks houses a block cache to facilitate faster chain reorgs and - // more efficient recent block serving. It is protected by its own lock. - recentBlocks lru.KVCache + // These fields house caches for blocks to facilitate faster chain reorgs, + // block connection, and more efficient recent block serving. 
+ // + // recentBlocks houses a block cache of block data that has been seen + // recently. + // + // recentContextChecks tracks recent blocks that have successfully passed + // all contextual checks and is primarily used as an optimization to avoid + // running the checks again when possible. + recentBlocks lru.KVCache + recentContextChecks lru.Cache // These fields house a cached view that represents a block that votes // against its parent and therefore contains all changes as a result @@ -205,9 +216,6 @@ type BlockChain struct { // The following caches are used to efficiently keep track of the // current deployment threshold state of each rule change deployment. // - // This information is stored in the database so it can be quickly - // reconstructed on load. - // // deploymentCaches caches the current deployment threshold state for // blocks in each of the actively defined deployments. deploymentCaches map[uint32][]thresholdStateCache @@ -249,13 +257,8 @@ type StakeVersions struct { // GetStakeVersions returns a cooked array of StakeVersions. We do this in // order to not bloat memory by returning raw blocks. func (b *BlockChain) GetStakeVersions(hash *chainhash.Hash, count int32) ([]StakeVersions, error) { - // NOTE: The requirement for the node being fully validated here is strictly - // stronger than what is actually required. In reality, all that is needed - // is for the block data for the node and all of its ancestors to be - // available, but there is not currently any tracking to be able to - // efficiently determine that state. startNode := b.index.LookupNode(hash) - if startNode == nil || !b.index.NodeStatus(startNode).HasValidated() { + if startNode == nil || !b.index.CanValidate(startNode) { return nil, unknownBlockError(hash) } @@ -660,6 +663,7 @@ func (b *BlockChain) connectBlock(node *blockNode, block, parent *dcrutil.Block, // This node is now the end of the best chain. b.bestChain.SetTip(node) + b.index.MaybePruneCachedTips(node) // Update the state for the best block. Notice how this replaces the // entire struct instead of updating the existing one. This effectively @@ -688,7 +692,7 @@ func (b *BlockChain) connectBlock(node *blockNode, block, parent *dcrutil.Block, return err } - // Notify of spent and missed tickets + // Notify of spent and missed tickets. b.sendNotification(NTSpentAndMissedTickets, &TicketNotificationsData{ Hash: node.hash, @@ -696,16 +700,17 @@ func (b *BlockChain) connectBlock(node *blockNode, block, parent *dcrutil.Block, StakeDifficulty: nextStakeDiff, TicketsSpent: node.stakeNode.SpentByBlock(), TicketsMissed: node.stakeNode.MissedByBlock(), - TicketsNew: []chainhash.Hash{}, + TicketsNew: nil, }) - // Notify of new tickets + + // Notify of new tickets. b.sendNotification(NTNewTickets, &TicketNotificationsData{ Hash: node.hash, Height: node.height, StakeDifficulty: nextStakeDiff, - TicketsSpent: []chainhash.Hash{}, - TicketsMissed: []chainhash.Hash{}, + TicketsSpent: nil, + TicketsMissed: nil, TicketsNew: node.stakeNode.NewTickets(), }) } @@ -929,66 +934,56 @@ func (b *BlockChain) loadOrCreateFilter(block *dcrutil.Block, view *UtxoViewpoin return filter, nil } -// reorganizeChainInternal attempts to reorganize the block chain to the -// provided tip without attempting to undo failed reorgs. -// -// Since reorganizing to a new chain tip might involve validating blocks that -// have not previously been validated, or attempting to reorganize to a branch -// that is already known to be invalid, it possible for the reorganize to fail. 
-// When that is the case, this function will return the error without attempting -// to undo what has already been reorganized to that point. That means the best -// chain tip will be set to some intermediate block along the reorg path and -// will not actually be the best chain. This is acceptable because this -// function is only intended to be called from the reorganizeChain function -// which handles reorg failures by reorganizing back to the known good best -// chain tip. -// -// A reorg entails disconnecting all blocks from the current best chain tip back -// to the fork point between it and the provided target tip in reverse order -// (think popping them off the end of the chain) and then connecting the blocks -// on the new branch in forwards order (think pushing them onto the end of the -// chain). +// reorganizeChainInternal attempts to reorganize the block chain to the given +// target without attempting to undo failed reorgs. +// +// The actions needed to reorganize the chain to the given target fall into +// three main cases: +// +// 1. The target is a descendant of the current best chain tip (most common) +// 2. The target is an ancestor of the current best chain tip (least common) +// 3. The target is neither of the above which means it is on another branch +// and that branch forks from the main chain at some ancestor of the current +// best chain tip +// +// For the first case, the blocks between the current best chain tip and the +// given target need to be connected (think pushed onto the end of the chain). +// +// For the second case, the blocks between the current best chain tip and the +// given target need to be disconnected in reverse order (think popped off the +// end of chain). +// +// The third case is essentially a combination of the first two. Namely, the +// blocks between the current best chain tip and the fork point between it and +// the given target need to be disconnected in reverse order and then the blocks +// between that fork point and the given target (aka the blocks that form the +// new branch) need to be connected in forwards order. // // This function may modify the validation state of nodes in the block index // without flushing in the case the chain is not able to reorganize due to a // block failing to connect. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { - // Find the fork point adding each block to a slice of blocks to attach - // below once the current best chain has been disconnected. They are added - // to the slice from back to front so that so they are attached in the - // appropriate order when iterating the slice later. - // - // In the case a known invalid block is detected while constructing this - // list, mark all of its descendants as having an invalid ancestor and - // prevent the reorganize. 
- fork := b.bestChain.FindFork(targetTip) - attachNodes := make([]*blockNode, targetTip.height-fork.height) - for n := targetTip; n != nil && n != fork; n = n.parent { - if b.index.NodeStatus(n).KnownInvalid() { - for _, dn := range attachNodes[n.height-fork.height:] { - b.index.SetStatusFlags(dn, statusInvalidAncestor) - } - - str := fmt.Sprintf("block %s is known to be invalid or a "+ - "descendant of an invalid block", n.hash) - return ruleError(ErrKnownInvalidBlock, str) - } - - attachNodes[n.height-fork.height-1] = n - } +func (b *BlockChain) reorganizeChainInternal(target *blockNode) error { + // Find the fork point between the current tip and target block. + tip := b.bestChain.Tip() + fork := b.bestChain.FindFork(target) // Disconnect all of the blocks back to the point of the fork. This entails // loading the blocks and their associated spent txos from the database and // using that information to unspend all of the spent txos and remove the // utxos created by the blocks. In addition, if a block votes against its // parent, the regular transactions are reconnected. - tip := b.bestChain.Tip() view := NewUtxoViewpoint(b) view.SetBestHash(&tip.hash) var nextBlockToDetach *dcrutil.Block for tip != nil && tip != fork { + select { + case <-b.interrupt: + return errInterruptRequested + default: + } + // Grab the block to detach based on the node. Use the fact that the // blocks are being detached in reverse order, so the parent of the // current block being detached is the next one being detached. @@ -1025,8 +1020,7 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { // Load all of the spent txos for the block from the spend journal. var stxos []spentTxOut err = b.db.View(func(dbTx database.Tx) error { - stxos, err = dbFetchSpendJournalEntry(dbTx, block, - isTreasuryEnabled) + stxos, err = dbFetchSpendJournalEntry(dbTx, block, isTreasuryEnabled) return err }) if err != nil { @@ -1036,8 +1030,7 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { // Update the view to unspend all of the spent txos and remove the utxos // created by the block. Also, if the block votes against its parent, // reconnect all of the regular transactions. - err = view.disconnectBlock(b.db, block, parent, stxos, - isTreasuryEnabled) + err = view.disconnectBlock(b.db, block, parent, stxos, isTreasuryEnabled) if err != nil { return err } @@ -1048,10 +1041,21 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { return err } + log.Tracef("Disconnected block %s (height %d) from main chain", n.hash, + n.height) + tip = n.parent } - // Load the fork block if there are blocks to attach and it's not already + // Determine the blocks to attach after the fork point. Each block is added + // to the slice from back to front so they are attached in the appropriate + // order when iterating the slice below. + attachNodes := make([]*blockNode, target.height-fork.height) + for n := target; n != nil && n != fork; n = n.parent { + attachNodes[n.height-fork.height-1] = n + } + + // Load the fork block if there are blocks to attach and its not already // loaded which will be the case if no nodes were detached. The fork block // is used as the parent to the first node to be attached below. forkBlock := nextBlockToDetach @@ -1069,6 +1073,12 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { // relevant information related to the current chain state. 
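
The index arithmetic used to build attachNodes above deserves a note: walking parents from the target toward the fork fills the slice from the back, so the later forward iteration attaches blocks in ascending height order without a separate reverse step. A toy sketch with integer heights standing in for block nodes (the heights are made up for illustration):

    package main

    import "fmt"

    func main() {
        const forkHeight, targetHeight = 100, 103

        // Mirror of the back-to-front fill: the walk visits 103, 102, 101 and
        // stores them at indices 2, 1, 0.
        attach := make([]int, targetHeight-forkHeight)
        for h := targetHeight; h != forkHeight; h-- {
            attach[h-forkHeight-1] = h
        }
        fmt.Println(attach) // [101 102 103]
    }
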
var prevBlockAttached *dcrutil.Block for i, n := range attachNodes { + select { + case <-b.interrupt: + return errInterruptRequested + default: + } + // Grab the block to attach based on the node. Use the fact that the // parent of the block is either the fork point for the first node being // attached or the previous one that was attached for subsequent blocks @@ -1096,11 +1106,11 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { return err } - // Skip validation if the block is already known to be valid. However, + // Skip validation if the block has already been validated. However, // the utxo view still needs to be updated and the stxos and header // commitment data are still needed. - stxos := make([]spentTxOut, 0, countSpentOutputs(block, - isTreasuryEnabled)) + numSpentOutputs := countSpentOutputs(block, isTreasuryEnabled) + stxos := make([]spentTxOut, 0, numSpentOutputs) var hdrCommitments headerCommitmentData if b.index.NodeStatus(n).HasValidated() { // Update the view to mark all utxos referenced by the block as @@ -1108,7 +1118,7 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { // In the case the block votes against the parent, also disconnect // all of the regular transactions in the parent block. Finally, // provide an stxo slice so the spent txout details are generated. - err = view.connectBlock(b.db, block, parent, &stxos, + err := view.connectBlock(b.db, block, parent, &stxos, isTreasuryEnabled) if err != nil { return err @@ -1120,6 +1130,20 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { } hdrCommitments.filter = filter } else { + // The block must pass all of the validation rules which depend on + // having the full block data for all of its ancestors available. + if err := b.checkBlockContext(block, n.parent, BFNone); err != nil { + var rerr RuleError + if errors.As(err, &rerr) { + b.index.MarkBlockFailedValidation(n) + } + return err + } + + // Mark the block as recently checked to avoid checking it again + // when processing. + b.recentContextChecks.Add(n.hash) + // In the case the block is determined to be invalid due to a rule // violation, mark it as invalid and mark all of its descendants as // having an invalid ancestor. @@ -1128,10 +1152,7 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { if err != nil { var rerr RuleError if errors.As(err, &rerr) { - b.index.SetStatusFlags(n, statusValidateFailed) - for _, dn := range attachNodes[i+1:] { - b.index.SetStatusFlags(dn, statusInvalidAncestor) - } + b.index.MarkBlockFailedValidation(n) } return err } @@ -1143,94 +1164,147 @@ func (b *BlockChain) reorganizeChainInternal(targetTip *blockNode) error { if err != nil { return err } + + log.Tracef("Connected block %s (height %d) to main chain", n.hash, + n.height) + + // Remove any best chain candidates that have less work than the new + // tip. + b.index.RemoveLessWorkCandidates(n) } return nil } -// reorganizeChain attempts to reorganize the block chain to the provided tip. -// The tip must have already been determined to be on another branch by the -// caller. Upon return, the chain will be fully reorganized to the provided tip -// or an appropriate error will be returned and the chain will remain at the -// same tip it was prior to calling this function. +// reorganizeChain attempts to reorganize the block chain to the given target +// with additional handling for failed reorgs. 
// -// Reorganizing the chain entails disconnecting all blocks from the current best -// chain tip back to the fork point between it and the provided target tip in -// reverse order (think popping them off the end of the chain) and then -// connecting the blocks on the new branch in forwards order (think pushing them -// onto the end of the chain). +// When the given target is already known to be invalid, or is determined to be +// invalid during the process, the chain will be reorganized to the best valid +// block as determined by having the most cumulative proof of work instead. +// +// This is most commonly called with a target that is a descendant of the +// current best chain. However, it supports arbitrary targets. +// +// See reorganizeChainInternal for more details on the various actions needed to +// reorganize the chain. // // This function may modify the validation state of nodes in the block index -// without flushing in the case the chain is not able to reorganize due to a -// block failing to connect. +// without flushing. // // This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) reorganizeChain(targetTip *blockNode) error { - // Nothing to do if there is no target tip or the target tip is already the - // current tip. - if targetTip == nil { - return nil - } - origTip := b.bestChain.Tip() - if origTip == targetTip { +func (b *BlockChain) reorganizeChain(target *blockNode) error { + // Nothing to do if there is no target specified or it is already the + // current best chain tip. + tip := b.bestChain.Tip() + if target == nil || tip == target { return nil } + origTip := tip - // Send a notification announcing the start of the chain reorganization. - b.chainLock.Unlock() - b.sendNotification(NTChainReorgStarted, nil) - b.chainLock.Lock() - - defer func() { - // Send a notification announcing the end of the chain reorganization. - b.chainLock.Unlock() - b.sendNotification(NTChainReorgDone, nil) - b.chainLock.Lock() - }() - - // Attempt to reorganize to the chain to the new tip. In the case it fails, - // reorganize back to the original tip. There is no way to recover if the - // chain fails to reorganize back to the original tip since something is - // very wrong if a chain tip that was already known to be valid fails to - // reconnect. - // - // NOTE: The failure handling makes an assumption that a block in the path - // between the fork point and original tip are not somehow invalidated in - // between the point a reorged chain fails to connect and the reorg back to - // the original tip. That is a safe assumption with the current code due to - // all modifications which mark blocks invalid being performed under the - // chain lock, however, this will need to be reworked if that assumption is - // violated. - fork := b.bestChain.FindFork(targetTip) - reorgErr := b.reorganizeChainInternal(targetTip) - if reorgErr != nil { - if err := b.reorganizeChainInternal(origTip); err != nil { - panicf("failed to reorganize back to known good chain tip %s "+ - "(height %d): %v -- probable database corruption", origTip.hash, - origTip.height, err) + var sentReorgingNtfn bool + var reorgErrs []error + for ; target != nil && tip != target; tip = b.bestChain.Tip() { + select { + case <-b.interrupt: + return errInterruptRequested + default: } - return reorgErr - } + // Determine if the chain is being reorganized to a competing branch. + // This is the case when the current tip is not an ancestor of the + // target tip. 
+ if !sentReorgingNtfn && target.Ancestor(tip.height) != tip { + // Send a notification announcing the start of the chain + // reorganization. + // + // Notice that the chain lock is not released before sending the + // notification. This is intentional and must not be changed + // without understanding why! + b.sendNotification(NTChainReorgStarted, nil) + sentReorgingNtfn = true + + defer func() { + // Send a notification announcing the end of the chain + // reorganization. + // + // Notice that the chain lock is not released before sending the + // notification. This is intentional and must not be changed + // without understanding why! + b.sendNotification(NTChainReorgDone, nil) + }() + } - // Send a notification that a blockchain reorganization took place. - reorgData := &ReorganizationNtfnsData{origTip.hash, origTip.height, - targetTip.hash, targetTip.height} - b.chainLock.Unlock() - b.sendNotification(NTReorganization, reorgData) - b.chainLock.Lock() + // Attempt to reorganize the chain to the new tip. In the case it + // fails, attempt to reorganize to the best valid block with the most + // cumulative proof of work instead. + err := b.reorganizeChainInternal(target) + if err != nil { + // Shutting down. + if errors.Is(err, errInterruptRequested) { + return err + } - // Log the point where the chain forked and old and new best chain tips. - if fork != nil { - log.Infof("REORGANIZE: Chain forks at %v (height %v)", fork.hash, - fork.height) + // Typically, if a reorganize fails, there will only be a single + // error due to the block that caused the failure. However, it is + // possible that several candidate branches might fail. Thus, track + // them all so they can potentially be converted to a multi error + // later if needed. + reorgErrs = append(reorgErrs, err) + + // Determine a new best candidate since the reorg failed. This + // should realistically always result in a different target than the + // current one unless there is some type of unrecoverable error, + // such as a disk failure. In that case, bail out to avoid + // attempting to do the same reorg over and over. + newTarget := b.index.FindBestChainCandidate() + if newTarget == target { + break + } + target = newTarget + } } - log.Infof("REORGANIZE: Old best chain tip was %v (height %v)", - &origTip.hash, origTip.height) - log.Infof("REORGANIZE: New best chain tip is %v (height %v)", - targetTip.hash, targetTip.height) - return nil + // Log chain reorganizations and send a notification as needed. Notice that + // the tip is reset to whatever the best chain actually is here versus using + // the one from above since it might not match reality if there were errors + // while reorganizing. + newTip := b.bestChain.Tip() + if sentReorgingNtfn && newTip != origTip { + // Send a notification that a chain reorganization took place. + // + // Notice that the chain lock is not released before sending the + // notification. This is intentional and must not be changed without + // understanding why! + b.sendNotification(NTReorganization, &ReorganizationNtfnsData{ + OldHash: origTip.hash, + OldHeight: origTip.height, + NewHash: newTip.hash, + NewHeight: newTip.height, + }) + + // Log the point where the chain forked and old and new best chain tips. 
+ if fork := b.bestChain.FindFork(origTip); fork != nil { + log.Infof("REORGANIZE: Chain forks at %v (height %v)", fork.hash, + fork.height) + } + log.Infof("REORGANIZE: Old best chain tip was %v (height %v)", + &origTip.hash, origTip.height) + log.Infof("REORGANIZE: New best chain tip is %v (height %v)", + &newTip.hash, newTip.height) + } + + // Determine if there were any reorg errors and either extract and return + // the error directly when there was only a single error or return them all + // as a multi error when there are more. + var finalErr error + switch { + case len(reorgErrs) == 1: + finalErr = reorgErrs[0] + case len(reorgErrs) > 1: + finalErr = MultiError(reorgErrs) + } + return finalErr } // forceHeadReorganization forces a reorganization of the block chain to the @@ -1242,14 +1316,16 @@ func (b *BlockChain) reorganizeChain(targetTip *blockNode) error { // // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) forceHeadReorganization(formerBest chainhash.Hash, newBest chainhash.Hash) error { - if formerBest.IsEqual(&newBest) { - return fmt.Errorf("can't reorganize to the same block") + // Don't try to reorganize to the same block. + if formerBest == newBest { + str := "tried to force reorg to the same block" + return ruleError(ErrForceReorgSameBlock, str) } - formerBestNode := b.bestChain.Tip() - // We can't reorganize the chain unless our head block matches up with - // b.bestChain. - if !formerBestNode.hash.IsEqual(&formerBest) { + // Don't allow a reorganize when the former best is not the current best + // chain tip. + formerBestNode := b.bestChain.Tip() + if formerBestNode.hash != formerBest { str := "tried to force reorg on wrong chain" return ruleError(ErrForceReorgWrongChain, str) } @@ -1268,6 +1344,11 @@ func (b *BlockChain) forceHeadReorganization(formerBest chainhash.Hash, newBest return ruleError(ErrKnownInvalidBlock, str) } + // Don't try to reorganize to a block when its data is not available. + if !newBestNodeStatus.HaveData() { + return ruleError(ErrNoBlockData, "block data is not available") + } + // Reorganize the chain and flush any potential unsaved changes to the // block index to the database. It is safe to ignore any flushing // errors here as the only time the index will be modified is if the @@ -1283,205 +1364,62 @@ func (b *BlockChain) forceHeadReorganization(formerBest chainhash.Hash, newBest // // This function is safe for concurrent access. func (b *BlockChain) ForceHeadReorganization(formerBest chainhash.Hash, newBest chainhash.Hash) error { + b.processLock.Lock() b.chainLock.Lock() err := b.forceHeadReorganization(formerBest, newBest) b.chainLock.Unlock() + b.processLock.Unlock() return err } // flushBlockIndex populates any ticket data that has been pruned from modified // block nodes, writes those nodes to the database and clears the set of // modified nodes if it succeeds. +// +// This function MUST be called with the chain lock held (for writes). func (b *BlockChain) flushBlockIndex() error { + // Ensure that any ticket information that has been pruned is reloaded + // before flushing modified nodes. + // + // Note that a separate slice is created for the modified nodes that + // potentially need the ticket information reloaded as opposed to doing it + // directly in the loop over the modified nodes because reloading the ticket + // information is shared code that locks the index to mark the entry + // modified. Therefore, it has to be called without the index lock. 
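
Stepping back to the error aggregation in reorganizeChain above: the MultiError(reorgErrs) conversion implies MultiError is backed by a slice of errors, so a caller that cares about a particular failure can examine each element individually. A hedged caller-side sketch follows; handleReorgErr is a hypothetical helper and ErrMissingTxOut is only an example kind.

    // handleReorgErr shows one way a failed reorganize result might be
    // unpacked.  A plain errors.Is on the aggregate would not inspect the
    // individual branch failures unless MultiError implements its own
    // unwrapping, hence the per-element loop.
    func handleReorgErr(err error) {
        var merr MultiError
        if errors.As(err, &merr) {
            for _, e := range merr {
                if errors.Is(e, ErrMissingTxOut) {
                    // React to this specific rule violation.
                }
            }
            return
        }
        if err != nil {
            // A single branch failed (or the process was interrupted).
        }
    }
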
b.index.RLock() + maybePruned := make([]*blockNode, 0, len(b.index.modified)) for node := range b.index.modified { + if !b.index.canValidate(node) { + continue + } + maybePruned = append(maybePruned, node) + } + b.index.RUnlock() + for _, node := range maybePruned { if err := b.maybeFetchTicketInfo(node); err != nil { - b.index.RUnlock() return err } } - b.index.RUnlock() - return b.index.flush() } -// flushBlockIndexWarnOnly attempts to flush and modified block index nodes to +// flushBlockIndexWarnOnly attempts to flush any modified block index nodes to // the database and will log a warning if it fails. // // NOTE: This MUST only be used in the specific circumstances where failure to // flush only results in a worst case scenario of requiring one or more blocks // to be validated again. All other cases must directly call the function on // the block index and check the error return accordingly. +// +// This function MUST be called with the chain lock held (for writes). func (b *BlockChain) flushBlockIndexWarnOnly() { if err := b.flushBlockIndex(); err != nil { log.Warnf("Unable to flush block index changes to db: %v", err) } } -// connectBestChain handles connecting the passed block to the chain while -// respecting proper chain selection according to the chain with the most -// proof of work. In the typical case, the new block simply extends the main -// chain. However, it may also be extending (or creating) a side chain (fork) -// which may or may not end up becoming the main chain depending on which fork -// cumulatively has the most proof of work. It returns the resulting fork -// length, that is to say the number of blocks to the fork point from the main -// chain, which will be zero if the block ends up on the main chain (either -// due to extending the main chain or causing a reorganization to become the -// main chain). -// -// The flags modify the behavior of this function as follows: -// - BFFastAdd: Avoids several expensive transaction validation operations. -// This is useful when using checkpoints. -// -// This function MUST be called with the chain state lock held (for writes). -func (b *BlockChain) connectBestChain(node *blockNode, block, parent *dcrutil.Block, flags BehaviorFlags) (int64, error) { - fastAdd := flags&BFFastAdd == BFFastAdd - - // Ensure the passed parent is actually the parent of the block. - if *parent.Hash() != node.parent.hash { - panicf("parent block %v (height %v) does not match expected parent %v "+ - "(height %v)", parent.Hash(), parent.MsgBlock().Header.Height, - node.parent.hash, node.height-1) - } - - // We are extending the main (best) chain with a new block. This is the - // most common case. - parentHash := &block.MsgBlock().Header.PrevBlock - tip := b.bestChain.Tip() - if *parentHash == tip.hash { - // Skip expensive checks if the block has already been fully - // validated. - hasValidated := b.index.NodeStatus(node).HasValidated() - fastAdd = fastAdd || hasValidated - - // Perform several checks to verify the block can be connected - // to the main chain without violating any rules and without - // actually connecting the block. - // - // Also, set the applicable status result in the block index, - // and flush the status changes to the database. It is safe to - // ignore any errors when flushing here as the changes will be - // flushed when a valid block is connected, and the worst case - // scenario if a block is invalid is it would need to be - // revalidated after a restart. 
- view := NewUtxoViewpoint(b) - view.SetBestHash(parentHash) - var stxos []spentTxOut - var hdrCommitments headerCommitmentData - if !fastAdd { - err := b.checkConnectBlock(node, block, parent, view, &stxos, - &hdrCommitments) - if err != nil { - var rerr RuleError - if errors.As(err, &rerr) { - b.index.SetStatusFlags(node, statusValidateFailed) - b.flushBlockIndexWarnOnly() - } - return 0, err - } - } - if !hasValidated { - b.index.SetStatusFlags(node, statusValidated) - b.flushBlockIndexWarnOnly() - } - - // In the fast add case the code to check the block connection - // was skipped, so the utxo view needs to load the referenced - // utxos, spend them, and add the new utxos being created by - // this block. Also, in the case the block votes against - // the parent, its regular transaction tree must be - // disconnected. - isTreasuryEnabled, err := b.isTreasuryAgendaActive(node.parent) - if err != nil { - return 0, err - } - - if fastAdd { - err := view.connectBlock(b.db, block, parent, &stxos, - isTreasuryEnabled) - if err != nil { - return 0, err - } - - // Create a version 2 block filter for the block and store it into - // the header commitment data. - filter, err := blockcf2.Regular(block.MsgBlock(), view) - if err != nil { - return 0, ruleError(ErrMissingTxOut, err.Error()) - } - hdrCommitments.filter = filter - } - - // Connect the block to the main chain. - err = b.connectBlock(node, block, parent, view, stxos, &hdrCommitments) - if err != nil { - return 0, err - } - - validateStr := "validating" - if !voteBitsApproveParent(node.voteBits) { - validateStr = "invalidating" - } - - log.Debugf("Block %v (height %v) connected to the main chain, "+ - "%v the previous block", node.hash, node.height, - validateStr) - - // The fork length is zero since the block is now the tip of the - // best chain. - return 0, nil - } - if fastAdd { - log.Warnf("fastAdd set in the side chain case? %v\n", - block.Hash()) - } - - // We're extending (or creating) a side chain, but the cumulative - // work for this new side chain is not enough to make it the new chain. - if node.workSum.Cmp(tip.workSum) <= 0 { - // Log information about how the block is forking the chain. - fork := b.bestChain.FindFork(node) - if fork.hash == *parentHash { - log.Infof("FORK: Block %v (height %v) forks the chain at height "+ - "%d/block %v, but does not cause a reorganize", - node.hash, node.height, fork.height, fork.hash) - } else { - log.Infof("EXTEND FORK: Block %v (height %v) extends a side chain "+ - "which forks the chain at height %d/block %v", node.hash, - node.height, fork.height, fork.hash) - } - - forkLen := node.height - fork.height - return forkLen, nil - } - - // We're extending (or creating) a side chain and the cumulative work - // for this new side chain is more than the old best chain, so this side - // chain needs to become the main chain. In order to accomplish that, - // find the common ancestor of both sides of the fork, disconnect the - // blocks that form the (now) old fork from the main chain, and attach - // the blocks that form the new chain to the main chain starting at the - // common ancestor (the point where the chain forked). - // - // Reorganize the chain and flush any potential unsaved changes to the - // block index to the database. It is safe to ignore any flushing - // errors here as the only time the index will be modified is if the - // block failed to connect. 
- log.Infof("REORGANIZE: Block %v is causing a reorganize.", node.hash) - err := b.reorganizeChain(node) - b.flushBlockIndexWarnOnly() - if err != nil { - return 0, err - } - - // The fork length is zero since the block is now the tip of the best - // chain. - return 0, nil -} - -// isCurrent returns whether or not the chain believes it is current. Several -// factors are used to guess, but the key factors that allow the chain to -// believe it is current are: +// isCurrent returns whether or not the chain believes it is current. The +// factors that are used to determine if the chain believes it is current are: // - Total amount of cumulative work is more than the minimum known work // specified by the parameters for the network // - Latest block has a timestamp newer than 24 hours ago @@ -1515,9 +1453,9 @@ func (b *BlockChain) isCurrent() bool { // This function is safe for concurrent access. func (b *BlockChain) IsCurrent() bool { b.chainLock.RLock() - defer b.chainLock.RUnlock() - - return b.isCurrent() + isCurrent := b.isCurrent() + b.chainLock.RUnlock() + return isCurrent } // BestSnapshot returns information about the current best chain block and @@ -1570,13 +1508,8 @@ func (b *BlockChain) maxBlockSize(prevNode *blockNode) (int64, error) { // // This function is safe for concurrent access. func (b *BlockChain) MaxBlockSize(hash *chainhash.Hash) (int64, error) { - // NOTE: The requirement for the node being fully validated here is strictly - // stronger than what is actually required. In reality, all that is needed - // is for the block data for the node and all of its ancestors to be - // available, but there is not currently any tracking to be able to - // efficiently determine that state. node := b.index.LookupNode(hash) - if node == nil || !b.index.NodeStatus(node).HasValidated() { + if node == nil || !b.index.CanValidate(node) { return 0, unknownBlockError(hash) } @@ -2172,10 +2105,12 @@ func New(ctx context.Context, config *Config) (*BlockChain, error) { notifications: config.Notifications, sigCache: config.SigCache, indexManager: config.IndexManager, + interrupt: ctx.Done(), subsidyCache: subsidyCache, index: newBlockIndex(config.DB), bestChain: newChainView(nil), recentBlocks: lru.NewKVCache(recentBlockCacheSize), + recentContextChecks: lru.NewCache(contextCheckCacheSize), deploymentCaches: newThresholdCaches(params), isVoterMajorityVersionCache: make(map[[stakeMajorityCacheKeySize]byte]bool), isStakeMajorityVersionCache: make(map[[stakeMajorityCacheKeySize]byte]bool), diff --git a/blockchain/chain_test.go b/blockchain/chain_test.go index b3b5f0707b..61bd361dd5 100644 --- a/blockchain/chain_test.go +++ b/blockchain/chain_test.go @@ -360,6 +360,18 @@ func TestForceHeadReorg(t *testing.T) { g.ForceTipReorg("b5", "b3") g.ExpectTip("b3") + // Attempt to force tip reorganization to the same tip. This should + // fail since that is not allowed. + // + // ... -> b1(0) -> b3(1) + // \-> b2(1) + // \-> b4(1) + // \-> b5(1) + // \-> b2bad0(1) + // \-> b2bad1(1) + // \-> b2bad2(1) + rejectForceTipReorg("b2", "b2", ErrForceReorgSameBlock) + // Attempt to force tip reorganization from a block that is not the // current tip. This should fail since that is not allowed. // @@ -401,7 +413,9 @@ func TestForceHeadReorg(t *testing.T) { // Attempt to force tip reorganization to an invalid block that has an // entry in the block index, but is not already known to be invalid. 
- // + // Notice that this requires a full reorganization attempt, so the + // expected behavior is to reorganize back to the best known good tip, + // which is b2 because it was seen before b3. // // ... -> b1(0) -> b3(1) // \-> b2(1) @@ -411,7 +425,7 @@ func TestForceHeadReorg(t *testing.T) { // \-> b2bad1(1) // \-> b2bad2(1) rejectForceTipReorg("b3", "b2bad2", ErrFraudAmountIn) - g.ExpectTip("b3") + g.ExpectTip("b2") } // locatorHashes is a convenience function that returns the hashes for all of diff --git a/blockchain/chainio.go b/blockchain/chainio.go index 43a5b6a7f2..d63f26bab2 100644 --- a/blockchain/chainio.go +++ b/blockchain/chainio.go @@ -1590,6 +1590,7 @@ func (b *BlockChain) createChainState() error { header := &genesisBlock.MsgBlock().Header node := newBlockNode(header, nil) node.status = statusDataStored | statusValidated + node.isFullyLinked = true // Initialize the state related to the best block. Since it is the // genesis block, use its timestamp for the median time. @@ -1713,6 +1714,10 @@ func loadBlockIndex(dbTx database.Tx, genesisHash *chainhash.Hash, index *blockI } blockNodes := make([]blockNode, blockCount) + // Initialize the best header to the node that will become the genesis block + // below. + index.bestHeader = &blockNodes[0] + // Load all of the block index entries and construct the block index // accordingly. // @@ -1754,8 +1759,9 @@ func loadBlockIndex(dbTx database.Tx, genesisHash *chainhash.Hash, index *blockI node := &blockNodes[i] initBlockNode(node, header, parent) node.status = entry.status + node.isFullyLinked = parent == nil || index.canValidate(parent) node.votes = entry.voteInfo - index.addNode(node) + index.addNodeFromDB(node) lastNode = node i++ @@ -1889,6 +1895,11 @@ func (b *BlockChain) initChainState(ctx context.Context) error { "chain tip %s in block index", state.hash)) } b.bestChain.SetTip(tip) + b.index.MaybePruneCachedTips(tip) + + // Add the best chain tip to the set of candidates since it is required + // to have the current best tip in it at all times. + b.index.addBestChainCandidate(tip) log.Debugf("Block index loaded in %v", time.Since(bidxStart)) diff --git a/blockchain/chainquery.go b/blockchain/chainquery.go index f1733b7926..dd3ffe33e6 100644 --- a/blockchain/chainquery.go +++ b/blockchain/chainquery.go @@ -132,3 +132,25 @@ func (b *BlockChain) ChainTips() []ChainTipInfo { } return results } + +// BestHeader returns the header with the most cumulative work that is NOT +// known to be invalid. +func (b *BlockChain) BestHeader() (chainhash.Hash, int64) { + b.index.RLock() + header := b.index.bestHeader + b.index.RUnlock() + return header.hash, header.height +} + +// BestInvalidHeader returns the header with the most cumulative work that is +// known to be invalid. It will be a hash of all zeroes if there is no such +// header. +func (b *BlockChain) BestInvalidHeader() chainhash.Hash { + var hash chainhash.Hash + b.index.RLock() + if b.index.bestInvalid != nil { + hash = b.index.bestInvalid.hash + } + b.index.RUnlock() + return hash +} diff --git a/blockchain/checkpoints.go b/blockchain/checkpoints.go index 3f069063a2..e674a3f195 100644 --- a/blockchain/checkpoints.go +++ b/blockchain/checkpoints.go @@ -72,6 +72,8 @@ func (b *BlockChain) verifyCheckpoint(height int64, hash *chainhash.Hash) bool { // maybeUpdateMostRecentCheckpoint potentially updates the most recently known // checkpoint to the provided block node. +// +// This function MUST be called with the chain lock held (for writes). 
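
Returning to the two queries added in chainquery.go above: they are intended for code outside the package, for example a diagnostics or status path. A rough usage sketch, where chain is an assumed *BlockChain and the logging calls are illustrative only:

    bestHash, bestHeight := chain.BestHeader()
    log.Infof("best known header %s (height %d)", bestHash, bestHeight)

    if invalid := chain.BestInvalidHeader(); invalid != (chainhash.Hash{}) {
        log.Infof("best known invalid header %s", invalid)
    }
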
func (b *BlockChain) maybeUpdateMostRecentCheckpoint(node *blockNode) { if len(b.checkpoints) == 0 { return @@ -93,6 +95,18 @@ func (b *BlockChain) maybeUpdateMostRecentCheckpoint(node *blockNode) { } } +// isKnownCheckpointAncestor determines whether the provided node is an ancestor +// of the most recently-known checkpoint. False is returned when no checkpoint +// is known or checkpoints are disabled. +// +// This function MUST be called with the chain lock held (for reads). +func (b *BlockChain) isKnownCheckpointAncestor(node *blockNode) bool { + if b.checkpointNode == nil { + return false + } + return b.checkpointNode.Ancestor(node.height) == node +} + // isNonstandardTransaction determines whether a transaction contains any // scripts which are not one of the standard types. func isNonstandardTransaction(tx *dcrutil.Tx) bool { diff --git a/blockchain/common_test.go b/blockchain/common_test.go index 7a7f11bb9d..5ac10f8192 100644 --- a/blockchain/common_test.go +++ b/blockchain/common_test.go @@ -137,7 +137,9 @@ func newFakeChain(params *chaincfg.Params) *BlockChain { // when creating the fake chain below. node := newBlockNode(¶ms.GenesisBlock.Header, nil) node.status = statusDataStored | statusValidated + node.isFullyLinked = true index := newBlockIndex(nil) + index.bestHeader = node index.AddNode(node) // Generate a deployment ID to version map from the provided params. @@ -187,6 +189,7 @@ func newFakeNode(parent *blockNode, blockVersion int32, stakeVersion uint32, bit } node := newBlockNode(header, parent) node.status = statusDataStored | statusValidated + node.isFullyLinked = parent == nil || parent.isFullyLinked return node } @@ -515,6 +518,94 @@ func newChaingenHarness(t *testing.T, params *chaincfg.Params, dbName string) (* return newChaingenHarnessWithGen(t, dbName, &g) } +// AcceptHeader processes the block header associated with the given name in the +// harness generator and expects it to be accepted, but not necessarily to the +// main chain. It also ensures the underlying block index is consistent with +// the result. +func (g *chaingenHarness) AcceptHeader(blockName string) { + g.t.Helper() + + header := &g.BlockByName(blockName).Header + blockHash := header.BlockHash() + blockHeight := header.Height + g.t.Logf("Testing accept block header %q (hash %s, height %d)", blockName, + blockHash, blockHeight) + + // Determine if the header is already known before attempting to process it. + alreadyHaveHeader := g.chain.index.LookupNode(&blockHash) != nil + + err := g.chain.ProcessBlockHeader(header, BFNone) + if err != nil { + g.t.Fatalf("block header %q (hash %s, height %d) should have been "+ + "accepted: %v", blockName, blockHash, blockHeight, err) + } + + // Ensure the accepted header now exists in the block index. + node := g.chain.index.LookupNode(&blockHash) + if node == nil { + g.t.Fatalf("accepted block header %q (hash %s, height %d) should have "+ + "been added to the block index", blockName, blockHash, blockHeight) + } + + // Ensure the accepted header is not marked as known valid when it was not + // previously known since that implies the block data is not yet available + // and therefore it can't possibly be known to be valid. + // + // Also, ensure the accepted header is not marked as known invalid, as + // having known invalid ancestors, or as known to have failed validation. 
+ status := g.chain.index.NodeStatus(node) + if !alreadyHaveHeader && status.HasValidated() { + g.t.Fatalf("accepted block header %q (hash %s, height %d) was not "+ + "already known, but is marked as known valid", blockName, blockHash, + blockHeight) + } + if status.KnownInvalid() { + g.t.Fatalf("accepted block header %q (hash %s, height %d) is marked "+ + "as known invalid", blockName, blockHash, blockHeight) + } + if status.KnownInvalidAncestor() { + g.t.Fatalf("accepted block header %q (hash %s, height %d) is marked "+ + "as having a known invalid ancestor", blockName, blockHash, + blockHeight) + } + if status.KnownValidateFailed() { + g.t.Fatalf("accepted block header %q (hash %s, height %d) is marked "+ + "as having known to fail validation", blockName, blockHash, + blockHeight) + } +} + +// AcceptBlockData processes the block associated with the given name in the +// harness generator and expects it to be accepted, but not necessarily to the +// main chain. +func (g *chaingenHarness) AcceptBlockData(blockName string) { + g.t.Helper() + + msgBlock := g.BlockByName(blockName) + blockHeight := msgBlock.Header.Height + block := dcrutil.NewBlock(msgBlock) + blockHash := block.Hash() + g.t.Logf("Testing block %q (hash %s, height %d)", blockName, blockHash, + blockHeight) + + _, err := g.chain.ProcessBlock(block, BFNone) + if err != nil { + g.t.Fatalf("block %q (hash %s, height %d) should have been accepted: %v", + blockName, blockHash, blockHeight, err) + } +} + +// AcceptBlockDataWithExpectedTip processes the block associated with the given +// name in the harness generator and expects it to be accepted, but not +// necessarily to the main chain and for the current best chain tip to be the +// provided value. +func (g *chaingenHarness) AcceptBlockDataWithExpectedTip(blockName, tipName string) { + g.t.Helper() + + g.AcceptBlockData(blockName) + g.ExpectTip(tipName) +} + // AcceptBlock processes the block associated with the given name in the // harness generator and expects it to be accepted to the main chain. func (g *chaingenHarness) AcceptBlock(blockName string) { @@ -550,6 +641,65 @@ func (g *chaingenHarness) AcceptTipBlock() { g.AcceptBlock(g.TipName()) } +// RejectHeader expects the block header associated with the given name in the +// harness generator to be rejected with the provided error kind and also +// ensures the underlying block index is consistent with the result. +func (g *chaingenHarness) RejectHeader(blockName string, kind ErrorKind) { + g.t.Helper() + + header := &g.BlockByName(blockName).Header + blockHash := header.BlockHash() + blockHeight := header.Height + g.t.Logf("Testing reject block header %q (hash %s, height %d, reason %v)", + blockName, blockHash, blockHeight, kind) + + // Determine if the header is already known before attempting to process it. + alreadyHaveHeader := g.chain.index.LookupNode(&blockHash) != nil + + err := g.chain.ProcessBlockHeader(header, BFNone) + if err == nil { + g.t.Fatalf("block header %q (hash %s, height %d) should not have been "+ + "accepted", blockName, blockHash, blockHeight) + } + + // Ensure the error matches the value specified in the test instance. 
+ if !errors.Is(err, kind) { + g.t.Fatalf("block header %q (hash %s, height %d) does not have "+ + "expected reject code -- got %v, want %v", blockName, blockHash, + blockHeight, err, kind) + } + + // Ensure the rejected header was not added to the block index when it was + // not already previously successfully added and that it was not removed if + // it was already previously added. + node := g.chain.index.LookupNode(&blockHash) + switch { + case !alreadyHaveHeader && node == nil: + // Header was not added as expected. + return + + case !alreadyHaveHeader && node != nil: + g.t.Fatalf("rejected block header %q (hash %s, height %d) was added "+ + "to the block index", blockName, blockHash, blockHeight) + + case alreadyHaveHeader && node == nil: + g.t.Fatalf("rejected block header %q (hash %s, height %d) was removed "+ + "from the block index", blockName, blockHash, blockHeight) + } + + // The header was previously added, so ensure it is not reported as having + // been validated and that it is now known invalid. + status := g.chain.index.NodeStatus(node) + if status.HasValidated() { + g.t.Fatalf("rejected block header %q (hash %s, height %d) is marked "+ + "as known valid", blockName, blockHash, blockHeight) + } + if !status.KnownInvalid() { + g.t.Fatalf("rejected block header %q (hash %s, height %d) is NOT "+ + "marked as known invalid", blockName, blockHash, blockHeight) + } +} + // RejectBlock expects the block associated with the given name in the harness // generator to be rejected with the provided error kind. func (g *chaingenHarness) RejectBlock(blockName string, kind ErrorKind) { @@ -630,6 +780,58 @@ func (g *chaingenHarness) AcceptedToSideChainWithExpectedTip(tipName string) { g.ExpectTip(tipName) } +// ExpectBestHeader expects the provided block header associated with the given +// name to be the one identified as the header of the chain associated with the +// harness generator with the most cumulative work that is NOT known to be +// invalid. +func (g *chaingenHarness) ExpectBestHeader(blockName string) { + g.t.Helper() + + // Ensure hash and height match. + want := g.BlockByName(blockName).Header + bestHash, bestHeight := g.chain.BestHeader() + if bestHash != want.BlockHash() || bestHeight != int64(want.Height) { + g.t.Fatalf("block header %q (hash %s, height %d) should be the best "+ + "known header -- got %q (hash %s, height %d)", blockName, + want.BlockHash(), want.Height, g.BlockName(&bestHash), + bestHash, bestHeight) + } +} + +// ExpectBestInvalidHeader expects the provided block header associated with the +// given name to be the one identified as the header of the chain associated +// with the harness generator with the most cumulative work that is known to be +// invalid. Note that the provided block name can be an empty string to +// indicate no such header should exist. 
+func (g *chaingenHarness) ExpectBestInvalidHeader(blockName string) { + g.t.Helper() + + bestHash := g.chain.BestInvalidHeader() + switch { + case blockName != "" && bestHash != *zeroHash: + want := g.BlockByName(blockName).Header + bestHeader := g.BlockByHash(&bestHash).Header + if bestHash != want.BlockHash() || bestHeader.Height != want.Height { + g.t.Fatalf("block header %q (hash %s, height %d) should be the "+ + "best known invalid header -- got %q (hash %s, height %d)", + blockName, want.BlockHash(), want.Height, g.BlockName(&bestHash), + bestHash, bestHeader.Height) + } + + case blockName != "" && bestHash == *zeroHash: + want := g.BlockByName(blockName).Header + g.t.Fatalf("block header %q (hash %s, height %d) should be the best "+ + "known invalid header -- got none", blockName, want.BlockHash(), + want.Height) + + case blockName == "" && bestHash != *zeroHash: + bestHeight := g.BlockByHash(&bestHash).Header.Height + g.t.Fatalf("there should not be a best known invalid header -- got %q "+ + "(hash %s, height %d)", g.BlockName(&bestHash), bestHash, + bestHeight) + } +} + // lookupDeploymentVersion returns the version of the deployment with the // provided ID and caches the result for future invocations. An error is // returned if the ID is not found. @@ -755,7 +957,7 @@ func minUint32(a, b uint32) uint32 { func (g *chaingenHarness) generateToHeight(fromHeight, toHeight uint32, buyTicketsPerBlock uint32, accept bool) { g.t.Helper() - // Only allow this to be called with a sane heights. + // Only allow this to be called with sane heights. tipHeight := fromHeight if toHeight <= tipHeight { g.t.Fatalf("not possible to generate to height %d when the current "+ diff --git a/blockchain/difficulty.go b/blockchain/difficulty.go index 52b95fb427..f05439443c 100644 --- a/blockchain/difficulty.go +++ b/blockchain/difficulty.go @@ -790,7 +790,6 @@ func (b *BlockChain) estimateNextStakeDifficultyV1(curNode *blockNode, ticketsIn // User a constant pool size for estimate, since // this has much less fluctuation than freshStake. - // TODO Use a better pool size estimate? emptyHeader.PoolSize = curNode.poolSize // Insert the fake fresh stake into each block, diff --git a/blockchain/error.go b/blockchain/error.go index 480796dc2e..ead3ff4f6a 100644 --- a/blockchain/error.go +++ b/blockchain/error.go @@ -6,7 +6,10 @@ package blockchain import ( + "errors" "fmt" + "strconv" + "strings" "github.com/decred/dcrd/chaincfg/chainhash" ) @@ -35,6 +38,12 @@ const ( // ErrMissingParent indicates that the block was an orphan. ErrMissingParent = ErrorKind("ErrMissingParent") + // ErrNoBlockData indicates an attempt to perform an operation on a block + // that requires all data to be available does not have the data. This is + // typically because the header is known, but the full data has not been + // received yet. + ErrNoBlockData = ErrorKind("ErrNoBlockData") + // ErrBlockTooBig indicates the serialized block size exceeds the // maximum allowed size. ErrBlockTooBig = ErrorKind("ErrBlockTooBig") @@ -367,11 +376,15 @@ const ( // ErrPoolSize indicates an error in the ticket pool size for this block. ErrPoolSize = ErrorKind("ErrPoolSize") - // ErrForceReorgWrongChain indicates that a reroganization was attempted + // ErrForceReorgSameBlock indicates that a reorganization was attempted to + // be forced to the same block. 
+ ErrForceReorgSameBlock = ErrorKind("ErrForceReorgSameBlock") + + // ErrForceReorgWrongChain indicates that a reorganization was attempted // to be forced, but the chain indicated was not mirrored by b.bestChain. ErrForceReorgWrongChain = ErrorKind("ErrForceReorgWrongChain") - // ErrForceReorgMissingChild indicates that a reroganization was attempted + // ErrForceReorgMissingChild indicates that a reorganization was attempted // to be forced, but the child node to reorganize to could not be found. ErrForceReorgMissingChild = ErrorKind("ErrForceReorgMissingChild") @@ -611,7 +624,7 @@ func (e ContextError) Unwrap() error { return e.Err } -// ruleError creates a ContextError given a set of arguments. +// contextError creates a ContextError given a set of arguments. func contextError(kind ErrorKind, desc string) ContextError { return ContextError{Err: kind, Description: desc} } @@ -646,3 +659,70 @@ func (e RuleError) Unwrap() error { func ruleError(kind ErrorKind, desc string) RuleError { return RuleError{Err: kind, Description: desc} } + +// MultiError houses several errors as a single error that provides full support +// for errors.Is and errors.As so the caller can easily determine if any of the +// errors match any specific error or error type. Note that this differs from +// typical wrapped error chains which only represent a single error. +type MultiError []error + +// Error satisfies the error interface and prints human-readable errors. +func (e MultiError) Error() string { + if len(e) == 1 { + return e[0].Error() + } + + var builder strings.Builder + builder.WriteString("multiple errors (") + builder.WriteString(strconv.Itoa(len(e))) + builder.WriteString("):\n") + const maxErrs = 5 + i := 0 + for ; i < len(e) && i < maxErrs; i++ { + builder.WriteString(" - ") + builder.WriteString(e[i].Error()) + builder.WriteRune('\n') + } + if len(e) > maxErrs { + builder.WriteString(" - ... ") + builder.WriteString(strconv.Itoa(len(e) - maxErrs)) + builder.WriteString(" more error(s)") + builder.WriteRune('\n') + } + + return builder.String() +} + +// Is implements the interface to work with the standard library's errors.Is. +// +// It iterates each of the errors in the multi error and calls errors.Is on it +// until the first one that matches target is found, in which case it returns +// true. Otherwise, it returns false. +// +// This means it keeps all of the same semantics typically provided by Is in +// terms of unwrapping error chains. +func (e MultiError) Is(target error) bool { + for _, err := range e { + if errors.Is(err, target) { + return true + } + } + return false +} + +// As implements the interface to work with the standard library's errors.As. +// +// It iterates each of the errors in the multi error and calls errors.As on it +// until the first one that matches target is found, in which case it returns +// true. Otherwise, it returns false. +// +// This means it keeps all of the same semantics typically provided by As in +// terms of unwrapping error chains and setting the target to the matched error. 
+func (e MultiError) As(target interface{}) bool { + for _, err := range e { + if errors.As(err, target) { + return true + } + } + return false +} diff --git a/blockchain/error_test.go b/blockchain/error_test.go index 8bfd2dc034..f2dbb4733f 100644 --- a/blockchain/error_test.go +++ b/blockchain/error_test.go @@ -19,6 +19,7 @@ func TestErrorKindStringer(t *testing.T) { }{ {ErrDuplicateBlock, "ErrDuplicateBlock"}, {ErrMissingParent, "ErrMissingParent"}, + {ErrNoBlockData, "ErrNoBlockData"}, {ErrBlockTooBig, "ErrBlockTooBig"}, {ErrWrongBlockSize, "ErrWrongBlockSize"}, {ErrBlockVersionTooOld, "ErrBlockVersionTooOld"}, @@ -97,6 +98,7 @@ func TestErrorKindStringer(t *testing.T) { {ErrRegTxCreateStakeOut, "ErrRegTxCreateStakeOut"}, {ErrInvalidFinalState, "ErrInvalidFinalState"}, {ErrPoolSize, "ErrPoolSize"}, + {ErrForceReorgSameBlock, "ErrForceReorgSameBlock"}, {ErrForceReorgWrongChain, "ErrForceReorgWrongChain"}, {ErrForceReorgMissingChild, "ErrForceReorgMissingChild"}, {ErrBadStakebaseValue, "ErrBadStakebaseValue"}, diff --git a/blockchain/example_test.go b/blockchain/example_test.go index 5ebe5728d6..54a27c4b1f 100644 --- a/blockchain/example_test.go +++ b/blockchain/example_test.go @@ -58,9 +58,8 @@ func ExampleBlockChain_ProcessBlock() { return } - // Process a block. For this example, we are going to intentionally - // cause an error by trying to process the genesis block which already - // exists. + // Process a block. For this example, intentionally cause an error by + // trying to process the genesis block which already exists. genesisBlock := dcrutil.NewBlock(mainNetParams.GenesisBlock) forkLen, err := chain.ProcessBlock(genesisBlock, blockchain.BFNone) diff --git a/blockchain/fullblocktests/generate.go b/blockchain/fullblocktests/generate.go index ba78ef8134..6e4ee48def 100644 --- a/blockchain/fullblocktests/generate.go +++ b/blockchain/fullblocktests/generate.go @@ -776,7 +776,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // Create a fork that double spends. // // ... -> bf1(0) -> bf2(1) -> bf5(2) -> bf6(3) - // \-> bf7(2) -> bf8(4) + // \-> bf7(2) -> bf8(4) // \-> bf3(1) -> bf4(2) g.SetTip("bf5") g.NextBlock("bf7", outs[2], ticketOuts[3]) diff --git a/blockchain/notifications.go b/blockchain/notifications.go index 0c6b656221..03142a6bfc 100644 --- a/blockchain/notifications.go +++ b/blockchain/notifications.go @@ -69,8 +69,8 @@ const ( // accepted block. NTSpentAndMissedTickets - // NTSpentAndMissedTickets indicates newly maturing tickets from a newly - // accepted block. + // NTNewTickets indicates newly maturing tickets from a newly accepted + // block. NTNewTickets ) diff --git a/blockchain/process.go b/blockchain/process.go index 7e649e7ebf..5a180c47b9 100644 --- a/blockchain/process.go +++ b/blockchain/process.go @@ -6,10 +6,13 @@ package blockchain import ( + "errors" "fmt" - "time" + "github.com/decred/dcrd/blockchain/stake/v4" + "github.com/decred/dcrd/database/v2" "github.com/decred/dcrd/dcrutil/v4" + "github.com/decred/dcrd/wire" ) // BehaviorFlags is a bitmask defining tweaks to the normal behavior when @@ -32,13 +35,330 @@ const ( BFNone BehaviorFlags = 0 ) +// checkKnownInvalidBlock returns an appropriate error when the provided block +// is known to be invalid either due to failing validation itself or due to +// having a known invalid ancestor (aka being part of an invalid branch). +// +// This function is safe for concurrent access. 
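Stepping back to the MultiError type introduced above, here is an illustrative sketch of how a caller might consume it: because Is and As consider every contained error, matching on any individual rule violation behaves the same as with a single RuleError. The import path is assumed from the surrounding test imports, and the error values are made up for the example.

package main

import (
    "errors"
    "fmt"

    "github.com/decred/dcrd/blockchain/v4"
)

func main() {
    // A caller might receive a MultiError when both the block being processed
    // and a subsequent reorganize attempt fail.
    var err error = blockchain.MultiError{
        blockchain.RuleError{Err: blockchain.ErrMissingParent, Description: "parent unknown"},
        blockchain.RuleError{Err: blockchain.ErrNoTax, Description: "missing dev subsidy"},
    }

    // Matching any one of the contained kinds succeeds.
    fmt.Println(errors.Is(err, blockchain.ErrNoTax)) // true

    // As stops at the first contained error that matches the target type.
    var rErr blockchain.RuleError
    if errors.As(err, &rErr) {
        fmt.Println(rErr.Err) // ErrMissingParent
    }
}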
+func (b *BlockChain) checkKnownInvalidBlock(node *blockNode) error { + status := b.index.NodeStatus(node) + if status.KnownValidateFailed() { + str := fmt.Sprintf("block %s is known to be invalid", node.hash) + return ruleError(ErrKnownInvalidBlock, str) + } + if status.KnownInvalidAncestor() { + str := fmt.Sprintf("block %s is known to be part of an invalid branch", + node.hash) + return ruleError(ErrInvalidAncestorBlock, str) + } + + return nil +} + +// maybeAcceptBlockHeader potentially accepts the header to the block index and, +// if accepted, returns the block node associated with the header. It performs +// several context independent checks as well as those which depend on its +// position within the chain. It should be noted that some of the header fields +// require the full block data to be available in order to be able to validate +// them, so those fields are not included here. This provides support for full +// headers-first semantics. +// +// The flag for check header sanity allows the additional header sanity checks +// to be skipped which is useful for the full block processing path which checks +// the sanity of the entire block, including the header, before attempting to +// accept its header in order to quickly eliminate blocks that are obviously +// incorrect. +// +// In the case the block header is already known, the associated block node is +// examined to determine if the block is already known to be invalid, in which +// case an appropriate error will be returned. Otherwise, the block node is +// returned. +// +// The flags do not modify the behavior of this function directly, however they +// are needed to pass along to checkBlockHeaderSanity and +// checkBlockHeaderPositional. +// +// This function MUST be called with the chain lock held (for writes). +func (b *BlockChain) maybeAcceptBlockHeader(header *wire.BlockHeader, flags BehaviorFlags, checkHeaderSanity bool) (*blockNode, error) { + // Avoid validating the header again if its validation status is already + // known. Invalid headers are never added to the block index, so if there + // is an entry for the block hash, the header itself is known to be valid. + // However, it might have since been marked invalid either due to the + // associated block, or an ancestor, later failing validation. + hash := header.BlockHash() + if node := b.index.LookupNode(&hash); node != nil { + if err := b.checkKnownInvalidBlock(node); err != nil { + return nil, err + } + + return node, nil + } + + // Perform context-free sanity checks on the block header. + if checkHeaderSanity { + err := checkBlockHeaderSanity(header, b.timeSource, flags, b.chainParams) + if err != nil { + return nil, err + } + } + + // Orphan headers are not allowed and this function should never be called + // with the genesis block. + prevHash := &header.PrevBlock + prevNode := b.index.LookupNode(prevHash) + if prevNode == nil { + str := fmt.Sprintf("previous block %s is not known", prevHash) + return nil, ruleError(ErrMissingParent, str) + } + + // There is no need to validate the header if an ancestor is already known + // to be invalid. + prevNodeStatus := b.index.NodeStatus(prevNode) + if prevNodeStatus.KnownInvalid() { + str := fmt.Sprintf("previous block %s is known to be invalid", prevHash) + return nil, ruleError(ErrInvalidAncestorBlock, str) + } + + // The block header must pass all of the validation rules which depend on + // its position within the block chain. 
+ err := b.checkBlockHeaderPositional(header, prevNode, flags) + if err != nil { + return nil, err + } + + // Create a new block node for the block and add it to the block index. + // + // Note that the additional information for the actual votes, tickets, and + // revocations in the block can't be populated until the full block data is + // known since that information is not available in the header. + newNode := newBlockNode(header, prevNode) + newNode.status = statusNone + b.index.AddNode(newNode) + + // Potentially update the most recently known checkpoint to this block + // header. + b.maybeUpdateMostRecentCheckpoint(newNode) + + return newNode, nil +} + +// ProcessBlockHeader is the main workhorse for handling insertion of new block +// headers into the block chain using headers-first semantics. It includes +// functionality such as rejecting headers that do not connect to an existing +// known header, ensuring headers follow all rules that do not depend on having +// all ancestor block data available, and insertion into the block index. +// +// Block headers that have already been inserted are ignored, unless they have +// subsequently been marked invalid, in which case an appropriate error is +// returned. +// +// It should be noted that this function intentionally does not accept block +// headers that do not connect to an existing known header or to headers which +// are already known to be a part of an invalid branch. This means headers must +// be processed in order. +// +// This function is safe for concurrent access. +func (b *BlockChain) ProcessBlockHeader(header *wire.BlockHeader, flags BehaviorFlags) error { + b.processLock.Lock() + defer b.processLock.Unlock() + + // Potentially accept the header to the block index. When the header + // already exists in the block index, this acts as a lookup of the existing + // node along with a status check to avoid additional work when possible. + // + // On the other hand, when the header does not already exist in the block + // index, validate it according to both context free and context dependent + // positional checks, and create a block index entry for it. + b.chainLock.Lock() + const checkHeaderSanity = true + _, err := b.maybeAcceptBlockHeader(header, flags, checkHeaderSanity) + if err != nil { + b.chainLock.Unlock() + return err + } + + // Write any modified block index entries to the database since any new + // headers will have added a new entry. + if err := b.flushBlockIndex(); err != nil { + b.chainLock.Unlock() + return err + } + b.chainLock.Unlock() + + return nil +} + +// maybeAcceptBlockData potentially accepts the data for the given block into +// the database, updates the block index state to account for the full data now +// being available, and returns a list of all descendant blocks that already +// have their respective data available and are now therefore eligible for +// validation. +// +// The block is only accepted if it passes several validation checks which +// depend on its position within the block chain and having the headers of all +// ancestors available. This function does not, and must not, rely on having +// the full block data of all ancestors available. +// +// Note that this currently expects that it is only ever called from +// ProcessBlock which already checked the block sanity. Care must be taken if +// the code is changed to violate that assumption. +// +// The flags do not modify the behavior of this function directly, however they +// are needed to pass along to checkBlockPositional. 
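The "fully linked" notion used by the block data acceptance path (a block has its own data available and so does every one of its ancestors) is the condition that makes a block eligible for full validation. Below is a minimal standalone sketch of that invariant using hypothetical simplified types; the real block index tracks the property incrementally rather than walking the chain on each query.

package main

import "fmt"

// node is a simplified stand-in for a block index entry.
type node struct {
    parent   *node
    haveData bool
}

// isFullyLinked reports whether the block and all of its ancestors have their
// block data available, which is when full validation becomes possible.
func isFullyLinked(n *node) bool {
    for ; n != nil; n = n.parent {
        if !n.haveData {
            return false
        }
    }
    return true
}

func main() {
    genesis := &node{haveData: true}
    b1 := &node{parent: genesis}            // header known, data not yet received
    b2 := &node{parent: b1, haveData: true} // data arrived out of order

    fmt.Println(isFullyLinked(b2)) // false: b1 data is still missing
    b1.haveData = true
    fmt.Println(isFullyLinked(b2)) // true: b2 is now eligible for validation
}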
+// +// This function MUST be called with the chain lock held (for writes). +func (b *BlockChain) maybeAcceptBlockData(node *blockNode, block *dcrutil.Block, flags BehaviorFlags) ([]*blockNode, error) { + // Nothing more to do if the block data is already available. Note that + // this function is never called when the data is already available at the + // time this comment was written, but it's a fast check and will prevent + // incorrect behavior if that changes at some point in the future. + if b.index.NodeStatus(node).HaveData() { + return nil, nil + } + + // Populate the prunable information that is related to tickets and votes. + ticketInfo := stake.FindSpentTicketsInBlock(block.MsgBlock()) + b.index.PopulateTicketInfo(node, ticketInfo) + + // The block must pass all of the validation rules which depend on the + // position of the block within the block chain. Not that this only checks + // the block data, not including the header, because the header was already + // checked when it was accepted to the block index. + err := b.checkBlockDataPositional(block, node.parent, flags) + if err != nil { + b.index.MarkBlockFailedValidation(node) + return nil, err + } + + // Prune stake nodes that are no longer needed. + b.pruner.pruneChainIfNeeded() + + // Insert the block into the database if it's not already there. Even + // though it is possible the block will ultimately fail to connect, it has + // already passed all proof-of-work and validity tests which means it would + // be prohibitively expensive for an attacker to fill up the disk with a + // bunch of blocks that fail to connect. This is necessary since it allows + // block download to be decoupled from the much more expensive connection + // logic. It also has some other nice properties such as making blocks that + // never become part of the main chain or blocks that fail to connect + // available for further analysis. + err = b.db.Update(func(dbTx database.Tx) error { + return dbMaybeStoreBlock(dbTx, block) + }) + if err != nil { + return nil, err + } + b.index.SetStatusFlags(node, statusDataStored) + + // Update the block index state to account for the full data for the block + // now being available. This might result in the block, and any others that + // are descendants of it, becoming fully linked (meaning a block has all of + // its own data available and all of its ancestors also have their data + // available) which makes them eligible for full validation. + tip := b.bestChain.Tip() + linkedBlocks := b.index.AcceptBlockData(node, tip) + + return linkedBlocks, nil +} + +// maybeAcceptBlocks tentatively accepts the given blocks, which must have +// already been determined to be fully linked by the caller, to the chain if +// they pass several validation checks which depend on having the full block +// data for all of their ancestors available and updates the block index state +// to account for any that fail validation. +// +// It returns those that were accepted along with an error that applies to the +// first one that failed validation (if any). This is sufficient because the +// provided blocks must all be descendants of previous ones which means all of +// the remaining ones after a validation failure are not eligible for further +// processing and acceptance because they have an invalid ancestor. +// +// The flags do not modify the behavior of this function directly, however they +// are needed to pass along to checkBlockContext. +// +// This function MUST be called with the chain lock held (for writes). 
+func (b *BlockChain) maybeAcceptBlocks(curTip *blockNode, nodes []*blockNode, flags BehaviorFlags) ([]*blockNode, error) { + isCurrent := b.isCurrent() + for i, n := range nodes { + var err error + linkedBlock, err := b.fetchBlockByNode(n) + if err != nil { + return nodes[:i], err + } + + // The block must pass all of the validation rules which depend on + // having the full block data for all of its ancestors available. + if err := b.checkBlockContext(linkedBlock, n.parent, flags); err != nil { + var rErr RuleError + if errors.As(err, &rErr) { + b.index.MarkBlockFailedValidation(n) + } + + return nodes[:i], err + } + + // Cache the block and mark it as recently checked to avoid loading and + // checking it again when connecting it in the typical case. Since the + // cache is limited in size, it is technically possible that a large + // enough chain of blocks becoming linked at once will end up evicting + // some of the early ones, but the only effect in that case is + // potentially having to load the block and run the context checks again + // later. That said, in practice, eviction of items essentially never + // happens under normal operation, especially once the chain is fully + // synced. + b.addRecentBlock(linkedBlock) + b.recentContextChecks.Add(n.hash) + + // Notify the caller when the block intends to extend the main chain, + // the chain believes it is current, and the block has passed all of the + // sanity and contextual checks, such as having valid proof of work, + // valid merkle and stake roots, and only containing allowed votes and + // revocations. + // + // This allows the block to be relayed before doing the more expensive + // connection checks, because even though the block might still fail to + // connect and become the new main chain tip, that is quite rare in + // practice since a lot of work was expended to create a block that + // satisifies the proof of work requirement. + // + // Notice that the chain lock is not released before sending the + // notification. This is intentional and must not be changed without + // understanding why! + if n.parent == curTip && isCurrent { + b.sendNotification(NTNewTipBlockChecked, linkedBlock) + } + } + + return nodes, nil +} + // ProcessBlock is the main workhorse for handling insertion of new blocks into // the block chain. It includes functionality such as rejecting duplicate // blocks, ensuring blocks follow all rules, and insertion into the block chain // along with best chain selection and reorganization. // -// It is up to the caller to ensure the blocks are processed in order since -// orphans are rejected. +// This function permits blocks to be processed out of order so long as their +// header has already been successfully processed via ProcessBlockHeader which +// itself requires the headers to properly connect. In other words, orphan +// blocks are rejected and thus is up to the caller to either ensure that the +// blocks are processed in order or that the headers for the blocks have already +// been successfully processed. +// +// Upon return, the best chain tip will be whatever branch tip has the most +// proof of work and also passed all validation checks. Due to this, it is also +// worth noting that the best chain tip might be updated even in the case of +// processing a block that ultimately fails validation. 
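To make the revised calling contract concrete, here is an illustrative, non-authoritative sketch of how a consumer might drive the headers-first flow: headers are processed strictly in order via ProcessBlockHeader, after which the corresponding block data can be handed to ProcessBlock in any order. The helper function, its parameters, and the blockchain import path are hypothetical; only the ProcessBlockHeader/ProcessBlock signatures and BFNone come from this patch.

package example

import (
    "fmt"

    "github.com/decred/dcrd/blockchain/v4"
    "github.com/decred/dcrd/chaincfg/chainhash"
    "github.com/decred/dcrd/dcrutil/v4"
    "github.com/decred/dcrd/wire"
)

// syncHeadersFirst sketches the intended usage pattern under headers-first
// semantics.
func syncHeadersFirst(chain *blockchain.BlockChain, headers []wire.BlockHeader,
    blocks map[chainhash.Hash]*dcrutil.Block) error {

    // Headers must connect to an already known header, so they are processed
    // strictly in order.
    for i := range headers {
        if err := chain.ProcessBlockHeader(&headers[i], blockchain.BFNone); err != nil {
            return fmt.Errorf("header %d: %w", i, err)
        }
    }

    // Once the headers are known, block data may be processed in any order.
    // Note that an error here does not necessarily refer to the block passed
    // in, since processing one block can trigger validation of
    // already-downloaded descendants.
    for hash, block := range blocks {
        if _, err := chain.ProcessBlock(block, blockchain.BFNone); err != nil {
            return fmt.Errorf("block %s: %w", hash, err)
        }
    }
    return nil
}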
+// +// Additionally, due to the ability to process blocks out of order, and the fact +// blocks can only be fully validated once all of their ancestors have the block +// data available, it is to be expected that no error is returned immediately +// for blocks that are valid enough to make it to the point they require the +// remaining ancestor block data to be fully validated even though they might +// ultimately end up failing validation. Similarly, because the data for a +// block becoming available makes any of its direct descendants that already +// have their data available eligible for validation, an error being returned +// does not necessarily mean the block being processed is the one that failed +// validation. // // When no errors occurred during processing, the first return value indicates // the length of the fork the block extended. In the case it either extended @@ -47,48 +367,218 @@ const ( // // This function is safe for concurrent access. func (b *BlockChain) ProcessBlock(block *dcrutil.Block, flags BehaviorFlags) (int64, error) { - b.chainLock.Lock() - defer b.chainLock.Unlock() - - blockHash := block.Hash() - log.Tracef("Processing block %v", blockHash) - currentTime := time.Now() - defer func() { - elapsedTime := time.Since(currentTime) - log.Debugf("Block %v (height %v) finished processing in %s", - blockHash, block.Height(), elapsedTime) - }() + // Since the chain lock is periodically released to send notifications, + // protect the overall processing of blocks with a separate mutex. + b.processLock.Lock() + defer b.processLock.Unlock() // The block must not already exist in the main chain or side chains. + blockHash := block.Hash() if b.index.HaveBlock(blockHash) { str := fmt.Sprintf("already have block %v", blockHash) return 0, ruleError(ErrDuplicateBlock, str) } + b.chainLock.Lock() + defer b.chainLock.Unlock() + + // Reject blocks that are already known to be invalid immediately to avoid + // additional work when possible. + node := b.index.LookupNode(block.Hash()) + if node != nil { + if err := b.checkKnownInvalidBlock(node); err != nil { + return 0, err + } + } + // Perform preliminary sanity checks on the block and its transactions. + // This is done prior to any attempts to accept the block data and connect + // the block to quickly eliminate blocks that are obviously incorrect and + // significantly increase the cost to attackers. Of particular note is that + // the checks include proof-of-work validation which means a significant + // amount of work must have been done in order to pass this check. err := checkBlockSanity(block, b.timeSource, flags, b.chainParams) if err != nil { + // When there is a block index entry for the block, which will be the + // case if the header was previously seen and passed all validation, + // mark it as having failed validation and all of its descendants as + // having an invalid ancestor. + if node != nil { + b.index.MarkBlockFailedValidation(node) + } return 0, err } - // This function should never be called with orphans or the genesis block. - blockHeader := &block.MsgBlock().Header - prevHash := &blockHeader.PrevBlock - if !b.index.HaveBlock(prevHash) { - // The fork length of orphans is unknown since they, by definition, do - // not connect to the best chain. - str := fmt.Sprintf("previous block %s is not known", prevHash) - return 0, ruleError(ErrMissingParent, str) + // Potentially accept the header to the block index when it does not already + // exist. 
+ // + // This entails fully validating it according to both context independent + // and context dependent checks and creating a block index entry for it. + // + // Note that the header sanity checks are skipped because they were just + // performed above as part of the full block sanity checks. + if node == nil { + const checkHeaderSanity = false + header := &block.MsgBlock().Header + node, err = b.maybeAcceptBlockHeader(header, flags, checkHeaderSanity) + if err != nil { + return 0, err + } } - // The block has passed all context independent checks and appears sane - // enough to potentially accept it into the block chain. - forkLen, err := b.maybeAcceptBlock(block, flags) + // Enable skipping some of the more expensive validation checks when the + // block is an ancestor of a known good checkpoint. + // + // NOTE: The fast add flag from the caller is used as a standin for now + // since that validation is currently handled by the calling code. In the + // future, the calling code should be updated to process all headers through + // this package and then the ability to specify the fast add flag should be + // removed along with the fast add portion of this check here so that it is + // solely determined internally. + if flags&BFFastAdd == BFFastAdd || b.isKnownCheckpointAncestor(node) { + b.index.SetStatusFlags(node, statusValidated) + flags |= BFFastAdd + } + + // Potentially accept the block data into the database and update the block + // index state to account for the full data now being available. + // + // This consists of performing several validation checks which depend on the + // block's position within the block chain and determining if the block, and + // any descendants of it are now eligible for full validation due to being + // fully linked (meaning a block has all of its own data available and all + // of its ancestors also have their data available). + // + // The returned linked block nodes are for those aforementioned blocks that + // are now eligible for validation. + linkedNodes, err := b.maybeAcceptBlockData(node, block, flags) if err != nil { return 0, err } - log.Debugf("Accepted block %v", blockHash) + // Write any modified block index entries to the database since any new + // headers will have added a new entry and the block will be marked as now + // having its data stored. + if err := b.flushBlockIndex(); err != nil { + return 0, err + } + + // Tentatively accept the linked blocks to the chain if they pass several + // validation checks which depend on having the full block data for all of + // their ancestors available and update the block index state to account for + // any that fail validation. + // + // Note that this is done here because it allows any blocks that fail this + // level of validation to be detected and discounted early before doing more + // work. + // + // Also, any blocks that do not ultimately end up becoming part of the best + // chain would otherwise not have contextual checks run on them, which is + // required before accepting them, without somewhat more complicated logic + // later to detect them. + var finalErr error + currentTip := b.bestChain.Tip() + b.addRecentBlock(block) + acceptedNodes, err := b.maybeAcceptBlocks(currentTip, linkedNodes, flags) + if err != nil { + finalErr = err - return forkLen, nil + // This intentionally falls through since the code below must run + // whether or not any blocks were accepted. 
+ } + + // Determine what the expected effects of the block, in terms of forks and + // reorganizations, will be on the chain and log it. + // + // 1) There is no effect if the block is not able to be validated yet + // 2) The block is causing a reorg when the new current best tip is not an + // ancestor of the new target tip + // 3) The block is either forking the best chain or extending an existing + // fork of it when it does not cause a reorg and it is not an ancestor + // of the new target tip + target := b.index.FindBestChainCandidate() + if b.index.CanValidate(node) { + triggersReorg := target.Ancestor(currentTip.height) != currentTip + if triggersReorg { + log.Infof("REORGANIZE: Block %v is causing a reorganize", node.hash) + } else if target.Ancestor(node.height) != node { + fork := b.bestChain.FindFork(node) + if fork == node.parent { + log.Infof("FORK: Block %v (height %v) forks the chain at "+ + "height %d/block %v, but does not cause a reorganize", + node.hash, node.height, fork.height, fork.hash) + } else { + log.Infof("EXTEND FORK: Block %v (height %v) extends a side "+ + "chain which forks the chain at height %d/block %v", + node.hash, node.height, fork.height, fork.hash) + } + } + } + + // Find the best chain candidate and attempt to reorganize the chain to it. + // This will have no effect when the target is the same as the current best + // chain tip. + // + // Note that any errors that take place in the reorg will be attributed to + // the block being processed. The calling code currently depends on this + // behavior, so care must be taken if this behavior is changed. + reorgErr := b.reorganizeChain(target) + switch { + // The final error is just the reorg error in the case there was no error + // carried forward from above. + case reorgErr != nil && finalErr == nil: + finalErr = reorgErr + + // The final error is a multi error when there is a reorg error and an error + // was carried forward from above. Additionally, in the case the reorg + // error is itself a multi error, combine it into a single multi error + // rather than wrapping it inside another one. + case reorgErr != nil && finalErr != nil: + var mErr MultiError + if errors.As(reorgErr, &mErr) { + combined := make([]error, 0, len(mErr)+1) + combined = append(combined, finalErr) + combined = append(combined, mErr...) + finalErr = MultiError(combined) + } else { + finalErr = MultiError{finalErr, reorgErr} + } + } + + // Notify the caller about any blocks that are now linked and were accepted + // to the block chain. The caller would typically want to react by relaying + // the inventory to other peers unless it was already relayed above via + // NTNewTipBlockChecked. + // + // Note that this intentionally waits until after the chain reorganization + // above so that the information is relative to the final best chain after + // validation. + newTip := b.bestChain.Tip() + b.chainLock.Unlock() + for _, n := range acceptedNodes { + // Skip any blocks which either themselves failed validation or are + // descenants of one that failed. 
+ if b.index.NodeStatus(n).KnownInvalid() { + continue + } + + var forkLen int64 + if fork := b.bestChain.FindFork(n); fork != nil { + forkLen = n.height - fork.height + } + b.sendNotification(NTBlockAccepted, &BlockAcceptedNtfnsData{ + BestHeight: newTip.height, + ForkLen: forkLen, + Block: block, + }) + } + b.chainLock.Lock() + + var forkLen int64 + if finalErr == nil { + if fork := b.bestChain.FindFork(node); fork != nil { + forkLen = node.height - fork.height + } + } + return forkLen, finalErr } diff --git a/blockchain/process_test.go b/blockchain/process_test.go index 1b131b0fd4..fd874d9773 100644 --- a/blockchain/process_test.go +++ b/blockchain/process_test.go @@ -6,6 +6,7 @@ package blockchain import ( "fmt" + "sync" "testing" "github.com/decred/dcrd/blockchain/v4/chaingen" @@ -14,6 +15,13 @@ import ( "github.com/decred/dcrd/wire" ) +// These variables are used to provide a shared set of generated blocks that +// are only generated one time on demand. +var ( + processTestGeneratorLock sync.Mutex + processTestGenerator *chaingen.Generator +) + // TestProcessOrder ensures processing-specific logic such as orphan handling, // duplicate block handling, and out-of-order reorgs to invalid blocks works as // expected. @@ -120,14 +128,15 @@ func TestProcessOrder(t *testing.T) { g.SetTip("b1") g.NextBlock("bpw1", outs[1], ticketOuts[1]) - g.AcceptedToSideChainWithExpectedTip("b2") g.NextBlock("bpw2", outs[2], ticketOuts[2]) g.NextBlock("bpw3", outs[3], ticketOuts[3], func(b *wire.MsgBlock) { // Increase the first proof-of-work coinbase subsidy. b.Transactions[0].TxOut[2].Value++ }) - g.AcceptBlock("bpw2") - g.RejectBlock("bpw3", ErrBadCoinbaseValue) + g.AcceptHeader("bpw1") + g.AcceptBlockData("bpw2") + g.AcceptBlockData("bpw3") + g.RejectBlock("bpw1", ErrBadCoinbaseValue) g.ExpectTip("bpw2") // Create a fork that ends with block that generates too much dev-org @@ -138,13 +147,1026 @@ func TestProcessOrder(t *testing.T) { // (bdc1 added last) g.SetTip("bpw1") g.NextBlock("bdc1", outs[2], ticketOuts[2]) - g.AcceptedToSideChainWithExpectedTip("bpw2") g.NextBlock("bdc2", outs[3], ticketOuts[3]) g.NextBlock("bdc3", outs[4], ticketOuts[4], func(b *wire.MsgBlock) { // Increase the proof-of-work dev subsidy by the provided amount. b.Transactions[0].TxOut[0].Value++ }) - g.AcceptBlock("bdc2") - g.RejectBlock("bdc3", ErrNoTax) + g.AcceptHeader("bdc1") + g.AcceptBlockData("bdc2") + g.AcceptBlockData("bdc3") + g.RejectBlock("bdc1", ErrNoTax) g.ExpectTip("bdc2") } + +// genSharedProcessTestBlocks either generates a new set of blocks used in the +// process logic tests or returns an already generated set if called more than +// once. +// +// +// The generated blocks form a fairly complex overall block tree as follows: +// - * denotes invalid header +// - ! denotes invalid block prior to connection (e.g. vote from bad ticket) +// - @ denotes invalid block when connected (e.g. 
double spend) +// - bfb is the required first block +// - bm# are blocks which allow coins to mature +// - bse# are blocks which reach stake enabled height +// - bsv# are blocks which reach stake validation height +// - bfbbad is a variant of the first block that pays too much +// - bfbbadchild and bfbbadchilda are valid blocks that descend from bfbbad +// - b1badhdr has a header with too few votes (hdr sanity failure) +// - b1badhdra has a header with a mismatched height (hdr positional failure) +// - b1bad has a ticket purchase that pays too little (block sanity failure) +// - b1bada is a block with an expired tx (block positional failure) +// - b5b is a block with a vote from bad ticket (context failure) +// - b6g is a block with a vote from bad ticket (context failure) +// - b5h is a block with a double spend (connect failure) +// - b6i is a block with a double spend (connect failure) +// - b7j is a block with a double spend (connect failure) +// +// genesis -> bfb -> bm0 -> ... -> bm# -> bse0 -> ... -> bse# -> bsv0 -> ... +// \-> bfbbad! -> bfbbadchild +// \-> bfbbadchilda +// +// ... -> bsv# -> bbm0 -> bbm1 -> ... -> bbm# -> b1 -> b2 -> ... +// \-> b1badhdr* +// \-> b1badhdra* +// \-> b1bad! +// \-> b1bada! +// +// ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 +// | \-> b10a +// \-> b4b -> b5b! -> b6b -> b7b -> b8b +// \-> b4c | \-> b8c -> b9c +// | \-> b7d -> b8d -> b9d +// | \-> b7e -> b8e +// | +// \-> b4f -> b5f -> b6f -> b7f -> b8f -> b9f -> b10f +// \-> b4g -> b5g -> b6g! -> b7g -> b8g +// \-> b4h -> b5h@ -> b6h -> b7h +// \-> b4i -> b5i -> b6i@ -> b7i -> b8i +// \-> b4j -> b5j -> b6j -> b7j@ +func genSharedProcessTestBlocks(t *testing.T) (*chaingen.Generator, error) { + processTestGeneratorLock.Lock() + defer processTestGeneratorLock.Unlock() + + // Only generate the process test chain once. + if processTestGenerator != nil { + return processTestGenerator, nil + } + + // Create a new database and chain instance needed to create the generator + // populated with the desired blocks. + params := chaincfg.RegNetParams() + g, teardownFunc := newChaingenHarness(t, params, "sharedprocesstestblocks") + defer teardownFunc() + + // Shorter versions of useful params for convenience. + coinbaseMaturity := params.CoinbaseMaturity + stakeValidationHeight := params.StakeValidationHeight + + // ------------------------------------------------------------------------- + // Block one variants. + // ------------------------------------------------------------------------- + + // Produce a first block with too much coinbase. + // + // genesis + // \-> bfbbad + g.CreateBlockOne("bfbbad", 1) + g.AssertTipHeight(1) + + // Create a block that descends from the invalid first block. + // + // genesis + // \-> bfbbad -> bfbbadchild + g.NextBlock("bfbbadchild", nil, nil) + g.AssertTipHeight(2) + + // Create a second block that descends from the invalid first block. + // + // genesis + // \-> bfbbad -> bfbbadchild + // \-> bfbbadchilda + g.SetTip("bfbbad") + g.NextBlock("bfbbadchilda", nil, nil) + g.AssertTipHeight(2) + + // ------------------------------------------------------------------------- + // Generate enough blocks to reach stake validation height. 
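Aside: the caching strategy for the shared generator above (a package-level mutex guarding a lazily built value) keeps the expensive block tree generation to a single run per test binary. A trimmed-down sketch of the pattern with a hypothetical placeholder type:

package example

import "sync"

// generator stands in for the cached chaingen.Generator.
type generator struct{}

var (
    sharedMu  sync.Mutex
    sharedGen *generator
)

// sharedGenerator lazily builds the shared value exactly once; subsequent
// callers receive the cached instance. sync.Once is an equivalent alternative
// when no additional error handling is needed.
func sharedGenerator(build func() *generator) *generator {
    sharedMu.Lock()
    defer sharedMu.Unlock()
    if sharedGen == nil {
        sharedGen = build()
    }
    return sharedGen
}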
+ // ------------------------------------------------------------------------- + + g.SetTip("genesis") + g.GenerateToStakeValidationHeight() + + // ------------------------------------------------------------------------- + // Generate enough blocks to have a known distance to the first mature + // coinbase outputs for all tests that follow. These blocks continue to + // purchase tickets to avoid running out of votes. + // + // ... -> bsv# -> bbm0 -> bbm1 -> ... -> bbm# + // ------------------------------------------------------------------------- + + var finalMaturityBlockName string + for i := uint16(0); i < coinbaseMaturity; i++ { + outs := g.OldestCoinbaseOuts() + blockName := fmt.Sprintf("bbm%d", i) + g.NextBlock(blockName, nil, outs[1:]) + g.SaveTipCoinbaseOuts() + finalMaturityBlockName = blockName + } + g.AssertTipHeight(uint32(stakeValidationHeight) + uint32(coinbaseMaturity)) + + // Collect spendable outputs into two different slices. The outs slice is + // intended to be used for regular transactions that spend from the output, + // while the ticketOuts slice is intended to be used for stake ticket + // purchases. + var outs []*chaingen.SpendableOut + var ticketOuts [][]chaingen.SpendableOut + for i := uint16(0); i < coinbaseMaturity; i++ { + coinbaseOuts := g.OldestCoinbaseOuts() + outs = append(outs, &coinbaseOuts[0]) + ticketOuts = append(ticketOuts, coinbaseOuts[1:]) + } + + // ------------------------------------------------------------------------- + // Generate various invalid variants of b1. + // + // Note that * below indicates a block that has an invalid header and ! + // indicates a block that is invalid prior to connection due to sanity + // and/or positional checks. + // + // ... -> bbm# + // \-> b1badhdr* + // \-> b1badhdra* + // \-> b1bad! + // \-> b1bada! + // ------------------------------------------------------------------------- + + // Generate block with too few votes to possibly have majority which results + // in a header that is rejected due to context-free (aka sanity) checks. + g.NextBlock("b1badhdr", outs[0], ticketOuts[0], g.ReplaceWithNVotes(1)) + + // Generate block with a mismatched height which results in a header that is + // rejected due to positional checks. + g.SetTip(finalMaturityBlockName) + g.NextBlock("b1badhdra", outs[0], ticketOuts[0], func(b *wire.MsgBlock) { + b.Header.Height-- + }) + + // Generate block with a ticket that pays too little which results in being + // rejected due to block sanity checks. + g.SetTip(finalMaturityBlockName) + g.NextBlock("b1bad", outs[0], ticketOuts[0], func(b *wire.MsgBlock) { + b.STransactions[5].TxOut[0].Value-- + }) + + // Generate block with an expired transaction which results in being + // rejected due to block positional checks. + g.SetTip(finalMaturityBlockName) + g.NextBlock("b1bada", outs[0], ticketOuts[0], func(b *wire.MsgBlock) { + b.Transactions[1].Expiry = b.Header.Height + }) + + // ------------------------------------------------------------------------- + // Generate a few blocks to serve as a base for more complex branches below. + // + // ... 
-> b1 -> b2 -> b3 + // ------------------------------------------------------------------------- + + g.SetTip(finalMaturityBlockName) + g.NextBlock("b1", outs[0], ticketOuts[0]) + g.NextBlock("b2", outs[1], ticketOuts[1]) + g.NextBlock("b3", outs[2], ticketOuts[2]) + + // ------------------------------------------------------------------------- + // Generate a block tree with several branches from different fork points + // where some of them have blocks that are invalid in various ways: + // + // Note that ! below indicates a block that is invalid prior to connection + // due to sanity, positional, and/or contextual checks and @ indicates a + // block that is invalid when actually connected as part of becoming the + // main chain. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // | \-> b10a + // \-> b4b -> b5b! -> b6b -> b7b -> b8b + // \-> b4c | \-> b8c -> b9c + // | \-> b7d -> b8d -> b9d + // | \-> b7e -> b8e + // | + // \-> b4f -> b5f -> b6f -> b7f -> b8f -> b9f -> b10f + // \-> b4g -> b5g -> b6g! -> b7g -> b8g + // \-> b4h -> b5h@ -> b6h -> b7h + // \-> b4i -> b5i -> b6i@ -> b7i -> b8i + // \-> b4j -> b5j -> b6j -> b7j@ + // ------------------------------------------------------------------------- + + // Generate a run of blocks that has the most cumulative work and thus will + // comprise the main chain. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + g.NextBlock("b4", outs[3], ticketOuts[3]) + g.NextBlock("b5", outs[4], ticketOuts[4]) + g.NextBlock("b6", outs[5], ticketOuts[5]) + g.NextBlock("b7", outs[6], ticketOuts[6]) + g.NextBlock("b8", outs[7], ticketOuts[7]) + g.NextBlock("b9", outs[8], ticketOuts[8]) + g.NextBlock("b10", outs[9], ticketOuts[9]) + g.NextBlock("b11", outs[10], ticketOuts[10]) + + // Generate a side chain with a single block just prior to the best tip. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b10a + g.SetTip("b9") + g.NextBlock("b10a", outs[9], ticketOuts[9]) + + // Generate a side chain that contains a block with a contextual validation + // error. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // | \-> b10a + // \-> b4b -> b5b! -> b6b -> b7b -> b8b + g.SetTip("b3") + g.NextBlock("b4b", outs[3], ticketOuts[3]) + g.NextBlock("b5b", outs[4], ticketOuts[4], func(b *wire.MsgBlock) { + // Corrupt the referenced ticket so the block is invalid due to an + // unavailable ticket. + b.STransactions[1].TxIn[1].PreviousOutPoint.Hash[0] ^= 0x55 + }) + g.NextBlock("b6b", outs[5], ticketOuts[5]) + g.NextBlock("b7b", outs[6], ticketOuts[6]) + g.NextBlock("b8b", outs[7], ticketOuts[7]) + + // Generate a side chain with a single block further back in history. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // | \-> b10a + // \-> b4b -> b5b! -> b6b -> b7b -> b8b + // \-> b4c + g.SetTip("b3") + g.NextBlock("b4c", outs[3], ticketOuts[3]) + + // Generate a side chain that itself forks from a side chain. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // | \-> b10a + // \-> b4b -> b5b! -> b6b -> b7b -> b8b + // \-> b4c \-> b8c -> b9c + g.SetTip("b7b") + g.NextBlock("b8c", outs[7], ticketOuts[7]) + g.NextBlock("b9c", outs[8], ticketOuts[8]) + + // Generate a side chain that itself forks from the same side chain as + // above, but from the prior block. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // | \-> b10a + // \-> b4b -> b5b! 
-> b6b -> b7b -> b8b + // \-> b4c | \-> b8c -> b9c + // \-> b7d -> b8d -> b9d + g.SetTip("b6b") + g.NextBlock("b7d", outs[6], ticketOuts[6]) + g.NextBlock("b8d", outs[7], ticketOuts[7]) + g.NextBlock("b9d", outs[8], ticketOuts[8]) + + // Generate another side chain that itself forks from the same side chain + // and block as the above. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // | \-> b10a + // \-> b4b -> b5b! -> b6b -> b7b -> b8b + // \-> b4c | \-> b8c -> b9c + // \-> b7d -> b8d -> b9d + // \-> b7e -> b8e + g.SetTip("b6b") + g.NextBlock("b7e", outs[6], ticketOuts[6]) + g.NextBlock("b8e", outs[7], ticketOuts[7]) + + // Generate a valid competing side chain that is a single block behind the + // best tip in terms of cumulative work. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4f -> b5f -> b6f -> b7f -> b8f -> b9f -> b10f + g.SetTip("b3") + g.NextBlock("b4f", outs[3], ticketOuts[3]) + g.NextBlock("b5f", outs[4], ticketOuts[4]) + g.NextBlock("b6f", outs[5], ticketOuts[5]) + g.NextBlock("b7f", outs[6], ticketOuts[6]) + g.NextBlock("b8f", outs[7], ticketOuts[7]) + g.NextBlock("b9f", outs[8], ticketOuts[8]) + g.NextBlock("b10f", outs[9], ticketOuts[9]) + + // Generate a side chain that contains an invalid block nested a few blocks + // deep where that block is invalid due to a contextual validation error. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4g -> b5g -> b6g! -> b7g -> b8g + g.SetTip("b3") + g.NextBlock("b4g", outs[3], ticketOuts[3]) + g.NextBlock("b5g", outs[4], ticketOuts[4]) + g.NextBlock("b6g", outs[5], ticketOuts[5], func(b *wire.MsgBlock) { + // Corrupt the referenced ticket so the block is invalid due to an + // unavailable ticket. + b.STransactions[1].TxIn[1].PreviousOutPoint.Hash[0] ^= 0x55 + }) + g.NextBlock("b7g", outs[6], ticketOuts[6]) + g.NextBlock("b8g", outs[7], ticketOuts[7]) + + // Generate a side chain that contains an invalid block nested a couple of + // blocks deep where that block is invalid due to a double spend of an + // output spent by one of its ancestors. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4h -> b5h@ -> b6h -> b7h + g.SetTip("b3") + g.NextBlock("b4h", outs[3], ticketOuts[3]) + g.NextBlock("b5h", outs[2], ticketOuts[4]) // Double spend + g.NextBlock("b6h", outs[5], ticketOuts[5]) + g.NextBlock("b7h", outs[6], ticketOuts[6]) + + // Generate a side chain that contains an invalid block nested a few blocks + // deep where that block is invalid due to a double spend of an output spent + // by one of its ancestors. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4i -> b5i -> b6i@ -> b7i -> b8i + g.SetTip("b3") + g.NextBlock("b4i", outs[3], ticketOuts[3]) + g.NextBlock("b5i", outs[4], ticketOuts[4]) + g.NextBlock("b6i", outs[2], ticketOuts[5]) // Double spend + g.NextBlock("b7i", outs[6], ticketOuts[6]) + g.NextBlock("b8i", outs[7], ticketOuts[7]) + + // Generate a side chain that contains an invalid block nested several + // blocks deep and also serves as its tip where that block is invalid due to + // a double spend of an output spent by one of its ancestors. + // + // ... 
-> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4j -> b5j -> b6j -> b7j@ + g.SetTip("b3") + g.NextBlock("b4j", outs[3], ticketOuts[3]) + g.NextBlock("b5j", outs[4], ticketOuts[4]) + g.NextBlock("b6j", outs[5], ticketOuts[5]) + g.NextBlock("b7j", outs[2], ticketOuts[6]) // Double spend + + processTestGenerator = g.Generator + return processTestGenerator, nil +} + +// TestProcessLogic ensures processing a mix of headers and blocks under a wide +// variety of fairly complex scenarios selects the expected best chain and +// properly tracks the header with the most cumulative work that is not known to +// be invalid as well as the one that is known to be invalid (when it exists). +func TestProcessLogic(t *testing.T) { + // Generate or reuse a shared chain generator with a set of blocks that form + // a fairly complex overall block tree including multiple forks such that + // some branches are valid and others contain invalid headers and/or blocks + // with multiple valid descendants as well as further forks at various + // heights from those invalid branches. + sharedGen, err := genSharedProcessTestBlocks(t) + if err != nil { + t.Fatalf("Failed to create generator: %v", err) + } + + // Create a new database and chain instance to run tests against. + g, teardownFunc := newChaingenHarnessWithGen(t, "processtest", sharedGen) + defer teardownFunc() + + // Shorter versions of useful params for convenience. + params := g.Params() + coinbaseMaturity := params.CoinbaseMaturity + stakeEnabledHeight := params.StakeEnabledHeight + stakeValidationHeight := params.StakeValidationHeight + + // ------------------------------------------------------------------------- + // Ensure the genesis block is rejected due to already being known, but its + // header is accepted since duplicate headers are ignored and it is not + // marked invalid due to the duplicate attempt. + // ------------------------------------------------------------------------- + + g.RejectBlock("genesis", ErrDuplicateBlock) + g.AcceptHeader("genesis") + g.ExpectBestHeader("genesis") + g.ExpectBestInvalidHeader("") + + // ------------------------------------------------------------------------- + // Test basic acceptance and rejection logic of valid headers of invalid + // blocks and their descendants. + // ------------------------------------------------------------------------- + + // Ensure that the valid header of a block that is invalid, but not yet + // known to be invalid, is accepted. + // + // genesis -> bfbbad + g.AcceptHeader("bfbbad") + + // Ensure that a valid header that is the child of a known header for a + // block that is invalid, but not yet known to be invalid, is accepted. + // + // genesis -> bfbbad -> bfbbadchild + g.AcceptHeader("bfbbadchild") + + // Process the invalid block and ensure it fails with the expected consensus + // violation error. Since the header was already previously accepted, it + // will be marked invalid. + // + // genesis + // \-> bfbbad + g.RejectBlock("bfbbad", ErrBadCoinbaseValue) + g.ExpectBestInvalidHeader("bfbbadchild") + + // Ensure that the header for a known invalid block is rejected. + // + // genesis + // \-> bfbbad + g.RejectHeader("bfbbad", ErrKnownInvalidBlock) + + // Ensure that a valid header that is already known and is the child of a + // another known header for a block that is known invalid is rejected due to + // a known invalid ancestor. 
+ // + // genesis + // \-> bfbbad + // \-> bfbbadchild (already known invalid) + g.RejectHeader("bfbbadchild", ErrInvalidAncestorBlock) + + // Ensure that a valid header that is NOT already known and is the child of + // a known header for a block that is known invalid is rejected due to a + // known invalid ancestor. + // + // genesis + // \-> bfbbad + // \-> bfbbadchilda (NOT already known) + g.RejectHeader("bfbbadchilda", ErrInvalidAncestorBlock) + g.ExpectBestInvalidHeader("bfbbadchild") + + // ------------------------------------------------------------------------- + // Ensure that all headers in the test chain through stake validation height + // and the base maturity blocks are accepted in order. + // + // genesis -> bfb -> bm0 -> ... -> bm# -> bse0 -> ... -> bse# -> ... + // + // ... bsv0 -> ... -> bsv# -> bbm0 -> ... -> bbm# + // ------------------------------------------------------------------------- + + g.AcceptHeader("bfb") + for i := uint16(0); i < coinbaseMaturity; i++ { + blockName := fmt.Sprintf("bm%d", i) + g.AcceptHeader(blockName) + } + tipHeight := int64(coinbaseMaturity) + 1 + for i := int64(0); tipHeight < stakeEnabledHeight; i++ { + blockName := fmt.Sprintf("bse%d", i) + g.AcceptHeader(blockName) + tipHeight++ + } + for i := int64(0); tipHeight < stakeValidationHeight; i++ { + blockName := fmt.Sprintf("bsv%d", i) + g.AcceptHeader(blockName) + tipHeight++ + } + for i := uint16(0); i < coinbaseMaturity; i++ { + blockName := fmt.Sprintf("bbm%d", i) + g.AcceptHeader(blockName) + } + + // ------------------------------------------------------------------------- + // Ensure that a header that has a context-free (aka sanity) failure is + // rejected as expected. Since invalid headers are not added to the block + // index, attempting to process it again is expected to return the specific + // failure reason as opposed to a known invalid block error. + // + // ... -> bbm# + // \-> b1badhdr + // ------------------------------------------------------------------------- + + g.RejectHeader("b1badhdr", ErrNotEnoughVotes) + g.RejectHeader("b1badhdr", ErrNotEnoughVotes) + g.ExpectBestInvalidHeader("bfbbadchild") + + // ------------------------------------------------------------------------- + // Ensure that a header that has a positional failure is rejected as + // expected. Since invalid headers are not added to the block index, + // attempting to process it again is expected to return the specific failure + // reason as opposed to a known invalid block error. + // + // ... -> bbm# + // \-> b1badhdra + // ------------------------------------------------------------------------- + + g.RejectHeader("b1badhdra", ErrBadBlockHeight) + g.RejectHeader("b1badhdra", ErrBadBlockHeight) + g.ExpectBestInvalidHeader("bfbbadchild") + + // ------------------------------------------------------------------------- + // Ensure both headers and blocks whose parent header is not known are + // rejected as expected. + // + // ... -> bbm# -> b1 (neither header nor block data known to chain) + // \-> b2 + // ------------------------------------------------------------------------- + + g.RejectHeader("b2", ErrMissingParent) + g.RejectBlock("b2", ErrMissingParent) + g.ExpectBestInvalidHeader("bfbbadchild") + + // ------------------------------------------------------------------------- + // Ensure that the first few headers in the test chain that build on the + // final base maturity block are accepted in order. + // + // ... 
-> bbm# -> b1 -> b2 -> b3 + // ------------------------------------------------------------------------- + + g.AcceptHeader("b1") + g.AcceptHeader("b2") + g.AcceptHeader("b3") + g.ExpectBestHeader("b3") + + // ------------------------------------------------------------------------- + // Ensure that headers that form a block index with a bunch of branches from + // different fork points are accepted and result in the one with the most + // cumulative work being identified as the best header that is not already + // known to be invalid. + // + // Note that the ! below indicates a block that is invalid due to violating + // a consensus rule, but has a valid header. + // + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // | \-> b10a + // \-> b4b -> b5b! -> b6b -> b7b -> b8b + // \-> b4c | \-> b8c -> b9c + // \-> b7d -> b8d -> b9d + // \-> b7e + // ------------------------------------------------------------------------- + + g.AcceptHeader("b4") + g.AcceptHeader("b4b") + g.AcceptHeader("b4c") + g.AcceptHeader("b5") + g.AcceptHeader("b5b") // Invalid block, but header valid. + g.AcceptHeader("b6") + g.AcceptHeader("b6b") + g.AcceptHeader("b7") + g.AcceptHeader("b7b") + g.AcceptHeader("b7d") + g.AcceptHeader("b7e") + g.AcceptHeader("b8") + g.AcceptHeader("b8b") + g.AcceptHeader("b8c") + g.AcceptHeader("b8d") + g.AcceptHeader("b9") + g.AcceptHeader("b9c") + g.AcceptHeader("b9d") + g.AcceptHeader("b10") + g.AcceptHeader("b10a") + g.AcceptHeader("b11") + g.ExpectBestHeader("b11") + + // Even though ultimately one of the tips of the branches that have the same + // cumulative work and are descendants of b5b will become the best known + // invalid header, the block data for b5b is not yet known and thus it is + // not yet known to be invalid. + g.ExpectBestInvalidHeader("bfbbadchild") + + // ------------------------------------------------------------------------- + // Process all of the block data in the test chain through stake validation + // height and the base maturity blocks to reach the point where the more + // complicated branching structure starts. + // + // All of the headers for these blocks are already known, so this also + // exercises in order processing as compared to the later tests which deal + // with out of order processing. + // + // Also ensure duplicate blocks are rejected. + // + // ... -> bfb -> bm0 -> ... -> bm# -> bse0 -> ... -> bse# -> ... + // + // ... bsv0 -> ... -> bsv# -> bbm0 -> ... -> bbm# + // ------------------------------------------------------------------------- + + g.AcceptBlock("bfb") + g.RejectBlock("bfb", ErrDuplicateBlock) + for i := uint16(0); i < coinbaseMaturity; i++ { + blockName := fmt.Sprintf("bm%d", i) + g.AcceptBlock(blockName) + } + tipHeight = int64(coinbaseMaturity) + 1 + for i := int64(0); tipHeight < stakeEnabledHeight; i++ { + blockName := fmt.Sprintf("bse%d", i) + g.AcceptBlock(blockName) + tipHeight++ + } + for i := int64(0); tipHeight < stakeValidationHeight; i++ { + blockName := fmt.Sprintf("bsv%d", i) + g.AcceptBlock(blockName) + tipHeight++ + } + for i := uint16(0); i < coinbaseMaturity; i++ { + blockName := fmt.Sprintf("bbm%d", i) + g.AcceptBlock(blockName) + } + + // ------------------------------------------------------------------------- + // Ensure that a block that has a context-free (aka sanity) failure is + // rejected as expected. 
+ //
+ // Since an invalid block that has not had its header added separately is
+ // not added to the block index, attempting to process it again must return
+ // the specific failure reason as opposed to a known invalid block error.
+ //
+ // This also means that adding the header of a block that has been rejected
+ // due to a sanity error will succeed because the original failure is not
+ // tracked. Thus, ensure that is the case and that processing the bad block
+ // again fails as expected and is marked as failed such that future attempts
+ // to either add the header or the block will fail due to being a known
+ // invalid block.
+ //
+ // ... -> bbm#
+ // \-> b1bad
+ // -------------------------------------------------------------------------
+
+ g.RejectBlock("b1bad", ErrNotEnoughStake)
+ g.RejectBlock("b1bad", ErrNotEnoughStake)
+ g.ExpectBestInvalidHeader("bfbbadchild")
+
+ g.AcceptHeader("b1bad")
+ g.RejectBlock("b1bad", ErrNotEnoughStake)
+ g.ExpectBestInvalidHeader("b1bad")
+ g.RejectHeader("b1bad", ErrKnownInvalidBlock)
+ g.RejectBlock("b1bad", ErrKnownInvalidBlock)
+
+ // -------------------------------------------------------------------------
+ // Ensure that a block that has a positional failure is rejected as
+ // expected. Since the header is valid and the block sanity checks pass,
+ // the header will be added to the index and then end up marked as invalid,
+ // so attempting to process the block again is expected to return a known
+ // invalid block error.
+ //
+ // ... -> bbm#
+ // \-> b1bada
+ // -------------------------------------------------------------------------
+
+ g.RejectBlock("b1bada", ErrExpiredTx)
+ g.RejectBlock("b1bada", ErrKnownInvalidBlock)
+
+ // Since both b1bad and b1bada have the same work and the block data is
+ // rejected for both, whichever has the lowest hash (when treated as a
+ // little-endian uint256) should be considered the best invalid.
+ chooseLowestHash := func(blockNames ...string) string {
+ lowestBlockName := blockNames[0]
+ lowestHash := g.BlockByName(lowestBlockName).Header.BlockHash()
+ for _, blockName := range blockNames[1:] {
+ hash := g.BlockByName(blockName).Header.BlockHash()
+ if compareHashesAsUint256LE(&hash, &lowestHash) < 0 {
+ lowestHash = hash
+ lowestBlockName = blockName
+ }
+ }
+ return lowestBlockName
+ }
+ g.ExpectBestInvalidHeader(chooseLowestHash("b1bad", "b1bada"))
+
+ // -------------------------------------------------------------------------
+ // Ensure that processing block data for known headers out of order,
+ // including one on a side chain, ends up with the expected block becoming
+ // the best chain tip.
+ //
+ // ... -> b1 -> b2 -> b3 -> b4 -> b5
+ // \-> b4b
+ // -------------------------------------------------------------------------
+
+ g.AcceptBlockData("b2")
+ g.AcceptBlockData("b4b")
+ g.AcceptBlockData("b3")
+ g.AcceptBlockData("b5")
+ g.AcceptBlockData("b4")
+ g.AcceptBlock("b1")
+ g.ExpectTip("b5")
+
+ // -------------------------------------------------------------------------
+ // Ensure that a duplicate block is rejected without marking either it or
+ // any of its descendants invalid.
+ //
+ // ... -> b1 -> b2 -> b3 -> b4 -> b5 -> b6 -> ... 
-> b10 -> b11
+ // \-> b4b -- ---
+ // ^^ ^^^
+ // current tip best header
+ // -------------------------------------------------------------------------
+
+ g.RejectBlock("b1", ErrDuplicateBlock)
+ g.ExpectTip("b5")
+ g.ExpectBestHeader("b11")
+
+ // -------------------------------------------------------------------------
+ // Ensure that a block on a side chain that has a contextual failure, and
+ // for which the header is already known, is rejected as expected. Since
+ // the block is then known to be invalid, also ensure that all of its
+ // descendant headers and blocks are subsequently rejected due to there
+ // being a known invalid ancestor.
+ //
+ // Note that the ! below indicates a block that is invalid due to violating
+ // a consensus rule during the contextual checks prior to connection.
+ //
+ // current tip best header
+ // vv vvv
+ // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11
+ // | \-> b10a
+ // \-> b4b -> b5b! -> b6b -> b7b -> b8b
+ // \-> b4c ---- | \-> b8c -> b9c
+ // ^^^^ \-> b7d -> b8d -> b9d
+ // bad block \-> b7e -> b8e
+ // -------------------------------------------------------------------------
+
+ // Process the bad block and ensure its header is rejected after since it is
+ // then a known bad block.
+ g.RejectBlock("b5b", ErrTicketUnavailable)
+ g.RejectHeader("b5b", ErrKnownInvalidBlock)
+
+ // Ensure that all of its descendant headers that were already known are
+ // rejected.
+ g.RejectHeader("b6b", ErrInvalidAncestorBlock)
+ g.RejectHeader("b7b", ErrInvalidAncestorBlock)
+ g.RejectHeader("b8b", ErrInvalidAncestorBlock)
+ g.RejectHeader("b8c", ErrInvalidAncestorBlock)
+ g.RejectHeader("b9c", ErrInvalidAncestorBlock)
+ g.RejectHeader("b7d", ErrInvalidAncestorBlock)
+ g.RejectHeader("b8d", ErrInvalidAncestorBlock)
+ g.RejectHeader("b9d", ErrInvalidAncestorBlock)
+ g.RejectHeader("b7e", ErrInvalidAncestorBlock)
+
+ // Ensure that all of its descendant blocks associated with the headers that
+ // were already known are rejected.
+ g.RejectBlock("b6b", ErrInvalidAncestorBlock)
+ g.RejectBlock("b7b", ErrInvalidAncestorBlock)
+ g.RejectBlock("b8b", ErrInvalidAncestorBlock)
+ g.RejectBlock("b8c", ErrInvalidAncestorBlock)
+ g.RejectBlock("b9c", ErrInvalidAncestorBlock)
+ g.RejectBlock("b7d", ErrInvalidAncestorBlock)
+ g.RejectBlock("b8d", ErrInvalidAncestorBlock)
+ g.RejectBlock("b9d", ErrInvalidAncestorBlock)
+ g.RejectBlock("b7e", ErrInvalidAncestorBlock)
+
+ // Ensure both the best known invalid and not known invalid headers are
+ // as expected.
+ //
+ // Since both b9c and b9d have the same work and the block data is rejected
+ // for both, whichever has the lowest hash (when treated as a little-endian
+ // uint256) should be considered the best invalid.
+ g.ExpectBestInvalidHeader(chooseLowestHash("b9c", "b9d"))
+ g.ExpectBestHeader("b11")
+
+ // Ensure that both a descendant header and block of the failed block that
+ // themselves were not already known are rejected.
+ g.RejectHeader("b8e", ErrInvalidAncestorBlock)
+ g.RejectBlock("b8e", ErrInvalidAncestorBlock)
+
+ // -------------------------------------------------------------------------
+ // Similar to above, but this time the invalid block is on a side chain
+ // leading up to a block that causes a reorg.
+ //
+ // Note that the ! below indicates a block that is invalid due to violating
+ // a consensus rule during the contextual checks prior to connection.
+ //
+ // current tip best header
+ // vv vvv
+ // ... 
-> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4g -> b5g -> b6g! -> b7g -> b8g + // ---- + // ^^^^ + // bad block + // ------------------------------------------------------------------------- + + // Accept the block data for b4g, b5g, and b7g but only the block header for + // b6g. The data for block b7g should be accepted because b6g is not yet + // known to be invalid. Notice that b8g is intentionally not processed yet. + g.AcceptBlockData("b4g") + g.AcceptBlockData("b5g") + g.AcceptHeader("b6g") + g.AcceptBlockData("b7g") + + // Process the bad block and ensure its header is rejected after since it is + // then a known bad block. + g.RejectBlock("b6g", ErrTicketUnavailable) + g.RejectHeader("b6g", ErrKnownInvalidBlock) + + // Ensure that all of its descendant headers and blocks that were already + // known are rejected. + g.RejectBlock("b6g", ErrDuplicateBlock) + g.RejectBlock("b7g", ErrDuplicateBlock) + + // Ensure that both a descendant header and block of the failed block that + // themselves were not already known are rejected. + g.RejectHeader("b8g", ErrInvalidAncestorBlock) + g.RejectBlock("b8g", ErrInvalidAncestorBlock) + + // Ensure both the best known invalid and not known invalid headers are + // unchanged. + g.ExpectBestInvalidHeader(chooseLowestHash("b9c", "b9d")) + g.ExpectBestHeader("b11") + + // ------------------------------------------------------------------------- + // Similar to above, but this time with a connect failure instead of a + // contextual one on a side chain leading up to a block that causes a reorg. + // + // Note that the @ below indicates a block that is invalid due to violating + // a consensus rule during block connection. + // + // current tip best header + // vv vvv + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4h -> b5h@ -> b6h -> b7h + // ---- + // ^^^^ + // bad block + // ------------------------------------------------------------------------- + + // Accept the block data for b4h and b6h, but only the block header for b5h. + // The data for block b6h should be accepted because b5h is not yet known to + // be invalid. + g.AcceptBlockData("b4h") + g.AcceptHeader("b5h") + g.AcceptBlockData("b6h") + + // Process the bad block and ensure its header is rejected after since it is + // then a known bad block. + g.RejectBlock("b5h", ErrMissingTxOut) + g.RejectHeader("b5h", ErrKnownInvalidBlock) + + // Ensure that all of its descendant headers and blocks that were already + // known are rejected. + g.RejectHeader("b6h", ErrInvalidAncestorBlock) + g.RejectHeader("b7h", ErrInvalidAncestorBlock) + g.RejectBlock("b6h", ErrDuplicateBlock) + g.RejectBlock("b7h", ErrInvalidAncestorBlock) + + // Ensure both the best known invalid and not known invalid headers are + // unchanged. + g.ExpectBestInvalidHeader(chooseLowestHash("b9c", "b9d")) + g.ExpectBestHeader("b11") + + // ------------------------------------------------------------------------- + // Similar to above, but this time with a connect failure in a side chain + // block that is the direct target of a reorg. This also doubles to ensure + // that blocks that are seen first take precedence in best chain selection. + // + // Note that the @ below indicates a block that is invalid due to violating + // a consensus rule during block connection. + // + // current tip best header + // vv vvv + // ... 
-> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11
+ // \-> b4i -> b5i -> b6i@ -> b7i -> b8i
+ // ----
+ // ^^^^
+ // bad block
+ // -------------------------------------------------------------------------
+
+ // Accept the block data for b4i and b5i, but only the block header for b6i.
+ // The header for block b6i should be accepted since the block is not yet
+ // known to be invalid.
+ g.AcceptBlockData("b4i")
+ g.AcceptBlockData("b5i")
+ g.AcceptHeader("b6i")
+
+ // Process the bad block and ensure its header is rejected after since it is
+ // then a known bad block. The chain should reorg back to b5 since it was
+ // seen first.
+ g.RejectBlock("b6i", ErrMissingTxOut)
+ g.RejectHeader("b6i", ErrKnownInvalidBlock)
+ g.ExpectTip("b5")
+
+ // Ensure that all of its descendant headers and blocks that were already
+ // known are rejected.
+ g.RejectHeader("b7i", ErrInvalidAncestorBlock)
+ g.RejectBlock("b7i", ErrInvalidAncestorBlock)
+
+ // Ensure that both a descendant header and block of the failed block that
+ // themselves were not already known are rejected. Notice that the header
+ // for b7i was never accepted, so these should fail due to a missing parent.
+ g.RejectHeader("b8i", ErrMissingParent)
+ g.RejectBlock("b8i", ErrMissingParent)
+
+ // Ensure both the best known invalid and not known invalid headers are
+ // unchanged.
+ g.ExpectBestInvalidHeader(chooseLowestHash("b9c", "b9d"))
+ g.ExpectBestHeader("b11")
+
+ // -------------------------------------------------------------------------
+ // Ensure that attempting to reorg to an invalid block on a side chain,
+ // where the blocks leading up to it are valid and have more cumulative
+ // work than the current tip, behaves as expected. The chain should reorg
+ // to the final valid block since it has more cumulative proof of work.
+ //
+ // Note that the @ below indicates a block that is invalid due to violating
+ // a consensus rule during block connection.
+ //
+ // current tip best header
+ // vv vvv
+ // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11
+ // \-> b4j -> b5j -> b6j -> b7j@
+ // ----
+ // ^^^^
+ // bad block
+ // -------------------------------------------------------------------------
+
+ // Accept the block data for b4j, b5j, and b7j, but only the block header
+ // for b6j. The block data for b7j should be accepted at this point even
+ // though it is invalid because it can't be checked yet since the data for
+ // b6j is not available.
+ g.ExpectTip("b5")
+ g.AcceptBlockData("b4j")
+ g.AcceptBlockData("b5j")
+ g.AcceptHeader("b6j")
+ g.AcceptBlockData("b7j")
+
+ // Make the block data for b6j available in order to complete the link
+ // needed to trigger the reorg and check the blocks. Since errors in reorgs
+ // are currently attributed to the block that caused them, the error in b7j
+ // should be attributed to b6j, but b6j should still end up as the tip since
+ // it is valid.
+ g.RejectBlock("b6j", ErrMissingTxOut)
+ g.ExpectTip("b6j")
+
+ // Ensure both the best known invalid and not known invalid headers are
+ // unchanged.
+ g.ExpectBestInvalidHeader(chooseLowestHash("b9c", "b9d"))
+ g.ExpectBestHeader("b11")
+
+ // -------------------------------------------------------------------------
+ // Ensure reorganizing to another valid branch when the block data is
+ // processed out of order works as intended.
+ //
+ // best header
+ // vvv
+ // ... 
-> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4f -> b5f -> b6f -> b7f -> b8f -> b9f -> b10f + // \-> b4j -> b5j -> b6j -> b7j@ + // --- + // ^^^ + // current tip + // ------------------------------------------------------------------------- + + g.AcceptHeader("b4f") + g.AcceptHeader("b5f") + g.AcceptHeader("b6f") + g.AcceptHeader("b7f") + g.AcceptHeader("b8f") + g.AcceptHeader("b9f") + g.AcceptHeader("b10f") + g.ExpectBestHeader("b11") + + // Reorganize to a branch that actually has less work than the full initial + // branch, but since its block data is not yet known, the secondary branch + // data becoming available forces a reorg since it will be the best known + // branch. + // + // best header + // vvv + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4f -> b5f -> b6f -> b7f -> b8f -> b9f -> b10f + // \-> b4j -> b5j -> b6j ^^^^ + // --- ---- + // ^^^ new tip + // orig tip + g.AcceptBlockDataWithExpectedTip("b9f", "b6j") + g.AcceptBlockDataWithExpectedTip("b7f", "b6j") + g.AcceptBlockDataWithExpectedTip("b8f", "b6j") + g.AcceptBlockDataWithExpectedTip("b10f", "b6j") + g.AcceptBlockDataWithExpectedTip("b6f", "b6j") + g.AcceptBlockDataWithExpectedTip("b4f", "b6j") + g.AcceptBlockDataWithExpectedTip("b5f", "b10f") + + // Make the data for the initial branch available, again in an out of order + // fashion, but with an additional test condition built in such that all of + // the data up to the point the initial branch has the exact same work as + // the active branch is available to ensure the first seen block data takes + // precedence despite the fact the header for the initial branch was seen + // first. + // + // best header + // vvv + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4f -> b5f -> b6f -> b7f -> b8f -> b9f -> b10f + // ^^^^ + // ---- + // current tip + g.AcceptBlockDataWithExpectedTip("b8", "b10f") + g.AcceptBlockDataWithExpectedTip("b10", "b10f") + g.AcceptBlockDataWithExpectedTip("b7", "b10f") + g.AcceptBlockDataWithExpectedTip("b9", "b10f") + g.AcceptBlockDataWithExpectedTip("b6", "b10f") + + // Finally, accept the data for final block on the initial branch that + // causes the reorg. + // + // new tip + // vvv + // ... -> b3 -> b4 -> b5 -> b6 -> b7 -> b8 -> b9 -> b10 -> b11 + // \-> b4f -> b5f -> b6f -> b7f -> b8f -> b9f -> b10f + // ^^^^ + // ---- + // orig tip + g.AcceptBlockDataWithExpectedTip("b11", "b11") +} diff --git a/blockchain/prune.go b/blockchain/prune.go index 671c841379..4f5150b774 100644 --- a/blockchain/prune.go +++ b/blockchain/prune.go @@ -74,10 +74,10 @@ func newChainPruner(chain *BlockChain) *chainPruner { } } -// pruneChainIfNeeded checks the current time versus the time of the last pruning. -// If the blockchain hasn't been pruned in this time, it initiates a new pruning. +// pruneChainIfNeeded removes references to old information that should no +// longer be held in memory if the pruning interval has elapsed. // -// pruneChainIfNeeded must be called with the chainLock held for writes. +// This function MUST be called with the chain lock held (for writes). 
func (c *chainPruner) pruneChainIfNeeded() { now := time.Now() duration := now.Sub(c.lastPruneTime) diff --git a/blockchain/stakenode.go b/blockchain/stakenode.go index a12661c51c..acd3f44d0c 100644 --- a/blockchain/stakenode.go +++ b/blockchain/stakenode.go @@ -76,7 +76,8 @@ func (b *BlockChain) maybeFetchTicketInfo(node *blockNode) error { return err } - node.populateTicketInfo(stake.FindSpentTicketsInBlock(block.MsgBlock())) + ticketInfo := stake.FindSpentTicketsInBlock(block.MsgBlock()) + b.index.PopulateTicketInfo(node, ticketInfo) } return nil @@ -101,7 +102,7 @@ func (b *BlockChain) fetchStakeNode(node *blockNode) (*stake.Node, error) { // Create the requested stake node from the parent stake node if it is // already loaded as an optimization. - if node.parent.stakeNode != nil { + if node.parent != nil && node.parent.stakeNode != nil { // Populate the prunable ticket information as needed. if err := b.maybeFetchTicketInfo(node); err != nil { return nil, err diff --git a/blockchain/stakeversion.go b/blockchain/stakeversion.go index 7b696302cb..20ece1fe45 100644 --- a/blockchain/stakeversion.go +++ b/blockchain/stakeversion.go @@ -303,7 +303,7 @@ func (b *BlockChain) calcStakeVersion(prevNode *blockNode) uint32 { // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) calcStakeVersionByHash(hash *chainhash.Hash) (uint32, error) { prevNode := b.index.LookupNode(hash) - if prevNode == nil { + if prevNode == nil || !b.index.CanValidate(prevNode) { return 0, fmt.Errorf("block %s is not known", hash) } diff --git a/blockchain/thresholdstate.go b/blockchain/thresholdstate.go index 90ecd0ccfc..392ec5c7cf 100644 --- a/blockchain/thresholdstate.go +++ b/blockchain/thresholdstate.go @@ -523,13 +523,8 @@ func (b *BlockChain) stateLastChanged(version uint32, node *blockNode, checker t // // This function is safe for concurrent access. func (b *BlockChain) StateLastChangedHeight(hash *chainhash.Hash, version uint32, deploymentID string) (int64, error) { - // NOTE: The requirement for the node being fully validated here is strictly - // stronger than what is actually required. In reality, all that is needed - // is for the block data for the node and all of its ancestors to be - // available, but there is not currently any tracking to be able to - // efficiently determine that state. node := b.index.LookupNode(hash) - if node == nil || !b.index.NodeStatus(node).HasValidated() { + if node == nil || !b.index.CanValidate(node) { return 0, unknownBlockError(hash) } @@ -573,13 +568,8 @@ func (b *BlockChain) StateLastChangedHeight(hash *chainhash.Hash, version uint32 // // This function is safe for concurrent access. func (b *BlockChain) NextThresholdState(hash *chainhash.Hash, version uint32, deploymentID string) (ThresholdStateTuple, error) { - // NOTE: The requirement for the node being fully validated here is strictly - // stronger than what is actually required. In reality, all that is needed - // is for the block data for the node and all of its ancestors to be - // available, but there is not currently any tracking to be able to - // efficiently determine that state. node := b.index.LookupNode(hash) - if node == nil || !b.index.NodeStatus(node).HasValidated() { + if node == nil || !b.index.CanValidate(node) { invalidState := ThresholdStateTuple{ State: ThresholdInvalid, Choice: invalidChoice, @@ -631,7 +621,7 @@ func (b *BlockChain) isLNFeaturesAgendaActive(prevNode *blockNode) (bool, error) // This function is safe for concurrent access. 
func (b *BlockChain) IsLNFeaturesAgendaActive(prevHash *chainhash.Hash) (bool, error) { prevNode := b.index.LookupNode(prevHash) - if prevNode == nil || !b.index.NodeStatus(prevNode).HasValidated() { + if prevNode == nil || !b.index.CanValidate(prevNode) { return false, unknownBlockError(prevHash) } @@ -678,13 +668,8 @@ func (b *BlockChain) isHeaderCommitmentsAgendaActive(prevNode *blockNode) (bool, // // This function is safe for concurrent access. func (b *BlockChain) IsHeaderCommitmentsAgendaActive(prevHash *chainhash.Hash) (bool, error) { - // NOTE: The requirement for the node being fully validated here is strictly - // stronger than what is actually required. In reality, all that is needed - // is for the block data for the node and all of its ancestors to be - // available, but there is not currently any tracking to be able to - // efficiently determine that state. node := b.index.LookupNode(prevHash) - if node == nil || !b.index.NodeStatus(node).HasValidated() { + if node == nil || !b.index.CanValidate(node) { return false, unknownBlockError(prevHash) } @@ -733,7 +718,7 @@ func (b *BlockChain) isTreasuryAgendaActive(prevNode *blockNode) (bool, error) { // This function is safe for concurrent access. func (b *BlockChain) IsTreasuryAgendaActive(prevHash *chainhash.Hash) (bool, error) { prevNode := b.index.LookupNode(prevHash) - if prevNode == nil || !b.index.NodeStatus(prevNode).HasValidated() { + if prevNode == nil || !b.index.CanValidate(prevNode) { return false, unknownBlockError(prevHash) } diff --git a/blockchain/treasury.go b/blockchain/treasury.go index 0ff70a6ef6..7773d238df 100644 --- a/blockchain/treasury.go +++ b/blockchain/treasury.go @@ -519,7 +519,7 @@ type TreasuryBalanceInfo struct { // TreasuryBalance returns treasury balance information as of the given block. func (b *BlockChain) TreasuryBalance(hash *chainhash.Hash) (*TreasuryBalanceInfo, error) { node := b.index.LookupNode(hash) - if node == nil || !b.index.NodeStatus(node).HaveData() { + if node == nil || !b.index.CanValidate(node) { return nil, unknownBlockError(hash) } diff --git a/blockchain/validate.go b/blockchain/validate.go index e95ed04882..47318dc7a2 100644 --- a/blockchain/validate.go +++ b/blockchain/validate.go @@ -907,9 +907,8 @@ func (b *BlockChain) checkBlockHeaderPositional(header *wire.BlockHeader, prevNo expDiff := b.calcNextRequiredDifficulty(prevNode, header.Timestamp) blockDifficulty := header.Bits if blockDifficulty != expDiff { - str := fmt.Sprintf("block difficulty of %d is not the"+ - " expected value of %d", blockDifficulty, - expDiff) + str := fmt.Sprintf("block difficulty of %d is not the "+ + "expected value of %d", blockDifficulty, expDiff) return ruleError(ErrUnexpectedDifficulty, str) } @@ -936,25 +935,29 @@ func (b *BlockChain) checkBlockHeaderPositional(header *wire.BlockHeader, prevNo return ruleError(ErrBadBlockHeight, errStr) } - // Ensure chain matches up to predetermined checkpoints. - blockHash := header.BlockHash() - if !b.verifyCheckpoint(blockHeight, &blockHash) { - str := fmt.Sprintf("block at height %d does not match "+ - "checkpoint hash", blockHeight) - return ruleError(ErrBadCheckpoint, str) - } - // Prevent blocks that fork the main chain before the most recently known // checkpoint. This prevents storage of new, otherwise valid, blocks which // build off of old blocks that are likely at a much easier difficulty and // therefore could be used to waste cache and disk space. 
- if b.checkpointNode != nil && blockHeight < b.checkpointNode.height { - str := fmt.Sprintf("block at height %d forks the main chain "+ - "before the previous checkpoint at height %d", - blockHeight, b.checkpointNode.height) + checkpoint := b.checkpointNode + blockHash := header.BlockHash() + if checkpoint != nil && blockHeight < checkpoint.height && + (checkpoint.Ancestor(prevNode.height) != prevNode || + b.index.LookupNode(&blockHash) == nil) { + + str := fmt.Sprintf("block at height %d forks the main chain before "+ + "the previous checkpoint at height %d", blockHeight, + checkpoint.height) return ruleError(ErrForkTooOld, str) } + // Ensure chain matches up to predetermined checkpoints. + if !b.verifyCheckpoint(blockHeight, &blockHash) { + str := fmt.Sprintf("block at height %d does not match "+ + "checkpoint hash", blockHeight) + return ruleError(ErrBadCheckpoint, str) + } + if !fastAdd { // Reject old version blocks once a majority of the network has // upgraded. @@ -989,33 +992,23 @@ func (b *BlockChain) checkBlockHeaderPositional(header *wire.BlockHeader, prevNo return nil } -// checkBlockPositional performs several validation checks on the block which -// depend on its position within the block chain and having the headers of all -// ancestors available. These checks do not, and must not, rely on having the -// full block data of all ancestors available. +// checkBlockDataPositional performs several validation checks on the block data +// (not including the header) which depend on its position within the block +// chain and having the headers of all ancestors available. These checks do +// not, and must not, rely on having the full block data of all ancestors +// available. // // The flags modify the behavior of this function as follows: // - BFFastAdd: The transactions are not checked to see if they are expired and // the coinbase height check is not performed. // -// The flags are also passed to checkBlockHeaderPositional. See its -// documentation for how the flags modify its behavior. -func (b *BlockChain) checkBlockPositional(block *dcrutil.Block, prevNode *blockNode, flags BehaviorFlags) error { +// This function MUST be called with the chain state lock held (for reads). +func (b *BlockChain) checkBlockDataPositional(block *dcrutil.Block, prevNode *blockNode, flags BehaviorFlags) error { // The genesis block is valid by definition. if prevNode == nil { return nil } - // Perform all block header related validation checks that depend on its - // position within the block chain and having the headers of all - // ancestors available, but do not rely on having the full block data of - // all ancestors available. - header := &block.MsgBlock().Header - err := b.checkBlockHeaderPositional(header, prevNode, flags) - if err != nil { - return err - } - fastAdd := flags&BFFastAdd == BFFastAdd if !fastAdd { // The height of this block is one more than the referenced previous @@ -1044,6 +1037,37 @@ func (b *BlockChain) checkBlockPositional(block *dcrutil.Block, prevNode *blockN return nil } +// checkBlockPositional performs several validation checks on the block (both +// its header and data) which depend on its position within the block chain and +// having the headers of all ancestors available. These checks do not, and must +// not, rely on having the full block data of all ancestors available. +// +// The flags do not modify the behavior of this function directly, however they +// are needed to pass along to checkBlockHeaderPositional and +// checkBlockDataPositional. 
+func (b *BlockChain) checkBlockPositional(block *dcrutil.Block, prevNode *blockNode, flags BehaviorFlags) error { + // The genesis block is valid by definition. + if prevNode == nil { + return nil + } + + // Perform all block header related validation checks that depend on its + // position within the block chain and having the headers of all + // ancestors available, but do not rely on having the full block data of + // all ancestors available. + header := &block.MsgBlock().Header + err := b.checkBlockHeaderPositional(header, prevNode, flags) + if err != nil { + return err + } + + // Perform all block data related validation checks that depend on its + // position within the block chain and having the headers of all ancestors + // available, but do not rely on having the full block data of all ancestors + // available. + return b.checkBlockDataPositional(block, prevNode, flags) +} + // checkBlockHeaderContext performs several validation checks on the block // header which depend on having the full block data for all of its ancestors // available. This includes checks which depend on tallying the results of @@ -1418,17 +1442,17 @@ func (b *BlockChain) checkMerkleRoots(block *wire.MsgBlock, prevNode *blockNode) return nil } -// checkBlockContext performs several validation checks on the block which depend -// on having the full block data for all of its ancestors available. This -// includes checks which depend on tallying the results of votes, because votes -// are part of the block data. +// checkBlockContext performs several validation checks on the block which +// depend on having the full block data for all of its ancestors available. +// This includes checks which depend on tallying the results of votes, because +// votes are part of the block data. // // It should be noted that rule changes that have become buried deep enough // typically will eventually be transitioned to using well-known activation // points for efficiency purposes at which point the associated checks no longer // require having direct access to the historical votes, and therefore may be -// transitioned to checkBlockPositional at that time. Conversely, any checks -// in that function which become conditional based on the results of a vote will +// transitioned to checkBlockPositional at that time. Conversely, any checks in +// that function which become conditional based on the results of a vote will // necessarily need to be transitioned to this function. // // The flags modify the behavior of this function as follows: @@ -1444,6 +1468,11 @@ func (b *BlockChain) checkBlockContext(block *dcrutil.Block, prevNode *blockNode return nil } + // No need to check the block again when it has already been checked. + if b.recentContextChecks.Contains(*block.Hash()) { + return nil + } + // Perform all block header related validation checks which depend on // having the full block data for all of its ancestors available. msgBlock := block.MsgBlock() @@ -2891,17 +2920,18 @@ func CountP2SHSigOps(tx *dcrutil.Tx, isCoinBaseTx bool, isStakeBaseTx bool, view return 0, nil } - // Exit in some cases if treasury agenda is enabled. + // Treasury spend and treasurybase transactions have no P2SH inputs, but + // they are only recognized as the associated transactions once the + // treasury agenda is active. 
+ msgTx := tx.MsgTx() if isTreasuryEnabled { - if stake.IsTSpend(tx.MsgTx()) || - stake.IsTreasuryBase(tx.MsgTx()) { + if stake.IsTSpend(msgTx) || stake.IsTreasuryBase(msgTx) { return 0, nil } } // Accumulate the number of signature operations in all transaction // inputs. - msgTx := tx.MsgTx() totalSigOps := 0 for txInIndex, txIn := range msgTx.TxIn { // Ensure the referenced input transaction is available. @@ -3457,12 +3487,6 @@ func (b *BlockChain) tspendChecks(prevNode *blockNode, block *dcrutil.Block) err // // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) checkConnectBlock(node *blockNode, block, parent *dcrutil.Block, view *UtxoViewpoint, stxos *[]spentTxOut, hdrCommitments *headerCommitmentData) error { - // If the side chain blocks end up in the database, a call to - // CheckBlockSanity should be done here in case a previous version - // allowed a block that is no longer valid. However, since the - // implementation only currently uses memory for the side chain blocks, - // it isn't currently necessary. - // Ensure the view is for the node being checked. parentHash := &block.MsgBlock().Header.PrevBlock if !view.BestHash().IsEqual(parentHash) { @@ -3751,11 +3775,7 @@ func (b *BlockChain) CheckConnectBlockTemplate(block *dcrutil.Block) error { } // Perform context-free sanity checks on the block and its transactions. - isTreasuryEnabled, err := b.isTreasuryAgendaActive(prevNode) - if err != nil { - return err - } - err = checkBlockSanity(block, b.timeSource, flags, b.chainParams) + err := checkBlockSanity(block, b.timeSource, flags, b.chainParams) if err != nil { return err } @@ -3809,10 +3829,13 @@ func (b *BlockChain) CheckConnectBlockTemplate(block *dcrutil.Block) error { } // Load all of the spent txos for the tip block from the spend journal. + isTreasuryEnabled, err := b.isTreasuryAgendaActive(prevNode) + if err != nil { + return err + } var stxos []spentTxOut err = b.db.View(func(dbTx database.Tx) error { - stxos, err = dbFetchSpendJournalEntry(dbTx, tipBlock, - isTreasuryEnabled) + stxos, err = dbFetchSpendJournalEntry(dbTx, tipBlock, isTreasuryEnabled) return err }) if err != nil { @@ -3840,6 +3863,8 @@ func (b *BlockChain) CheckConnectBlockTemplate(block *dcrutil.Block) error { // tickets that is needed to reach the next block at which any outstanding // immature ticket purchases that would provide the necessary live tickets // mature. +// +// This function is safe for concurrent access. func (b *BlockChain) checkTicketExhaustion(prevNode *blockNode, ticketPurchases uint8) error { // Nothing to do if the chain is not far enough along where ticket // exhaustion could be an issue. @@ -3905,9 +3930,11 @@ func (b *BlockChain) checkTicketExhaustion(prevNode *blockNode, ticketPurchases // drops below the number of tickets that is needed to reach the next block at // which any outstanding immature ticket purchases that would provide the // necessary live tickets mature. +// +// This function is safe for concurrent access. 
func (b *BlockChain) CheckTicketExhaustion(hash *chainhash.Hash, ticketPurchases uint8) error { node := b.index.LookupNode(hash) - if node == nil || !b.index.NodeStatus(node).HaveData() { + if node == nil { return unknownBlockError(hash) } diff --git a/cmd/addblock/import.go b/cmd/addblock/import.go index 699e8ae3bf..379f4d0528 100644 --- a/cmd/addblock/import.go +++ b/cmd/addblock/import.go @@ -110,16 +110,14 @@ func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { // Skip blocks that already exist. blockHash := block.Hash() - exists := bi.chain.HaveBlock(blockHash) - if exists { + if bi.chain.HaveBlock(blockHash) { return false, nil } // Don't bother trying to process orphans. prevHash := &block.MsgBlock().Header.PrevBlock if !prevHash.IsEqual(&zeroHash) { - exists := bi.chain.HaveBlock(prevHash) - if !exists { + if !bi.chain.HaveBlock(prevHash) { return false, fmt.Errorf("import file contains block "+ "%v which does not link to the available "+ "block chain", prevHash) diff --git a/server.go b/server.go index 73b94065f2..e363431e1e 100644 --- a/server.go +++ b/server.go @@ -2723,6 +2723,9 @@ func (s *server) handleBlockchainNotification(notification *blockchain.Notificat // Stake tickets are spent or missed from the most recently connected block. case blockchain.NTSpentAndMissedTickets: + // WARNING: The chain lock is not released before sending this + // notification, so care must be taken to avoid calling chain functions + // which could result in a deadlock. tnd, ok := notification.Data.(*blockchain.TicketNotificationsData) if !ok { syncLog.Warnf("Tickets connected notification is not " + @@ -2736,6 +2739,9 @@ func (s *server) handleBlockchainNotification(notification *blockchain.Notificat // Stake tickets are matured from the most recently connected block. case blockchain.NTNewTickets: + // WARNING: The chain lock is not released before sending this + // notification, so care must be taken to avoid calling chain functions + // which could result in a deadlock. tnd, ok := notification.Data.(*blockchain.TicketNotificationsData) if !ok { syncLog.Warnf("Tickets connected notification is not " + @@ -2830,18 +2836,27 @@ func (s *server) handleBlockchainNotification(notification *blockchain.Notificat // Chain reorganization has commenced. case blockchain.NTChainReorgStarted: + // WARNING: The chain lock is not released before sending this + // notification, so care must be taken to avoid calling chain functions + // which could result in a deadlock. if s.bg != nil { s.bg.ChainReorgStarted() } // Chain reorganization has concluded. case blockchain.NTChainReorgDone: + // WARNING: The chain lock is not released before sending this + // notification, so care must be taken to avoid calling chain functions + // which could result in a deadlock. if s.bg != nil { s.bg.ChainReorgDone() } // The blockchain is reorganizing. case blockchain.NTReorganization: + // WARNING: The chain lock is not released before sending this + // notification, so care must be taken to avoid calling chain functions + // which could result in a deadlock. rd, ok := notification.Data.(*blockchain.ReorganizationNtfnsData) if !ok { syncLog.Warnf("Chain reorganization notification is malformed") From b29315a2c2655df7fbd805d3630a459e6ca5faa9 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Wed, 23 Dec 2020 15:44:35 -0600 Subject: [PATCH 5/7] blockchain: Improve current detection. 
This reworks the logic that is used to determine if the chain believes it is current to be more robust and to take advantage of the fact that the best known header is now available. In particular, it introduces two new constraints: - A latching mechanism that prevents the chain from flipping back to being non current unless no new blocks have been seen for an extended period of time - The chain only latches to current once it is synced to the header with the most cumulative work that is not known to be invalid in addition to the existing conditions --- blockchain/chain.go | 101 +++++++++++++++++++++++++++---------- blockchain/common_test.go | 14 +++++ blockchain/process.go | 2 +- blockchain/process_test.go | 4 +- 4 files changed, 92 insertions(+), 29 deletions(-) diff --git a/blockchain/chain.go b/blockchain/chain.go index 1fed6ca650..81bc7dba34 100644 --- a/blockchain/chain.go +++ b/blockchain/chain.go @@ -173,6 +173,12 @@ type BlockChain struct { index *blockIndex bestChain *chainView + // isCurrentLatch tracks whether or not the chain believes it is current in + // such a way that once it becomes current it latches to that state unless + // the chain falls too far behind again which likely indicates it is forked + // from the network. It is protected by the chain lock. + isCurrentLatch bool + // These fields house caches for blocks to facilitate faster chain reorgs, // block connection, and more efficient recent block serving. // @@ -1265,11 +1271,14 @@ func (b *BlockChain) reorganizeChain(target *blockNode) error { } } - // Log chain reorganizations and send a notification as needed. Notice that - // the tip is reset to whatever the best chain actually is here versus using - // the one from above since it might not match reality if there were errors - // while reorganizing. + // Potentially update whether or not the chain believes it is current based + // on the new tip. Notice that the tip is reset to whatever the best chain + // actually is here versus using the one from above since it might not match + // reality if there were errors while reorganizing. newTip := b.bestChain.Tip() + b.maybeUpdateIsCurrent(newTip) + + // Log chain reorganizations and send a notification as needed. if sentReorgingNtfn && newTip != origTip { // Send a notification that a chain reorganization took place. // @@ -1418,42 +1427,80 @@ func (b *BlockChain) flushBlockIndexWarnOnly() { } } -// isCurrent returns whether or not the chain believes it is current. The -// factors that are used to determine if the chain believes it is current are: -// - Total amount of cumulative work is more than the minimum known work -// specified by the parameters for the network -// - Latest block has a timestamp newer than 24 hours ago +// isOldTimestamp returns whether the given node has a timestamp too far in +// history for the purposes of determining if the chain should be considered +// current. +func (b *BlockChain) isOldTimestamp(node *blockNode) bool { + minus24Hours := b.timeSource.AdjustedTime().Add(-24 * time.Hour).Unix() + return node.timestamp < minus24Hours +} + +// maybeUpdateIsCurrent potentially updates whether or not the chain believes it +// is current. // -// This function MUST be called with the chain state lock held (for reads). -func (b *BlockChain) isCurrent() bool { - // Not current if the latest best block has a cumulative work less than the - // minimum known work specified by the network parameters. 
- tip := b.bestChain.Tip() - minKnownWork := b.chainParams.MinKnownChainWork - if minKnownWork != nil && tip.workSum.Cmp(minKnownWork) < 0 { - return false +// It makes use of a latching approach such that once the chain becomes current +// it will only switch back to false in the case no new blocks have been seen +// for an extended period of time. +// +// This function MUST be called with the chain state lock held (for writes). +func (b *BlockChain) maybeUpdateIsCurrent(curBest *blockNode) { + // Do some additional checks when the chain is not already latched to being + // current. + if !b.isCurrentLatch { + // Not current if the latest best block has a cumulative work less than + // the minimum known work specified by the network parameters. + minKnownWork := b.chainParams.MinKnownChainWork + if minKnownWork != nil && curBest.workSum.Cmp(minKnownWork) < 0 { + return + } + + // Not current if the best block is not synced to the header with the + // most cumulative work that is not known to be invalid. + b.index.RLock() + bestHeader := b.index.bestHeader + b.index.RUnlock() + syncedToBestHeader := curBest.Ancestor(bestHeader.height) == bestHeader + if !syncedToBestHeader { + return + } } - // Not current if the latest best block has a timestamp before 24 hours - // ago. + // Not current if the latest best block has too old of a timestamp. // - // The chain appears to be current if none of the checks reported - // otherwise. - minus24Hours := b.timeSource.AdjustedTime().Add(-24 * time.Hour).Unix() - return tip.timestamp >= minus24Hours + // The chain appears to be current if none of the checks reported otherwise. + wasLatched := b.isCurrentLatch + b.isCurrentLatch = !b.isOldTimestamp(curBest) + if !wasLatched && b.isCurrentLatch { + log.Debugf("Chain latched to current at block %s (height %d)", + curBest.hash, curBest.height) + } + } -// IsCurrent returns whether or not the chain believes it is current. Several -// factors are used to guess, but the key factors that allow the chain to -// believe it is current are: +// isCurrent returns whether or not the chain believes it is current based on +// the current latched state and an additional check which returns false in the +// case no new blocks have been seen for an extended period of time. +// +// This function MUST be called with the chain state lock held (for reads). +func (b *BlockChain) isCurrent(curBest *blockNode) bool { + return b.isCurrentLatch && !b.isOldTimestamp(curBest) +} + +// IsCurrent returns whether or not the chain believes it is current based on +// the current latched state and an additional check which returns false in the +// case no new blocks have been seen for an extended period of time. +// +// The initial factors that are used to latch the state to current are: // - Total amount of cumulative work is more than the minimum known work // specified by the parameters for the network +// - The best chain is synced to the header with the most cumulative work that +// is not known to be invalid // - Latest block has a timestamp newer than 24 hours ago // // This function is safe for concurrent access. 
func (b *BlockChain) IsCurrent() bool { b.chainLock.RLock() - isCurrent := b.isCurrent() + isCurrent := b.isCurrent(b.bestChain.Tip()) b.chainLock.RUnlock() return isCurrent } diff --git a/blockchain/common_test.go b/blockchain/common_test.go index 5ac10f8192..60d2595c56 100644 --- a/blockchain/common_test.go +++ b/blockchain/common_test.go @@ -832,6 +832,20 @@ func (g *chaingenHarness) ExpectBestInvalidHeader(blockName string) { } } +// ExpectIsCurrent expects the chain associated with the harness generator to +// report the given is current status. +func (g *chaingenHarness) ExpectIsCurrent(expected bool) { + g.t.Helper() + + best := g.chain.BestSnapshot() + isCurrent := g.chain.IsCurrent() + if isCurrent != expected { + g.t.Fatalf("mismatched is current status for block %q (hash %s, "+ + "height %d) -- got %v, want %v", g.BlockName(&best.Hash), best.Hash, + best.Height, isCurrent, expected) + } +} + // lookupDeploymentVersion returns the version of the deployment with the // provided ID and caches the result for future invocations. An error is // returned if the ID is not found. diff --git a/blockchain/process.go b/blockchain/process.go index 5a180c47b9..7f958b688b 100644 --- a/blockchain/process.go +++ b/blockchain/process.go @@ -278,7 +278,7 @@ func (b *BlockChain) maybeAcceptBlockData(node *blockNode, block *dcrutil.Block, // // This function MUST be called with the chain lock held (for writes). func (b *BlockChain) maybeAcceptBlocks(curTip *blockNode, nodes []*blockNode, flags BehaviorFlags) ([]*blockNode, error) { - isCurrent := b.isCurrent() + isCurrent := b.isCurrent(curTip) for i, n := range nodes { var err error linkedBlock, err := b.fetchBlockByNode(n) diff --git a/blockchain/process_test.go b/blockchain/process_test.go index fd874d9773..5bb5978a97 100644 --- a/blockchain/process_test.go +++ b/blockchain/process_test.go @@ -1157,9 +1157,10 @@ func TestProcessLogic(t *testing.T) { g.AcceptBlockDataWithExpectedTip("b7", "b10f") g.AcceptBlockDataWithExpectedTip("b9", "b10f") g.AcceptBlockDataWithExpectedTip("b6", "b10f") + g.ExpectIsCurrent(false) // Finally, accept the data for final block on the initial branch that - // causes the reorg. + // causes the reorg and ensure the chain latches to believe it is current. // // new tip // vvv @@ -1169,4 +1170,5 @@ func TestProcessLogic(t *testing.T) { // ---- // orig tip g.AcceptBlockDataWithExpectedTip("b11", "b11") + g.ExpectIsCurrent(true) } From f3890ca2cc54855a7816292f54ff97d98bc389e7 Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Wed, 23 Dec 2020 15:44:36 -0600 Subject: [PATCH 6/7] rpcserver: Update getblockchaininfo best header. This modifies the getblockchaininfo RPC handler to use the newly-exposed best header that is not known to be invalid from chain versus the height of the current best chain tip. --- internal/rpcserver/interface.go | 4 ++++ internal/rpcserver/rpcserver.go | 3 ++- internal/rpcserver/rpcserverhandlers_test.go | 9 +++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/internal/rpcserver/interface.go b/internal/rpcserver/interface.go index af7126e2ab..3fb0daf7a7 100644 --- a/internal/rpcserver/interface.go +++ b/internal/rpcserver/interface.go @@ -236,6 +236,10 @@ type Chain interface { // treated as immutable since it is shared by all callers. BestSnapshot() *blockchain.BestState + // BestHeader returns the header with the most cumulative work that is NOT + // known to be invalid. 
+ BestHeader() (chainhash.Hash, int64) + // BlockByHash returns the block for the given hash, regardless of whether the // block is part of the main chain or not. BlockByHash(hash *chainhash.Hash) (*dcrutil.Block, error) diff --git a/internal/rpcserver/rpcserver.go b/internal/rpcserver/rpcserver.go index dcdef31d7c..36dcbc5594 100644 --- a/internal/rpcserver/rpcserver.go +++ b/internal/rpcserver/rpcserver.go @@ -2018,6 +2018,7 @@ func handleGetBlock(_ context.Context, s *Server, cmd interface{}) (interface{}, func handleGetBlockchainInfo(_ context.Context, s *Server, cmd interface{}) (interface{}, error) { chain := s.cfg.Chain best := chain.BestSnapshot() + _, bestHeaderHeight := chain.BestHeader() // Fetch the current chain work using the the best block hash. chainWork, err := chain.ChainWork(&best.Hash) @@ -2087,7 +2088,7 @@ func handleGetBlockchainInfo(_ context.Context, s *Server, cmd interface{}) (int response := types.GetBlockChainInfoResult{ Chain: params.Name, Blocks: best.Height, - Headers: best.Height, + Headers: bestHeaderHeight, SyncHeight: syncHeight, ChainWork: fmt.Sprintf("%064x", chainWork), InitialBlockDownload: !chain.IsCurrent(), diff --git a/internal/rpcserver/rpcserverhandlers_test.go b/internal/rpcserver/rpcserverhandlers_test.go index 65d0317bee..e7a796e233 100644 --- a/internal/rpcserver/rpcserverhandlers_test.go +++ b/internal/rpcserver/rpcserverhandlers_test.go @@ -127,6 +127,8 @@ type tspendVotes struct { // testRPCChain provides a mock block chain by implementing the Chain interface. type testRPCChain struct { bestSnapshot *blockchain.BestState + bestHeaderHash chainhash.Hash + bestHeaderHeight int64 blockByHash *dcrutil.Block blockByHashErr error blockByHeight *dcrutil.Block @@ -193,6 +195,11 @@ func (c *testRPCChain) BestSnapshot() *blockchain.BestState { return c.bestSnapshot } +// BestHeader returns a mocked best header hash and height. +func (c *testRPCChain) BestHeader() (chainhash.Hash, int64) { + return c.bestHeaderHash, c.bestHeaderHeight +} + // BlockByHash returns a mocked block for the given hash. func (c *testRPCChain) BlockByHash(hash *chainhash.Hash) (*dcrutil.Block, error) { return c.blockByHash, c.blockByHashErr @@ -3476,6 +3483,8 @@ func TestHandleGetBlockchainInfo(t *testing.T) { Hash: *hash, PrevHash: *prevHash, } + chain.bestHeaderHash = *hash + chain.bestHeaderHeight = 463073 chain.chainWork = big.NewInt(0).SetBytes([]byte{0x11, 0x5d, 0x28, 0x33, 0x84, 0x90, 0x90, 0xb0, 0x02, 0x65, 0x06}) chain.isCurrent = false From 12e132e6808c4e19caf1082e1e595d3cc43541bb Mon Sep 17 00:00:00 2001 From: Dave Collins Date: Wed, 23 Dec 2020 15:44:36 -0600 Subject: [PATCH 7/7] blockchain: Update README.md and doc.go. This modifies the README.md and doc.go files to match the latest code. --- blockchain/README.md | 56 ++++++++++++++++++++++++++++++++++---------- blockchain/doc.go | 39 ++++++++++++++++++++++++------ 2 files changed, 75 insertions(+), 20 deletions(-) diff --git a/blockchain/README.md b/blockchain/README.md index b510a04166..e994eab99d 100644 --- a/blockchain/README.md +++ b/blockchain/README.md @@ -6,23 +6,24 @@ blockchain [![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/blockchain/v3) Package blockchain implements Decred block handling and chain selection rules. -The test coverage is currently only around 60%, but will be increasing over -time. See `test_coverage.txt` for the gocov coverage report. 
Alternatively, if -you are running a POSIX OS, you can run the `cov_report.sh` script for a -real-time report. Package blockchain is licensed under the liberal ISC license. -There is an associated blog post about the release of this package -[here](https://blog.conformal.com/btcchain-the-bitcoin-chain-package-from-bctd/). +The Decred block handling and chain selection rules are an integral, and quite +likely the most important, part of Decred. At its core, Decred is a distributed +consensus of which blocks are valid and which ones will comprise the main block +chain (public ledger) that ultimately determines accepted transactions, so it is +extremely important that fully validating nodes agree on all rules. -This package has intentionally been designed so it can be used as a standalone -package for any projects needing to handle processing of blocks into the decred -block chain. +At a high level, this package provides support for inserting new blocks into the +block chain according to the aforementioned rules. It includes functionality +such as rejecting duplicate blocks, ensuring blocks and transactions follow all +rules, and best chain selection along with reorganization. -## Installation and Updating +Since this package does not deal with other Decred specifics such as network +communication or wallets, it provides a notification system which gives the +caller a high level of flexibility in how they want to react to certain events +such as newly connected main chain blocks which might result in wallet updates. -```bash -$ go get -u github.com/decred/dcrd/blockchain -``` +A comprehensive suite of tests is provided to ensure proper functionality. ## Decred Chain Processing Overview @@ -55,6 +56,35 @@ is by no means exhaustive: coins - Insert the block into the block database + ## Processing Order + + This package supports headers-first semantics such that block data can be + processed out of order so long as the associated header is already known. + + The headers themselves, however, must be processed in the correct order since + headers that do not properly connect are rejected. In other words, orphan + headers are not allowed. + +The processing code always maintains the best chain as the branch tip that has +the most cumulative proof of work, so it is important to keep that in mind when +considering errors returned from processing blocks. + +Notably, due to the ability to process blocks out of order, and the fact blocks +can only be fully validated once all of their ancestors have the block data +available, it is to be expected that no error is returned immediately for blocks +that are valid enough to make it to the point they require the remaining +ancestor block data to be fully validated even though they might ultimately end +up failing validation. Similarly, because the data for a block becoming +available makes any of its direct descendants that already have their data +available eligible for validation, an error being returned does not necessarily +mean the block being processed is the one that failed validation. + +## Installation and Updating + +```bash +$ go get -u github.com/decred/dcrd/blockchain +``` + ## Examples * [ProcessBlock Example](https://pkg.go.dev/github.com/decred/dcrd/blockchain/v3#example-BlockChain.ProcessBlock) diff --git a/blockchain/doc.go b/blockchain/doc.go index a1b6faba4a..bc673a71c9 100644 --- a/blockchain/doc.go +++ b/blockchain/doc.go @@ -7,7 +7,7 @@ Package blockchain implements Decred block handling and chain selection rules. 
The Decred block handling and chain selection rules are an integral, and quite -likely the most important, part of decred. At its core, Decred is a distributed +likely the most important, part of Decred. At its core, Decred is a distributed consensus of which blocks are valid and which ones will comprise the main block chain (public ledger) that ultimately determines accepted transactions, so it is extremely important that fully validating nodes agree on all rules. @@ -53,13 +53,38 @@ is by no means exhaustive: coins - Insert the block into the block database +Processing Order + +This package supports headers-first semantics such that block data can be +processed out of order so long as the associated header is already known. + +The headers themselves, however, must be processed in the correct order since +headers that do not properly connect are rejected. In other words, orphan +headers are not allowed. + +The processing code always maintains the best chain as the branch tip that has +the most cumulative proof of work, so it is important to keep that in mind when +considering errors returned from processing blocks. + +Notably, due to the ability to process blocks out of order, and the fact blocks +can only be fully validated once all of their ancestors have the block data +available, it is to be expected that no error is returned immediately for blocks +that are valid enough to make it to the point they require the remaining +ancestor block data to be fully validated even though they might ultimately end +up failing validation. Similarly, because the data for a block becoming +available makes any of its direct descendants that already have their data +available eligible for validation, an error being returned does not necessarily +mean the block being processed is the one that failed validation. + Errors -Errors returned by this package are either the raw errors provided by underlying -calls or of type blockchain.RuleError. This allows the caller to differentiate -between unexpected errors, such as database errors, versus errors due to rule -violations through type assertions. In addition, callers can programmatically -determine the specific rule violation by examining the Errorkind field of the -type asserted blockchain.RuleError. +Errors returned by this package have full support for the standard library +errors.Is and errors.As methods and are either the raw errors provided by +underlying calls or of type blockchain.RuleError, possibly wrapped in a +blockchain.MultiError. This allows the caller to differentiate between +unexpected errors, such as database errors, versus errors due to rule violations +through errors.As. In addition, callers can programmatically determine the +specific rule violation by making use of errors.Is with any of the wrapped error +kinds. */ package blockchain
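As a brief illustration of the error-handling contract described in the updated doc.go above, the following sketch (not part of the patch itself) shows how a caller might separate consensus rule violations from unexpected errors. It assumes the refactored error types behave as documented: RuleError wraps an exported error kind such as ErrDuplicateBlock in an Err field alongside a human-readable Description, and implements Unwrap, so the standard library errors.Is and errors.As helpers apply directly. The field names and the module import path are assumptions to verify against the package's error definitions.

```Go
package main

import (
	"errors"
	"fmt"

	"github.com/decred/dcrd/blockchain/v3"
)

// describeProcessErr reports whether an error returned while processing a
// block is a consensus rule violation and, if so, which kind it is.
func describeProcessErr(err error) {
	switch {
	case err == nil:
		return

	// Check for one specific rule violation kind via the wrapped error kind.
	case errors.Is(err, blockchain.ErrDuplicateBlock):
		fmt.Println("block is already known")

	default:
		// Distinguish any other rule violation from unexpected errors such
		// as database failures.
		var rerr blockchain.RuleError
		if errors.As(err, &rerr) {
			fmt.Printf("rule violation (%v): %s\n", rerr.Err, rerr.Description)
			return
		}
		fmt.Printf("unexpected error: %v\n", err)
	}
}

func main() {
	// Construct a wrapped rule error by hand purely for demonstration; in
	// practice the error would come from ProcessBlock or a similar call.
	err := blockchain.RuleError{
		Err:         blockchain.ErrDuplicateBlock,
		Description: "already have block",
	}
	describeProcessErr(err)
}
```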