// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"fmt"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcutil"
)

// BehaviorFlags is a bitmask defining tweaks to the normal behavior when
// performing chain processing and consensus rules checks.
type BehaviorFlags uint32

const (
	// BFFastAdd may be set to indicate that several checks can be avoided
	// for the block since it is already known to fit into the chain due to
	// already having proven it correctly links into the chain up to a known
	// checkpoint. This is primarily used for headers-first mode.
	BFFastAdd BehaviorFlags = 1 << iota

	// BFNoPoWCheck may be set to indicate the proof of work check which
	// ensures a block hashes to a value less than the required target will
	// not be performed.
	BFNoPoWCheck

	// BFDryRun may be set to indicate the block should not modify the chain
	// or memory chain index. This is useful to test that a block is valid
	// without modifying the current state.
	BFDryRun

	// BFNone is a convenience value to specifically indicate no flags.
	BFNone BehaviorFlags = 0
)
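
// The behavior flags above are bit flags, so callers may combine them with
// bitwise OR and test for them with bitwise AND. A minimal sketch of how a
// caller might build and inspect a flag set (illustrative only, not taken
// from any particular call site):
//
//	flags := BFFastAdd | BFNoPoWCheck
//	if flags&BFFastAdd == BFFastAdd {
//		// Skip checks that are unnecessary in headers-first mode.
//	}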

// blockExists determines whether a block with the given hash exists either in
// the main chain or any side chains.
//
// This function is safe for concurrent access.
func (b *BlockChain) blockExists(hash *chainhash.Hash) (bool, error) {
	// Check block index first (could be main chain or side chain blocks).
	if b.index.HaveBlock(hash) {
		return true, nil
	}

	// Check in the database.
	var exists bool
	err := b.db.View(func(dbTx database.Tx) error {
		var err error
		exists, err = dbTx.HasBlock(hash)
		if err != nil || !exists {
			return err
		}

		// Ignore side chain blocks in the database. This is necessary
		// because there is not currently any record of the associated
		// block index data such as its block height, so it's not yet
		// possible to efficiently load the block and do anything useful
		// with it.
		//
		// Ultimately the entire block index should be serialized
		// instead of only the current main chain so it can be consulted
		// directly.
		_, err = dbFetchHeightByHash(dbTx, hash)
		if isNotInMainChainErr(err) {
			exists = false
			return nil
		}
		return err
	})
	return exists, err
}

// processOrphans determines if there are any orphans which depend on the passed
// block hash (they are no longer orphans if true) and potentially accepts them.
// It repeats the process for the newly accepted blocks (to detect further
// orphans which may no longer be orphans) until there are no more.
//
// The flags do not modify the behavior of this function directly, however they
// are needed to pass along to maybeAcceptBlock.
//
// This function MUST be called with the chain state lock held (for writes).
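//
// As a rough illustration of the intended call pattern (hypothetical hash
// names, assuming the chain state lock is already held): if block B arrived
// before its parent A and was stashed in the orphan pool, then once A is
// connected the caller passes A's hash here; B is popped from b.prevOrphans,
// handed to maybeAcceptBlock, and the loop repeats with B's hash:
//
//	err := b.processOrphans(aHash, BFNone)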
func (b *BlockChain) processOrphans(hash *chainhash.Hash, flags BehaviorFlags) error {
	// Start with processing at least the passed hash. Leave a little room
	// for additional orphan blocks that need to be processed without
	// needing to grow the array in the common case.
	processHashes := make([]*chainhash.Hash, 0, 10)
	processHashes = append(processHashes, hash)
	for len(processHashes) > 0 {
		// Pop the first hash to process from the slice.
		processHash := processHashes[0]
		processHashes[0] = nil // Prevent GC leak.
		processHashes = processHashes[1:]

		// Look up all orphans that are parented by the block we just
		// accepted. This will typically only be one, but it could
		// be multiple if multiple blocks are mined and broadcast
		// around the same time. The one with the most proof of work
		// will eventually win out. An indexing for loop is
		// intentionally used over a range here as range does not
		// reevaluate the slice on each iteration nor does it adjust the
		// index for the modified slice.
		for i := 0; i < len(b.prevOrphans[*processHash]); i++ {
			orphan := b.prevOrphans[*processHash][i]
			if orphan == nil {
				log.Warnf("Found a nil entry at index %d in the "+
					"orphan dependency list for block %v", i,
					processHash)
				continue
			}

			// Remove the orphan from the orphan pool.
			orphanHash := orphan.block.Hash()
			b.removeOrphanBlock(orphan)
			i--

			// Potentially accept the block into the block chain.
			_, err := b.maybeAcceptBlock(orphan.block, flags)
			if err != nil {
				return err
			}

			// Add this block to the list of blocks to process so
			// any orphan blocks that depend on this block are
			// handled too.
			processHashes = append(processHashes, orphanHash)
		}
	}
	return nil
}

// ProcessBlock is the main workhorse for handling insertion of new blocks into
// the block chain. It includes functionality such as rejecting duplicate
// blocks, ensuring blocks follow all rules, orphan handling, and insertion into
// the block chain along with best chain selection and reorganization.
//
// When no errors occurred during processing, the first return value indicates
// whether or not the block is on the main chain and the second indicates
// whether or not the block is an orphan.
//
// This function is safe for concurrent access.
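//
// A minimal sketch of a caller (hypothetical; "chain" is assumed to be an
// initialized *BlockChain and "block" a deserialized *btcutil.Block):
//
//	isMainChain, isOrphan, err := chain.ProcessBlock(block, blockchain.BFNone)
//	if err != nil {
//		// The block was rejected: rule violation, duplicate, etc.
//	}
//	// isMainChain reports whether the block extended the main chain.
//	// isOrphan reports whether the block's parent is unknown and the block
//	// was held in the orphan pool instead.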
func (b *BlockChain) ProcessBlock(block *btcutil.Block, flags BehaviorFlags) (bool, bool, error) {
	b.chainLock.Lock()
	defer b.chainLock.Unlock()

	fastAdd := flags&BFFastAdd == BFFastAdd
	dryRun := flags&BFDryRun == BFDryRun

	blockHash := block.Hash()
	log.Tracef("Processing block %v", blockHash)

	// The block must not already exist in the main chain or side chains.
	exists, err := b.blockExists(blockHash)
	if err != nil {
		return false, false, err
	}
	if exists {
		str := fmt.Sprintf("already have block %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// The block must not already exist as an orphan.
	if _, exists := b.orphans[*blockHash]; exists {
		str := fmt.Sprintf("already have block (orphan) %v", blockHash)
		return false, false, ruleError(ErrDuplicateBlock, str)
	}

	// Perform preliminary sanity checks on the block and its transactions.
	err = checkBlockSanity(block, b.chainParams.PowLimit, b.timeSource, flags)
	if err != nil {
		return false, false, err
	}

	// Find the previous checkpoint and perform some additional checks based
	// on the checkpoint. This provides a few nice properties such as
	// preventing old side chain blocks before the last checkpoint,
	// rejecting easy to mine, but otherwise bogus, blocks that could be
	// used to eat memory, and ensuring expected (versus claimed) proof of
	// work requirements since the previous checkpoint are met.
	blockHeader := &block.MsgBlock().Header
	checkpointNode, err := b.findPreviousCheckpoint()
	if err != nil {
		return false, false, err
	}
	if checkpointNode != nil {
		// Ensure the block timestamp is after the checkpoint timestamp.
		checkpointTime := time.Unix(checkpointNode.timestamp, 0)
		if blockHeader.Timestamp.Before(checkpointTime) {
			str := fmt.Sprintf("block %v has timestamp %v before "+
				"last checkpoint timestamp %v", blockHash,
				blockHeader.Timestamp, checkpointTime)
			return false, false, ruleError(ErrCheckpointTimeTooOld, str)
		}
		if !fastAdd {
			// Even though the checks prior to now have already ensured the
			// proof of work exceeds the claimed amount, the claimed amount
			// is a field in the block header which could be forged. This
			// check ensures the proof of work is at least the minimum
			// expected based on elapsed time since the last checkpoint and
			// maximum adjustment allowed by the retarget rules.
			duration := blockHeader.Timestamp.Sub(checkpointTime)
			requiredTarget := CompactToBig(b.calcEasiestDifficulty(
				checkpointNode.bits, duration))
			currentTarget := CompactToBig(blockHeader.Bits)
			if currentTarget.Cmp(requiredTarget) > 0 {
				str := fmt.Sprintf("block target difficulty of %064x "+
					"is too low when compared to the previous "+
					"checkpoint", currentTarget)
				return false, false, ruleError(ErrDifficultyTooLow, str)
			}
		}
	}

	// Handle orphan blocks.
	prevHash := &blockHeader.PrevBlock
	prevHashExists, err := b.blockExists(prevHash)
	if err != nil {
		return false, false, err
	}
	if !prevHashExists {
		if !dryRun {
			log.Infof("Adding orphan block %v with parent %v",
				blockHash, prevHash)
			b.addOrphanBlock(block)
		}

		return false, true, nil
	}

	// The block has passed all context independent checks and appears sane
	// enough to potentially accept it into the block chain.
	isMainChain, err := b.maybeAcceptBlock(block, flags)
	if err != nil {
		return false, false, err
	}

	// Don't process any orphans or log when the dry run flag is set.
	if !dryRun {
		// Accept any orphan blocks that depend on this block (they are
		// no longer orphans) and repeat for those accepted blocks until
		// there are no more.
		err := b.processOrphans(blockHash, flags)
		if err != nil {
			return false, false, err
		}

		log.Debugf("Accepted block %v", blockHash)
	}

	return isMainChain, false, nil
}