BIP0141+blockchain: implement segwit block validation rules

This commit implements the new block validation rules as defined by
BIP0141. The new rules include the constraints that if a block has
transactions with witness data, then there MUST be a commitment within
the coinbase transaction to the root of a new merkle tree which commits
to the wtxid of all transactions. Additionally, rather than limiting
the size of a block by size in bytes, blocks are now limited by their
total weight units. Similarly, a newly defined “sig op cost” is now used
to limit the signature validation cost of transactions found within
blocks.
This commit is contained in:
Olaoluwa Osuntokun 2016-10-18 18:33:20 -07:00 committed by Dave Collins
parent 1b80b334bc
commit d0768abcc4
9 changed files with 274 additions and 116 deletions

View file

@ -42,25 +42,29 @@ type orphanBlock struct {
// However, the returned snapshot must be treated as immutable since it is // However, the returned snapshot must be treated as immutable since it is
// shared by all callers. // shared by all callers.
type BestState struct { type BestState struct {
Hash chainhash.Hash // The hash of the block. Hash chainhash.Hash // The hash of the block.
Height int32 // The height of the block. Height int32 // The height of the block.
Bits uint32 // The difficulty bits of the block. Bits uint32 // The difficulty bits of the block.
BlockSize uint64 // The size of the block. BlockSize uint64 // The size of the block.
NumTxns uint64 // The number of txns in the block. BlockWeight uint64 // The weight of the block.
TotalTxns uint64 // The total number of txns in the chain. NumTxns uint64 // The number of txns in the block.
MedianTime time.Time // Median time as per CalcPastMedianTime. TotalTxns uint64 // The total number of txns in the chain.
MedianTime time.Time // Median time as per CalcPastMedianTime.
} }
// newBestState returns a new best stats instance for the given parameters. // newBestState returns a new best stats instance for the given parameters.
func newBestState(node *blockNode, blockSize, numTxns, totalTxns uint64, medianTime time.Time) *BestState { func newBestState(node *blockNode, blockSize, blockWeight, numTxns,
totalTxns uint64, medianTime time.Time) *BestState {
return &BestState{ return &BestState{
Hash: node.hash, Hash: node.hash,
Height: node.height, Height: node.height,
Bits: node.bits, Bits: node.bits,
BlockSize: blockSize, BlockSize: blockSize,
NumTxns: numTxns, BlockWeight: blockWeight,
TotalTxns: totalTxns, NumTxns: numTxns,
MedianTime: medianTime, TotalTxns: totalTxns,
MedianTime: medianTime,
} }
} }
@ -80,6 +84,7 @@ type BlockChain struct {
notifications NotificationCallback notifications NotificationCallback
sigCache *txscript.SigCache sigCache *txscript.SigCache
indexManager IndexManager indexManager IndexManager
hashCache *txscript.HashCache
// The following fields are calculated based upon the provided chain // The following fields are calculated based upon the provided chain
// parameters. They are also set when the instance is created and // parameters. They are also set when the instance is created and
@ -616,8 +621,9 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U
b.stateLock.RUnlock() b.stateLock.RUnlock()
numTxns := uint64(len(block.MsgBlock().Transactions)) numTxns := uint64(len(block.MsgBlock().Transactions))
blockSize := uint64(block.MsgBlock().SerializeSize()) blockSize := uint64(block.MsgBlock().SerializeSize())
state := newBestState(node, blockSize, numTxns, curTotalTxns+numTxns, blockWeight := uint64(GetBlockWeight(block))
medianTime) state := newBestState(node, blockSize, blockWeight, numTxns,
curTotalTxns+numTxns, medianTime)
// Atomically insert info into the database. // Atomically insert info into the database.
err = b.db.Update(func(dbTx database.Tx) error { err = b.db.Update(func(dbTx database.Tx) error {
@ -744,9 +750,10 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view
b.stateLock.RUnlock() b.stateLock.RUnlock()
numTxns := uint64(len(prevBlock.MsgBlock().Transactions)) numTxns := uint64(len(prevBlock.MsgBlock().Transactions))
blockSize := uint64(prevBlock.MsgBlock().SerializeSize()) blockSize := uint64(prevBlock.MsgBlock().SerializeSize())
blockWeight := uint64(GetBlockWeight(prevBlock))
newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions)) newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions))
state := newBestState(prevNode, blockSize, numTxns, newTotalTxns, state := newBestState(prevNode, blockSize, blockWeight, numTxns,
medianTime) newTotalTxns, medianTime)
err = b.db.Update(func(dbTx database.Tx) error { err = b.db.Update(func(dbTx database.Tx) error {
// Update best block state. // Update best block state.
@ -1328,6 +1335,16 @@ type Config struct {
// This field can be nil if the caller does not wish to make use of an // This field can be nil if the caller does not wish to make use of an
// index manager. // index manager.
IndexManager IndexManager IndexManager IndexManager
// HashCache defines a transaction hash mid-state cache to use when
// validating transactions. This cache has the potential to greatly
// speed up transaction validation as re-using the pre-calculated
// mid-state eliminates the O(N^2) validation complexity due to the
// SigHashAll flag.
//
// This field can be nil if the caller is not interested in using a
// signature cache.
HashCache *txscript.HashCache
} }
// New returns a BlockChain instance using the provided configuration details. // New returns a BlockChain instance using the provided configuration details.
@ -1378,6 +1395,7 @@ func New(config *Config) (*BlockChain, error) {
maxRetargetTimespan: targetTimespan * adjustmentFactor, maxRetargetTimespan: targetTimespan * adjustmentFactor,
blocksPerRetarget: int32(targetTimespan / targetTimePerBlock), blocksPerRetarget: int32(targetTimespan / targetTimePerBlock),
index: newBlockIndex(config.DB, params), index: newBlockIndex(config.DB, params),
hashCache: config.HashCache,
orphans: make(map[chainhash.Hash]*orphanBlock), orphans: make(map[chainhash.Hash]*orphanBlock),
prevOrphans: make(map[chainhash.Hash][]*orphanBlock), prevOrphans: make(map[chainhash.Hash][]*orphanBlock),
warningCaches: newThresholdCaches(vbNumBits), warningCaches: newThresholdCaches(vbNumBits),

View file

@ -1092,8 +1092,9 @@ func (b *BlockChain) createChainState() error {
// genesis block, use its timestamp for the median time. // genesis block, use its timestamp for the median time.
numTxns := uint64(len(genesisBlock.MsgBlock().Transactions)) numTxns := uint64(len(genesisBlock.MsgBlock().Transactions))
blockSize := uint64(genesisBlock.MsgBlock().SerializeSize()) blockSize := uint64(genesisBlock.MsgBlock().SerializeSize())
b.stateSnapshot = newBestState(b.bestNode, blockSize, numTxns, numTxns, blockWeight := uint64(GetBlockWeight(genesisBlock))
time.Unix(b.bestNode.timestamp, 0)) b.stateSnapshot = newBestState(b.bestNode, blockSize, blockWeight,
numTxns, numTxns, time.Unix(b.bestNode.timestamp, 0))
// Create the initial the database chain state including creating the // Create the initial the database chain state including creating the
// necessary index buckets and inserting the genesis block. // necessary index buckets and inserting the genesis block.
@ -1197,11 +1198,12 @@ func (b *BlockChain) initChainState() error {
// Initialize the state related to the best block. // Initialize the state related to the best block.
blockSize := uint64(len(blockBytes)) blockSize := uint64(len(blockBytes))
blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block)))
numTxns := uint64(len(block.Transactions)) numTxns := uint64(len(block.Transactions))
b.stateSnapshot = newBestState(b.bestNode, blockSize, numTxns, b.stateSnapshot = newBestState(b.bestNode, blockSize, blockWeight,
state.totalTxns, medianTime) numTxns, state.totalTxns, medianTime)
isStateInitialized = true isStateInitialized = true
return nil return nil
}) })
if err != nil { if err != nil {

View file

@ -41,6 +41,10 @@ const (
// maximum allowed size. // maximum allowed size.
ErrBlockTooBig ErrBlockTooBig
// ErrBlockWeightTooHigh indicates that the block's computed weight
// metric exceeds the maximum allowed value.
ErrBlockWeightTooHigh
// ErrBlockVersionTooOld indicates the block version is too old and is // ErrBlockVersionTooOld indicates the block version is too old and is
// no longer accepted since the majority of the network has upgraded // no longer accepted since the majority of the network has upgraded
// to a newer version. // to a newer version.
@ -198,48 +202,66 @@ const (
// such signature verification failures and execution past the end of // such signature verification failures and execution past the end of
// the stack. // the stack.
ErrScriptValidation ErrScriptValidation
// ErrUnexpectedWitness indicates that a block includes transactions
// with witness data, but doesn't also have a witness commitment within
// the coinbase transaction.
ErrUnexpectedWitness
// ErrInvalidWitnessCommitment indicates that a block's witness
// commitment is not well formed.
ErrInvalidWitnessCommitment
// ErrWitnessCommitmentMismatch indicates that the witness commitment
// included in the block's coinbase transaction doesn't match the
// manually computed witness commitment.
ErrWitnessCommitmentMismatch
) )
// Map of ErrorCode values back to their constant names for pretty printing. // Map of ErrorCode values back to their constant names for pretty printing.
var errorCodeStrings = map[ErrorCode]string{ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateBlock: "ErrDuplicateBlock", ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockTooBig: "ErrBlockTooBig", ErrBlockTooBig: "ErrBlockTooBig",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld", ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrInvalidTime: "ErrInvalidTime", ErrBlockWeightTooHigh: "ErrBlockWeightTooHigh",
ErrTimeTooOld: "ErrTimeTooOld", ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooNew: "ErrTimeTooNew", ErrTimeTooOld: "ErrTimeTooOld",
ErrDifficultyTooLow: "ErrDifficultyTooLow", ErrTimeTooNew: "ErrTimeTooNew",
ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty", ErrDifficultyTooLow: "ErrDifficultyTooLow",
ErrHighHash: "ErrHighHash", ErrUnexpectedDifficulty: "ErrUnexpectedDifficulty",
ErrBadMerkleRoot: "ErrBadMerkleRoot", ErrHighHash: "ErrHighHash",
ErrBadCheckpoint: "ErrBadCheckpoint", ErrBadMerkleRoot: "ErrBadMerkleRoot",
ErrForkTooOld: "ErrForkTooOld", ErrBadCheckpoint: "ErrBadCheckpoint",
ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld", ErrForkTooOld: "ErrForkTooOld",
ErrNoTransactions: "ErrNoTransactions", ErrCheckpointTimeTooOld: "ErrCheckpointTimeTooOld",
ErrTooManyTransactions: "ErrTooManyTransactions", ErrNoTransactions: "ErrNoTransactions",
ErrNoTxInputs: "ErrNoTxInputs", ErrTooManyTransactions: "ErrTooManyTransactions",
ErrNoTxOutputs: "ErrNoTxOutputs", ErrNoTxInputs: "ErrNoTxInputs",
ErrTxTooBig: "ErrTxTooBig", ErrNoTxOutputs: "ErrNoTxOutputs",
ErrBadTxOutValue: "ErrBadTxOutValue", ErrTxTooBig: "ErrTxTooBig",
ErrDuplicateTxInputs: "ErrDuplicateTxInputs", ErrBadTxOutValue: "ErrBadTxOutValue",
ErrBadTxInput: "ErrBadTxInput", ErrDuplicateTxInputs: "ErrDuplicateTxInputs",
ErrMissingTx: "ErrMissingTx", ErrBadTxInput: "ErrBadTxInput",
ErrUnfinalizedTx: "ErrUnfinalizedTx", ErrMissingTx: "ErrMissingTx",
ErrDuplicateTx: "ErrDuplicateTx", ErrUnfinalizedTx: "ErrUnfinalizedTx",
ErrOverwriteTx: "ErrOverwriteTx", ErrDuplicateTx: "ErrDuplicateTx",
ErrImmatureSpend: "ErrImmatureSpend", ErrOverwriteTx: "ErrOverwriteTx",
ErrDoubleSpend: "ErrDoubleSpend", ErrImmatureSpend: "ErrImmatureSpend",
ErrSpendTooHigh: "ErrSpendTooHigh", ErrDoubleSpend: "ErrDoubleSpend",
ErrBadFees: "ErrBadFees", ErrSpendTooHigh: "ErrSpendTooHigh",
ErrTooManySigOps: "ErrTooManySigOps", ErrBadFees: "ErrBadFees",
ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase", ErrTooManySigOps: "ErrTooManySigOps",
ErrMultipleCoinbases: "ErrMultipleCoinbases", ErrFirstTxNotCoinbase: "ErrFirstTxNotCoinbase",
ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen", ErrMultipleCoinbases: "ErrMultipleCoinbases",
ErrBadCoinbaseValue: "ErrBadCoinbaseValue", ErrBadCoinbaseScriptLen: "ErrBadCoinbaseScriptLen",
ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight", ErrBadCoinbaseValue: "ErrBadCoinbaseValue",
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight", ErrMissingCoinbaseHeight: "ErrMissingCoinbaseHeight",
ErrScriptMalformed: "ErrScriptMalformed", ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrScriptValidation: "ErrScriptValidation", ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation",
ErrUnexpectedWitness: "ErrUnexpectedWitness",
ErrInvalidWitnessCommitment: "ErrInvalidWitnessCommitment",
ErrWitnessCommitmentMismatch: "ErrWitnessCommitmentMismatch",
} }
// String returns the ErrorCode as a human-readable name. // String returns the ErrorCode as a human-readable name.

View file

@ -117,7 +117,7 @@ func TestFullBlocks(t *testing.T) {
// Ensure there is an error due to deserializing the block. // Ensure there is an error due to deserializing the block.
var msgBlock wire.MsgBlock var msgBlock wire.MsgBlock
err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0) err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0, wire.BaseEncoding)
if _, ok := err.(*wire.MessageError); !ok { if _, ok := err.(*wire.MessageError); !ok {
t.Fatalf("block %q (hash %s, height %d) should have "+ t.Fatalf("block %q (hash %s, height %d) should have "+
"failed to decode", item.Name, blockHash, "failed to decode", item.Name, blockHash,

View file

@ -309,7 +309,7 @@ func calcMerkleRoot(txns []*wire.MsgTx) chainhash.Hash {
for _, tx := range txns { for _, tx := range txns {
utilTxns = append(utilTxns, btcutil.NewTx(tx)) utilTxns = append(utilTxns, btcutil.NewTx(tx))
} }
merkles := blockchain.BuildMerkleTreeStore(utilTxns) merkles := blockchain.BuildMerkleTreeStore(utilTxns, false)
return *merkles[len(merkles)-1] return *merkles[len(merkles)-1]
} }
@ -635,10 +635,10 @@ func nonCanonicalVarInt(val uint32) []byte {
// encoding. // encoding.
func encodeNonCanonicalBlock(b *wire.MsgBlock) []byte { func encodeNonCanonicalBlock(b *wire.MsgBlock) []byte {
var buf bytes.Buffer var buf bytes.Buffer
b.Header.BtcEncode(&buf, 0) b.Header.BtcEncode(&buf, 0, wire.BaseEncoding)
buf.Write(nonCanonicalVarInt(uint32(len(b.Transactions)))) buf.Write(nonCanonicalVarInt(uint32(len(b.Transactions))))
for _, tx := range b.Transactions { for _, tx := range b.Transactions {
tx.BtcEncode(&buf, 0) tx.BtcEncode(&buf, 0, wire.BaseEncoding)
} }
return buf.Bytes() return buf.Bytes()
} }

View file

@ -8,6 +8,7 @@ import (
"fmt" "fmt"
"math" "math"
"runtime" "runtime"
"time"
"github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
@ -19,6 +20,7 @@ type txValidateItem struct {
txInIndex int txInIndex int
txIn *wire.TxIn txIn *wire.TxIn
tx *btcutil.Tx tx *btcutil.Tx
sigHashes *txscript.TxSigHashes
} }
// txValidator provides a type which asynchronously validates transaction // txValidator provides a type which asynchronously validates transaction
@ -31,6 +33,7 @@ type txValidator struct {
utxoView *UtxoViewpoint utxoView *UtxoViewpoint
flags txscript.ScriptFlags flags txscript.ScriptFlags
sigCache *txscript.SigCache sigCache *txscript.SigCache
hashCache *txscript.HashCache
} }
// sendResult sends the result of a script pair validation on the internal // sendResult sends the result of a script pair validation on the internal
@ -83,15 +86,19 @@ out:
// Create a new script engine for the script pair. // Create a new script engine for the script pair.
sigScript := txIn.SignatureScript sigScript := txIn.SignatureScript
witness := txIn.Witness
inputAmount := txEntry.AmountByIndex(originTxIndex)
vm, err := txscript.NewEngine(pkScript, txVI.tx.MsgTx(), vm, err := txscript.NewEngine(pkScript, txVI.tx.MsgTx(),
txVI.txInIndex, v.flags, v.sigCache) txVI.txInIndex, v.flags, v.sigCache, txVI.sigHashes,
inputAmount)
if err != nil { if err != nil {
str := fmt.Sprintf("failed to parse input "+ str := fmt.Sprintf("failed to parse input "+
"%s:%d which references output %s:%d - "+ "%s:%d which references output %s:%d - "+
"%v (input script bytes %x, prev output "+ "%v (input witness %x, input script "+
"script bytes %x)", txVI.tx.Hash(), "bytes %x, prev output script bytes %x)",
txVI.txInIndex, originTxHash, txVI.tx.Hash(), txVI.txInIndex, originTxHash,
originTxIndex, err, sigScript, pkScript) originTxIndex, err, witness, sigScript,
pkScript)
err := ruleError(ErrScriptMalformed, str) err := ruleError(ErrScriptMalformed, str)
v.sendResult(err) v.sendResult(err)
break out break out
@ -101,10 +108,11 @@ out:
if err := vm.Execute(); err != nil { if err := vm.Execute(); err != nil {
str := fmt.Sprintf("failed to validate input "+ str := fmt.Sprintf("failed to validate input "+
"%s:%d which references output %s:%d - "+ "%s:%d which references output %s:%d - "+
"%v (input script bytes %x, prev output "+ "%v (input witness %x, input script "+
"script bytes %x)", txVI.tx.Hash(), "bytes %x, prev output script bytes %x)",
txVI.txInIndex, originTxHash, txVI.tx.Hash(), txVI.txInIndex,
originTxIndex, err, sigScript, pkScript) originTxHash, originTxIndex, err,
witness, sigScript, pkScript)
err := ruleError(ErrScriptValidation, str) err := ruleError(ErrScriptValidation, str)
v.sendResult(err) v.sendResult(err)
break out break out
@ -179,20 +187,38 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
// newTxValidator returns a new instance of txValidator to be used for // newTxValidator returns a new instance of txValidator to be used for
// validating transaction scripts asynchronously. // validating transaction scripts asynchronously.
func newTxValidator(utxoView *UtxoViewpoint, flags txscript.ScriptFlags, sigCache *txscript.SigCache) *txValidator { func newTxValidator(utxoView *UtxoViewpoint, flags txscript.ScriptFlags,
sigCache *txscript.SigCache, hashCache *txscript.HashCache) *txValidator {
return &txValidator{ return &txValidator{
validateChan: make(chan *txValidateItem), validateChan: make(chan *txValidateItem),
quitChan: make(chan struct{}), quitChan: make(chan struct{}),
resultChan: make(chan error), resultChan: make(chan error),
utxoView: utxoView, utxoView: utxoView,
sigCache: sigCache, sigCache: sigCache,
hashCache: hashCache,
flags: flags, flags: flags,
} }
} }
// ValidateTransactionScripts validates the scripts for the passed transaction // ValidateTransactionScripts validates the scripts for the passed transaction
// using multiple goroutines. // using multiple goroutines.
func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error { func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint,
flags txscript.ScriptFlags, sigCache *txscript.SigCache,
hashCache *txscript.HashCache) error {
// If the hashcache doesn't yet have the sighash midstate for this
// transaction, then we'll compute them now so we can re-use them
// amongst all worker validation goroutines.
if !hashCache.ContainsHashes(tx.Hash()) {
hashCache.AddSigHashes(tx.MsgTx())
}
// The same pointer to the transaction's sighash midstate will be
// re-used amongst all validation goroutines. By pre-computing the
// sighash here instead of during validation, we ensure the sighashes
// are only computed once.
cachedHashes, _ := hashCache.GetSigHashes(tx.Hash())
// Collect all of the transaction inputs and required information for // Collect all of the transaction inputs and required information for
// validation. // validation.
txIns := tx.MsgTx().TxIn txIns := tx.MsgTx().TxIn
@ -207,18 +233,22 @@ func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint, flags t
txInIndex: txInIdx, txInIndex: txInIdx,
txIn: txIn, txIn: txIn,
tx: tx, tx: tx,
sigHashes: cachedHashes,
} }
txValItems = append(txValItems, txVI) txValItems = append(txValItems, txVI)
} }
// Validate all of the inputs. // Validate all of the inputs.
validator := newTxValidator(utxoView, flags, sigCache) validator := newTxValidator(utxoView, flags, sigCache, hashCache)
return validator.Validate(txValItems) return validator.Validate(txValItems)
} }
// checkBlockScripts executes and validates the scripts for all transactions in // checkBlockScripts executes and validates the scripts for all transactions in
// the passed block using multiple goroutines. // the passed block using multiple goroutines.
func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint, scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache) error { func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint,
scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache,
hashCache *txscript.HashCache) error {
// Collect all of the transaction inputs and required information for // Collect all of the transaction inputs and required information for
// validation for all transactions in the block into a single slice. // validation for all transactions in the block into a single slice.
numInputs := 0 numInputs := 0
@ -227,6 +257,28 @@ func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint, scriptFlag
} }
txValItems := make([]*txValidateItem, 0, numInputs) txValItems := make([]*txValidateItem, 0, numInputs)
for _, tx := range block.Transactions() { for _, tx := range block.Transactions() {
hash := tx.Hash()
// If the HashCache is present, and it doesn't yet contain the
// partial sighashes for this transaction, then we add the
// sighashes for the transaction. This allows us to take
// advantage of the potential speed savings due to the new
// digest algorithm (BIP0143).
if segwitActive && tx.MsgTx().HasWitness() && hashCache != nil &&
!hashCache.ContainsHashes(hash) {
hashCache.AddSigHashes(tx.MsgTx())
}
var cachedHashes *txscript.TxSigHashes
if segwitActive && tx.MsgTx().HasWitness() {
if hashCache != nil {
cachedHashes, _ = hashCache.GetSigHashes(hash)
} else {
cachedHashes = txscript.NewTxSigHashes(tx.MsgTx())
}
}
for txInIdx, txIn := range tx.MsgTx().TxIn { for txInIdx, txIn := range tx.MsgTx().TxIn {
// Skip coinbases. // Skip coinbases.
if txIn.PreviousOutPoint.Index == math.MaxUint32 { if txIn.PreviousOutPoint.Index == math.MaxUint32 {
@ -237,12 +289,30 @@ func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint, scriptFlag
txInIndex: txInIdx, txInIndex: txInIdx,
txIn: txIn, txIn: txIn,
tx: tx, tx: tx,
sigHashes: cachedHashes,
} }
txValItems = append(txValItems, txVI) txValItems = append(txValItems, txVI)
} }
} }
// Validate all of the inputs. // Validate all of the inputs.
validator := newTxValidator(utxoView, scriptFlags, sigCache) validator := newTxValidator(utxoView, scriptFlags, sigCache, hashCache)
return validator.Validate(txValItems) start := time.Now()
if err := validator.Validate(txValItems); err != nil {
return err
}
elapsed := time.Since(start)
log.Tracef("block %v took %v to verify", block.Hash(), elapsed)
// If the HashCache is present, once we have validated the block, we no
// longer need the cached hashes for these transactions, so we purge
// them from the cache.
if hashCache != nil {
for _, tx := range block.Transactions() {
hashCache.PurgeSigHashes(tx.Hash())
}
}
return nil
} }

View file

@ -43,7 +43,7 @@ func TestCheckBlockScripts(t *testing.T) {
scriptFlags := txscript.ScriptBip16 scriptFlags := txscript.ScriptBip16
err = blockchain.TstCheckBlockScripts(blocks[0], view, scriptFlags, err = blockchain.TstCheckBlockScripts(blocks[0], view, scriptFlags,
nil) nil, nil)
if err != nil { if err != nil {
t.Errorf("Transaction script validation failed: %v\n", err) t.Errorf("Transaction script validation failed: %v\n", err)
return return

View file

@ -19,10 +19,6 @@ import (
) )
const ( const (
// MaxSigOpsPerBlock is the maximum number of signature operations
// allowed for a block. It is a fraction of the max block payload size.
MaxSigOpsPerBlock = wire.MaxBlockPayload / 50
// MaxTimeOffsetSeconds is the maximum number of seconds a block time // MaxTimeOffsetSeconds is the maximum number of seconds a block time
// is allowed to be ahead of the current time. This is currently 2 // is allowed to be ahead of the current time. This is currently 2
// hours. // hours.
@ -220,10 +216,10 @@ func CheckTransactionSanity(tx *btcutil.Tx) error {
// A transaction must not exceed the maximum allowed block payload when // A transaction must not exceed the maximum allowed block payload when
// serialized. // serialized.
serializedTxSize := tx.MsgTx().SerializeSize() serializedTxSize := tx.MsgTx().SerializeSizeStripped()
if serializedTxSize > wire.MaxBlockPayload { if serializedTxSize > MaxBlockBaseSize {
str := fmt.Sprintf("serialized transaction is too big - got "+ str := fmt.Sprintf("serialized transaction is too big - got "+
"%d, max %d", serializedTxSize, wire.MaxBlockPayload) "%d, max %d", serializedTxSize, MaxBlockBaseSize)
return ruleError(ErrTxTooBig, str) return ruleError(ErrTxTooBig, str)
} }
@ -494,10 +490,10 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// A block must not exceed the maximum allowed block payload when // A block must not exceed the maximum allowed block payload when
// serialized. // serialized.
serializedSize := msgBlock.SerializeSize() serializedSize := msgBlock.SerializeSizeStripped()
if serializedSize > wire.MaxBlockPayload { if serializedSize > MaxBlockBaseSize {
str := fmt.Sprintf("serialized block is too big - got %d, "+ str := fmt.Sprintf("serialized block is too big - got %d, "+
"max %d", serializedSize, wire.MaxBlockPayload) "max %d", serializedSize, MaxBlockBaseSize)
return ruleError(ErrBlockTooBig, str) return ruleError(ErrBlockTooBig, str)
} }
@ -532,7 +528,7 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// checks. Bitcoind builds the tree here and checks the merkle root // checks. Bitcoind builds the tree here and checks the merkle root
// after the following checks, but there is no reason not to check the // after the following checks, but there is no reason not to check the
// merkle root matches here. // merkle root matches here.
merkles := BuildMerkleTreeStore(block.Transactions()) merkles := BuildMerkleTreeStore(block.Transactions(), false)
calculatedMerkleRoot := merkles[len(merkles)-1] calculatedMerkleRoot := merkles[len(merkles)-1]
if !header.MerkleRoot.IsEqual(calculatedMerkleRoot) { if !header.MerkleRoot.IsEqual(calculatedMerkleRoot) {
str := fmt.Sprintf("block merkle root is invalid - block "+ str := fmt.Sprintf("block merkle root is invalid - block "+
@ -541,6 +537,17 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
return ruleError(ErrBadMerkleRoot, str) return ruleError(ErrBadMerkleRoot, str)
} }
// Next, validate the witness commitment (if any) within the block.
// This involves asserting that if the coinbase contains the special
// commitment output, then this merkle root matches a computed merkle
// root of all the wtxid's of the transactions within the block. In
// addition, various other checks against the coinbase's witness stack.
// TODO(roasbeef): only perform this check if we expect block to have a
// witness commitment
if err := ValidateWitnessCommitment(block); err != nil {
return err
}
// Check for duplicate transactions. This check will be fairly quick // Check for duplicate transactions. This check will be fairly quick
// since the transaction hashes are already cached due to building the // since the transaction hashes are already cached due to building the
// merkle tree above. // merkle tree above.
@ -562,11 +569,11 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// We could potentially overflow the accumulator so check for // We could potentially overflow the accumulator so check for
// overflow. // overflow.
lastSigOps := totalSigOps lastSigOps := totalSigOps
totalSigOps += CountSigOps(tx) totalSigOps += (CountSigOps(tx) * WitnessScaleFactor)
if totalSigOps < lastSigOps || totalSigOps > MaxSigOpsPerBlock { if totalSigOps < lastSigOps || totalSigOps > MaxBlockSigOpsCost {
str := fmt.Sprintf("block contains too many signature "+ str := fmt.Sprintf("block contains too many signature "+
"operations - got %v, max %v", totalSigOps, "operations - got %v, max %v", totalSigOps,
MaxSigOpsPerBlock) MaxBlockSigOpsCost)
return ruleError(ErrTooManySigOps, str) return ruleError(ErrTooManySigOps, str)
} }
} }
@ -801,6 +808,42 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode
return err return err
} }
} }
// Query for the Version Bits state for the segwit soft-fork
// deployment. If segwit is active, we'll switch over to
// enforcing all the new rules.
segwitState, err := b.deploymentState(prevNode,
chaincfg.DeploymentSegwit)
if err != nil {
return err
}
// If segwit is active, then we'll need to fully validate the
// new witness commitment for adherence to the rules.
if segwitState == ThresholdActive {
// Validate the witness commitment (if any) within the
// block. This involves asserting that if the coinbase
// contains the special commitment output, then this
// merkle root matches a computed merkle root of all
// the wtxid's of the transactions within the block. In
// addition, various other checks against the
// coinbase's witness stack.
if err := ValidateWitnessCommitment(block); err != nil {
return err
}
// Once the witness commitment, witness nonce, and sig
// op cost have been validated, we can finally assert
// that the block's weight doesn't exceed the current
// consensus parameter.
blockWeight := GetBlockWeight(block)
if blockWeight > MaxBlockWeight {
str := fmt.Sprintf("block's weight metric is "+
"too high - got %v, max %v",
blockWeight, MaxBlockWeight)
return ruleError(ErrBlockWeightTooHigh, str)
}
}
} }
return nil return nil
@ -1033,6 +1076,8 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// after the timestamp defined by txscript.Bip16Activation. See // after the timestamp defined by txscript.Bip16Activation. See
// https://en.bitcoin.it/wiki/BIP_0016 for more details. // https://en.bitcoin.it/wiki/BIP_0016 for more details.
enforceBIP0016 := node.timestamp >= txscript.Bip16Activation.Unix() enforceBIP0016 := node.timestamp >= txscript.Bip16Activation.Unix()
// TODO(roasbeef): should check flag, consult bip 9 log etc
enforceSegWit := true
// The number of signature operations must be less than the maximum // The number of signature operations must be less than the maximum
// allowed per block. Note that the preliminary sanity checks on a // allowed per block. Note that the preliminary sanity checks on a
@ -1041,31 +1086,29 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// signature operations in each of the input transaction public key // signature operations in each of the input transaction public key
// scripts. // scripts.
transactions := block.Transactions() transactions := block.Transactions()
totalSigOps := 0 totalSigOpCost := 0
for i, tx := range transactions { for i, tx := range transactions {
numsigOps := CountSigOps(tx) // Since the first (and only the first) transaction has
if enforceBIP0016 { // already been verified to be a coinbase transaction,
// Since the first (and only the first) transaction has // use i == 0 as an optimization for the flag to
// already been verified to be a coinbase transaction, // countP2SHSigOps for whether or not the transaction is
// use i == 0 as an optimization for the flag to // a coinbase transaction rather than having to do a
// countP2SHSigOps for whether or not the transaction is // full coinbase check again.
// a coinbase transaction rather than having to do a sigOpCost, err := GetSigOpCost(tx, i == 0, view, enforceBIP0016,
// full coinbase check again. enforceSegWit)
numP2SHSigOps, err := CountP2SHSigOps(tx, i == 0, view) if err != nil {
if err != nil { return err
return err
}
numsigOps += numP2SHSigOps
} }
// Check for overflow or going over the limits. We have to do // Check for overflow or going over the limits. We have to do
// this on every loop iteration to avoid overflow. // this on every loop iteration to avoid overflow.
lastSigops := totalSigOps lastSigOpCost := totalSigOpCost
totalSigOps += numsigOps totalSigOpCost += sigOpCost
if totalSigOps < lastSigops || totalSigOps > MaxSigOpsPerBlock { if totalSigOpCost < lastSigOpCost || totalSigOpCost > MaxBlockSigOpsCost {
// TODO(roasbeef): modify error
str := fmt.Sprintf("block contains too many "+ str := fmt.Sprintf("block contains too many "+
"signature operations - got %v, max %v", "signature operations - got %v, max %v",
totalSigOps, MaxSigOpsPerBlock) totalSigOpCost, MaxBlockSigOpsCost)
return ruleError(ErrTooManySigOps, str) return ruleError(ErrTooManySigOps, str)
} }
} }
@ -1196,6 +1239,8 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
} }
} }
// TODO(roasbeef): check bip9 for segwit here, others also
scriptFlags |= txscript.ScriptVerifyWitness
scriptFlags |= txscript.ScriptStrictMultiSig scriptFlags |= txscript.ScriptStrictMultiSig
// Now that the inexpensive checks are done and have passed, verify the // Now that the inexpensive checks are done and have passed, verify the
@ -1203,7 +1248,8 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// expensive ECDSA signature check scripts. Doing this last helps // expensive ECDSA signature check scripts. Doing this last helps
// prevent CPU exhaustion attacks. // prevent CPU exhaustion attacks.
if runScripts { if runScripts {
err := checkBlockScripts(block, view, scriptFlags, b.sigCache) err := checkBlockScripts(block, view, scriptFlags, b.sigCache,
b.hashCache)
if err != nil { if err != nil {
return err return err
} }

View file

@ -112,7 +112,7 @@ func TestCheckSerializedHeight(t *testing.T) {
// Create an empty coinbase template to be used in the tests below. // Create an empty coinbase template to be used in the tests below.
coinbaseOutpoint := wire.NewOutPoint(&chainhash.Hash{}, math.MaxUint32) coinbaseOutpoint := wire.NewOutPoint(&chainhash.Hash{}, math.MaxUint32)
coinbaseTx := wire.NewMsgTx(1) coinbaseTx := wire.NewMsgTx(1)
coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil)) coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil, nil))
// Expected rule errors. // Expected rule errors.
missingHeightError := blockchain.RuleError{ missingHeightError := blockchain.RuleError{