BIP0141+blockchain: implement segwit block validation rules

This commit implements the new block validation rules as defined by
BIP0141. The new rules include the constraint that if a block has
transactions with witness data, then there MUST be a commitment within
the coinbase transaction to the root of a new merkle tree which commits
to the wtxids of all transactions. Additionally, rather than limiting
the size of a block in bytes, blocks are now limited by their total
weight in weight units. Similarly, a newly defined “sig op cost” is now
used to limit the signature validation cost of transactions found
within blocks.
Olaoluwa Osuntokun 2016-10-18 18:33:20 -07:00 committed by Dave Collins
parent 1b80b334bc
commit d0768abcc4
9 changed files with 274 additions and 116 deletions
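As a quick illustration of the weight metric described in the message above: block weight counts every non-witness byte four times and every witness byte once. A minimal sketch follows, computed from the wire serialization helpers that appear later in this diff; the local constant and the blockWeight helper are illustrative stand-ins, not the GetBlockWeight function this change adds.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/wire"
)

// witnessScaleFactor mirrors the BIP0141 scale factor of 4.
const witnessScaleFactor = 4

// blockWeight computes weight = base_size*(factor-1) + total_size, which is
// equivalent to counting non-witness bytes four times and witness bytes once.
func blockWeight(blk *wire.MsgBlock) int64 {
	baseSize := int64(blk.SerializeSizeStripped()) // serialized size without witness data
	totalSize := int64(blk.SerializeSize())        // serialized size including witness data
	return baseSize*(witnessScaleFactor-1) + totalSize
}

func main() {
	// An empty block: just a header plus a zero transaction count.
	blk := &wire.MsgBlock{}
	fmt.Println("weight:", blockWeight(blk))
}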


@ -46,18 +46,22 @@ type BestState struct {
Height int32 // The height of the block.
Bits uint32 // The difficulty bits of the block.
BlockSize uint64 // The size of the block.
BlockWeight uint64 // The weight of the block.
NumTxns uint64 // The number of txns in the block.
TotalTxns uint64 // The total number of txns in the chain.
MedianTime time.Time // Median time as per CalcPastMedianTime.
}
// newBestState returns a new best state instance for the given parameters.
func newBestState(node *blockNode, blockSize, numTxns, totalTxns uint64, medianTime time.Time) *BestState {
func newBestState(node *blockNode, blockSize, blockWeight, numTxns,
totalTxns uint64, medianTime time.Time) *BestState {
return &BestState{
Hash: node.hash,
Height: node.height,
Bits: node.bits,
BlockSize: blockSize,
BlockWeight: blockWeight,
NumTxns: numTxns,
TotalTxns: totalTxns,
MedianTime: medianTime,
@ -80,6 +84,7 @@ type BlockChain struct {
notifications NotificationCallback
sigCache *txscript.SigCache
indexManager IndexManager
hashCache *txscript.HashCache
// The following fields are calculated based upon the provided chain
// parameters. They are also set when the instance is created and
@ -616,8 +621,9 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U
b.stateLock.RUnlock()
numTxns := uint64(len(block.MsgBlock().Transactions))
blockSize := uint64(block.MsgBlock().SerializeSize())
state := newBestState(node, blockSize, numTxns, curTotalTxns+numTxns,
medianTime)
blockWeight := uint64(GetBlockWeight(block))
state := newBestState(node, blockSize, blockWeight, numTxns,
curTotalTxns+numTxns, medianTime)
// Atomically insert info into the database.
err = b.db.Update(func(dbTx database.Tx) error {
@ -744,9 +750,10 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view
b.stateLock.RUnlock()
numTxns := uint64(len(prevBlock.MsgBlock().Transactions))
blockSize := uint64(prevBlock.MsgBlock().SerializeSize())
blockWeight := uint64(GetBlockWeight(prevBlock))
newTotalTxns := curTotalTxns - uint64(len(block.MsgBlock().Transactions))
state := newBestState(prevNode, blockSize, numTxns, newTotalTxns,
medianTime)
state := newBestState(prevNode, blockSize, blockWeight, numTxns,
newTotalTxns, medianTime)
err = b.db.Update(func(dbTx database.Tx) error {
// Update best block state.
@ -1328,6 +1335,16 @@ type Config struct {
// This field can be nil if the caller does not wish to make use of an
// index manager.
IndexManager IndexManager
// HashCache defines a transaction hash mid-state cache to use when
// validating transactions. This cache has the potential to greatly
// speed up transaction validation as re-using the pre-calculated
// mid-state eliminates the O(N^2) validation complexity due to the
// SigHashAll flag.
//
// This field can be nil if the caller is not interested in using a
// hash cache.
HashCache *txscript.HashCache
}
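As a hedged sketch of how a caller might exercise the cache described above, the snippet below uses only the HashCache methods that appear in this diff (ContainsHashes, AddSigHashes, GetSigHashes, PurgeSigHashes); the NewHashCache constructor and its argument are assumptions.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Assumed constructor argument: a maximum number of cached entries.
	cache := txscript.NewHashCache(1000)

	tx := wire.NewMsgTx(1)
	txHash := tx.TxHash()

	// Populate the BIP0143 sighash mid-state once, up front.
	if !cache.ContainsHashes(&txHash) {
		cache.AddSigHashes(tx)
	}

	// Validation goroutines can then share the same pre-computed mid-state.
	sigHashes, _ := cache.GetSigHashes(&txHash)
	fmt.Println("cached mid-state present:", sigHashes != nil)

	// Once the transaction has been fully validated, the entry can be evicted.
	cache.PurgeSigHashes(&txHash)
}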
// New returns a BlockChain instance using the provided configuration details.
@ -1378,6 +1395,7 @@ func New(config *Config) (*BlockChain, error) {
maxRetargetTimespan: targetTimespan * adjustmentFactor,
blocksPerRetarget: int32(targetTimespan / targetTimePerBlock),
index: newBlockIndex(config.DB, params),
hashCache: config.HashCache,
orphans: make(map[chainhash.Hash]*orphanBlock),
prevOrphans: make(map[chainhash.Hash][]*orphanBlock),
warningCaches: newThresholdCaches(vbNumBits),


@ -1092,8 +1092,9 @@ func (b *BlockChain) createChainState() error {
// genesis block, use its timestamp for the median time.
numTxns := uint64(len(genesisBlock.MsgBlock().Transactions))
blockSize := uint64(genesisBlock.MsgBlock().SerializeSize())
b.stateSnapshot = newBestState(b.bestNode, blockSize, numTxns, numTxns,
time.Unix(b.bestNode.timestamp, 0))
blockWeight := uint64(GetBlockWeight(genesisBlock))
b.stateSnapshot = newBestState(b.bestNode, blockSize, blockWeight,
numTxns, numTxns, time.Unix(b.bestNode.timestamp, 0))
// Create the initial database chain state including creating the
// necessary index buckets and inserting the genesis block.
@ -1197,11 +1198,12 @@ func (b *BlockChain) initChainState() error {
// Initialize the state related to the best block.
blockSize := uint64(len(blockBytes))
blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block)))
numTxns := uint64(len(block.Transactions))
b.stateSnapshot = newBestState(b.bestNode, blockSize, numTxns,
state.totalTxns, medianTime)
b.stateSnapshot = newBestState(b.bestNode, blockSize, blockWeight,
numTxns, state.totalTxns, medianTime)
isStateInitialized = true
return nil
})
if err != nil {


@ -41,6 +41,10 @@ const (
// maximum allowed size.
ErrBlockTooBig
// ErrBlockWeightTooHigh indicates that the block's computed weight
// metric exceeds the maximum allowed value.
ErrBlockWeightTooHigh
// ErrBlockVersionTooOld indicates the block version is too old and is
// no longer accepted since the majority of the network has upgraded
// to a newer version.
@ -198,6 +202,20 @@ const (
// such signature verification failures and execution past the end of
// the stack.
ErrScriptValidation
// ErrUnexpectedWitness indicates that a block includes transactions
// with witness data, but doesn't also have a witness commitment within
// the coinbase transaction.
ErrUnexpectedWitness
// ErrInvalidWitnessCommitment indicates that a block's witness
// commitment is not well formed.
ErrInvalidWitnessCommitment
// ErrWitnessCommitmentMismatch indicates that the witness commitment
// included in the block's coinbase transaction doesn't match the
// manually computed witness commitment.
ErrWitnessCommitmentMismatch
)
// Map of ErrorCode values back to their constant names for pretty printing.
@ -205,6 +223,7 @@ var errorCodeStrings = map[ErrorCode]string{
ErrDuplicateBlock: "ErrDuplicateBlock",
ErrBlockTooBig: "ErrBlockTooBig",
ErrBlockVersionTooOld: "ErrBlockVersionTooOld",
ErrBlockWeightTooHigh: "ErrBlockWeightTooHigh",
ErrInvalidTime: "ErrInvalidTime",
ErrTimeTooOld: "ErrTimeTooOld",
ErrTimeTooNew: "ErrTimeTooNew",
@ -240,6 +259,9 @@ var errorCodeStrings = map[ErrorCode]string{
ErrBadCoinbaseHeight: "ErrBadCoinbaseHeight",
ErrScriptMalformed: "ErrScriptMalformed",
ErrScriptValidation: "ErrScriptValidation",
ErrUnexpectedWitness: "ErrUnexpectedWitness",
ErrInvalidWitnessCommitment: "ErrInvalidWitnessCommitment",
ErrWitnessCommitmentMismatch: "ErrWitnessCommitmentMismatch",
}
// String returns the ErrorCode as a human-readable name.


@ -117,7 +117,7 @@ func TestFullBlocks(t *testing.T) {
// Ensure there is an error due to deserializing the block.
var msgBlock wire.MsgBlock
err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0)
err := msgBlock.BtcDecode(bytes.NewReader(item.RawBlock), 0, wire.BaseEncoding)
if _, ok := err.(*wire.MessageError); !ok {
t.Fatalf("block %q (hash %s, height %d) should have "+
"failed to decode", item.Name, blockHash,


@ -309,7 +309,7 @@ func calcMerkleRoot(txns []*wire.MsgTx) chainhash.Hash {
for _, tx := range txns {
utilTxns = append(utilTxns, btcutil.NewTx(tx))
}
merkles := blockchain.BuildMerkleTreeStore(utilTxns)
merkles := blockchain.BuildMerkleTreeStore(utilTxns, false)
return *merkles[len(merkles)-1]
}
@ -635,10 +635,10 @@ func nonCanonicalVarInt(val uint32) []byte {
// encoding.
func encodeNonCanonicalBlock(b *wire.MsgBlock) []byte {
var buf bytes.Buffer
b.Header.BtcEncode(&buf, 0)
b.Header.BtcEncode(&buf, 0, wire.BaseEncoding)
buf.Write(nonCanonicalVarInt(uint32(len(b.Transactions))))
for _, tx := range b.Transactions {
tx.BtcEncode(&buf, 0)
tx.BtcEncode(&buf, 0, wire.BaseEncoding)
}
return buf.Bytes()
}
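The wire.BaseEncoding argument threaded through the tests above selects pre-segwit serialization. A small sketch contrasting it with witness-aware encoding follows; wire.WitnessEncoding is assumed to be the counterpart constant, and the one-byte dummy witness is made up for illustration.

package main

import (
	"bytes"
	"fmt"

	"github.com/btcsuite/btcd/wire"
)

func main() {
	tx := wire.NewMsgTx(1)
	tx.AddTxIn(wire.NewTxIn(&wire.OutPoint{}, nil, wire.TxWitness{[]byte{0x01}}))
	tx.AddTxOut(wire.NewTxOut(0, nil))

	var base, witness bytes.Buffer
	// BaseEncoding serializes without the marker, flag, and witness fields.
	if err := tx.BtcEncode(&base, 0, wire.BaseEncoding); err != nil {
		panic(err)
	}
	// WitnessEncoding (assumed counterpart) includes the BIP0144 witness fields.
	if err := tx.BtcEncode(&witness, 0, wire.WitnessEncoding); err != nil {
		panic(err)
	}

	// The witness-aware encoding is strictly larger whenever witness data is present.
	fmt.Printf("base: %d bytes, witness: %d bytes\n", base.Len(), witness.Len())
}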


@ -8,6 +8,7 @@ import (
"fmt"
"math"
"runtime"
"time"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
@ -19,6 +20,7 @@ type txValidateItem struct {
txInIndex int
txIn *wire.TxIn
tx *btcutil.Tx
sigHashes *txscript.TxSigHashes
}
// txValidator provides a type which asynchronously validates transaction
@ -31,6 +33,7 @@ type txValidator struct {
utxoView *UtxoViewpoint
flags txscript.ScriptFlags
sigCache *txscript.SigCache
hashCache *txscript.HashCache
}
// sendResult sends the result of a script pair validation on the internal
@ -83,15 +86,19 @@ out:
// Create a new script engine for the script pair.
sigScript := txIn.SignatureScript
witness := txIn.Witness
inputAmount := txEntry.AmountByIndex(originTxIndex)
vm, err := txscript.NewEngine(pkScript, txVI.tx.MsgTx(),
txVI.txInIndex, v.flags, v.sigCache)
txVI.txInIndex, v.flags, v.sigCache, txVI.sigHashes,
inputAmount)
if err != nil {
str := fmt.Sprintf("failed to parse input "+
"%s:%d which references output %s:%d - "+
"%v (input script bytes %x, prev output "+
"script bytes %x)", txVI.tx.Hash(),
txVI.txInIndex, originTxHash,
originTxIndex, err, sigScript, pkScript)
"%v (input witness %x, input script "+
"bytes %x, prev output script bytes %x)",
txVI.tx.Hash(), txVI.txInIndex, originTxHash,
originTxIndex, err, witness, sigScript,
pkScript)
err := ruleError(ErrScriptMalformed, str)
v.sendResult(err)
break out
@ -101,10 +108,11 @@ out:
if err := vm.Execute(); err != nil {
str := fmt.Sprintf("failed to validate input "+
"%s:%d which references output %s:%d - "+
"%v (input script bytes %x, prev output "+
"script bytes %x)", txVI.tx.Hash(),
txVI.txInIndex, originTxHash,
originTxIndex, err, sigScript, pkScript)
"%v (input witness %x, input script "+
"bytes %x, prev output script bytes %x)",
txVI.tx.Hash(), txVI.txInIndex,
originTxHash, originTxIndex, err,
witness, sigScript, pkScript)
err := ruleError(ErrScriptValidation, str)
v.sendResult(err)
break out
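For reference, the expanded engine construction above can be exercised standalone. The sketch below mirrors the argument order used in this diff (previous output script, spending transaction, input index, flags, signature cache, cached sighash mid-state, input amount); the anyone-can-spend OP_TRUE script and zero amount are invented, and the nil caches are acceptable only because nothing in this script checks a signature.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Previous output being spent: a trivially-true, anyone-can-spend script.
	pkScript := []byte{txscript.OP_TRUE}
	inputAmount := int64(0) // value of the output being spent, in satoshis

	// Spending transaction with a single input and an empty signature script.
	tx := wire.NewMsgTx(1)
	tx.AddTxIn(wire.NewTxIn(&wire.OutPoint{}, nil, nil))
	tx.AddTxOut(wire.NewTxOut(0, nil))

	flags := txscript.ScriptBip16 | txscript.ScriptVerifyWitness
	vm, err := txscript.NewEngine(pkScript, tx, 0, flags, nil, nil, inputAmount)
	if err != nil {
		panic(err)
	}
	if err := vm.Execute(); err != nil {
		panic(err)
	}
	fmt.Println("input 0 validated")
}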
@ -179,20 +187,38 @@ func (v *txValidator) Validate(items []*txValidateItem) error {
// newTxValidator returns a new instance of txValidator to be used for
// validating transaction scripts asynchronously.
func newTxValidator(utxoView *UtxoViewpoint, flags txscript.ScriptFlags, sigCache *txscript.SigCache) *txValidator {
func newTxValidator(utxoView *UtxoViewpoint, flags txscript.ScriptFlags,
sigCache *txscript.SigCache, hashCache *txscript.HashCache) *txValidator {
return &txValidator{
validateChan: make(chan *txValidateItem),
quitChan: make(chan struct{}),
resultChan: make(chan error),
utxoView: utxoView,
sigCache: sigCache,
hashCache: hashCache,
flags: flags,
}
}
// ValidateTransactionScripts validates the scripts for the passed transaction
// using multiple goroutines.
func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint, flags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint,
flags txscript.ScriptFlags, sigCache *txscript.SigCache,
hashCache *txscript.HashCache) error {
// If the hashcache doesn't yet have the sighash midstates for this
// transaction, then we'll compute them now so we can re-use them
// amongst all worker validation goroutines.
if !hashCache.ContainsHashes(tx.Hash()) {
hashCache.AddSigHashes(tx.MsgTx())
}
// The same pointer to the transaction's sighash midstate will be
// re-used amongst all validation goroutines. By pre-computing the
// sighash here instead of during validation, we ensure the sighashes
// are only computed once.
cachedHashes, _ := hashCache.GetSigHashes(tx.Hash())
// Collect all of the transaction inputs and required information for
// validation.
txIns := tx.MsgTx().TxIn
@ -207,18 +233,22 @@ func ValidateTransactionScripts(tx *btcutil.Tx, utxoView *UtxoViewpoint, flags t
txInIndex: txInIdx,
txIn: txIn,
tx: tx,
sigHashes: cachedHashes,
}
txValItems = append(txValItems, txVI)
}
// Validate all of the inputs.
validator := newTxValidator(utxoView, flags, sigCache)
validator := newTxValidator(utxoView, flags, sigCache, hashCache)
return validator.Validate(txValItems)
}
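A hedged end-to-end sketch of calling the updated ValidateTransactionScripts with its new trailing HashCache parameter follows. The OP_TRUE funding output, the block height of 100, and the cache sizes are invented; AddTxOuts is assumed to populate the view with the funding transaction's outputs.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
)

func main() {
	// Funding transaction with a single anyone-can-spend output.
	fundingTx := wire.NewMsgTx(1)
	fundingTx.AddTxOut(wire.NewTxOut(50000, []byte{txscript.OP_TRUE}))

	// Spending transaction that consumes the funding output.
	spendTx := wire.NewMsgTx(1)
	spendTx.AddTxIn(wire.NewTxIn(
		&wire.OutPoint{Hash: fundingTx.TxHash(), Index: 0}, nil, nil))
	spendTx.AddTxOut(wire.NewTxOut(50000, nil))

	// View containing the spendable output referenced by spendTx.
	view := blockchain.NewUtxoViewpoint()
	view.AddTxOuts(btcutil.NewTx(fundingTx), 100)

	flags := txscript.ScriptBip16 | txscript.ScriptVerifyWitness
	hashCache := txscript.NewHashCache(100) // assumed constructor argument
	err := blockchain.ValidateTransactionScripts(btcutil.NewTx(spendTx), view,
		flags, txscript.NewSigCache(100), hashCache)
	fmt.Println("validation error:", err)
}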
// checkBlockScripts executes and validates the scripts for all transactions in
// the passed block using multiple goroutines.
func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint, scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache) error {
func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint,
scriptFlags txscript.ScriptFlags, sigCache *txscript.SigCache,
hashCache *txscript.HashCache) error {
// Collect all of the transaction inputs and required information for
// validation for all transactions in the block into a single slice.
numInputs := 0
@ -227,6 +257,28 @@ func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint, scriptFlag
}
txValItems := make([]*txValidateItem, 0, numInputs)
for _, tx := range block.Transactions() {
hash := tx.Hash()
// If the HashCache is present, and it doesn't yet contain the
// partial sighashes for this transaction, then we add the
// sighashes for the transaction. This allows us to take
// advantage of the potential speed savings due to the new
// digest algorithm (BIP0143).
if segwitActive && tx.MsgTx().HasWitness() && hashCache != nil &&
!hashCache.ContainsHashes(hash) {
hashCache.AddSigHashes(tx.MsgTx())
}
var cachedHashes *txscript.TxSigHashes
if segwitActive && tx.MsgTx().HasWitness() {
if hashCache != nil {
cachedHashes, _ = hashCache.GetSigHashes(hash)
} else {
cachedHashes = txscript.NewTxSigHashes(tx.MsgTx())
}
}
for txInIdx, txIn := range tx.MsgTx().TxIn {
// Skip coinbases.
if txIn.PreviousOutPoint.Index == math.MaxUint32 {
@ -237,12 +289,30 @@ func checkBlockScripts(block *btcutil.Block, utxoView *UtxoViewpoint, scriptFlag
txInIndex: txInIdx,
txIn: txIn,
tx: tx,
sigHashes: cachedHashes,
}
txValItems = append(txValItems, txVI)
}
}
// Validate all of the inputs.
validator := newTxValidator(utxoView, scriptFlags, sigCache)
return validator.Validate(txValItems)
validator := newTxValidator(utxoView, scriptFlags, sigCache, hashCache)
start := time.Now()
if err := validator.Validate(txValItems); err != nil {
return err
}
elapsed := time.Since(start)
log.Tracef("block %v took %v to verify", block.Hash(), elapsed)
// If the HashCache is present, once we have validated the block, we no
// longer need the cached hashes for these transactions, so we purge
// them from the cache.
if hashCache != nil {
for _, tx := range block.Transactions() {
hashCache.PurgeSigHashes(tx.Hash())
}
}
return nil
}
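When no shared HashCache is supplied, the loop above falls back to computing the BIP0143 mid-state per transaction. A minimal sketch of that fallback, using the one-argument NewTxSigHashes constructor as it appears in this diff, with a dummy transaction:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	tx := wire.NewMsgTx(1)
	tx.AddTxIn(wire.NewTxIn(&wire.OutPoint{}, nil, wire.TxWitness{[]byte{0x01}}))
	tx.AddTxOut(wire.NewTxOut(0, nil))

	// Hash the prevouts, sequences, and outputs once; every input of this
	// transaction can then reuse the mid-state during sighash calculation.
	sigHashes := txscript.NewTxSigHashes(tx)
	fmt.Println("mid-state computed:", sigHashes != nil)
}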


@ -43,7 +43,7 @@ func TestCheckBlockScripts(t *testing.T) {
scriptFlags := txscript.ScriptBip16
err = blockchain.TstCheckBlockScripts(blocks[0], view, scriptFlags,
nil)
nil, nil)
if err != nil {
t.Errorf("Transaction script validation failed: %v\n", err)
return


@ -19,10 +19,6 @@ import (
)
const (
// MaxSigOpsPerBlock is the maximum number of signature operations
// allowed for a block. It is a fraction of the max block payload size.
MaxSigOpsPerBlock = wire.MaxBlockPayload / 50
// MaxTimeOffsetSeconds is the maximum number of seconds a block time
// is allowed to be ahead of the current time. This is currently 2
// hours.
@ -220,10 +216,10 @@ func CheckTransactionSanity(tx *btcutil.Tx) error {
// A transaction must not exceed the maximum allowed block payload when
// serialized.
serializedTxSize := tx.MsgTx().SerializeSize()
if serializedTxSize > wire.MaxBlockPayload {
serializedTxSize := tx.MsgTx().SerializeSizeStripped()
if serializedTxSize > MaxBlockBaseSize {
str := fmt.Sprintf("serialized transaction is too big - got "+
"%d, max %d", serializedTxSize, wire.MaxBlockPayload)
"%d, max %d", serializedTxSize, MaxBlockBaseSize)
return ruleError(ErrTxTooBig, str)
}
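MaxBlockBaseSize caps the stripped (non-witness) serialization used in the checks above. A quick sketch of how the BIP0141 parameters relate; the local constants are assumed to mirror the values the named blockchain constants carry:

package main

import "fmt"

const (
	witnessScaleFactor = 4       // weight units per non-witness byte
	maxBlockWeight     = 4000000 // consensus cap on total block weight
	maxBlockBaseSize   = maxBlockWeight / witnessScaleFactor
)

func main() {
	// Every non-witness byte costs witnessScaleFactor weight units, so a block
	// carrying no witness data is still bounded by the familiar 1,000,000
	// bytes of stripped serialization.
	fmt.Println("max base size:", maxBlockBaseSize)
}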
@ -494,10 +490,10 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// A block must not exceed the maximum allowed block payload when
// serialized.
serializedSize := msgBlock.SerializeSize()
if serializedSize > wire.MaxBlockPayload {
serializedSize := msgBlock.SerializeSizeStripped()
if serializedSize > MaxBlockBaseSize {
str := fmt.Sprintf("serialized block is too big - got %d, "+
"max %d", serializedSize, wire.MaxBlockPayload)
"max %d", serializedSize, MaxBlockBaseSize)
return ruleError(ErrBlockTooBig, str)
}
@ -532,7 +528,7 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// checks. Bitcoind builds the tree here and checks the merkle root
// after the following checks, but there is no reason not to check the
// merkle root matches here.
merkles := BuildMerkleTreeStore(block.Transactions())
merkles := BuildMerkleTreeStore(block.Transactions(), false)
calculatedMerkleRoot := merkles[len(merkles)-1]
if !header.MerkleRoot.IsEqual(calculatedMerkleRoot) {
str := fmt.Sprintf("block merkle root is invalid - block "+
@ -541,6 +537,17 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
return ruleError(ErrBadMerkleRoot, str)
}
// Next, validate the witness commitment (if any) within the block.
// This involves asserting that if the coinbase contains the special
// commitment output, then this merkle root matches a computed merkle
// root of all the wtxid's of the transactions within the block. In
// addition, various other checks against the coinbase's witness stack.
// TODO(roasbeef): only perform this check if we expect block to have a
// witness commitment
if err := ValidateWitnessCommitment(block); err != nil {
return err
}
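ValidateWitnessCommitment, added by this change, enforces the commitment described in the comment above. As a hedged sketch of the underlying BIP0141 arithmetic (not the function's implementation): the commitment is the double-SHA256 of the witness merkle root concatenated with the 32-byte witness nonce carried in the coinbase input's witness, and it lives behind the 6a24aa21a9ed prefix of a coinbase output script. The root and nonce below are dummies.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

func main() {
	var witnessRoot chainhash.Hash // merkle root over wtxids (coinbase wtxid = all zeroes)
	var witnessNonce [32]byte      // taken from the coinbase input's witness stack

	// commitment = SHA256d(witness_root || witness_nonce)
	commitment := chainhash.DoubleHashH(append(witnessRoot[:], witnessNonce[:]...))

	// The coinbase must carry an output whose script is:
	//   OP_RETURN <36-byte push: 0xaa21a9ed || commitment>
	script := append([]byte{0x6a, 0x24, 0xaa, 0x21, 0xa9, 0xed}, commitment[:]...)
	fmt.Printf("commitment output script: %x\n", script)
}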
// Check for duplicate transactions. This check will be fairly quick
// since the transaction hashes are already cached due to building the
// merkle tree above.
@ -562,11 +569,11 @@ func checkBlockSanity(block *btcutil.Block, powLimit *big.Int, timeSource Median
// We could potentially overflow the accumulator so check for
// overflow.
lastSigOps := totalSigOps
totalSigOps += CountSigOps(tx)
if totalSigOps < lastSigOps || totalSigOps > MaxSigOpsPerBlock {
totalSigOps += (CountSigOps(tx) * WitnessScaleFactor)
if totalSigOps < lastSigOps || totalSigOps > MaxBlockSigOpsCost {
str := fmt.Sprintf("block contains too many signature "+
"operations - got %v, max %v", totalSigOps,
MaxSigOpsPerBlock)
MaxBlockSigOpsCost)
return ruleError(ErrTooManySigOps, str)
}
}
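Scaling each legacy sigop by the witness scale factor and comparing against the new cost cap keeps the old effective budget of 20,000 sigops per block. A tiny illustration, with local constants assumed to match the named ones:

package main

import "fmt"

func main() {
	const witnessScaleFactor = 4
	const maxBlockSigOpsCost = 80000

	// One sigop more than the old 20,000 per-block limit.
	legacySigOps := 20001
	cost := legacySigOps * witnessScaleFactor
	fmt.Println(cost, cost > maxBlockSigOpsCost) // 80004 true
}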
@ -801,6 +808,42 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode
return err
}
}
// Query for the Version Bits state for the segwit soft-fork
// deployment. If segwit is active, we'll switch over to
// enforcing all the new rules.
segwitState, err := b.deploymentState(prevNode,
chaincfg.DeploymentSegwit)
if err != nil {
return err
}
// If segwit is active, then we'll need to fully validate the
// new witness commitment for adherence to the rules.
if segwitState == ThresholdActive {
// Validate the witness commitment (if any) within the
// block. This involves asserting that if the coinbase
// contains the special commitment output, then this
// merkle root matches a computed merkle root of all
// the wtxid's of the transactions within the block. In
// addition, various other checks against the
// coinbase's witness stack.
if err := ValidateWitnessCommitment(block); err != nil {
return err
}
// Once the witness commitment, witness nonce, and sig
// op cost have been validated, we can finally assert
// that the block's weight doesn't exceed the current
// consensus parameter.
blockWeight := GetBlockWeight(block)
if blockWeight > MaxBlockWeight {
str := fmt.Sprintf("block's weight metric is "+
"too high - got %v, max %v",
blockWeight, MaxBlockWeight)
return ruleError(ErrBlockWeightTooHigh, str)
}
}
}
return nil
@ -1033,6 +1076,8 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// after the timestamp defined by txscript.Bip16Activation. See
// https://en.bitcoin.it/wiki/BIP_0016 for more details.
enforceBIP0016 := node.timestamp >= txscript.Bip16Activation.Unix()
// TODO(roasbeef): should check flag, consult bip 9 log etc
enforceSegWit := true
// The number of signature operations must be less than the maximum
// allowed per block. Note that the preliminary sanity checks on a
@ -1041,31 +1086,29 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// signature operations in each of the input transaction public key
// scripts.
transactions := block.Transactions()
totalSigOps := 0
totalSigOpCost := 0
for i, tx := range transactions {
numsigOps := CountSigOps(tx)
if enforceBIP0016 {
// Since the first (and only the first) transaction has
// already been verified to be a coinbase transaction,
// use i == 0 as an optimization for the flag to
// countP2SHSigOps for whether or not the transaction is
// a coinbase transaction rather than having to do a
// full coinbase check again.
numP2SHSigOps, err := CountP2SHSigOps(tx, i == 0, view)
sigOpCost, err := GetSigOpCost(tx, i == 0, view, enforceBIP0016,
enforceSegWit)
if err != nil {
return err
}
numsigOps += numP2SHSigOps
}
// Check for overflow or going over the limits. We have to do
// this on every loop iteration to avoid overflow.
lastSigops := totalSigOps
totalSigOps += numsigOps
if totalSigOps < lastSigops || totalSigOps > MaxSigOpsPerBlock {
lastSigOpCost := totalSigOpCost
totalSigOpCost += sigOpCost
if totalSigOpCost < lastSigOpCost || totalSigOpCost > MaxBlockSigOpsCost {
// TODO(roasbeef): modify error
str := fmt.Sprintf("block contains too many "+
"signature operations - got %v, max %v",
totalSigOps, MaxSigOpsPerBlock)
totalSigOpCost, MaxBlockSigOpsCost)
return ruleError(ErrTooManySigOps, str)
}
}
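GetSigOpCost folds legacy, P2SH, and witness accounting into one metric. A hedged sketch of that weighting follows; the helper is illustrative, not the blockchain.GetSigOpCost implementation:

package main

import "fmt"

const witnessScaleFactor = 4

// sigOpCost illustrates the BIP0141 weighting: pre-segwit and P2SH signature
// operations are scaled up by the witness scale factor, while signature
// operations executed from witness data count at face value.
func sigOpCost(legacySigOps, p2shSigOps, witnessSigOps int) int {
	return (legacySigOps+p2shSigOps)*witnessScaleFactor + witnessSigOps
}

func main() {
	// e.g. a transaction with 2 legacy sigops, 1 P2SH sigop, and 3 witness sigops.
	fmt.Println("sig op cost:", sigOpCost(2, 1, 3)) // (2+1)*4 + 3 = 15
}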
@ -1196,6 +1239,8 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
}
}
// TODO(roasbeef): check bip9 for segwit here, others also
scriptFlags |= txscript.ScriptVerifyWitness
scriptFlags |= txscript.ScriptStrictMultiSig
// Now that the inexpensive checks are done and have passed, verify the
@ -1203,7 +1248,8 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
// expensive ECDSA signature check scripts. Doing this last helps
// prevent CPU exhaustion attacks.
if runScripts {
err := checkBlockScripts(block, view, scriptFlags, b.sigCache)
err := checkBlockScripts(block, view, scriptFlags, b.sigCache,
b.hashCache)
if err != nil {
return err
}


@ -112,7 +112,7 @@ func TestCheckSerializedHeight(t *testing.T) {
// Create an empty coinbase template to be used in the tests below.
coinbaseOutpoint := wire.NewOutPoint(&chainhash.Hash{}, math.MaxUint32)
coinbaseTx := wire.NewMsgTx(1)
coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil))
coinbaseTx.AddTxIn(wire.NewTxIn(coinbaseOutpoint, nil, nil))
// Expected rule errors.
missingHeightError := blockchain.RuleError{