lbcd/blockchain/chain_test.go
Dave Collins 34b1373a68
blockchain: Refactor inv discovery for chain view.
This refactors the code that locates blocks (inventory discovery) out of
server and into blockchain where it can make use of the new, much more
efficient chain view and be tested more easily.  As an aside, it really
belongs in blockchain anyway since it deals purely with the block
index and best chain.

Since the majority of the network has moved to header-based semantics,
this also provides an additional optimization to allow headers to be
located directly versus needing to first discover the hashes and then
fetch the headers.

The new functions are named LocateBlocks and LocateHeaders.  The former
returns a slice of located hashes and the latter returns a slice of
located headers.

Finally, it also updates the RPC server getheaders call and related
plumbing to use the new LocateHeaders function.

A comprehensive suite of tests is provided to ensure both functions
behave correctly for both well-formed and poorly formed block locators.
2017-08-24 14:15:34 -05:00
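As context for the new API, here is a minimal, hypothetical sketch of how the getheaders plumbing described above might drive LocateHeaders. The LocateHeaders and LocateBlocks calls match the signatures exercised by the tests in this file; the onGetHeaders wrapper, its parameters, and the BlockLocator conversion are illustrative assumptions rather than code from this commit.

// Hypothetical illustration only; assumes imports of the blockchain and
// wire packages.
func onGetHeaders(chain *blockchain.BlockChain, msg *wire.MsgGetHeaders) *wire.MsgHeaders {
	// The locator hashes from the wire message serve as the block
	// locator (assumed here to be a slice of block hashes).
	locator := blockchain.BlockLocator(msg.BlockLocatorHashes)

	// Headers are located starting after the most recent locator hash
	// found in the local main chain (falling back to the block after
	// the genesis block for unknown or weak locators), up to and
	// including the stop hash, subject to the protocol maximum.
	headers := chain.LocateHeaders(locator, &msg.HashStop)

	// LocateBlocks is the hash-based analogue and takes an explicit
	// limit, e.g.:
	//   hashes := chain.LocateBlocks(locator, &msg.HashStop, wire.MaxBlocksPerMsg)

	reply := wire.NewMsgHeaders()
	for i := range headers {
		reply.AddBlockHeader(&headers[i])
	}
	return reply
}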


// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"reflect"
	"testing"
	"time"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
)

// TestHaveBlock tests the HaveBlock API to ensure proper functionality.
func TestHaveBlock(t *testing.T) {
// Load up blocks such that there is a side chain.
// (genesis block) -> 1 -> 2 -> 3 -> 4
// \-> 3a
testFiles := []string{
"blk_0_to_4.dat.bz2",
"blk_3A.dat.bz2",
}
var blocks []*btcutil.Block
for _, file := range testFiles {
blockTmp, err := loadBlocks(file)
if err != nil {
t.Errorf("Error loading file: %v\n", err)
return
}
blocks = append(blocks, blockTmp...)
}
// Create a new database and chain instance to run tests against.
chain, teardownFunc, err := chainSetup("haveblock",
&chaincfg.MainNetParams)
if err != nil {
t.Errorf("Failed to setup chain instance: %v", err)
return
}
defer teardownFunc()
// Since we're not dealing with the real block chain, set the coinbase
// maturity to 1.
chain.TstSetCoinbaseMaturity(1)
for i := 1; i < len(blocks); i++ {
_, isOrphan, err := chain.ProcessBlock(blocks[i], BFNone)
if err != nil {
t.Errorf("ProcessBlock fail on block %v: %v\n", i, err)
return
}
if isOrphan {
t.Errorf("ProcessBlock incorrectly returned block %v "+
"is an orphan\n", i)
return
}
}
// Insert an orphan block.
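// Block100000 is block 100,000 of the main chain (its hash is checked
// below), so its parent is nowhere in the short test chain built above
// and processing it must report it as an orphan.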
_, isOrphan, err := chain.ProcessBlock(btcutil.NewBlock(&Block100000),
BFNone)
if err != nil {
t.Errorf("Unable to process block: %v", err)
return
}
if !isOrphan {
t.Errorf("ProcessBlock indicated block is an not orphan when " +
"it should be\n")
return
}
tests := []struct {
hash string
want bool
}{
// Genesis block should be present (in the main chain).
{hash: chaincfg.MainNetParams.GenesisHash.String(), want: true},
// Block 3a should be present (on a side chain).
{hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true},
// Block 100000 should be present (as an orphan).
{hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true},
// Random hashes should not be available.
{hash: "123", want: false},
}
for i, test := range tests {
hash, err := chainhash.NewHashFromStr(test.hash)
if err != nil {
t.Errorf("NewHashFromStr: %v", err)
continue
}
result, err := chain.HaveBlock(hash)
if err != nil {
t.Errorf("HaveBlock #%d unexpected error: %v", i, err)
return
}
if result != test.want {
t.Errorf("HaveBlock #%d got %v want %v", i, result,
test.want)
continue
}
}
}

// TestCalcSequenceLock tests the LockTimeToSequence function, and the
// CalcSequenceLock method of a Chain instance. The tests exercise several
// combinations of inputs to the CalcSequenceLock function in order to ensure
// the returned SequenceLocks are correct for each test instance.
func TestCalcSequenceLock(t *testing.T) {
netParams := &chaincfg.SimNetParams
// We need to activate CSV in order to test the processing logic, so
// manually craft the block version that's used to signal the soft-fork
// activation.
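// 0x20000000 is the BIP 9 version-bits prefix (top three bits 001);
// OR-ing in the CSV deployment bit signals support for the soft fork.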
csvBit := netParams.Deployments[chaincfg.DeploymentCSV].BitNumber
blockVersion := int32(0x20000000 | (uint32(1) << csvBit))
// Generate enough synthetic blocks to activate CSV.
chain := newFakeChain(netParams)
node := chain.bestChain.Tip()
blockTime := node.Header().Timestamp
numBlocksToActivate := (netParams.MinerConfirmationWindow * 3)
for i := uint32(0); i < numBlocksToActivate; i++ {
blockTime = blockTime.Add(time.Second)
node = newFakeNode(node, blockVersion, 0, blockTime)
chain.index.AddNode(node)
chain.bestChain.SetTip(node)
}
// Create a utxo view with a fake utxo for the inputs used in the
// transactions created below. This utxo is added such that it has an
// age of 4 blocks.
targetTx := btcutil.NewTx(&wire.MsgTx{
TxOut: []*wire.TxOut{{
PkScript: nil,
Value: 10,
}},
})
utxoView := NewUtxoViewpoint()
utxoView.AddTxOuts(targetTx, int32(numBlocksToActivate)-4)
utxoView.SetBestHash(&node.hash)
// Create an outpoint that references the fake utxo created above for
// use as an input in the transactions created in the tests. The utxo
// has an age of 4 blocks. Note that the sequence lock heights are
// always calculated from the same point of view that they were
// originally calculated from for a given utxo. That is to say, the
// height prior to it.
utxo := wire.OutPoint{
Hash: *targetTx.Hash(),
Index: 0,
}
prevUtxoHeight := int32(numBlocksToActivate) - 4
// Obtain the median time past from the PoV of the input created above.
// The MTP for the input is the MTP from the PoV of the block *prior*
// to the one that included it.
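// The fake utxo above was added at a height 4 below the tip, so the
// block prior to the one that included it is the tip's 5th ancestor.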
medianTime := node.RelativeAncestor(5).CalcPastMedianTime().Unix()
// The median time calculated from the PoV of the best block in the
// test chain. For unconfirmed inputs, this value will be used since
// the MTP will be calculated from the PoV of the yet-to-be-mined
// block.
nextMedianTime := node.CalcPastMedianTime().Unix()
nextBlockHeight := int32(numBlocksToActivate) + 1
// Add an additional transaction which will serve as our unconfirmed
// output.
unConfTx := &wire.MsgTx{
TxOut: []*wire.TxOut{{
PkScript: nil,
Value: 5,
}},
}
unConfUtxo := wire.OutPoint{
Hash: unConfTx.TxHash(),
Index: 0,
}
// Adding a utxo with a height of 0x7fffffff indicates that the output
// is currently unmined.
utxoView.AddTxOuts(btcutil.NewTx(unConfTx), 0x7fffffff)
tests := []struct {
tx *wire.MsgTx
view *UtxoViewpoint
mempool bool
want *SequenceLock
}{
// A transaction of version one should have sequence locks
// disabled, as the new sequence number semantics only apply to
// transactions of version 2 or higher.
{
tx: &wire.MsgTx{
Version: 1,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 3),
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: -1,
BlockHeight: -1,
},
},
// A transaction with a single input with max sequence number.
// This sequence number has the high bit set, so sequence locks
// should be disabled.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: wire.MaxTxInSequenceNum,
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: -1,
BlockHeight: -1,
},
},
// A transaction with a single input whose lock time is
// expressed in seconds. However, the specified lock time is
// below the required floor for time based lock times since
// they have time granularity of 512 seconds. As a result, the
// seconds lock-time should be just before the median time of
// the targeted block.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(true, 2),
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: medianTime - 1,
BlockHeight: -1,
},
},
// A transaction with a single input whose lock time is
// expressed in seconds. The number of seconds should be 1023
// seconds after the median past time of the last block in the
// chain.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(true, 1024),
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: medianTime + 1023,
BlockHeight: -1,
},
},
// A transaction with multiple inputs. The first input has a
// lock time expressed in seconds. The second input has a
// sequence lock in blocks with a value of 4. The last input
// has a sequence number with a value of 5, but has the disable
// bit set, so it is ignored. The resulting lock should combine
// the seconds lock from the first input with the block lock
// from the second input.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(true, 2560),
}, {
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 4),
}, {
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 5) |
wire.SequenceLockTimeDisabled,
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: medianTime + (5 << wire.SequenceLockTimeGranularity) - 1,
BlockHeight: prevUtxoHeight + 3,
},
},
// Transaction with a single input. The input's sequence number
// encodes a relative lock-time in blocks (3 blocks). The
// sequence lock should have a value of -1 for seconds, but a
// height of 2 meaning it can be included at height 3.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 3),
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: -1,
BlockHeight: prevUtxoHeight + 2,
},
},
// A transaction with two inputs with lock times expressed in
// seconds. The selected sequence lock value for seconds should
// be the time further in the future.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(true, 5120),
}, {
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(true, 2560),
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: medianTime + (10 << wire.SequenceLockTimeGranularity) - 1,
BlockHeight: -1,
},
},
// A transaction with two inputs with lock times expressed in
// blocks. The selected sequence lock value for blocks should
// be the height further in the future, so a height of 10
// indicating it can be included at height 11.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 1),
}, {
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 11),
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: -1,
BlockHeight: prevUtxoHeight + 10,
},
},
// A transaction with multiple inputs. Two inputs are time
// based, and the other two are block based. For each lock
// type, the lock lying further into the future should be
// chosen.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(true, 2560),
}, {
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(true, 6656),
}, {
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 3),
}, {
PreviousOutPoint: utxo,
Sequence: LockTimeToSequence(false, 9),
}},
},
view: utxoView,
want: &SequenceLock{
Seconds: medianTime + (13 << wire.SequenceLockTimeGranularity) - 1,
BlockHeight: prevUtxoHeight + 8,
},
},
// A transaction with a single unconfirmed input. Since the
// input is unconfirmed, the height of the input should be
// interpreted as the height of the *next* block. So, a 2 block
// relative lock means the sequence lock should be for 1 block
// after the *next* block height, indicating it can be included
// 2 blocks after that.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: unConfUtxo,
Sequence: LockTimeToSequence(false, 2),
}},
},
view: utxoView,
mempool: true,
want: &SequenceLock{
Seconds: -1,
BlockHeight: nextBlockHeight + 1,
},
},
// A transaction with a single unconfirmed input. The input has
// a time based lock, so the lock time should be based off the
// MTP of the *next* block.
{
tx: &wire.MsgTx{
Version: 2,
TxIn: []*wire.TxIn{{
PreviousOutPoint: unConfUtxo,
Sequence: LockTimeToSequence(true, 1024),
}},
},
view: utxoView,
mempool: true,
want: &SequenceLock{
Seconds: nextMedianTime + 1023,
BlockHeight: -1,
},
},
}
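// Note on the expected values above: time-based relative locks use a
// 512-second granularity (1 << wire.SequenceLockTimeGranularity), so,
// for example, LockTimeToSequence(true, 1024) encodes 1024>>9 = 2
// units and yields a seconds lock of medianTime + 2*512 - 1 =
// medianTime + 1023. Block-based locks are measured from the height
// of the input's utxo, which is why the expected heights above are
// expressed as offsets from prevUtxoHeight.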
t.Logf("Running %v SequenceLock tests", len(tests))
for i, test := range tests {
utilTx := btcutil.NewTx(test.tx)
seqLock, err := chain.CalcSequenceLock(utilTx, test.view, test.mempool)
if err != nil {
t.Fatalf("test #%d, unable to calc sequence lock: %v", i, err)
}
if seqLock.Seconds != test.want.Seconds {
t.Fatalf("test #%d got %v seconds want %v seconds",
i, seqLock.Seconds, test.want.Seconds)
}
if seqLock.BlockHeight != test.want.BlockHeight {
t.Fatalf("test #%d got height of %v want height of %v ",
i, seqLock.BlockHeight, test.want.BlockHeight)
}
}
}

// nodeHashes is a convenience function that returns the hashes for all of the
// passed indexes of the provided nodes. It is used to construct expected hash
// slices in the tests.
func nodeHashes(nodes []*blockNode, indexes ...int) []chainhash.Hash {
hashes := make([]chainhash.Hash, 0, len(indexes))
for _, idx := range indexes {
hashes = append(hashes, nodes[idx].hash)
}
return hashes
}

// nodeHeaders is a convenience function that returns the headers for all of
// the passed indexes of the provided nodes. It is used to construct expected
// located headers in the tests.
func nodeHeaders(nodes []*blockNode, indexes ...int) []wire.BlockHeader {
headers := make([]wire.BlockHeader, 0, len(indexes))
for _, idx := range indexes {
headers = append(headers, nodes[idx].Header())
}
return headers
}

// TestLocateInventory ensures that locating inventory via the LocateHeaders and
// LocateBlocks functions behaves as expected.
func TestLocateInventory(t *testing.T) {
// Construct a synthetic block chain with a block index consisting of
// the following structure.
// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
// \-> 16a -> 17a
tip := tstTip
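// tstTip is a shared test helper that returns the last node in a slice
// of chained nodes; tip(branch0Nodes) and tip(branch1Nodes) below grab
// the respective branch tips.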
chain := newFakeChain(&chaincfg.MainNetParams)
branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18)
branch1Nodes := chainedNodes(branch0Nodes[14], 2)
for _, node := range branch0Nodes {
chain.index.AddNode(node)
}
for _, node := range branch1Nodes {
chain.index.AddNode(node)
}
chain.bestChain.SetTip(tip(branch0Nodes))
// Create chain views for different branches of the overall chain to
// simulate a local and remote node on different parts of the chain.
localView := newChainView(tip(branch0Nodes))
remoteView := newChainView(tip(branch1Nodes))
// Create a chain view for a completely unrelated block chain to
// simulate a remote node on a totally different chain.
unrelatedBranchNodes := chainedNodes(nil, 5)
unrelatedView := newChainView(tip(unrelatedBranchNodes))
tests := []struct {
name string
locator BlockLocator // locator for requested inventory
hashStop chainhash.Hash // stop hash for locator
maxAllowed uint32 // max to locate, 0 = wire const
headers []wire.BlockHeader // expected located headers
hashes []chainhash.Hash // expected located hashes
}{
{
// Empty block locators and unknown stop hash. No
// inventory should be located.
name: "no locators, no stop",
locator: nil,
hashStop: chainhash.Hash{},
headers: nil,
hashes: nil,
},
{
// Empty block locators and stop hash in side chain.
// The expected result is the requested block.
name: "no locators, stop in side",
locator: nil,
hashStop: tip(branch1Nodes).hash,
headers: nodeHeaders(branch1Nodes, 1),
hashes: nodeHashes(branch1Nodes, 1),
},
{
// Empty block locators and stop hash in main chain.
// The expected result is the requested block.
name: "no locators, stop in main",
locator: nil,
hashStop: branch0Nodes[12].hash,
headers: nodeHeaders(branch0Nodes, 12),
hashes: nodeHashes(branch0Nodes, 12),
},
{
// Locators based on remote being on side chain and a
// stop hash local node doesn't know about. The
// expected result is the blocks after the fork point in
// the main chain and the stop hash has no effect.
name: "remote side chain, unknown stop",
locator: remoteView.BlockLocator(nil),
hashStop: chainhash.Hash{0x01},
headers: nodeHeaders(branch0Nodes, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 15, 16, 17),
},
{
// Locators based on remote being on side chain and a
// stop hash in side chain. The expected result is the
// blocks after the fork point in the main chain and the
// stop hash has no effect.
name: "remote side chain, stop in side",
locator: remoteView.BlockLocator(nil),
hashStop: tip(branch1Nodes).hash,
headers: nodeHeaders(branch0Nodes, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 15, 16, 17),
},
{
// Locators based on remote being on side chain and a
// stop hash in main chain, but before fork point. The
// expected result is the blocks after the fork point in
// the main chain and the stop hash has no effect.
name: "remote side chain, stop in main before",
locator: remoteView.BlockLocator(nil),
hashStop: branch0Nodes[13].hash,
headers: nodeHeaders(branch0Nodes, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 15, 16, 17),
},
{
// Locators based on remote being on side chain and a
// stop hash in main chain, but exactly at the fork
// point. The expected result is the blocks after the
// fork point in the main chain and the stop hash has no
// effect.
name: "remote side chain, stop in main exact",
locator: remoteView.BlockLocator(nil),
hashStop: branch0Nodes[14].hash,
headers: nodeHeaders(branch0Nodes, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 15, 16, 17),
},
{
// Locators based on remote being on side chain and a
// stop hash in main chain just after the fork point.
// The expected result is the blocks after the fork
// point in the main chain up to and including the stop
// hash.
name: "remote side chain, stop in main after",
locator: remoteView.BlockLocator(nil),
hashStop: branch0Nodes[15].hash,
headers: nodeHeaders(branch0Nodes, 15),
hashes: nodeHashes(branch0Nodes, 15),
},
{
// Locators based on remote being on side chain and a
// stop hash in main chain some time after the fork
// point. The expected result is the blocks after the
// fork point in the main chain up to and including the
// stop hash.
name: "remote side chain, stop in main after more",
locator: remoteView.BlockLocator(nil),
hashStop: branch0Nodes[16].hash,
headers: nodeHeaders(branch0Nodes, 15, 16),
hashes: nodeHashes(branch0Nodes, 15, 16),
},
{
// Locators based on remote being on main chain in the
// past and a stop hash local node doesn't know about.
// The expected result is the blocks after the known
// point in the main chain and the stop hash has no
// effect.
name: "remote main chain past, unknown stop",
locator: localView.BlockLocator(branch0Nodes[12]),
hashStop: chainhash.Hash{0x01},
headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
},
{
// Locators based on remote being on main chain in the
// past and a stop hash in a side chain. The expected
// result is the blocks after the known point in the
// main chain and the stop hash has no effect.
name: "remote main chain past, stop in side",
locator: localView.BlockLocator(branch0Nodes[12]),
hashStop: tip(branch1Nodes).hash,
headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
},
{
// Locators based on remote being on main chain in the
// past and a stop hash in the main chain before that
// point. The expected result is the blocks after the
// known point in the main chain and the stop hash has
// no effect.
name: "remote main chain past, stop in main before",
locator: localView.BlockLocator(branch0Nodes[12]),
hashStop: branch0Nodes[11].hash,
headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
},
{
// Locators based on remote being on main chain in the
// past and a stop hash in the main chain exactly at that
// point. The expected result is the blocks after the
// known point in the main chain and the stop hash has
// no effect.
name: "remote main chain past, stop in main exact",
locator: localView.BlockLocator(branch0Nodes[12]),
hashStop: branch0Nodes[12].hash,
headers: nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
},
{
// Locators based on remote being on main chain in the
// past and a stop hash in the main chain just after
// that point. The expected result is the blocks after
// the known point in the main chain and the stop hash
// has no effect.
name: "remote main chain past, stop in main after",
locator: localView.BlockLocator(branch0Nodes[12]),
hashStop: branch0Nodes[13].hash,
headers: nodeHeaders(branch0Nodes, 13),
hashes: nodeHashes(branch0Nodes, 13),
},
{
// Locators based on remote being on main chain in the
// past and a stop hash in the main chain some time
// after that point. The expected result is the blocks
// after the known point in the main chain and the stop
// hash has no effect.
name: "remote main chain past, stop in main after more",
locator: localView.BlockLocator(branch0Nodes[12]),
hashStop: branch0Nodes[15].hash,
headers: nodeHeaders(branch0Nodes, 13, 14, 15),
hashes: nodeHashes(branch0Nodes, 13, 14, 15),
},
{
// Locators based on remote being at exactly the same
// point in the main chain and a stop hash local node
// doesn't know about. The expected result is no
// located inventory.
name: "remote main chain same, unknown stop",
locator: localView.BlockLocator(nil),
hashStop: chainhash.Hash{0x01},
headers: nil,
hashes: nil,
},
{
// Locators based on remote being at exactly the same
// point in the main chain and a stop hash at exactly
// the same point. The expected result is no located
// inventory.
name: "remote main chain same, stop same point",
locator: localView.BlockLocator(nil),
hashStop: tip(branch0Nodes).hash,
headers: nil,
hashes: nil,
},
{
// Locators from remote that don't include any blocks
// the local node knows. This would happen if the
// remote node is on a completely separate chain that
// isn't rooted with the same genesis block. The
// expected result is the blocks after the genesis
// block.
name: "remote unrelated chain",
locator: unrelatedView.BlockLocator(nil),
hashStop: chainhash.Hash{},
headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
},
{
// Locators from remote for second block in main chain
// and no stop hash, but with an overridden max limit.
// The expected result is the blocks after the second
// block limited by the max.
name: "remote genesis",
locator: locatorHashes(branch0Nodes, 0),
hashStop: chainhash.Hash{},
maxAllowed: 3,
headers: nodeHeaders(branch0Nodes, 1, 2, 3),
hashes: nodeHashes(branch0Nodes, 1, 2, 3),
},
{
// Poorly formed locator.
//
// Locator from remote that only includes a single
// block on a side chain the local node knows. The
// expected result is the blocks after the genesis
// block since even though the block is known, it is on
// a side chain and there are no more locators to find
// the fork point.
name: "weak locator, single known side block",
locator: locatorHashes(branch1Nodes, 1),
hashStop: chainhash.Hash{},
headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
},
{
// Poorly formed locator.
//
// Locator from remote that only includes multiple
// blocks on a side chain the local node knows however
// none in the main chain. The expected result is the
// blocks after the genesis block since even though the
// blocks are known, they are all on a side chain and
// there are no more locators to find the fork point.
name: "weak locator, multiple known side blocks",
locator: locatorHashes(branch1Nodes, 1, 0),
hashStop: chainhash.Hash{},
headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
},
{
// Poorly formed locator.
//
// Locator from remote that only includes multiple
// blocks on a side chain the local node knows however
// none in the main chain but includes a stop hash in
// the main chain. The expected result is the blocks
// after the genesis block up to the stop hash since
// even though the blocks are known, they are all on a
// side chain and there are no more locators to find the
// fork point.
name: "weak locator, multiple known side blocks, stop in main",
locator: locatorHashes(branch1Nodes, 1, 0),
hashStop: branch0Nodes[5].hash,
headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5),
hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5),
},
}
for _, test := range tests {
// Ensure the expected headers are located.
var headers []wire.BlockHeader
if test.maxAllowed != 0 {
// Need to use the unexported function to override the
// max allowed for headers.
chain.chainLock.RLock()
headers = chain.locateHeaders(test.locator,
&test.hashStop, test.maxAllowed)
chain.chainLock.RUnlock()
} else {
headers = chain.LocateHeaders(test.locator,
&test.hashStop)
}
if !reflect.DeepEqual(headers, test.headers) {
t.Errorf("%s: unxpected headers -- got %v, want %v",
test.name, headers, test.headers)
continue
}
// Ensure the expected block hashes are located.
maxAllowed := uint32(wire.MaxBlocksPerMsg)
if test.maxAllowed != 0 {
maxAllowed = test.maxAllowed
}
hashes := chain.LocateBlocks(test.locator, &test.hashStop,
maxAllowed)
if !reflect.DeepEqual(hashes, test.hashes) {
t.Errorf("%s: unxpected hashes -- got %v, want %v",
test.name, hashes, test.hashes)
continue
}
}
}