// Copyright (c) 2013-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"reflect"
	"testing"
	"time"

	"github.com/lbryio/lbcd/chaincfg"
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/wire"
	btcutil "github.com/lbryio/lbcutil"
)

// TestHaveBlock tests the HaveBlock API to ensure proper functionality.
func TestHaveBlock(t *testing.T) {
	// Load up blocks such that there is a side chain.
	// (genesis block) -> 1 -> 2 -> 3 -> 4
	//                          \-> 3a
	testFiles := []string{
		"blk_0_to_4.dat.bz2",
		"blk_3A.dat.bz2",
	}

	var blocks []*btcutil.Block
	for _, file := range testFiles {
		blockTmp, err := loadBlocks(file)
		if err != nil {
			t.Errorf("Error loading file: %v\n", err)
			return
		}
		blocks = append(blocks, blockTmp...)
	}

	// Create a new database and chain instance to run tests against.
	chain, teardownFunc, err := chainSetup("haveblock",
		&chaincfg.MainNetParams)
	if err != nil {
		t.Errorf("Failed to setup chain instance: %v", err)
		return
	}
	defer teardownFunc()

	// Since we're not dealing with the real block chain, set the coinbase
	// maturity to 1.
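	// This lets the test blocks, which spend freshly generated coinbase
	// outputs, be accepted without waiting out the default 100-block
	// maturity requirement.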
	chain.TstSetCoinbaseMaturity(1)

	for i := 1; i < len(blocks); i++ {
		_, isOrphan, err := chain.ProcessBlock(blocks[i], BFNone)
		if err != nil {
			t.Errorf("ProcessBlock fail on block %v: %v\n", i, err)
			return
		}
		if isOrphan {
			t.Errorf("ProcessBlock incorrectly returned block %v "+
				"is an orphan\n", i)
			return
		}
	}

	// Insert an orphan block.
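	// Block100000 references a parent that was never processed above, so
	// the chain has no way to connect it and must report it as an orphan.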
	_, isOrphan, err := chain.ProcessBlock(btcutil.NewBlock(&Block100000),
		BFNone)
	if err != nil {
		t.Errorf("Unable to process block: %v", err)
		return
	}
	if !isOrphan {
		t.Errorf("ProcessBlock indicated block is not an orphan when " +
			"it should be\n")
		return
	}

	tests := []struct {
		hash string
		want bool
	}{
		// Genesis block should be present (in the main chain).
		{hash: chaincfg.MainNetParams.GenesisHash.String(), want: true},

		// Block 3a should be present (on a side chain).
		{hash: "00000000474284d20067a4d33f6a02284e6ef70764a3a26d6a5b9df52ef663dd", want: true},

		// Block 100000 should be present (as an orphan).
		{hash: "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506", want: true},

		// Random hashes should not be available.
		{hash: "123", want: false},
	}

	for i, test := range tests {
		hash, err := chainhash.NewHashFromStr(test.hash)
		if err != nil {
			t.Errorf("NewHashFromStr: %v", err)
			continue
		}

		result, err := chain.HaveBlock(hash)
		if err != nil {
			t.Errorf("HaveBlock #%d unexpected error: %v", i, err)
			return
		}
		if result != test.want {
			t.Errorf("HaveBlock #%d got %v want %v", i, result,
				test.want)
			continue
		}
	}
}

// TestCalcSequenceLock tests the LockTimeToSequence function, and the
// CalcSequenceLock method of a Chain instance. The tests exercise several
// combinations of inputs to the CalcSequenceLock function in order to ensure
// the returned SequenceLocks are correct for each test instance.
func TestCalcSequenceLock(t *testing.T) {
	netParams := &chaincfg.SimNetParams

	// We need to activate CSV in order to test the processing logic, so
	// manually craft the block version that's used to signal the soft-fork
	// activation.
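	// The 0x20000000 base below is the BIP 9 version-bits prefix (top bits
	// set to 001); OR-ing in the deployment's bit signals support for CSV.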
	csvBit := netParams.Deployments[chaincfg.DeploymentCSV].BitNumber
	blockVersion := int32(0x20000000 | (uint32(1) << csvBit))

	// Generate enough synthetic blocks to activate CSV.
	chain := newFakeChain(netParams)
	node := chain.bestChain.Tip()
	blockTime := node.Header().Timestamp
	numBlocksToActivate := (netParams.MinerConfirmationWindow * 3)
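	// Three confirmation windows of signaling blocks are enough for the
	// deployment to move through the started and locked-in states and
	// become active.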
	for i := uint32(0); i < numBlocksToActivate; i++ {
		blockTime = blockTime.Add(time.Second)
		node = newFakeNode(node, blockVersion, 0, blockTime)
		chain.index.AddNode(node)
		chain.bestChain.SetTip(node)
	}

	// Create a utxo view with a fake utxo for the inputs used in the
	// transactions created below. This utxo is added such that it has an
	// age of 4 blocks.
	targetTx := btcutil.NewTx(&wire.MsgTx{
		TxOut: []*wire.TxOut{{
			PkScript: nil,
			Value:    10,
		}},
	})
	utxoView := NewUtxoViewpoint()
	utxoView.AddTxOuts(targetTx, int32(numBlocksToActivate)-4)
	utxoView.SetBestHash(&node.hash)

	// Create a utxo that spends the fake utxo created above for use in the
	// transactions created in the tests. It has an age of 4 blocks. Note
	// that the sequence lock heights are always calculated from the same
	// point of view that they were originally calculated from for a given
	// utxo. That is to say, the height prior to it.
	utxo := wire.OutPoint{
		Hash:  *targetTx.Hash(),
		Index: 0,
	}
	prevUtxoHeight := int32(numBlocksToActivate) - 4

	// Obtain the median time past from the PoV of the input created above.
	// The MTP for the input is the MTP from the PoV of the block *prior*
	// to the one that included it.
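	// The fake utxo was mined 4 blocks back from the current tip, so the
	// block prior to the one that included it sits 5 blocks back, hence
	// RelativeAncestor(5).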
	medianTime := node.RelativeAncestor(5).CalcPastMedianTime().Unix()

	// The median time calculated from the PoV of the best block in the
	// test chain. For unconfirmed inputs, this value will be used since
	// the MTP will be calculated from the PoV of the yet-to-be-mined
	// block.
	nextMedianTime := node.CalcPastMedianTime().Unix()
	nextBlockHeight := int32(numBlocksToActivate) + 1

	// Add an additional transaction which will serve as our unconfirmed
	// output.
	unConfTx := &wire.MsgTx{
		TxOut: []*wire.TxOut{{
			PkScript: nil,
			Value:    5,
		}},
	}
	unConfUtxo := wire.OutPoint{
		Hash:  unConfTx.TxHash(),
		Index: 0,
	}

	// Adding a utxo with a height of 0x7fffffff indicates that the output
	// is currently unmined.
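	// (0x7fffffff is math.MaxInt32; the sequence lock calculation treats
	// inputs recorded at this sentinel height as unconfirmed and anchors
	// their relative locks to the next block, as the final test cases
	// below expect.)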
	utxoView.AddTxOuts(btcutil.NewTx(unConfTx), 0x7fffffff)
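
	// In the expected results below, Seconds and BlockHeight are the final
	// relative lock values: the median time past and the block height that
	// must be reached before the transaction can be included, with -1
	// meaning that dimension places no constraint. LockTimeToSequence
	// encodes a relative lock in an input's sequence field: block-based
	// when the first argument is false, and time-based (in 512-second
	// granularity units) when it is true.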
	tests := []struct {
		tx      *wire.MsgTx
		view    *UtxoViewpoint
		mempool bool
		want    *SequenceLock
	}{
		// A transaction of version one should disable sequence locks
		// as the new sequence number semantics only apply to
		// transactions version 2 or higher.
		{
			tx: &wire.MsgTx{
				Version: 1,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(false, 3),
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     -1,
				BlockHeight: -1,
			},
		},
		// A transaction with a single input with max sequence number.
		// This sequence number has the high bit set, so sequence locks
		// should be disabled.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         wire.MaxTxInSequenceNum,
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     -1,
				BlockHeight: -1,
			},
		},
		// A transaction with a single input whose lock time is
		// expressed in seconds. However, the specified lock time is
		// below the required floor for time based lock times since
		// they have time granularity of 512 seconds. As a result, the
		// seconds lock-time should be just before the median time of
		// the targeted block.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(true, 2),
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     medianTime - 1,
				BlockHeight: -1,
			},
		},
		// A transaction with a single input whose lock time is
		// expressed in seconds. The number of seconds should be 1023
		// seconds after the median past time of the last block in the
		// chain.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(true, 1024),
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     medianTime + 1023,
				BlockHeight: -1,
			},
		},
		// A transaction with multiple inputs. The first input has a
		// lock time expressed in seconds. The second input has a
		// sequence lock in blocks with a value of 4. The last input
		// has a sequence number with a value of 5, but has the disable
		// bit set. So the first lock should be selected as it's the
		// latest lock that isn't disabled.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(true, 2560),
				}, {
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(false, 4),
				}, {
					PreviousOutPoint: utxo,
					Sequence: LockTimeToSequence(false, 5) |
						wire.SequenceLockTimeDisabled,
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     medianTime + (5 << wire.SequenceLockTimeGranularity) - 1,
				BlockHeight: prevUtxoHeight + 3,
			},
		},
		// Transaction with a single input. The input's sequence number
		// encodes a relative lock-time in blocks (3 blocks). The
		// sequence lock should have a value of -1 for seconds, but a
		// height of 2 meaning it can be included at height 3.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(false, 3),
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     -1,
				BlockHeight: prevUtxoHeight + 2,
			},
		},
		// A transaction with two inputs with lock times expressed in
		// seconds. The selected sequence lock value for seconds should
		// be the time further in the future.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(true, 5120),
				}, {
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(true, 2560),
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     medianTime + (10 << wire.SequenceLockTimeGranularity) - 1,
				BlockHeight: -1,
			},
		},
		// A transaction with two inputs with lock times expressed in
		// blocks. The selected sequence lock value for blocks should
		// be the height further in the future, so a height of 10
		// indicating it can be included at height 11.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(false, 1),
				}, {
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(false, 11),
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     -1,
				BlockHeight: prevUtxoHeight + 10,
			},
		},
		// A transaction with multiple inputs. Two inputs are time
		// based, and the other two are block based. The lock lying
		// further into the future for both inputs should be chosen.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(true, 2560),
				}, {
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(true, 6656),
				}, {
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(false, 3),
				}, {
					PreviousOutPoint: utxo,
					Sequence:         LockTimeToSequence(false, 9),
				}},
			},
			view: utxoView,
			want: &SequenceLock{
				Seconds:     medianTime + (13 << wire.SequenceLockTimeGranularity) - 1,
				BlockHeight: prevUtxoHeight + 8,
			},
		},
		// A transaction with a single unconfirmed input. Since the
		// input is unconfirmed, the height of the input should be
		// interpreted as the height of the *next* block. So, a 2
		// block relative lock means the sequence lock should be for
		// 1 block after the *next* block height, indicating it can
		// be included 2 blocks after that.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: unConfUtxo,
					Sequence:         LockTimeToSequence(false, 2),
				}},
			},
			view:    utxoView,
			mempool: true,
			want: &SequenceLock{
				Seconds:     -1,
				BlockHeight: nextBlockHeight + 1,
			},
		},
		// A transaction with a single unconfirmed input. The input has
		// a time based lock, so the lock time should be based off the
		// MTP of the *next* block.
		{
			tx: &wire.MsgTx{
				Version: 2,
				TxIn: []*wire.TxIn{{
					PreviousOutPoint: unConfUtxo,
					Sequence:         LockTimeToSequence(true, 1024),
				}},
			},
			view:    utxoView,
			mempool: true,
			want: &SequenceLock{
				Seconds:     nextMedianTime + 1023,
				BlockHeight: -1,
			},
		},
	}

	t.Logf("Running %v SequenceLock tests", len(tests))
	for i, test := range tests {
		utilTx := btcutil.NewTx(test.tx)
		seqLock, err := chain.CalcSequenceLock(utilTx, test.view, test.mempool)
		if err != nil {
			t.Fatalf("test #%d, unable to calc sequence lock: %v", i, err)
		}

		if seqLock.Seconds != test.want.Seconds {
			t.Fatalf("test #%d got %v seconds want %v seconds",
				i, seqLock.Seconds, test.want.Seconds)
		}
		if seqLock.BlockHeight != test.want.BlockHeight {
			t.Fatalf("test #%d got height of %v want height of %v ",
				i, seqLock.BlockHeight, test.want.BlockHeight)
		}
	}
}

// nodeHashes is a convenience function that returns the hashes for all of the
// passed indexes of the provided nodes. It is used to construct expected hash
// slices in the tests.
func nodeHashes(nodes []*blockNode, indexes ...int) []chainhash.Hash {
	hashes := make([]chainhash.Hash, 0, len(indexes))
	for _, idx := range indexes {
		hashes = append(hashes, nodes[idx].hash)
	}
	return hashes
}

// nodeHeaders is a convenience function that returns the headers for all of
// the passed indexes of the provided nodes. It is used to construct expected
// located headers in the tests.
func nodeHeaders(nodes []*blockNode, indexes ...int) []wire.BlockHeader {
	headers := make([]wire.BlockHeader, 0, len(indexes))
	for _, idx := range indexes {
		headers = append(headers, nodes[idx].Header())
	}
	return headers
}

// TestLocateInventory ensures that locating inventory via the LocateHeaders
// and LocateBlocks functions behaves as expected.
func TestLocateInventory(t *testing.T) {
	// Construct a synthetic block chain with a block index consisting of
	// the following structure.
	// 	genesis -> 1 -> 2 -> ... -> 15 -> 16  -> 17  -> 18
	// 	                                \-> 16a -> 17a
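	// A block locator is a list of hashes that starts at a chain tip and
	// walks backwards with exponentially increasing gaps, allowing the
	// receiving node to find the most recent block the two chains have in
	// common even after they have diverged.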
	tip := tstTip
	chain := newFakeChain(&chaincfg.MainNetParams)
	branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18)
	branch1Nodes := chainedNodes(branch0Nodes[14], 2)
	for _, node := range branch0Nodes {
		chain.index.AddNode(node)
	}
	for _, node := range branch1Nodes {
		chain.index.AddNode(node)
	}
	chain.bestChain.SetTip(tip(branch0Nodes))

	// Create chain views for different branches of the overall chain to
	// simulate a local and remote node on different parts of the chain.
	localView := newChainView(tip(branch0Nodes))
	remoteView := newChainView(tip(branch1Nodes))

	// Create a chain view for a completely unrelated block chain to
	// simulate a remote node on a totally different chain.
	unrelatedBranchNodes := chainedNodes(nil, 5)
	unrelatedView := newChainView(tip(unrelatedBranchNodes))

	tests := []struct {
		name       string
		locator    BlockLocator       // locator for requested inventory
		hashStop   chainhash.Hash     // stop hash for locator
		maxAllowed uint32             // max to locate, 0 = wire const
		headers    []wire.BlockHeader // expected located headers
		hashes     []chainhash.Hash   // expected located hashes
	}{
		{
			// Empty block locators and unknown stop hash. No
			// inventory should be located.
			name:     "no locators, no stop",
			locator:  nil,
			hashStop: chainhash.Hash{},
			headers:  nil,
			hashes:   nil,
		},
		{
			// Empty block locators and stop hash in side chain.
			// The expected result is the requested block.
			name:     "no locators, stop in side",
			locator:  nil,
			hashStop: tip(branch1Nodes).hash,
			headers:  nodeHeaders(branch1Nodes, 1),
			hashes:   nodeHashes(branch1Nodes, 1),
		},
		{
			// Empty block locators and stop hash in main chain.
			// The expected result is the requested block.
			name:     "no locators, stop in main",
			locator:  nil,
			hashStop: branch0Nodes[12].hash,
			headers:  nodeHeaders(branch0Nodes, 12),
			hashes:   nodeHashes(branch0Nodes, 12),
		},
		{
			// Locators based on remote being on side chain and a
			// stop hash local node doesn't know about. The
			// expected result is the blocks after the fork point in
			// the main chain and the stop hash has no effect.
			name:     "remote side chain, unknown stop",
			locator:  remoteView.BlockLocator(nil),
			hashStop: chainhash.Hash{0x01},
			headers:  nodeHeaders(branch0Nodes, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 15, 16, 17),
		},
		{
			// Locators based on remote being on side chain and a
			// stop hash in side chain. The expected result is the
			// blocks after the fork point in the main chain and the
			// stop hash has no effect.
			name:     "remote side chain, stop in side",
			locator:  remoteView.BlockLocator(nil),
			hashStop: tip(branch1Nodes).hash,
			headers:  nodeHeaders(branch0Nodes, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 15, 16, 17),
		},
		{
			// Locators based on remote being on side chain and a
			// stop hash in main chain, but before fork point. The
			// expected result is the blocks after the fork point in
			// the main chain and the stop hash has no effect.
			name:     "remote side chain, stop in main before",
			locator:  remoteView.BlockLocator(nil),
			hashStop: branch0Nodes[13].hash,
			headers:  nodeHeaders(branch0Nodes, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 15, 16, 17),
		},
		{
			// Locators based on remote being on side chain and a
			// stop hash in main chain, but exactly at the fork
			// point. The expected result is the blocks after the
			// fork point in the main chain and the stop hash has no
			// effect.
			name:     "remote side chain, stop in main exact",
			locator:  remoteView.BlockLocator(nil),
			hashStop: branch0Nodes[14].hash,
			headers:  nodeHeaders(branch0Nodes, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 15, 16, 17),
		},
		{
			// Locators based on remote being on side chain and a
			// stop hash in main chain just after the fork point.
			// The expected result is the blocks after the fork
			// point in the main chain up to and including the stop
			// hash.
			name:     "remote side chain, stop in main after",
			locator:  remoteView.BlockLocator(nil),
			hashStop: branch0Nodes[15].hash,
			headers:  nodeHeaders(branch0Nodes, 15),
			hashes:   nodeHashes(branch0Nodes, 15),
		},
		{
			// Locators based on remote being on side chain and a
			// stop hash in main chain some time after the fork
			// point. The expected result is the blocks after the
			// fork point in the main chain up to and including the
			// stop hash.
			name:     "remote side chain, stop in main after more",
			locator:  remoteView.BlockLocator(nil),
			hashStop: branch0Nodes[16].hash,
			headers:  nodeHeaders(branch0Nodes, 15, 16),
			hashes:   nodeHashes(branch0Nodes, 15, 16),
		},
		{
			// Locators based on remote being on main chain in the
			// past and a stop hash local node doesn't know about.
			// The expected result is the blocks after the known
			// point in the main chain and the stop hash has no
			// effect.
			name:     "remote main chain past, unknown stop",
			locator:  localView.BlockLocator(branch0Nodes[12]),
			hashStop: chainhash.Hash{0x01},
			headers:  nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
		},
		{
			// Locators based on remote being on main chain in the
			// past and a stop hash in a side chain. The expected
			// result is the blocks after the known point in the
			// main chain and the stop hash has no effect.
			name:     "remote main chain past, stop in side",
			locator:  localView.BlockLocator(branch0Nodes[12]),
			hashStop: tip(branch1Nodes).hash,
			headers:  nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
		},
		{
			// Locators based on remote being on main chain in the
			// past and a stop hash in the main chain before that
			// point. The expected result is the blocks after the
			// known point in the main chain and the stop hash has
			// no effect.
			name:     "remote main chain past, stop in main before",
			locator:  localView.BlockLocator(branch0Nodes[12]),
			hashStop: branch0Nodes[11].hash,
			headers:  nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
		},
		{
			// Locators based on remote being on main chain in the
			// past and a stop hash in the main chain exactly at that
			// point. The expected result is the blocks after the
			// known point in the main chain and the stop hash has
			// no effect.
			name:     "remote main chain past, stop in main exact",
			locator:  localView.BlockLocator(branch0Nodes[12]),
			hashStop: branch0Nodes[12].hash,
			headers:  nodeHeaders(branch0Nodes, 13, 14, 15, 16, 17),
			hashes:   nodeHashes(branch0Nodes, 13, 14, 15, 16, 17),
		},
		{
			// Locators based on remote being on main chain in the
			// past and a stop hash in the main chain just after
			// that point. The expected result is the blocks after
			// the known point in the main chain and the stop hash
			// has no effect.
			name:     "remote main chain past, stop in main after",
			locator:  localView.BlockLocator(branch0Nodes[12]),
			hashStop: branch0Nodes[13].hash,
			headers:  nodeHeaders(branch0Nodes, 13),
			hashes:   nodeHashes(branch0Nodes, 13),
		},
		{
			// Locators based on remote being on main chain in the
			// past and a stop hash in the main chain some time
			// after that point. The expected result is the blocks
			// after the known point in the main chain and the stop
			// hash has no effect.
			name:     "remote main chain past, stop in main after more",
			locator:  localView.BlockLocator(branch0Nodes[12]),
			hashStop: branch0Nodes[15].hash,
			headers:  nodeHeaders(branch0Nodes, 13, 14, 15),
			hashes:   nodeHashes(branch0Nodes, 13, 14, 15),
		},
		{
			// Locators based on remote being at exactly the same
			// point in the main chain and a stop hash local node
			// doesn't know about. The expected result is no
			// located inventory.
			name:     "remote main chain same, unknown stop",
			locator:  localView.BlockLocator(nil),
			hashStop: chainhash.Hash{0x01},
			headers:  nil,
			hashes:   nil,
		},
		{
			// Locators based on remote being at exactly the same
			// point in the main chain and a stop hash at exactly
			// the same point. The expected result is no located
			// inventory.
			name:     "remote main chain same, stop same point",
			locator:  localView.BlockLocator(nil),
			hashStop: tip(branch0Nodes).hash,
			headers:  nil,
			hashes:   nil,
		},
		{
			// Locators from remote that don't include any blocks
			// the local node knows. This would happen if the
			// remote node is on a completely separate chain that
			// isn't rooted with the same genesis block. The
			// expected result is the blocks after the genesis
			// block.
			name:     "remote unrelated chain",
			locator:  unrelatedView.BlockLocator(nil),
			hashStop: chainhash.Hash{},
			headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
				7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
			hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
				7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
		},
		{
			// Locators from remote for second block in main chain
			// and no stop hash, but with an overridden max limit.
			// The expected result is the blocks after the second
			// block limited by the max.
			name:       "remote genesis",
			locator:    locatorHashes(branch0Nodes, 0),
			hashStop:   chainhash.Hash{},
			maxAllowed: 3,
			headers:    nodeHeaders(branch0Nodes, 1, 2, 3),
			hashes:     nodeHashes(branch0Nodes, 1, 2, 3),
		},
		{
			// Poorly formed locator.
			//
			// Locator from remote that only includes a single
			// block on a side chain the local node knows. The
			// expected result is the blocks after the genesis
			// block since even though the block is known, it is on
			// a side chain and there are no more locators to find
			// the fork point.
			name:     "weak locator, single known side block",
			locator:  locatorHashes(branch1Nodes, 1),
			hashStop: chainhash.Hash{},
			headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
				7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
			hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
				7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
		},
		{
			// Poorly formed locator.
			//
			// Locator from remote that only includes multiple
			// blocks on a side chain the local node knows however
			// none in the main chain. The expected result is the
			// blocks after the genesis block since even though the
			// blocks are known, they are all on a side chain and
			// there are no more locators to find the fork point.
			name:     "weak locator, multiple known side blocks",
			locator:  locatorHashes(branch1Nodes, 1),
			hashStop: chainhash.Hash{},
			headers: nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
				7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
			hashes: nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5, 6,
				7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17),
		},
		{
			// Poorly formed locator.
			//
			// Locator from remote that only includes multiple
			// blocks on a side chain the local node knows however
			// none in the main chain but includes a stop hash in
			// the main chain. The expected result is the blocks
			// after the genesis block up to the stop hash since
			// even though the blocks are known, they are all on a
			// side chain and there are no more locators to find the
			// fork point.
			name:     "weak locator, multiple known side blocks, stop in main",
			locator:  locatorHashes(branch1Nodes, 1),
			hashStop: branch0Nodes[5].hash,
			headers:  nodeHeaders(branch0Nodes, 0, 1, 2, 3, 4, 5),
			hashes:   nodeHashes(branch0Nodes, 0, 1, 2, 3, 4, 5),
		},
	}
	for _, test := range tests {
		// Ensure the expected headers are located.
		var headers []wire.BlockHeader
		if test.maxAllowed != 0 {
			// Need to use the unexported function to override the
			// max allowed for headers.
			chain.chainLock.RLock()
			headers = chain.locateHeaders(test.locator,
				&test.hashStop, test.maxAllowed)
			chain.chainLock.RUnlock()
		} else {
			headers = chain.LocateHeaders(test.locator,
				&test.hashStop)
		}
		if !reflect.DeepEqual(headers, test.headers) {
			t.Errorf("%s: unexpected headers -- got %v, want %v",
				test.name, headers, test.headers)
			continue
		}

		// Ensure the expected block hashes are located.
		maxAllowed := uint32(wire.MaxBlocksPerMsg)
		if test.maxAllowed != 0 {
			maxAllowed = test.maxAllowed
		}
		hashes := chain.LocateBlocks(test.locator, &test.hashStop,
			maxAllowed)
		if !reflect.DeepEqual(hashes, test.hashes) {
			t.Errorf("%s: unexpected hashes -- got %v, want %v",
				test.name, hashes, test.hashes)
			continue
		}
	}
}

// TestHeightToHashRange ensures that fetching a range of block hashes by start
// height and end hash works as expected.
func TestHeightToHashRange(t *testing.T) {
	// Construct a synthetic block chain with a block index consisting of
	// the following structure.
	// 	genesis -> 1 -> 2 -> ... -> 15 -> 16  -> 17  -> 18
	// 	                                \-> 16a -> 17a -> 18a (unvalidated)
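	// The 18a node is intentionally left without the valid status flag
	// below so the "unvalidated block" test case can exercise the error
	// path.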
	tip := tstTip
	chain := newFakeChain(&chaincfg.MainNetParams)
	branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18)
	branch1Nodes := chainedNodes(branch0Nodes[14], 3)
	for _, node := range branch0Nodes {
		chain.index.SetStatusFlags(node, statusValid)
		chain.index.AddNode(node)
	}
	for _, node := range branch1Nodes {
		if node.height < 18 {
			chain.index.SetStatusFlags(node, statusValid)
		}
		chain.index.AddNode(node)
	}
	chain.bestChain.SetTip(tip(branch0Nodes))

	tests := []struct {
		name        string
		startHeight int32            // start height of the range to fetch
		endHash     chainhash.Hash   // end hash of the range to fetch
		maxResults  int              // max number of hashes to fetch
		hashes      []chainhash.Hash // expected located hashes
		expectError bool
	}{
		{
			name:        "blocks below tip",
			startHeight: 11,
			endHash:     branch0Nodes[14].hash,
			maxResults:  10,
			hashes:      nodeHashes(branch0Nodes, 10, 11, 12, 13, 14),
		},
		{
			name:        "blocks on main chain",
			startHeight: 15,
			endHash:     branch0Nodes[17].hash,
			maxResults:  10,
			hashes:      nodeHashes(branch0Nodes, 14, 15, 16, 17),
		},
		{
			name:        "blocks on stale chain",
			startHeight: 15,
			endHash:     branch1Nodes[1].hash,
			maxResults:  10,
			hashes: append(nodeHashes(branch0Nodes, 14),
				nodeHashes(branch1Nodes, 0, 1)...),
		},
		{
			name:        "invalid start height",
			startHeight: 19,
			endHash:     branch0Nodes[17].hash,
			maxResults:  10,
			expectError: true,
		},
		{
			name:        "too many results",
			startHeight: 1,
			endHash:     branch0Nodes[17].hash,
			maxResults:  10,
			expectError: true,
		},
		{
			name:        "unvalidated block",
			startHeight: 15,
			endHash:     branch1Nodes[2].hash,
			maxResults:  10,
			expectError: true,
		},
	}
	for _, test := range tests {
		hashes, err := chain.HeightToHashRange(test.startHeight, &test.endHash,
			test.maxResults)
		if err != nil {
			if !test.expectError {
				t.Errorf("%s: unexpected error: %v", test.name, err)
			}
			continue
		}

		if !reflect.DeepEqual(hashes, test.hashes) {
			t.Errorf("%s: unexpected hashes -- got %v, want %v",
				test.name, hashes, test.hashes)
		}
	}
}

// TestIntervalBlockHashes ensures that fetching block hashes at specified
// intervals by end hash works as expected.
func TestIntervalBlockHashes(t *testing.T) {
	// Construct a synthetic block chain with a block index consisting of
	// the following structure.
	// 	genesis -> 1 -> 2 -> ... -> 15 -> 16  -> 17  -> 18
	// 	                                \-> 16a -> 17a -> 18a (unvalidated)
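	// IntervalBlockHashes walks back from the end hash and returns the
	// hashes at heights interval, 2*interval, and so on, which is why an
	// interval of 8 with an end block at height 18 yields the blocks at
	// heights 8 and 16 in the expectations below.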
	tip := tstTip
	chain := newFakeChain(&chaincfg.MainNetParams)
	branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 18)
	branch1Nodes := chainedNodes(branch0Nodes[14], 3)
	for _, node := range branch0Nodes {
		chain.index.SetStatusFlags(node, statusValid)
		chain.index.AddNode(node)
	}
	for _, node := range branch1Nodes {
		if node.height < 18 {
			chain.index.SetStatusFlags(node, statusValid)
		}
		chain.index.AddNode(node)
	}
	chain.bestChain.SetTip(tip(branch0Nodes))

	tests := []struct {
		name        string
		endHash     chainhash.Hash
		interval    int
		hashes      []chainhash.Hash
		expectError bool
	}{
		{
			name:     "blocks on main chain",
			endHash:  branch0Nodes[17].hash,
			interval: 8,
			hashes:   nodeHashes(branch0Nodes, 7, 15),
		},
		{
			name:     "blocks on stale chain",
			endHash:  branch1Nodes[1].hash,
			interval: 8,
			hashes: append(nodeHashes(branch0Nodes, 7),
				nodeHashes(branch1Nodes, 0)...),
		},
		{
			name:     "no results",
			endHash:  branch0Nodes[17].hash,
			interval: 20,
			hashes:   []chainhash.Hash{},
		},
		{
			name:        "unvalidated block",
			endHash:     branch1Nodes[2].hash,
			interval:    8,
			expectError: true,
		},
	}
	for _, test := range tests {
		hashes, err := chain.IntervalBlockHashes(&test.endHash, test.interval)
		if err != nil {
			if !test.expectError {
				t.Errorf("%s: unexpected error: %v", test.name, err)
			}
			continue
		}

		if !reflect.DeepEqual(hashes, test.hashes) {
			t.Errorf("%s: unexpected hashes -- got %v, want %v",
				test.name, hashes, test.hashes)
		}
	}
}