lbcwallet/spvsvc/spvchain/sync_test.go

// TODO: Break up tests into bite-sized pieces.
package spvchain_test

import (
"bytes"
"fmt"
"io/ioutil"
"os"
"reflect"
"sync"
"testing"
"time"
"github.com/aakselrod/btctestlog"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/rpctest"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btclog"
"github.com/btcsuite/btcrpcclient"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcutil/gcs/builder"
"github.com/btcsuite/btcwallet/spvsvc/spvchain"
"github.com/btcsuite/btcwallet/waddrmgr"
"github.com/btcsuite/btcwallet/walletdb"
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
)
var (
logLevel = btclog.Off
syncTimeout = 30 * time.Second
syncUpdate = time.Second
// Don't set this too high for your platform, or the tests will miss
// messages.
// TODO: Make this a benchmark instead.
// TODO: Implement load limiting for both outgoing and incoming
// messages.
numQueryThreads = 50
queryOptions = []spvchain.QueryOption{
//spvchain.NumRetries(5),
}
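	// On slow or loaded machines, uncommenting spvchain.NumRetries above
	// (or raising its value) trades longer test runs for fewer spurious
	// query failures.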
// The sequence of connecting blocks.
conn = func() []int32 {
blocks := []int32{}
for i := 801; i <= 928; i++ {
blocks = append(blocks, int32(i))
}
for i := 926; i <= 930; i++ {
blocks = append(blocks, int32(i))
}
for i := 926; i <= 935; i++ {
blocks = append(blocks, int32(i))
}
return blocks
	}()
// The sequence of disconnecting blocks.
dconn = func() []int32 {
blocks := []int32{}
for i := 928; i >= 926; i-- {
blocks = append(blocks, int32(i))
}
for i := 930; i >= 926; i-- {
blocks = append(blocks, int32(i))
}
return blocks
	}()
// Blocks with relevant transactions
relevant = []int32{801, 929, 930}
// Blocks with receive transactions
receive = []int32{801}
// Blocks with redeeming transactions
redeem = []int32{929, 930}
)
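
// The conn/dconn sequences above encode the notification order a rescan
// should observe across the reorgs performed below. A minimal sketch of the
// kind of check they enable (gotConn is hypothetical, to be filled in by an
// OnBlockConnected-style callback):
//
//	if !reflect.DeepEqual(gotConn, conn) {
//		t.Fatalf("Wrong connect order. Want: %v, got: %v", conn, gotConn)
//	}
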
func TestSetup(t *testing.T) {
// Set up logging.
logger, err := btctestlog.NewTestLogger(t)
if err != nil {
t.Fatalf("Could not set up logger: %s", err)
}
chainLogger := btclog.NewSubsystemLogger(logger, "CHAIN: ")
chainLogger.SetLevel(logLevel)
spvchain.UseLogger(chainLogger)
rpcLogger := btclog.NewSubsystemLogger(logger, "RPCC: ")
rpcLogger.SetLevel(logLevel)
btcrpcclient.UseLogger(rpcLogger)
// Create a btcd SimNet node and generate 500 blocks
h1, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
if err != nil {
t.Fatalf("Couldn't create harness: %s", err)
}
defer h1.TearDown()
err = h1.SetUp(false, 0)
if err != nil {
t.Fatalf("Couldn't set up harness: %s", err)
}
_, err = h1.Node.Generate(500)
if err != nil {
t.Fatalf("Couldn't generate blocks: %s", err)
}
// Create a second btcd SimNet node
h2, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
if err != nil {
t.Fatalf("Couldn't create harness: %s", err)
}
defer h2.TearDown()
err = h2.SetUp(false, 0)
if err != nil {
t.Fatalf("Couldn't set up harness: %s", err)
}
// Create a third btcd SimNet node and generate 900 blocks
h3, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
if err != nil {
t.Fatalf("Couldn't create harness: %s", err)
}
defer h3.TearDown()
err = h3.SetUp(false, 0)
if err != nil {
t.Fatalf("Couldn't set up harness: %s", err)
}
_, err = h3.Node.Generate(900)
if err != nil {
t.Fatalf("Couldn't generate blocks: %s", err)
}
// Connect, sync, and disconnect h1 and h2
err = csd([]*rpctest.Harness{h1, h2})
if err != nil {
t.Fatalf("Couldn't connect/sync/disconnect h1 and h2: %s", err)
}
// Generate 300 blocks on the first node and 350 on the second
_, err = h1.Node.Generate(300)
if err != nil {
t.Fatalf("Couldn't generate blocks: %s", err)
}
_, err = h2.Node.Generate(350)
if err != nil {
t.Fatalf("Couldn't generate blocks: %s", err)
}
// Now we have a node with 800 blocks (h1), 850 blocks (h2), and
// 900 blocks (h3). The chains of nodes h1 and h2 match up to block
// 500. By default, a synchronizing wallet connected to all three
// should synchronize to h3. However, we're going to take checkpoints
// from h1 at 111, 333, 555, and 777, and add those to the
// synchronizing wallet's chain parameters so that it should
// disconnect from h3 at block 111, and from h2 at block 555, and
// then synchronize to block 800 from h1. Order of connection is
// unfortunately not guaranteed, so the reorg may not happen with every
// test.
// Copy parameters and insert checkpoints
modParams := chaincfg.SimNetParams
for _, height := range []int64{111, 333, 555, 777} {
hash, err := h1.Node.GetBlockHash(height)
if err != nil {
t.Fatalf("Couldn't get block hash for height %d: %s",
height, err)
}
modParams.Checkpoints = append(modParams.Checkpoints,
chaincfg.Checkpoint{
Hash: hash,
Height: int32(height),
})
}
	// Create a temporary directory, initialize an empty walletdb, and
	// create a configuration for the ChainService.
tempDir, err := ioutil.TempDir("", "spvchain")
if err != nil {
t.Fatalf("Failed to create temporary directory: %s", err)
}
defer os.RemoveAll(tempDir)
	db, err := walletdb.Create("bdb", tempDir+"/weks.db")
	if err != nil {
		t.Fatalf("Error creating DB: %s\n", err)
	}
	defer db.Close()
config := spvchain.Config{
DataDir: tempDir,
Database: db,
ChainParams: modParams,
AddPeers: []string{
h3.P2PAddress(),
h2.P2PAddress(),
h1.P2PAddress(),
},
}
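	// Tighten the ChainService package-level knobs so that banning,
	// retries, and cfheader waits happen quickly enough for a test run.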
spvchain.Services = 0
spvchain.MaxPeers = 3
spvchain.BanDuration = 5 * time.Second
spvchain.RequiredServices = wire.SFNodeNetwork
spvchain.WaitForMoreCFHeaders = time.Second
svc, err := spvchain.NewChainService(config)
if err != nil {
t.Fatalf("Error creating ChainService: %s", err)
}
svc.Start()
defer svc.Stop()
// Make sure the client synchronizes with the correct node
err = waitForSync(t, svc, h1)
if err != nil {
t.Fatalf("Couldn't sync ChainService: %s", err)
}
// Test that we can get blocks and cfilters via P2P and decide which are
// valid and which aren't.
err = testRandomBlocks(t, svc, h1)
if err != nil {
t.Fatalf("Testing blocks and cfilters failed: %s", err)
}
// Generate an address and send it some coins on the h1 chain. We use
// this to test rescans and notifications.
privKey, err := btcec.NewPrivateKey(btcec.S256())
if err != nil {
t.Fatalf("Couldn't generate private key: %s", err)
}
pubKeyHash := btcutil.Hash160(privKey.PubKey().SerializeCompressed())
addr, err := btcutil.NewAddressWitnessPubKeyHash(pubKeyHash, &modParams)
if err != nil {
t.Fatalf("Couldn't create address from key: %s", err)
}
script, err := txscript.PayToAddrScript(addr)
if err != nil {
t.Fatalf("Couldn't create script from address: %s", err)
}
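	// Pay 10 BTC (1e9 satoshis) to the test address.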
out := wire.TxOut{
PkScript: script,
Value: 1000000000,
}
tx1, err := h1.CreateTransaction([]*wire.TxOut{&out}, 1000)
if err != nil {
t.Fatalf("Couldn't create transaction from script: %s", err)
}
utx1 := btcutil.NewTx(tx1)
utx1.SetIndex(1)
tx2, err := h1.CreateTransaction([]*wire.TxOut{&out}, 1000)
if err != nil {
t.Fatalf("Couldn't create transaction from script: %s", err)
}
utx2 := btcutil.NewTx(tx2)
utx2.SetIndex(2)
if tx1.TxHash() == tx2.TxHash() {
t.Fatalf("Created two identical transactions")
}
_, err = h1.GenerateAndSubmitBlock([]*btcutil.Tx{utx1, utx2},
-1, time.Time{})
if err != nil {
t.Fatalf("Couldn't generate/submit block: %s")
}
// Generate 124 blocks on h1 to make sure it reorgs the other nodes.
// Ensure the ChainService instance stays caught up.
	_, err = h1.Node.Generate(124)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}
err = waitForSync(t, svc, h1)
if err != nil {
t.Fatalf("Couldn't sync ChainService: %s", err)
}
// Connect/sync/disconnect h2 to make it reorg to the h1 chain.
err = csd([]*rpctest.Harness{h1, h2})
if err != nil {
t.Fatalf("Couldn't sync h2 to h1: %s", err)
}
	// Build an input source that can spend the outputs we sent ourselves.
	// It's unused for now, so it's assigned to the blank identifier.
_ = func(tx wire.MsgTx) func(target btcutil.Amount) (
total btcutil.Amount, inputs []*wire.TxIn,
inputValues []btcutil.Amount, scripts [][]byte, err error) {
ourIndex := 1 << 30 // Should work on 32-bit systems
for i, txo := range tx.TxOut {
if bytes.Equal(txo.PkScript, script) {
ourIndex = i
}
}
return func(target btcutil.Amount) (total btcutil.Amount,
inputs []*wire.TxIn, inputValues []btcutil.Amount,
scripts [][]byte, err error) {
if ourIndex == 1<<30 {
err = fmt.Errorf("Couldn't find our address " +
"in the passed transaction's outputs.")
return
}
total = target
inputs = []*wire.TxIn{
&wire.TxIn{
PreviousOutPoint: wire.OutPoint{
Hash: tx.TxHash(),
Index: uint32(ourIndex),
},
},
}
inputValues = []btcutil.Amount{
btcutil.Amount(tx.TxOut[ourIndex].Value)}
scripts = [][]byte{tx.TxOut[ourIndex].PkScript}
err = nil
return
}
}
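	// The discarded closure above matches btcwallet's
	// txauthor.InputSource signature. If it were bound to a name, say
	// inputSource, a fuller version of this test could feed it to the
	// transaction-authoring helper roughly like so (sketch only;
	// secretSource stands in for a txauthor.SecretsSource):
	//
	//	authoredTx, err := txauthor.NewUnsignedTransaction(
	//		[]*wire.TxOut{&out}, 1000, inputSource(*tx1), nil)
	//	if err == nil {
	//		err = authoredTx.AddAllInputScripts(secretSource)
	//	}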
// Generate 3 blocks on h1, one at a time, to make sure the
// ChainService instance stays caught up.
for i := 0; i < 3; i++ {
		_, err = h1.Node.Generate(1)
		if err != nil {
			t.Fatalf("Couldn't generate block: %s", err)
		}
err = waitForSync(t, svc, h1)
if err != nil {
t.Fatalf("Couldn't sync ChainService: %s", err)
}
}
// Generate 5 blocks on h2 and wait for ChainService to sync to the
// newly-best chain on h2.
	_, err = h2.Node.Generate(5)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}
err = waitForSync(t, svc, h2)
if err != nil {
t.Fatalf("Couldn't sync ChainService: %s", err)
}
// Generate 7 blocks on h1 and wait for ChainService to sync to the
// newly-best chain on h1.
	_, err = h1.Node.Generate(7)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}
err = waitForSync(t, svc, h1)
if err != nil {
t.Fatalf("Couldn't sync ChainService: %s", err)
}
}
// csd does a connect-sync-disconnect between nodes in order to support reorg
// testing. It brings up and tears down a temporary node because, otherwise,
// the remaining nodes would try to reconnect to each other, resulting in
// unintended reorgs.
func csd(harnesses []*rpctest.Harness) error {
hTemp, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
if err != nil {
return err
}
// Tear down node at the end of the function.
defer hTemp.TearDown()
err = hTemp.SetUp(false, 0)
if err != nil {
return err
}
for _, harness := range harnesses {
err = rpctest.ConnectNode(hTemp, harness)
if err != nil {
return err
}
}
return rpctest.JoinNodes(harnesses, rpctest.Blocks)
}
// waitForSync waits for the ChainService to sync to the current chain state.
func waitForSync(t *testing.T, svc *spvchain.ChainService,
correctSyncNode *rpctest.Harness) error {
knownBestHash, knownBestHeight, err :=
correctSyncNode.Node.GetBestBlock()
if err != nil {
return err
}
if logLevel != btclog.Off {
t.Logf("Syncing to %d (%s)", knownBestHeight, knownBestHash)
}
var haveBest *waddrmgr.BlockStamp
haveBest, err = svc.BestSnapshot()
if err != nil {
return fmt.Errorf("Couldn't get best snapshot from "+
"ChainService: %s", err)
}
var total time.Duration
for haveBest.Hash != *knownBestHash {
if total > syncTimeout {
return fmt.Errorf("Timed out after %v waiting for "+
"header synchronization.", syncTimeout)
}
if haveBest.Height > knownBestHeight {
return fmt.Errorf("Synchronized to the wrong chain.")
}
time.Sleep(syncUpdate)
total += syncUpdate
haveBest, err = svc.BestSnapshot()
if err != nil {
return fmt.Errorf("Couldn't get best snapshot from "+
"ChainService: %s", err)
}
if logLevel != btclog.Off {
t.Logf("Synced to %d (%s)", haveBest.Height,
haveBest.Hash)
}
}
// Check if we're current.
if !svc.IsCurrent() {
return fmt.Errorf("ChainService doesn't see itself as current!")
}
// Check if we have all of the cfheaders.
knownBasicHeader, err := correctSyncNode.Node.GetCFilterHeader(
knownBestHash, false)
if err != nil {
return fmt.Errorf("Couldn't get latest basic header from "+
"%s: %s", correctSyncNode.P2PAddress(), err)
}
knownExtHeader, err := correctSyncNode.Node.GetCFilterHeader(
knownBestHash, true)
if err != nil {
return fmt.Errorf("Couldn't get latest extended header from "+
"%s: %s", correctSyncNode.P2PAddress(), err)
}
haveBasicHeader := &chainhash.Hash{}
haveExtHeader := &chainhash.Hash{}
	for (*knownBasicHeader.HeaderHashes[0] != *haveBasicHeader) ||
		(*knownExtHeader.HeaderHashes[0] != *haveExtHeader) {
if total > syncTimeout {
return fmt.Errorf("Timed out after %v waiting for "+
"cfheaders synchronization.", syncTimeout)
}
haveBasicHeader, _ = svc.GetBasicHeader(*knownBestHash)
haveExtHeader, _ = svc.GetExtHeader(*knownBestHash)
time.Sleep(syncUpdate)
total += syncUpdate
}
if logLevel != btclog.Off {
t.Logf("Synced cfheaders to %d (%s)", haveBest.Height,
haveBest.Hash)
}
// At this point, we know the latest cfheader is stored in the
// ChainService database. We now compare each cfheader the
// harness knows about to what's stored in the ChainService
// database to see if we've missed anything or messed anything
// up.
for i := int32(0); i <= haveBest.Height; i++ {
head, err := svc.GetBlockByHeight(uint32(i))
if err != nil {
return fmt.Errorf("Couldn't read block by "+
"height: %s", err)
}
hash := head.BlockHash()
haveBasicHeader, err = svc.GetBasicHeader(hash)
if err != nil {
return fmt.Errorf("Couldn't get basic header "+
"for %d (%s) from DB", i, hash)
}
haveExtHeader, err = svc.GetExtHeader(hash)
if err != nil {
return fmt.Errorf("Couldn't get extended "+
"header for %d (%s) from DB", i, hash)
}
knownBasicHeader, err =
correctSyncNode.Node.GetCFilterHeader(&hash,
false)
if err != nil {
return fmt.Errorf("Couldn't get basic header "+
"for %d (%s) from node %s", i, hash,
correctSyncNode.P2PAddress())
}
knownExtHeader, err =
correctSyncNode.Node.GetCFilterHeader(&hash,
true)
if err != nil {
return fmt.Errorf("Couldn't get extended "+
"header for %d (%s) from node %s", i,
hash, correctSyncNode.P2PAddress())
}
if *haveBasicHeader !=
*knownBasicHeader.HeaderHashes[0] {
return fmt.Errorf("Basic header for %d (%s) "+
"doesn't match node %s. DB: %s, node: "+
"%s", i, hash,
correctSyncNode.P2PAddress(),
haveBasicHeader,
knownBasicHeader.HeaderHashes[0])
}
if *haveExtHeader !=
*knownExtHeader.HeaderHashes[0] {
return fmt.Errorf("Extended header for %d (%s)"+
" doesn't match node %s. DB: %s, node:"+
" %s", i, hash,
correctSyncNode.P2PAddress(),
haveExtHeader,
knownExtHeader.HeaderHashes[0])
}
}
return nil
}
// testRandomBlocks goes through every block the ChainService knows about and
// ensures we can correctly get its block and cfilters, validating them
// against RPC and against values calculated from the block itself. This can
// be a little slow on large chains.
// TODO: Make this a benchmark instead.
func testRandomBlocks(t *testing.T, svc *spvchain.ChainService,
correctSyncNode *rpctest.Harness) error {
var haveBest *waddrmgr.BlockStamp
haveBest, err := svc.BestSnapshot()
if err != nil {
return fmt.Errorf("Couldn't get best snapshot from "+
"ChainService: %s", err)
}
	// Create an error channel with enough buffer space to hold one error
	// per block.
errChan := make(chan error, haveBest.Height)
// Test getting all of the blocks and filters.
var wg sync.WaitGroup
workerQueue := make(chan struct{}, numQueryThreads)
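	// workerQueue acts as a counting semaphore, capping the number of
	// in-flight block and filter queries at numQueryThreads.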
for i := int32(1); i <= haveBest.Height; i++ {
wg.Add(1)
height := uint32(i)
// Wait until there's room in the worker queue.
workerQueue <- struct{}{}
go func() {
// On exit, open a spot in workerQueue and tell the
// wait group we're done.
defer func() {
<-workerQueue
}()
defer wg.Done()
// Get block header from database.
blockHeader, err := svc.GetBlockByHeight(height)
if err != nil {
errChan <- fmt.Errorf("Couldn't get block "+
"header by height %d: %s", height, err)
return
}
blockHash := blockHeader.BlockHash()
// Get block via RPC.
wantBlock, err := correctSyncNode.Node.GetBlock(
&blockHash)
if err != nil {
errChan <- fmt.Errorf("Couldn't get block %d "+
"(%s) by RPC", height, blockHash)
return
}
// Get block from network.
haveBlock := svc.GetBlockFromNetwork(blockHash,
queryOptions...)
if haveBlock == nil {
errChan <- fmt.Errorf("Couldn't get block %d "+
"(%s) from network", height, blockHash)
return
}
// Check that network and RPC blocks match.
if !reflect.DeepEqual(*haveBlock.MsgBlock(),
*wantBlock) {
errChan <- fmt.Errorf("Block from network "+
"doesn't match block from RPC. Want: "+
"%s, RPC: %s, network: %s", blockHash,
wantBlock.BlockHash(),
haveBlock.MsgBlock().BlockHash())
return
}
// Check that block height matches what we have.
if int32(height) != haveBlock.Height() {
				errChan <- fmt.Errorf("Block height from "+
					"network doesn't match expected "+
					"height. Want: %d, network: %d",
					height, haveBlock.Height())
return
}
// Get basic cfilter from network.
haveFilter := svc.GetCFilter(blockHash, false,
queryOptions...)
if haveFilter == nil {
errChan <- fmt.Errorf("Couldn't get basic "+
"filter for block %d", height)
return
}
// Get basic cfilter from RPC.
wantFilter, err := correctSyncNode.Node.GetCFilter(
&blockHash, false)
if err != nil {
errChan <- fmt.Errorf("Couldn't get basic "+
"filter for block %d via RPC: %s",
height, err)
return
}
// Check that network and RPC cfilters match.
if !bytes.Equal(haveFilter.NBytes(), wantFilter.Data) {
errChan <- fmt.Errorf("Basic filter from P2P "+
"network/DB doesn't match RPC value "+
"for block %d", height)
return
}
// Calculate basic filter from block.
calcFilter, err := builder.BuildBasicFilter(
haveBlock.MsgBlock())
if err != nil {
errChan <- fmt.Errorf("Couldn't build basic "+
"filter for block %d (%s): %s", height,
blockHash, err)
return
}
// Check that the network value matches the calculated
// value from the block.
if !reflect.DeepEqual(*haveFilter, *calcFilter) {
errChan <- fmt.Errorf("Basic filter from P2P "+
"network/DB doesn't match calculated "+
"value for block %d", height)
return
}
// Get previous basic filter header from the database.
prevHeader, err := svc.GetBasicHeader(
blockHeader.PrevBlock)
if err != nil {
errChan <- fmt.Errorf("Couldn't get basic "+
"filter header for block %d (%s) from "+
"DB: %s", height-1,
blockHeader.PrevBlock, err)
return
}
// Get current basic filter header from the database.
curHeader, err := svc.GetBasicHeader(blockHash)
if err != nil {
				errChan <- fmt.Errorf("Couldn't get basic "+
					"filter header for block %d (%s) from "+
					"DB: %s", height, blockHash, err)
return
}
// Check that the filter and header line up.
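			// (MakeHeaderForFilter commits to both the filter and
			// the previous filter header, so this also verifies
			// the header-chain link.)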
calcHeader := builder.MakeHeaderForFilter(calcFilter,
*prevHeader)
if !bytes.Equal(curHeader[:], calcHeader[:]) {
errChan <- fmt.Errorf("Filter header doesn't "+
"match. Want: %s, got: %s", curHeader,
calcHeader)
return
}
// Get extended cfilter from network
haveFilter = svc.GetCFilter(blockHash, true,
queryOptions...)
if haveFilter == nil {
errChan <- fmt.Errorf("Couldn't get extended "+
"filter for block %d", height)
return
}
// Get extended cfilter from RPC
wantFilter, err = correctSyncNode.Node.GetCFilter(
&blockHash, true)
if err != nil {
errChan <- fmt.Errorf("Couldn't get extended "+
"filter for block %d via RPC: %s",
height, err)
return
}
// Check that network and RPC cfilters match
if !bytes.Equal(haveFilter.NBytes(), wantFilter.Data) {
errChan <- fmt.Errorf("Extended filter from "+
"P2P network/DB doesn't match RPC "+
"value for block %d", height)
return
}
// Calculate extended filter from block
calcFilter, err = builder.BuildExtFilter(
haveBlock.MsgBlock())
if err != nil {
errChan <- fmt.Errorf("Couldn't build extended"+
" filter for block %d (%s): %s", height,
blockHash, err)
return
}
// Check that the network value matches the calculated
// value from the block.
if !reflect.DeepEqual(*haveFilter, *calcFilter) {
errChan <- fmt.Errorf("Extended filter from "+
"P2P network/DB doesn't match "+
"calculated value for block %d", height)
return
}
// Get previous extended filter header from the
// database.
prevHeader, err = svc.GetExtHeader(
blockHeader.PrevBlock)
if err != nil {
errChan <- fmt.Errorf("Couldn't get extended "+
"filter header for block %d (%s) from "+
"DB: %s", height-1,
blockHeader.PrevBlock, err)
return
}
			// Get current extended filter header from the
			// database.
			curHeader, err = svc.GetExtHeader(blockHash)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get extended "+
					"filter header for block %d (%s) from "+
					"DB: %s", height, blockHash, err)
return
}
// Check that the filter and header line up.
calcHeader = builder.MakeHeaderForFilter(calcFilter,
*prevHeader)
if !bytes.Equal(curHeader[:], calcHeader[:]) {
errChan <- fmt.Errorf("Filter header doesn't "+
"match. Want: %s, got: %s", curHeader,
calcHeader)
return
}
}()
}
// Wait for all queries to finish.
wg.Wait()
	// Close the error channel so that the draining loop below terminates.
close(errChan)
var lastErr error
for err := range errChan {
if err != nil {
t.Errorf("%s", err)
lastErr = fmt.Errorf("Couldn't validate all " +
"blocks, filters, and filter headers.")
}
}
if logLevel != btclog.Off {
t.Logf("Finished checking %d blocks and their cfilters",
haveBest.Height)
}
return lastErr
}