// TODO: Break up tests into bite-sized pieces.

package spvchain_test

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/aakselrod/btctestlog"
	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/btcjson"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/rpctest"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btclog"
	"github.com/btcsuite/btcrpcclient"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcutil/gcs/builder"
	"github.com/btcsuite/btcwallet/spvsvc/spvchain"
	"github.com/btcsuite/btcwallet/waddrmgr"
	"github.com/btcsuite/btcwallet/wallet/txauthor"
	"github.com/btcsuite/btcwallet/walletdb"
	_ "github.com/btcsuite/btcwallet/walletdb/bdb"
)

var (
	// Try btclog.InfoLvl for output like you'd see in normal operation, or
	// btclog.TraceLvl to help debug code. Anything but btclog.Off turns on
	// log messages from the tests themselves as well. Keep in mind some
	// log messages may not appear in order due to use of multiple query
	// goroutines in the tests.
	logLevel    = btclog.Off
	syncTimeout = 30 * time.Second
	syncUpdate  = time.Second

	// Don't set this too high for your platform, or the tests will miss
	// messages.
	// TODO: Make this a benchmark instead.
	// TODO: Implement load limiting for both outgoing and incoming
	// messages.
	numQueryThreads = 50
	queryOptions    = []spvchain.QueryOption{
		//spvchain.NumRetries(5),
	}

	// The logged sequence of events we want to see. The value of i
	// represents the block for which a loop is generating a log entry,
	// given for readability only.
	// "bc": OnBlockConnected
	// "fc" xx: OnFilteredBlockConnected with xx (uint8) relevant TXs
	// "rv": OnRecvTx
	// "rd": OnRedeemingTx
	// "bd": OnBlockDisconnected
	// "fd": OnFilteredBlockDisconnected
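	// For example, the block at height 801 below, which carries the two
	// relevant receive transactions, contributes the bytes "bc", "rv",
	// "rv", "fc" followed by the count byte 0x02, while an empty block
	// contributes "bc", "fc" followed by 0x00.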
	wantLog = func() (log []byte) {
		for i := 796; i <= 800; i++ {
			// BlockConnected and FilteredBlockConnected
			log = append(log, []byte("bcfc")...)
			// 0 relevant TXs
			log = append(log, 0x00)
		}
		// Block with two relevant (receive) transactions
		log = append(log, []byte("bcrvrvfc")...)
		log = append(log, 0x02)
		// 124 blocks with nothing
		for i := 802; i <= 925; i++ {
			log = append(log, []byte("bcfc")...)
			log = append(log, 0x00)
		}
		// 2 blocks with 1 redeeming transaction each
		for i := 926; i <= 927; i++ {
			log = append(log, []byte("bcrdfc")...)
			log = append(log, 0x01)
		}
		// Block with nothing
		log = append(log, []byte("bcfc")...)
		log = append(log, 0x00)
		// 3 block rollback
		for i := 928; i >= 926; i-- {
			log = append(log, []byte("fdbd")...)
		}
		// 5 block empty reorg
		for i := 926; i <= 930; i++ {
			log = append(log, []byte("bcfc")...)
			log = append(log, 0x00)
		}
		// 5 block rollback
		for i := 930; i >= 926; i-- {
			log = append(log, []byte("fdbd")...)
		}
		// 2 blocks with 1 redeeming transaction each
		for i := 926; i <= 927; i++ {
			log = append(log, []byte("bcrdfc")...)
			log = append(log, 0x01)
		}
		// 8 block rest of reorg
		for i := 928; i <= 935; i++ {
			log = append(log, []byte("bcfc")...)
			log = append(log, 0x00)
		}
		return log
	}()

	// rescanMtx locks all the variables to which the rescan goroutine's
	// notifications write.
	rescanMtx sync.RWMutex

	// gotLog is where we accumulate the event log from the rescan. Then we
	// compare it to wantLog to see if the series of events the rescan saw
	// happened as expected.
	gotLog []byte

	// curBlockHeight lets the rescan goroutine track where it thinks the
	// chain is based on OnBlockConnected and OnBlockDisconnected.
	curBlockHeight int32

	// curFilteredBlockHeight lets the rescan goroutine track where it
	// thinks the chain is based on OnFilteredBlockConnected and
	// OnFilteredBlockDisconnected.
	curFilteredBlockHeight int32

	// ourKnownTxsByBlock lets the rescan goroutine keep track of
	// transactions we're interested in that are in the blockchain we're
	// following as signalled by OnBlockConnected, OnBlockDisconnected,
	// OnRecvTx, and OnRedeemingTx.
	ourKnownTxsByBlock = make(map[chainhash.Hash][]*btcutil.Tx)

	// ourKnownTxsByFilteredBlock lets the rescan goroutine keep track of
	// transactions we're interested in that are in the blockchain we're
	// following as signalled by OnFilteredBlockConnected and
	// OnFilteredBlockDisconnected.
	ourKnownTxsByFilteredBlock = make(map[chainhash.Hash][]*btcutil.Tx)
)
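
// Note: gotLog, curBlockHeight, curFilteredBlockHeight, ourKnownTxsByBlock,
// and ourKnownTxsByFilteredBlock are written by the rescan notification
// callbacks registered in startRescan and read by checkRescanStatus and
// waitForSync; that access is serialized through rescanMtx.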

// secSource is an implementation of the txauthor.SecretsSource interface that
// stores WitnessPubKeyHash addresses.
type secSource struct {
	keys    map[string]*btcec.PrivateKey
	scripts map[string]*[]byte
	params  *chaincfg.Params
}

func (s *secSource) add(privKey *btcec.PrivateKey) (btcutil.Address, error) {
	pubKeyHash := btcutil.Hash160(privKey.PubKey().SerializeCompressed())
	addr, err := btcutil.NewAddressWitnessPubKeyHash(pubKeyHash, s.params)
	if err != nil {
		return nil, err
	}
	script, err := txscript.PayToAddrScript(addr)
	if err != nil {
		return nil, err
	}
	s.keys[addr.String()] = privKey
	s.scripts[addr.String()] = &script
	_, addrs, _, err := txscript.ExtractPkScriptAddrs(script, s.params)
	if err != nil {
		return nil, err
	}
	if addrs[0].String() != addr.String() {
		return nil, fmt.Errorf("Encoded and decoded addresses don't "+
			"match. Encoded: %s, decoded: %s", addr, addrs[0])
	}
	return addr, nil
}

// GetKey is required by the txscript.KeyDB interface.
func (s *secSource) GetKey(addr btcutil.Address) (*btcec.PrivateKey, bool,
	error) {
	privKey, ok := s.keys[addr.String()]
	if !ok {
		return nil, true, fmt.Errorf("No key for address %s", addr)
	}
	return privKey, true, nil
}

// GetScript is required by the txscript.ScriptDB interface.
func (s *secSource) GetScript(addr btcutil.Address) ([]byte, error) {
	script, ok := s.scripts[addr.String()]
	if !ok {
		return nil, fmt.Errorf("No script for address %s", addr)
	}
	return *script, nil
}

// ChainParams is required by the SecretsSource interface.
func (s *secSource) ChainParams() *chaincfg.Params {
	return s.params
}

func newSecSource(params *chaincfg.Params) *secSource {
	return &secSource{
		keys:    make(map[string]*btcec.PrivateKey),
		scripts: make(map[string]*[]byte),
		params:  params,
	}
}
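
// The test below exercises secSource roughly as follows: generate a key, add
// it to get a pay-to-witness-pubkey-hash address, and look up the script in
// order to build outputs. A minimal sketch of that flow:
//
//	secSrc := newSecSource(&chaincfg.SimNetParams)
//	privKey, _ := btcec.NewPrivateKey(btcec.S256())
//	addr, _ := secSrc.add(privKey)
//	script, _ := secSrc.GetScript(addr)
//	out := wire.TxOut{PkScript: script, Value: 1000000000}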

func TestSetup(t *testing.T) {
	// Set up logging.
	logger, err := btctestlog.NewTestLogger(t)
	if err != nil {
		t.Fatalf("Could not set up logger: %s", err)
	}
	chainLogger := btclog.NewSubsystemLogger(logger, "CHAIN: ")
	chainLogger.SetLevel(logLevel)
	spvchain.UseLogger(chainLogger)
	rpcLogger := btclog.NewSubsystemLogger(logger, "RPCC: ")
	rpcLogger.SetLevel(logLevel)
	btcrpcclient.UseLogger(rpcLogger)

	// Create a btcd SimNet node and generate 500 blocks
	h1, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
	if err != nil {
		t.Fatalf("Couldn't create harness: %s", err)
	}
	defer h1.TearDown()
	err = h1.SetUp(false, 0)
	if err != nil {
		t.Fatalf("Couldn't set up harness: %s", err)
	}
	_, err = h1.Node.Generate(500)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}

	// Create a second btcd SimNet node
	h2, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
	if err != nil {
		t.Fatalf("Couldn't create harness: %s", err)
	}
	defer h2.TearDown()
	err = h2.SetUp(false, 0)
	if err != nil {
		t.Fatalf("Couldn't set up harness: %s", err)
	}

	// Create a third btcd SimNet node and generate 900 blocks
	h3, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
	if err != nil {
		t.Fatalf("Couldn't create harness: %s", err)
	}
	defer h3.TearDown()
	err = h3.SetUp(false, 0)
	if err != nil {
		t.Fatalf("Couldn't set up harness: %s", err)
	}
	_, err = h3.Node.Generate(900)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}

	// Connect, sync, and disconnect h1 and h2
	err = csd([]*rpctest.Harness{h1, h2})
	if err != nil {
		t.Fatalf("Couldn't connect/sync/disconnect h1 and h2: %s", err)
	}

	// Generate 300 blocks on the first node and 350 on the second
	_, err = h1.Node.Generate(300)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}
	_, err = h2.Node.Generate(350)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}

	// Now we have a node with 800 blocks (h1), 850 blocks (h2), and
	// 900 blocks (h3). The chains of nodes h1 and h2 match up to block
	// 500. By default, a synchronizing wallet connected to all three
	// should synchronize to h3. However, we're going to take checkpoints
	// from h1 at 111, 333, 555, and 777, and add those to the
	// synchronizing wallet's chain parameters so that it should
	// disconnect from h3 at block 111, and from h2 at block 555, and
	// then synchronize to block 800 from h1. Order of connection is
	// unfortunately not guaranteed, so the reorg may not happen with every
	// test.

	// Copy parameters and insert checkpoints
	modParams := chaincfg.SimNetParams
	for _, height := range []int64{111, 333, 555, 777} {
		hash, err := h1.Node.GetBlockHash(height)
		if err != nil {
			t.Fatalf("Couldn't get block hash for height %d: %s",
				height, err)
		}
		modParams.Checkpoints = append(modParams.Checkpoints,
			chaincfg.Checkpoint{
				Hash:   hash,
				Height: int32(height),
			})
	}

	// Create a temporary directory, initialize an empty walletdb with an
	// SPV chain namespace, and create a configuration for the ChainService.
	tempDir, err := ioutil.TempDir("", "spvchain")
	if err != nil {
		t.Fatalf("Failed to create temporary directory: %s", err)
	}
	defer os.RemoveAll(tempDir)
	db, err := walletdb.Create("bdb", tempDir+"/weks.db")
	if err != nil {
		t.Fatalf("Error opening DB: %s\n", err)
	}
	defer db.Close()
	config := spvchain.Config{
		DataDir:     tempDir,
		Database:    db,
		ChainParams: modParams,
		AddPeers: []string{
			h3.P2PAddress(),
			h2.P2PAddress(),
			h1.P2PAddress(),
		},
	}

	spvchain.Services = 0
	spvchain.MaxPeers = 3
	spvchain.BanDuration = 5 * time.Second
	spvchain.RequiredServices = wire.SFNodeNetwork
	spvchain.WaitForMoreCFHeaders = time.Second
	svc, err := spvchain.NewChainService(config)
	if err != nil {
		t.Fatalf("Error creating ChainService: %s", err)
	}
	svc.Start()
	defer svc.Stop()

	// Make sure the client synchronizes with the correct node
	err = waitForSync(t, svc, h1)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}

	// Test that we can get blocks and cfilters via P2P and decide which are
	// valid and which aren't.
	// TODO: This test is disabled until I factor it out into a benchmark.
	// Otherwise, it takes too long.
	/*err = testRandomBlocks(t, svc, h1)
	if err != nil {
		t.Fatalf("Testing blocks and cfilters failed: %s", err)
	}*/

	// Generate an address and send it some coins on the h1 chain. We use
	// this to test rescans and notifications.
	secSrc := newSecSource(&modParams)
	privKey1, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		t.Fatalf("Couldn't generate private key: %s", err)
	}
	addr1, err := secSrc.add(privKey1)
	if err != nil {
		t.Fatalf("Couldn't create address from key: %s", err)
	}
	script1, err := secSrc.GetScript(addr1)
	if err != nil {
		t.Fatalf("Couldn't create script from address: %s", err)
	}
	out1 := wire.TxOut{
		PkScript: script1,
		Value:    1000000000,
	}
	// Fee rate is satoshis per byte
	tx1, err := h1.CreateTransaction([]*wire.TxOut{&out1}, 1000)
	if err != nil {
		t.Fatalf("Couldn't create transaction from script: %s", err)
	}
	_, err = h1.Node.SendRawTransaction(tx1, true)
	if err != nil {
		t.Fatalf("Unable to send raw transaction to node: %s", err)
	}
	// Fee rate is satoshis per byte
	tx2, err := h1.CreateTransaction([]*wire.TxOut{&out1}, 1000)
	if err != nil {
		t.Fatalf("Couldn't create transaction from script: %s", err)
	}
	_, err = h1.Node.SendRawTransaction(tx2, true)
	if err != nil {
		t.Fatalf("Unable to send raw transaction to node: %s", err)
	}
	_, err = h1.Node.Generate(1)
	if err != nil {
		t.Fatalf("Couldn't generate/submit block: %s", err)
	}
	err = waitForSync(t, svc, h1)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}

	// Do a rescan that searches only for a specific TXID
	startBlock := waddrmgr.BlockStamp{Height: 795}
	endBlock := waddrmgr.BlockStamp{Height: 801}
	var foundTx *btcutil.Tx
	err = svc.Rescan(
		spvchain.StartBlock(&startBlock),
		spvchain.EndBlock(&endBlock),
		spvchain.WatchTXIDs(tx1.TxHash()),
		spvchain.NotificationHandlers(btcrpcclient.NotificationHandlers{
			OnFilteredBlockConnected: func(height int32,
				header *wire.BlockHeader,
				relevantTxs []*btcutil.Tx) {
				if height == 801 {
					if len(relevantTxs) != 1 {
						t.Fatalf("Didn't get expected "+
							"number of relevant "+
							"transactions from "+
							"rescan: want 1, got "+
							"%d", len(relevantTxs))
					}
					if *(relevantTxs[0].Hash()) !=
						tx1.TxHash() {
						t.Fatalf("Didn't get expected "+
							"relevant transaction:"+
							" want %s, got %s",
							tx1.TxHash(),
							relevantTxs[0].Hash())
					}
					foundTx = relevantTxs[0]
				}
			},
		}),
	)
	if err != nil || foundTx == nil || *(foundTx.Hash()) != tx1.TxHash() {
		t.Fatalf("Couldn't rescan chain for transaction %s: %s",
			tx1.TxHash(), err)
	}
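
	// The Rescan call above runs synchronously and stops at EndBlock,
	// which is why foundTx can be checked as soon as it returns. The
	// rescan started below, by contrast, has no end block: it runs in its
	// own goroutine and keeps delivering notifications until quitRescan
	// is closed at the end of the test.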

	// Start a rescan with notifications in another goroutine. We'll kill
	// it with a quit channel at the end and make sure we got the expected
	// results.
	quitRescan := make(chan struct{})
	startBlock = waddrmgr.BlockStamp{Height: 795}
	err = startRescan(t, svc, addr1, &startBlock, quitRescan)
	if err != nil {
		t.Fatalf("Couldn't start a rescan for %s: %s", addr1, err)
	}
	err = waitForSync(t, svc, h1)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}

	numTXs, _, err := checkRescanStatus()
	if numTXs != 2 {
		t.Fatalf("Wrong number of relevant transactions. Want: 2, got:"+
			" %d", numTXs)
	}

	// Generate 124 blocks on h1 to make sure it reorgs the other nodes.
	// Ensure the ChainService instance stays caught up.
	_, err = h1.Node.Generate(124)
	if err != nil {
		t.Fatalf("Couldn't generate blocks: %s", err)
	}
	err = waitForSync(t, svc, h1)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}

	// Connect/sync/disconnect h2 to make it reorg to the h1 chain.
	err = csd([]*rpctest.Harness{h1, h2})
	if err != nil {
		t.Fatalf("Couldn't sync h2 to h1: %s", err)
	}

	// Spend the outputs we sent ourselves over two blocks.
	inSrc := func(tx wire.MsgTx) func(target btcutil.Amount) (
		total btcutil.Amount, inputs []*wire.TxIn,
		inputValues []btcutil.Amount, scripts [][]byte, err error) {
		ourIndex := 1 << 30 // Should work on 32-bit systems
		for i, txo := range tx.TxOut {
			if bytes.Equal(txo.PkScript, script1) {
				ourIndex = i
			}
		}
		return func(target btcutil.Amount) (total btcutil.Amount,
			inputs []*wire.TxIn, inputValues []btcutil.Amount,
			scripts [][]byte, err error) {
			if ourIndex == 1<<30 {
				err = fmt.Errorf("Couldn't find our address " +
					"in the passed transaction's outputs.")
				return
			}
			total = target
			inputs = []*wire.TxIn{
				{
					PreviousOutPoint: wire.OutPoint{
						Hash:  tx.TxHash(),
						Index: uint32(ourIndex),
					},
				},
			}
			inputValues = []btcutil.Amount{
				btcutil.Amount(tx.TxOut[ourIndex].Value)}
			scripts = [][]byte{tx.TxOut[ourIndex].PkScript}
			err = nil
			return
		}
	}
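
	// inSrc(tx) builds a closure with the same shape as the input-source
	// callback that txauthor.NewUnsignedTransaction takes below: given a
	// target amount, it returns the output of tx that pays to script1 as
	// the sole input, along with its value and pkScript.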

	// Create another address to send to so we don't trip the rescan with
	// the old address and we can test monitoring both OutPoint usage and
	// receipt by addresses.
	privKey2, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		t.Fatalf("Couldn't generate private key: %s", err)
	}
	addr2, err := secSrc.add(privKey2)
	if err != nil {
		t.Fatalf("Couldn't create address from key: %s", err)
	}
	script2, err := secSrc.GetScript(addr2)
	if err != nil {
		t.Fatalf("Couldn't create script from address: %s", err)
	}
	out2 := wire.TxOut{
		PkScript: script2,
		Value:    500000000,
	}

	// Spend the first transaction and mine a block.
	authTx1, err := txauthor.NewUnsignedTransaction(
		[]*wire.TxOut{
			&out2,
		},
		// Fee rate is satoshis per kilobyte
		1024000,
		inSrc(*tx1),
		func() ([]byte, error) {
			return script2, nil
		},
	)
	if err != nil {
		t.Fatalf("Couldn't create unsigned transaction: %s", err)
	}
	err = authTx1.AddAllInputScripts(secSrc)
	if err != nil {
		t.Fatalf("Couldn't sign transaction: %s", err)
	}
	_, err = h1.Node.SendRawTransaction(authTx1.Tx, true)
	if err != nil {
		t.Fatalf("Unable to send raw transaction to node: %s", err)
	}
	_, err = h1.Node.Generate(1)
	if err != nil {
		t.Fatalf("Couldn't generate/submit block: %s", err)
	}
	err = waitForSync(t, svc, h1)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}
	numTXs, _, err = checkRescanStatus()
	if numTXs != 3 {
		t.Fatalf("Wrong number of relevant transactions. Want: 3, got:"+
			" %d", numTXs)
	}

	// Spend the second transaction and mine a block.
	authTx2, err := txauthor.NewUnsignedTransaction(
		[]*wire.TxOut{
			&out2,
		},
		// Fee rate is satoshis per kilobyte
		1024000,
		inSrc(*tx2),
		func() ([]byte, error) {
			return script2, nil
		},
	)
	if err != nil {
		t.Fatalf("Couldn't create unsigned transaction: %s", err)
	}
	err = authTx2.AddAllInputScripts(secSrc)
	if err != nil {
		t.Fatalf("Couldn't sign transaction: %s", err)
	}
	_, err = h1.Node.SendRawTransaction(authTx2.Tx, true)
	if err != nil {
		t.Fatalf("Unable to send raw transaction to node: %s", err)
	}
	_, err = h1.Node.Generate(1)
	if err != nil {
		t.Fatalf("Couldn't generate/submit block: %s", err)
	}
	err = waitForSync(t, svc, h1)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}
	numTXs, _, err = checkRescanStatus()
	if numTXs != 4 {
		t.Fatalf("Wrong number of relevant transactions. Want: 4, got:"+
			" %d", numTXs)
	}

	// Generate 1 block on h1, one at a time, to make sure the
	// ChainService instance stays caught up.
	for i := 0; i < 1; i++ {
		_, err = h1.Node.Generate(1)
		if err != nil {
			t.Fatalf("Couldn't generate/submit block: %s", err)
		}
		err = waitForSync(t, svc, h1)
		if err != nil {
			t.Fatalf("Couldn't sync ChainService: %s", err)
		}
	}

	// Generate 5 blocks on h2 and wait for ChainService to sync to the
	// newly-best chain on h2.
	_, err = h2.Node.Generate(5)
	if err != nil {
		t.Fatalf("Couldn't generate/submit blocks: %s", err)
	}
	err = waitForSync(t, svc, h2)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}
	numTXs, _, err = checkRescanStatus()
	if numTXs != 2 {
		t.Fatalf("Wrong number of relevant transactions. Want: 2, got:"+
			" %d", numTXs)
	}

	// Generate 7 blocks on h1 and wait for ChainService to sync to the
	// newly-best chain on h1.
	_, err = h1.Node.Generate(7)
	if err != nil {
		t.Fatalf("Couldn't generate/submit block: %s", err)
	}
	err = waitForSync(t, svc, h1)
	if err != nil {
		t.Fatalf("Couldn't sync ChainService: %s", err)
	}
	numTXs, _, err = checkRescanStatus()
	if numTXs != 4 {
		t.Fatalf("Wrong number of relevant transactions. Want: 4, got:"+
			" %d", numTXs)
	}

	close(quitRescan)
	if !bytes.Equal(wantLog, gotLog) {
		t.Fatalf("Rescan event logs incorrect.\nWant: %s\nGot: %s\n",
			wantLog, gotLog)
	}
}

// csd does a connect-sync-disconnect between nodes in order to support
// reorg testing. It brings up and tears down a temporary node, otherwise the
// nodes try to reconnect to each other which results in unintended reorgs.
func csd(harnesses []*rpctest.Harness) error {
	hTemp, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
	if err != nil {
		return err
	}
	// Tear down node at the end of the function.
	defer hTemp.TearDown()
	err = hTemp.SetUp(false, 0)
	if err != nil {
		return err
	}
	for _, harness := range harnesses {
		err = rpctest.ConnectNode(hTemp, harness)
		if err != nil {
			return err
		}
	}
	return rpctest.JoinNodes(harnesses, rpctest.Blocks)
}

// waitForSync waits for the ChainService to sync to the current chain state.
func waitForSync(t *testing.T, svc *spvchain.ChainService,
	correctSyncNode *rpctest.Harness) error {
	knownBestHash, knownBestHeight, err :=
		correctSyncNode.Node.GetBestBlock()
	if err != nil {
		return err
	}
	if logLevel != btclog.Off {
		t.Logf("Syncing to %d (%s)", knownBestHeight, knownBestHash)
	}
	var haveBest *waddrmgr.BlockStamp
	haveBest, err = svc.BestSnapshot()
	if err != nil {
		return fmt.Errorf("Couldn't get best snapshot from "+
			"ChainService: %s", err)
	}
	var total time.Duration
	for haveBest.Hash != *knownBestHash {
		if total > syncTimeout {
			return fmt.Errorf("Timed out after %v waiting for "+
				"header synchronization.", syncTimeout)
		}
		if haveBest.Height > knownBestHeight {
			return fmt.Errorf("Synchronized to the wrong chain.")
		}
		time.Sleep(syncUpdate)
		total += syncUpdate
		haveBest, err = svc.BestSnapshot()
		if err != nil {
			return fmt.Errorf("Couldn't get best snapshot from "+
				"ChainService: %s", err)
		}
	}
	// Check if we're current.
	if !svc.IsCurrent() {
		return fmt.Errorf("ChainService doesn't see itself as current!")
	}
	// Check if we have all of the cfheaders.
	knownBasicHeader, err := correctSyncNode.Node.GetCFilterHeader(
		knownBestHash, false)
	if err != nil {
		return fmt.Errorf("Couldn't get latest basic header from "+
			"%s: %s", correctSyncNode.P2PAddress(), err)
	}
	knownExtHeader, err := correctSyncNode.Node.GetCFilterHeader(
		knownBestHash, true)
	if err != nil {
		return fmt.Errorf("Couldn't get latest extended header from "+
			"%s: %s", correctSyncNode.P2PAddress(), err)
	}
	haveBasicHeader := &chainhash.Hash{}
	haveExtHeader := &chainhash.Hash{}
	for (*knownBasicHeader.HeaderHashes[0] != *haveBasicHeader) &&
		(*knownExtHeader.HeaderHashes[0] != *haveExtHeader) {
		if total > syncTimeout {
			return fmt.Errorf("Timed out after %v waiting for "+
				"cfheaders synchronization.", syncTimeout)
		}
		haveBasicHeader, _ = svc.GetBasicHeader(*knownBestHash)
		haveExtHeader, _ = svc.GetExtHeader(*knownBestHash)
		time.Sleep(syncUpdate)
		total += syncUpdate
	}
	if logLevel != btclog.Off {
		t.Logf("Synced cfheaders to %d (%s)", haveBest.Height,
			haveBest.Hash)
	}
	// At this point, we know the latest cfheader is stored in the
	// ChainService database. We now compare each cfheader the
	// harness knows about to what's stored in the ChainService
	// database to see if we've missed anything or messed anything
	// up.
	for i := int32(0); i <= haveBest.Height; i++ {
		head, err := svc.GetBlockByHeight(uint32(i))
		if err != nil {
			return fmt.Errorf("Couldn't read block by "+
				"height: %s", err)
		}
		hash := head.BlockHash()
		haveBasicHeader, err = svc.GetBasicHeader(hash)
		if err != nil {
			return fmt.Errorf("Couldn't get basic header "+
				"for %d (%s) from DB", i, hash)
		}
		haveExtHeader, err = svc.GetExtHeader(hash)
		if err != nil {
			return fmt.Errorf("Couldn't get extended "+
				"header for %d (%s) from DB", i, hash)
		}
		knownBasicHeader, err =
			correctSyncNode.Node.GetCFilterHeader(&hash,
				false)
		if err != nil {
			return fmt.Errorf("Couldn't get basic header "+
				"for %d (%s) from node %s", i, hash,
				correctSyncNode.P2PAddress())
		}
		knownExtHeader, err =
			correctSyncNode.Node.GetCFilterHeader(&hash,
				true)
		if err != nil {
			return fmt.Errorf("Couldn't get extended "+
				"header for %d (%s) from node %s", i,
				hash, correctSyncNode.P2PAddress())
		}
		if *haveBasicHeader !=
			*knownBasicHeader.HeaderHashes[0] {
			return fmt.Errorf("Basic header for %d (%s) "+
				"doesn't match node %s. DB: %s, node: "+
				"%s", i, hash,
				correctSyncNode.P2PAddress(),
				haveBasicHeader,
				knownBasicHeader.HeaderHashes[0])
		}
		if *haveExtHeader !=
			*knownExtHeader.HeaderHashes[0] {
			return fmt.Errorf("Extended header for %d (%s)"+
				" doesn't match node %s. DB: %s, node:"+
				" %s", i, hash,
				correctSyncNode.P2PAddress(),
				haveExtHeader,
				knownExtHeader.HeaderHashes[0])
		}
	}
	// At this point, we know we have good cfheaders. Now we wait for the
	// rescan, if one is going, to catch up.
	for {
		time.Sleep(syncUpdate)
		total += syncUpdate
		rescanMtx.RLock()
		// We don't want to do this if we haven't started a rescan
		// yet.
		if len(gotLog) == 0 {
			rescanMtx.RUnlock()
			break
		}
		_, rescanHeight, err := checkRescanStatus()
		if err != nil {
			rescanMtx.RUnlock()
			return err
		}
		if logLevel != btclog.Off {
			t.Logf("Rescan caught up to block %d", rescanHeight)
		}
		if rescanHeight == haveBest.Height {
			rescanMtx.RUnlock()
			break
		}
		if total > syncTimeout {
			rescanMtx.RUnlock()
			return fmt.Errorf("Timed out after %v waiting for "+
				"rescan to catch up.", syncTimeout)
		}
		rescanMtx.RUnlock()
	}
	return nil
}

// testRandomBlocks goes through all blocks in random order and ensures we can
// correctly get cfilters from them. It uses numQueryThreads goroutines running
// at the same time to go through this. 50 is comfortable on my somewhat dated
// laptop with default query optimization settings.
// TODO: Make this a benchmark instead.
func testRandomBlocks(t *testing.T, svc *spvchain.ChainService,
	correctSyncNode *rpctest.Harness) error {
	var haveBest *waddrmgr.BlockStamp
	haveBest, err := svc.BestSnapshot()
	if err != nil {
		return fmt.Errorf("Couldn't get best snapshot from "+
			"ChainService: %s", err)
	}
	// Keep track of an error channel with enough buffer space to track one
	// error per block.
	errChan := make(chan error, haveBest.Height)
	// Test getting all of the blocks and filters.
	var wg sync.WaitGroup
	workerQueue := make(chan struct{}, numQueryThreads)
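	// workerQueue acts as a counting semaphore: the loop below sends a
	// token before launching each query goroutine and every goroutine
	// releases its token on exit, so at most numQueryThreads block and
	// filter queries are in flight at once.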
	for i := int32(1); i <= haveBest.Height; i++ {
		wg.Add(1)
		height := uint32(i)
		// Wait until there's room in the worker queue.
		workerQueue <- struct{}{}
		go func() {
			// On exit, open a spot in workerQueue and tell the
			// wait group we're done.
			defer func() {
				<-workerQueue
			}()
			defer wg.Done()
			// Get block header from database.
			blockHeader, err := svc.GetBlockByHeight(height)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get block "+
					"header by height %d: %s", height, err)
				return
			}
			blockHash := blockHeader.BlockHash()
			// Get block via RPC.
			wantBlock, err := correctSyncNode.Node.GetBlock(
				&blockHash)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get block %d "+
					"(%s) by RPC", height, blockHash)
				return
			}
			// Get block from network.
			haveBlock := svc.GetBlockFromNetwork(blockHash,
				queryOptions...)
			if haveBlock == nil {
				errChan <- fmt.Errorf("Couldn't get block %d "+
					"(%s) from network", height, blockHash)
				return
			}
			// Check that network and RPC blocks match.
			if !reflect.DeepEqual(*haveBlock.MsgBlock(),
				*wantBlock) {
				errChan <- fmt.Errorf("Block from network "+
					"doesn't match block from RPC. Want: "+
					"%s, RPC: %s, network: %s", blockHash,
					wantBlock.BlockHash(),
					haveBlock.MsgBlock().BlockHash())
				return
			}
			// Check that block height matches what we have.
			if int32(height) != haveBlock.Height() {
				errChan <- fmt.Errorf("Block height from "+
					"network doesn't match expected "+
					"height. Want: %d, network: %d",
					height, haveBlock.Height())
				return
			}
			// Get basic cfilter from network.
			haveFilter := svc.GetCFilter(blockHash, false,
				queryOptions...)
			if haveFilter == nil {
				errChan <- fmt.Errorf("Couldn't get basic "+
					"filter for block %d", height)
				return
			}
			// Get basic cfilter from RPC.
			wantFilter, err := correctSyncNode.Node.GetCFilter(
				&blockHash, false)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get basic "+
					"filter for block %d via RPC: %s",
					height, err)
				return
			}
			// Check that network and RPC cfilters match.
			if !bytes.Equal(haveFilter.NBytes(), wantFilter.Data) {
				errChan <- fmt.Errorf("Basic filter from P2P "+
					"network/DB doesn't match RPC value "+
					"for block %d", height)
				return
			}
			// Calculate basic filter from block.
			calcFilter, err := builder.BuildBasicFilter(
				haveBlock.MsgBlock())
			if err != nil {
				errChan <- fmt.Errorf("Couldn't build basic "+
					"filter for block %d (%s): %s", height,
					blockHash, err)
				return
			}
			// Check that the network value matches the calculated
			// value from the block.
			if !reflect.DeepEqual(*haveFilter, *calcFilter) {
				errChan <- fmt.Errorf("Basic filter from P2P "+
					"network/DB doesn't match calculated "+
					"value for block %d", height)
				return
			}
			// Get previous basic filter header from the database.
			prevHeader, err := svc.GetBasicHeader(
				blockHeader.PrevBlock)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get basic "+
					"filter header for block %d (%s) from "+
					"DB: %s", height-1,
					blockHeader.PrevBlock, err)
				return
			}
			// Get current basic filter header from the database.
			curHeader, err := svc.GetBasicHeader(blockHash)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get basic "+
					"filter header for block %d (%s) from "+
					"DB: %s", height, blockHash, err)
				return
			}
			// Check that the filter and header line up.
			calcHeader := builder.MakeHeaderForFilter(calcFilter,
				*prevHeader)
			if !bytes.Equal(curHeader[:], calcHeader[:]) {
				errChan <- fmt.Errorf("Filter header doesn't "+
					"match. Want: %s, got: %s", curHeader,
					calcHeader)
				return
			}
			// Get extended cfilter from network
			haveFilter = svc.GetCFilter(blockHash, true,
				queryOptions...)
			if haveFilter == nil {
				errChan <- fmt.Errorf("Couldn't get extended "+
					"filter for block %d", height)
				return
			}
			// Get extended cfilter from RPC
			wantFilter, err = correctSyncNode.Node.GetCFilter(
				&blockHash, true)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get extended "+
					"filter for block %d via RPC: %s",
					height, err)
				return
			}
			// Check that network and RPC cfilters match
			if !bytes.Equal(haveFilter.NBytes(), wantFilter.Data) {
				errChan <- fmt.Errorf("Extended filter from "+
					"P2P network/DB doesn't match RPC "+
					"value for block %d", height)
				return
			}
			// Calculate extended filter from block
			calcFilter, err = builder.BuildExtFilter(
				haveBlock.MsgBlock())
			if err != nil {
				errChan <- fmt.Errorf("Couldn't build extended"+
					" filter for block %d (%s): %s", height,
					blockHash, err)
				return
			}
			// Check that the network value matches the calculated
			// value from the block.
			if !reflect.DeepEqual(*haveFilter, *calcFilter) {
				errChan <- fmt.Errorf("Extended filter from "+
					"P2P network/DB doesn't match "+
					"calculated value for block %d", height)
				return
			}
			// Get previous extended filter header from the
			// database.
			prevHeader, err = svc.GetExtHeader(
				blockHeader.PrevBlock)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get extended "+
					"filter header for block %d (%s) from "+
					"DB: %s", height-1,
					blockHeader.PrevBlock, err)
				return
			}
			// Get current extended filter header from the
			// database.
			curHeader, err = svc.GetExtHeader(blockHash)
			if err != nil {
				errChan <- fmt.Errorf("Couldn't get extended "+
					"filter header for block %d (%s) from "+
					"DB: %s", height,
					blockHash, err)
				return
			}
			// Check that the filter and header line up.
			calcHeader = builder.MakeHeaderForFilter(calcFilter,
				*prevHeader)
			if !bytes.Equal(curHeader[:], calcHeader[:]) {
				errChan <- fmt.Errorf("Filter header doesn't "+
					"match. Want: %s, got: %s", curHeader,
					calcHeader)
				return
			}
		}()
	}
	// Wait for all queries to finish.
	wg.Wait()
	// Close the error channel so the error-collection loop below can
	// finish.
	close(errChan)
	var lastErr error
	for err := range errChan {
		if err != nil {
			t.Errorf("%s", err)
			lastErr = fmt.Errorf("Couldn't validate all " +
				"blocks, filters, and filter headers.")
		}
	}
	if logLevel != btclog.Off {
		t.Logf("Finished checking %d blocks and their cfilters",
			haveBest.Height)
	}
	return lastErr
}

// startRescan starts a rescan in another goroutine, and logs all notifications
// from the rescan. At the end, the log should match one we precomputed based
// on the flow of the test. The rescan starts at the given start block and the
// notifications continue until the `quit` channel is closed.
func startRescan(t *testing.T, svc *spvchain.ChainService, addr btcutil.Address,
	startBlock *waddrmgr.BlockStamp, quit <-chan struct{}) error {
	go svc.Rescan(
		spvchain.QuitChan(quit),
		spvchain.WatchAddrs(addr),
		spvchain.StartBlock(startBlock),
		spvchain.NotificationHandlers(btcrpcclient.NotificationHandlers{
			OnBlockConnected: func(hash *chainhash.Hash,
				height int32, time time.Time) {
				rescanMtx.Lock()
				gotLog = append(gotLog, []byte("bc")...)
				curBlockHeight = height
				rescanMtx.Unlock()
			},
			OnBlockDisconnected: func(hash *chainhash.Hash,
				height int32, time time.Time) {
				rescanMtx.Lock()
				delete(ourKnownTxsByBlock, *hash)
				gotLog = append(gotLog, []byte("bd")...)
				curBlockHeight = height - 1
				rescanMtx.Unlock()
			},
			OnRecvTx: func(tx *btcutil.Tx,
				details *btcjson.BlockDetails) {
				rescanMtx.Lock()
				hash, err := chainhash.NewHashFromStr(
					details.Hash)
				if err != nil {
					t.Errorf("Couldn't decode hash %s: %s",
						details.Hash, err)
				}
				ourKnownTxsByBlock[*hash] = append(
					ourKnownTxsByBlock[*hash], tx)
				gotLog = append(gotLog, []byte("rv")...)
				rescanMtx.Unlock()
			},
			OnRedeemingTx: func(tx *btcutil.Tx,
				details *btcjson.BlockDetails) {
				rescanMtx.Lock()
				hash, err := chainhash.NewHashFromStr(
					details.Hash)
				if err != nil {
					t.Errorf("Couldn't decode hash %s: %s",
						details.Hash, err)
				}
				ourKnownTxsByBlock[*hash] = append(
					ourKnownTxsByBlock[*hash], tx)
				gotLog = append(gotLog, []byte("rd")...)
				rescanMtx.Unlock()
			},
			OnFilteredBlockConnected: func(height int32,
				header *wire.BlockHeader,
				relevantTxs []*btcutil.Tx) {
				rescanMtx.Lock()
				ourKnownTxsByFilteredBlock[header.BlockHash()] =
					relevantTxs
				gotLog = append(gotLog, []byte("fc")...)
				gotLog = append(gotLog, uint8(len(relevantTxs)))
				curFilteredBlockHeight = height
				rescanMtx.Unlock()
			},
			OnFilteredBlockDisconnected: func(height int32,
				header *wire.BlockHeader) {
				rescanMtx.Lock()
				delete(ourKnownTxsByFilteredBlock,
					header.BlockHash())
				gotLog = append(gotLog, []byte("fd")...)
				curFilteredBlockHeight = height - 1
				rescanMtx.Unlock()
			},
		}),
	)
	return nil
}

// checkRescanStatus returns the number of relevant transactions we currently
// know about and the currently known height.
func checkRescanStatus() (int, int32, error) {
	var txCount [2]int
	rescanMtx.RLock()
	defer rescanMtx.RUnlock()
	for _, list := range ourKnownTxsByBlock {
		txCount[0] += len(list)
	}
	for _, list := range ourKnownTxsByFilteredBlock {
		txCount[1] += len(list)
	}
	if txCount[0] != txCount[1] {
		return 0, 0, fmt.Errorf("Conflicting transaction count " +
			"between notifications.")
	}
	if curBlockHeight != curFilteredBlockHeight {
		return 0, 0, fmt.Errorf("Conflicting block height between " +
			"notifications.")
	}
	return txCount[0], curBlockHeight, nil
}