Fix reorgs, improve string formatting/errors/logging.

commit 0a8de495cc (parent f2aceaf363)
4 changed files with 237 additions and 145 deletions

@@ -2,6 +2,7 @@ package spvchain
 import (
 	"container/list"
+	"fmt"
 	"math/big"
 	"sync"
 	"sync/atomic"

@@ -127,6 +128,7 @@ type blockManager struct {
 	quit chan struct{}
 
 	headerList     *list.List
+	reorgList      *list.List
 	startHeader    *list.Element
 	nextCheckpoint *chaincfg.Checkpoint

@@ -148,6 +150,7 @@ func newBlockManager(s *ChainService) (*blockManager, error) {
 		progressLogger:      newBlockProgressLogger("Processed", log),
 		msgChan:             make(chan interface{}, MaxPeers*3),
 		headerList:          list.New(),
+		reorgList:           list.New(),
 		quit:                make(chan struct{}),
 		blocksPerRetarget:   int32(targetTimespan / targetTimePerBlock),
 		minRetargetTimespan: targetTimespan / adjustmentFactor,

@@ -258,8 +261,7 @@ func (b *blockManager) handleDonePeerMsg(peers *list.List, sp *serverPeer) {
 	}
 
 	// Attempt to find a new peer to sync from if the quitting peer is the
-	// sync peer. Also, reset the headers-first state if in headers-first
-	// mode so
+	// sync peer. Also, reset the header state.
 	if b.syncPeer != nil && b.syncPeer == sp {
 		b.syncPeer = nil
 		header, height, err := b.server.LatestBlock()

@@ -423,7 +425,7 @@ func (b *blockManager) startSync(peers *list.List) {
 	best, err := b.server.BestSnapshot()
 	if err != nil {
 		log.Errorf("Failed to get hash and height for the "+
-			"latest block: %v", err)
+			"latest block: %s", err)
 		return
 	}
 	var bestPeer *serverPeer

@@ -460,11 +462,11 @@ func (b *blockManager) startSync(peers *list.List) {
 	locator, err := b.server.LatestBlockLocator()
 	if err != nil {
 		log.Errorf("Failed to get block locator for the "+
-			"latest block: %v", err)
+			"latest block: %s", err)
 		return
 	}
 
-	log.Infof("Syncing to block height %d from peer %v",
+	log.Infof("Syncing to block height %d from peer %s",
 		bestPeer.LastBlock(), bestPeer.Addr())
 
 	// When the current height is less than a known checkpoint we

@@ -599,9 +601,9 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) {
 		}
 	}
 
-	// If this is the sync peer and we're not current, get the headers
+	// If this is the sync peer or we're current, get the headers
 	// for the announced blocks and update the last announced block.
-	if lastBlock != -1 && imsg.peer == b.syncPeer /*&& !b.current()*/ {
+	if lastBlock != -1 && (imsg.peer == b.syncPeer || b.current()) {
 		// Make a locator starting from the latest known header we've
 		// processed.
 		locator := make(blockchain.BlockLocator, 0,

@@ -614,7 +616,7 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) {
 			locator = append(locator, knownLocator...)
 		}
 		// Get headers based on locator.
-		b.syncPeer.PushGetHeadersMsg(locator, &invVects[lastBlock].Hash)
+		imsg.peer.PushGetHeadersMsg(locator, &invVects[lastBlock].Hash)
 	}
 }

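Aside (illustration only, not part of this commit): the locator handed to PushGetHeadersMsg above is a standard Bitcoin block locator, i.e. a list of known block hashes that is dense near the tip and thins out exponentially toward genesis, so the remote peer can find the latest common block cheaply. A minimal sketch of building one from an in-memory slice of hashes; the helper name and input shape are assumptions made for illustration.

package sketch

import (
	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

// buildLocator is a hypothetical helper: given header hashes ordered from
// genesis to tip, it returns a blockchain.BlockLocator that lists recent
// hashes one by one and then doubles the step size between entries.
func buildLocator(hashes []*chainhash.Hash) blockchain.BlockLocator {
	locator := make(blockchain.BlockLocator, 0, 32)
	step := 1
	for i := len(hashes) - 1; i >= 0; i -= step {
		locator = append(locator, hashes[i])
		if len(locator) > 10 {
			step *= 2
		}
	}
	return locator
}
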
@@ -650,7 +652,7 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
 	receivedCheckpoint := false
 	var finalHash *chainhash.Hash
 	var finalHeight int32
-	for _, blockHeader := range msg.Headers {
+	for i, blockHeader := range msg.Headers {
 		blockHash := blockHeader.BlockHash()
 		finalHash = &blockHash
 

@@ -671,30 +673,11 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
 		prevNode := prevNodeEl.Value.(*headerNode)
 		prevHash := prevNode.header.BlockHash()
 		if prevHash.IsEqual(&blockHeader.PrevBlock) {
-			diff, err := b.calcNextRequiredDifficulty(
-				blockHeader.Timestamp)
+			err := b.checkHeaderSanity(blockHeader, maxTimestamp,
+				false)
 			if err != nil {
-				log.Warnf("Unable to calculate next difficulty"+
-					": %v -- disconnecting peer", err)
-				hmsg.peer.Disconnect()
-				return
-			}
-			stubBlock := btcutil.NewBlock(&wire.MsgBlock{
-				Header: *blockHeader,
-			})
-			err = blockchain.CheckProofOfWork(stubBlock,
-				blockchain.CompactToBig(diff))
-			if err != nil {
-				log.Warnf("Received header doesn't match "+
-					"required difficulty: %v -- "+
-					"disconnecting peer", err)
-				hmsg.peer.Disconnect()
-				return
-			}
-			// Ensure the block time is not too far in the future.
-			if blockHeader.Timestamp.After(maxTimestamp) {
-				log.Warnf("block timestamp of %v is too far in"+
-					" the future", blockHeader.Timestamp)
+				log.Warnf("Header doesn't pass sanity check: "+
+					"%s -- disconnecting peer", err)
 				hmsg.peer.Disconnect()
 				return
 			}

@@ -704,13 +687,16 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
 				uint32(node.height))
 			if err != nil {
 				log.Criticalf("Couldn't write block to "+
-					"database: %v", err)
+					"database: %s", err)
+				// Should we panic here?
 			}
 			err = b.server.putMaxBlockHeight(uint32(node.height))
 			if err != nil {
 				log.Criticalf("Couldn't write max block height"+
-					" to database: %v", err)
+					" to database: %s", err)
+				// Should we panic here?
 			}
+			hmsg.peer.UpdateLastBlockHeight(node.height)
 			e := b.headerList.PushBack(&node)
 			if b.startHeader == nil {
 				b.startHeader = e

@@ -723,30 +709,33 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
 			// If we got these headers from a peer that's not our
 			// sync peer, they might not be aligned correctly or
 			// even on the right chain. Just ignore the rest of the
-			// message.
-			if hmsg.peer != b.syncPeer {
+			// message. However, if we're current, this might be a
+			// reorg, in which case we'll either change our sync
+			// peer or disconnect the peer that sent us these
+			// bad headers.
+			if hmsg.peer != b.syncPeer && !b.current() {
 				return
 			}
 			// Check if this block is known. If so, we continue to
 			// the next one.
-			_, _, err := b.server.GetBlockByHash(
-				blockHeader.BlockHash())
+			_, _, err := b.server.GetBlockByHash(blockHash)
 			if err == nil {
 				continue
 			}
 			// Check if the previous block is known. If it is, this
 			// is probably a reorg based on the estimated latest
-			// block that matches between us and the sync peer as
+			// block that matches between us and the peer as
 			// derived from the block locator we sent to request
 			// these headers. Otherwise, the headers don't connect
 			// to anything we know and we should disconnect the
 			// peer.
-			_, backHeight, err := b.server.GetBlockByHash(
+			backHead, backHeight, err := b.server.GetBlockByHash(
 				blockHeader.PrevBlock)
 			if err != nil {
-				log.Errorf("Couldn't get block by hash from "+
-					"the database (%v) -- disconnecting "+
-					"peer %s", err, hmsg.peer.Addr())
+				log.Warnf("Received block header that does not"+
+					" properly connect to the chain from"+
+					" peer %s (%s) -- disconnecting",
+					hmsg.peer.Addr(), err)
 				hmsg.peer.Disconnect()
 				return
 			}

@@ -757,19 +746,90 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
 			prevCheckpoint := b.findPreviousHeaderCheckpoint(
 				prevNode.height)
 			if backHeight < uint32(prevCheckpoint.Height) {
-				log.Errorf("Attempt at a reorg earlier (%v) than a "+
-					"checkpoint (%v) past which we've already "+
+				log.Errorf("Attempt at a reorg earlier than a "+
+					"checkpoint past which we've already "+
 					"synchronized -- disconnecting peer "+
-					"%s", backHeight, prevCheckpoint.Height, hmsg.peer.Addr())
+					"%s", hmsg.peer.Addr())
 				hmsg.peer.Disconnect()
 				return
 			}
-			// TODO: Add real reorg handling here
-			log.Warnf("Received block header that does not "+
-				"properly connect to the chain from peer %s "+
-				"-- disconnecting", hmsg.peer.Addr())
-			hmsg.peer.Disconnect()
-			return
+			// Check the sanity of the new branch. If any of the
+			// blocks don't pass sanity checks, disconnect the peer.
+			// We also keep track of the work represented by these
+			// headers so we can compare it to the work in the known
+			// good chain.
+			b.reorgList.Init()
+			b.reorgList.PushBack(&headerNode{
+				header: &backHead,
+				height: int32(backHeight),
+			})
+			totalWork := big.NewInt(0)
+			for _, reorgHeader := range msg.Headers[i:] {
+				err = b.checkHeaderSanity(reorgHeader,
+					maxTimestamp, true)
+				if err != nil {
+					log.Warnf("Header doesn't pass sanity"+
+						" check: %s -- disconnecting "+
+						"peer", err)
+					hmsg.peer.Disconnect()
+					return
+				}
+				totalWork.Add(totalWork,
+					blockchain.CalcWork(reorgHeader.Bits))
+			}
+			log.Tracef("Sane reorg attempted. Total work from "+
+				"reorg chain: %v", totalWork)
+			// All the headers pass sanity checks. Now we calculate
+			// the total work for the known chain.
+			knownWork := big.NewInt(0)
+			// This should NEVER be nil because the most recent
+			// block is always pushed back by resetHeaderState
+			knownEl := b.headerList.Back()
+			var knownHead wire.BlockHeader
+			for j := uint32(prevNode.height); j > backHeight; j-- {
+				if knownEl != nil {
+					knownHead = *knownEl.Value.(*headerNode).header
+					knownEl = knownEl.Prev()
+				} else {
+					knownHead, _, err = b.server.GetBlockByHash(
+						knownHead.PrevBlock)
+					if err != nil {
+						log.Criticalf("Can't get block"+
+							"header for hash %s: "+
+							"%v",
+							knownHead.PrevBlock,
+							err)
+						// Should we panic here?
+					}
+				}
+				knownWork.Add(knownWork,
+					blockchain.CalcWork(knownHead.Bits))
+			}
+			log.Tracef("Total work from known chain: %v", knownWork)
+			// Compare the two work totals and reject the new chain
+			// if it doesn't have more work than the previously
+			// known chain.
+			if knownWork.Cmp(totalWork) >= 0 {
+				log.Warnf("Reorg attempt that does not have "+
+					"more work than known chain from peer "+
+					"%s -- disconnecting", hmsg.peer.Addr())
+				hmsg.peer.Disconnect()
+				return
+			}
+			// At this point, we have a valid reorg, so we roll
+			// back the existing chain and add the new block header.
+			// We also change the sync peer. Then we can continue
+			// with the rest of the headers in the message as if
+			// nothing has happened.
+			b.syncPeer = hmsg.peer
+			b.server.rollbackToHeight(backHeight)
+			b.server.putBlock(*blockHeader, backHeight+1)
+			b.server.putMaxBlockHeight(backHeight + 1)
+			b.resetHeaderState(&backHead, int32(backHeight))
+			b.headerList.PushBack(&headerNode{
+				header: blockHeader,
+				height: int32(backHeight + 1),
+			})
 		}
 
 		// Verify the header at the next checkpoint height matches.

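Aside (illustration only, not part of this commit): the reorg decision above reduces to a cumulative proof-of-work comparison. blockchain.CalcWork converts each header's compact Bits field into the expected work as a big.Int, and the candidate branch is accepted only if its summed work strictly exceeds that of the already-known branch. A self-contained sketch of that comparison; the helper and its slice inputs are hypothetical.

package sketch

import (
	"math/big"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/wire"
)

// newBranchHasMoreWork sums the work implied by each header's Bits field
// and reports whether the candidate branch carries strictly more work than
// the currently known branch (mirrors the knownWork/totalWork check above).
func newBranchHasMoreWork(known, candidate []wire.BlockHeader) bool {
	knownWork, candidateWork := big.NewInt(0), big.NewInt(0)
	for _, h := range known {
		knownWork.Add(knownWork, blockchain.CalcWork(h.Bits))
	}
	for _, h := range candidate {
		candidateWork.Add(candidateWork, blockchain.CalcWork(h.Bits))
	}
	return candidateWork.Cmp(knownWork) > 0
}
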
@@ -792,8 +852,13 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
 				"checkpoint at height %d/hash %s",
 				prevCheckpoint.Height,
 				prevCheckpoint.Hash)
-			b.server.rollbackToHeight(uint32(
+			_, err := b.server.rollbackToHeight(uint32(
 				prevCheckpoint.Height))
+			if err != nil {
+				log.Criticalf("Rollback failed: %s",
+					err)
+				// Should we panic here?
+			}
 			hmsg.peer.Disconnect()
 			return
 		}

@@ -829,16 +894,46 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
 		err := hmsg.peer.PushGetHeadersMsg(locator, &nextHash)
 		if err != nil {
 			log.Warnf("Failed to send getheaders message to "+
-				"peer %s: %v", hmsg.peer.Addr(), err)
+				"peer %s: %s", hmsg.peer.Addr(), err)
 			return
 		}
 	}
 }
 
+// checkHeaderSanity checks the PoW, and timestamp of a block header.
+func (b *blockManager) checkHeaderSanity(blockHeader *wire.BlockHeader,
+	maxTimestamp time.Time, reorgAttempt bool) error {
+	diff, err := b.calcNextRequiredDifficulty(
+		blockHeader.Timestamp, reorgAttempt)
+	if err != nil {
+		return err
+	}
+	stubBlock := btcutil.NewBlock(&wire.MsgBlock{
+		Header: *blockHeader,
+	})
+	err = blockchain.CheckProofOfWork(stubBlock,
+		blockchain.CompactToBig(diff))
+	if err != nil {
+		return err
+	}
+	// Ensure the block time is not too far in the future.
+	if blockHeader.Timestamp.After(maxTimestamp) {
+		return fmt.Errorf("block timestamp of %v is too far in the "+
+			"future", blockHeader.Timestamp)
+	}
+	return nil
+}
+
 // calcNextRequiredDifficulty calculates the required difficulty for the block
 // after the passed previous block node based on the difficulty retarget rules.
-func (b *blockManager) calcNextRequiredDifficulty(newBlockTime time.Time) (uint32, error) {
-	lastNodeEl := b.headerList.Back()
+func (b *blockManager) calcNextRequiredDifficulty(newBlockTime time.Time,
+	reorgAttempt bool) (uint32, error) {
+
+	hList := b.headerList
+	if reorgAttempt {
+		hList = b.reorgList
+	}
+
+	lastNodeEl := hList.Back()
 
 	// Genesis block.
 	if lastNodeEl == nil {

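Aside (illustration only, not part of this commit): the proof-of-work portion of checkHeaderSanity delegates to blockchain.CheckProofOfWork, which in essence treats the header hash as a big integer and requires it not to exceed the target derived from the compact difficulty bits. A rough, self-contained equivalent of that core comparison; the function name is hypothetical, and the real btcd check additionally validates the target against the chain's limits.

package sketch

import (
	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/wire"
)

// headerMeetsTarget sketches the heart of the PoW check: the block hash,
// interpreted as a number, must be less than or equal to the target
// encoded by the compact difficulty value in bits.
func headerMeetsTarget(header *wire.BlockHeader, bits uint32) bool {
	target := blockchain.CompactToBig(bits)
	hash := header.BlockHash()
	return blockchain.HashToBig(&hash).Cmp(target) <= 0
}
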
@@ -868,7 +963,7 @@ func (b *blockManager) calcNextRequiredDifficulty(newBlockTime time.Time) (uint32, error) {
 			// The block was mined within the desired timeframe, so
 			// return the difficulty for the last block which did
 			// not have the special minimum difficulty rule applied.
-			prevBits, err := b.findPrevTestNetDifficulty()
+			prevBits, err := b.findPrevTestNetDifficulty(hList)
 			if err != nil {
 				return 0, err
 			}

@@ -934,8 +1029,8 @@ func (b *blockManager) calcNextRequiredDifficulty(newBlockTime time.Time) (uint32, error) {
 
 // findPrevTestNetDifficulty returns the difficulty of the previous block which
 // did not have the special testnet minimum difficulty rule applied.
-func (b *blockManager) findPrevTestNetDifficulty() (uint32, error) {
-	startNodeEl := b.headerList.Back()
+func (b *blockManager) findPrevTestNetDifficulty(hList *list.List) (uint32, error) {
+	startNodeEl := hList.Back()
 
 	// Genesis block.
 	if startNodeEl == nil {

@@ -964,7 +1059,7 @@ func (b *blockManager) findPrevTestNetDifficulty() (uint32, error) {
 	} else {
 		node, _, err := b.server.GetBlockByHeight(uint32(iterHeight))
 		if err != nil {
-			log.Errorf("GetBlockByHeight: %v", err)
+			log.Errorf("GetBlockByHeight: %s", err)
 			return 0, err
 		}
 		iterNode = &node

@@ -3,6 +3,7 @@ package spvchain
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 	"time"
 
 	"github.com/btcsuite/btcd/blockchain"

@@ -62,8 +63,8 @@ func fetchDBVersion(tx walletdb.Tx) (uint32, error) {
 	bucket := tx.RootBucket().Bucket(spvBucketName)
 	verBytes := bucket.Get(dbVersionName)
 	if verBytes == nil {
-		str := "required version number not stored in database"
-		return 0, log.Error(str)
+		return 0, fmt.Errorf("required version number not stored in " +
+			"database")
 	}
 	version := binary.LittleEndian.Uint32(verBytes)
 	return version, nil

@@ -74,12 +75,7 @@ func putDBVersion(tx walletdb.Tx, version uint32) error {
 	bucket := tx.RootBucket().Bucket(spvBucketName)
 
 	verBytes := uint32ToBytes(version)
-	err := bucket.Put(dbVersionName, verBytes)
-	if err != nil {
-		str := "failed to store version: %v"
-		return log.Errorf(str, err)
-	}
-	return nil
+	return bucket.Put(dbVersionName, verBytes)
 }
 
 // putMaxBlockHeight stores the max block height to the database.

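Aside (illustration only, not part of this commit): the version and height values handled above are written through a uint32ToBytes helper and read back with binary.LittleEndian.Uint32, so the stored form is a fixed 4-byte little-endian integer. A sketch of that round trip; the helper body shown here is an assumption, only its name and the little-endian reads appear in the diff.

package sketch

import "encoding/binary"

// uint32ToBytes sketches the presumed encoding helper: a 4-byte
// little-endian value, matching the binary.LittleEndian.Uint32 reads
// used when fetching the database version and max block height.
func uint32ToBytes(n uint32) []byte {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, n)
	return buf
}

// bytesToUint32 is the inverse direction of the same round trip.
func bytesToUint32(b []byte) uint32 {
	return binary.LittleEndian.Uint32(b)
}
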
@@ -89,8 +85,7 @@ func putMaxBlockHeight(tx walletdb.Tx, maxBlockHeight uint32) error {
 	maxBlockHeightBytes := uint32ToBytes(maxBlockHeight)
 	err := bucket.Put(maxBlockHeightName, maxBlockHeightBytes)
 	if err != nil {
-		str := "failed to store max block height: %v"
-		return log.Errorf(str, err)
+		return fmt.Errorf("failed to store max block height: %s", err)
 	}
 	return nil
 }

@@ -113,14 +108,12 @@ func putBlock(tx walletdb.Tx, header wire.BlockHeader, height uint32) error {
 
 	err = bucket.Put(blockHash[:], buf.Bytes())
 	if err != nil {
-		str := "failed to store SPV block info: %v"
-		return log.Errorf(str, err)
+		return fmt.Errorf("failed to store SPV block info: %s", err)
 	}
 
 	err = bucket.Put(uint32ToBytes(height), blockHash[:])
 	if err != nil {
-		str := "failed to store block height info: %v"
-		return log.Errorf(str, err)
+		return fmt.Errorf("failed to store block height info: %s", err)
 	}
 
 	return nil

@@ -140,8 +133,7 @@ func putFilter(tx walletdb.Tx, blockHash chainhash.Hash, bucketName []byte,
 
 	err = bucket.Put(blockHash[:], buf.Bytes())
 	if err != nil {
-		str := "failed to store filter: %v"
-		return log.Errorf(str, err)
+		return fmt.Errorf("failed to store filter: %s", err)
 	}
 
 	return nil

@@ -170,8 +162,7 @@ func putHeader(tx walletdb.Tx, blockHash chainhash.Hash, bucketName []byte,
 
 	err := bucket.Put(blockHash[:], filterTip[:])
 	if err != nil {
-		str := "failed to store filter header: %v"
-		return log.Errorf(str, err)
+		return fmt.Errorf("failed to store filter header: %s", err)
 	}
 
 	return nil

@@ -222,19 +213,22 @@ func GetBlockByHash(tx walletdb.Tx, blockHash chainhash.Hash) (wire.BlockHeader,
 	bucket := tx.RootBucket().Bucket(spvBucketName).Bucket(blockHeaderBucketName)
 	blockBytes := bucket.Get(blockHash[:])
 	if len(blockBytes) == 0 {
-		str := "failed to retrieve block info for hash: %s"
-		return wire.BlockHeader{}, 0, log.Errorf(str, blockHash)
+		return wire.BlockHeader{}, 0,
+			fmt.Errorf("failed to retrieve block info for hash: %s",
+				blockHash)
 	}
 
 	buf := bytes.NewReader(blockBytes[:wire.MaxBlockHeaderPayload])
 	var header wire.BlockHeader
 	err := header.Deserialize(buf)
 	if err != nil {
-		str := "failed to deserialize block header for hash: %s"
-		return wire.BlockHeader{}, 0, log.Errorf(str, blockHash)
+		return wire.BlockHeader{}, 0,
+			fmt.Errorf("failed to deserialize block header for "+
+				"hash: %s", blockHash)
 	}
 
-	height := binary.LittleEndian.Uint32(blockBytes[wire.MaxBlockHeaderPayload : wire.MaxBlockHeaderPayload+4])
+	height := binary.LittleEndian.Uint32(
+		blockBytes[wire.MaxBlockHeaderPayload : wire.MaxBlockHeaderPayload+4])
 
 	return header, height, nil
 }

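Aside (illustration only, not part of this commit): GetBlockByHash above implies the record layout used in the block header bucket: a serialized wire.BlockHeader occupying wire.MaxBlockHeaderPayload bytes, followed immediately by the 4-byte little-endian block height. A sketch of producing a value in that layout; the helper name is hypothetical.

package sketch

import (
	"bytes"
	"encoding/binary"

	"github.com/btcsuite/btcd/wire"
)

// encodeBlockRecord sketches the value format GetBlockByHash expects:
// the serialized header first, then the height in little-endian order.
func encodeBlockRecord(header *wire.BlockHeader, height uint32) ([]byte, error) {
	var buf bytes.Buffer
	if err := header.Serialize(&buf); err != nil {
		return nil, err
	}
	heightBytes := make([]byte, 4)
	binary.LittleEndian.PutUint32(heightBytes, height)
	return append(buf.Bytes(), heightBytes...), nil
}
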
@@ -246,8 +240,7 @@ func GetBlockHashByHeight(tx walletdb.Tx, height uint32) (chainhash.Hash,
 	var hash chainhash.Hash
 	hashBytes := bucket.Get(uint32ToBytes(height))
 	if hashBytes == nil {
-		str := "no block hash for height %v"
-		return hash, log.Errorf(str, height)
+		return hash, fmt.Errorf("no block hash for height %d", height)
 	}
 	hash.SetBytes(hashBytes)
 	return hash, nil

@@ -283,8 +276,8 @@ func LatestBlock(tx walletdb.Tx) (wire.BlockHeader, uint32, error) {
 
 	maxBlockHeightBytes := bucket.Get(maxBlockHeightName)
 	if maxBlockHeightBytes == nil {
-		str := "no max block height stored"
-		return wire.BlockHeader{}, 0, log.Error(str)
+		return wire.BlockHeader{}, 0,
+			fmt.Errorf("no max block height stored")
 	}
 
 	maxBlockHeight := binary.LittleEndian.Uint32(maxBlockHeightBytes)

@@ -293,8 +286,8 @@ func LatestBlock(tx walletdb.Tx) (wire.BlockHeader, uint32, error) {
 		return wire.BlockHeader{}, 0, err
 	}
 	if height != maxBlockHeight {
-		str := "max block height inconsistent"
-		return wire.BlockHeader{}, 0, log.Error(str)
+		return wire.BlockHeader{}, 0,
+			fmt.Errorf("max block height inconsistent")
 	}
 	return header, height, nil
 }

@@ -341,47 +334,47 @@ func createSPVNS(namespace walletdb.Namespace, params *chaincfg.Params) error {
 		rootBucket := tx.RootBucket()
 		spvBucket, err := rootBucket.CreateBucketIfNotExists(spvBucketName)
 		if err != nil {
-			str := "failed to create main bucket: %v"
-			return log.Errorf(str, err)
+			return fmt.Errorf("failed to create main bucket: %s",
+				err)
 		}
 
 		_, err = spvBucket.CreateBucketIfNotExists(blockHeaderBucketName)
 		if err != nil {
-			str := "failed to create block header bucket: %v"
-			return log.Errorf(str, err)
+			return fmt.Errorf("failed to create block header "+
+				"bucket: %s", err)
 		}
 
 		_, err = spvBucket.CreateBucketIfNotExists(basicFilterBucketName)
 		if err != nil {
-			str := "failed to create basic filter bucket: %v"
-			return log.Errorf(str, err)
+			return fmt.Errorf("failed to create basic filter "+
				"bucket: %s", err)
 		}
 
 		_, err = spvBucket.CreateBucketIfNotExists(basicHeaderBucketName)
 		if err != nil {
-			str := "failed to create basic header bucket: %v"
-			return log.Errorf(str, err)
+			return fmt.Errorf("failed to create basic header "+
+				"bucket: %s", err)
 		}
 
 		_, err = spvBucket.CreateBucketIfNotExists(extFilterBucketName)
 		if err != nil {
-			str := "failed to create extended filter bucket: %v"
-			return log.Errorf(str, err)
+			return fmt.Errorf("failed to create extended filter "+
+				"bucket: %s", err)
 		}
 
 		_, err = spvBucket.CreateBucketIfNotExists(extHeaderBucketName)
 		if err != nil {
-			str := "failed to create extended header bucket: %v"
-			return log.Errorf(str, err)
+			return fmt.Errorf("failed to create extended header "+
+				"bucket: %s", err)
 		}
 
 		createDate := spvBucket.Get(dbCreateDateName)
 		if createDate != nil {
-			log.Infof("Wallet SPV namespace already created.")
+			log.Info("Wallet SPV namespace already created.")
 			return nil
 		}
 
-		log.Infof("Creating wallet SPV namespace.")
+		log.Info("Creating wallet SPV namespace.")
 
 		basicFilter, err := buildBasicFilter(params.GenesisBlock)
 		if err != nil {

@@ -437,15 +430,14 @@ func createSPVNS(namespace walletdb.Namespace, params *chaincfg.Params) error {
 		err = spvBucket.Put(dbCreateDateName,
 			uint64ToBytes(uint64(time.Now().Unix())))
 		if err != nil {
-			str := "failed to store database creation time: %v"
-			return log.Errorf(str, err)
+			return fmt.Errorf("failed to store database creation "+
+				"time: %s", err)
 		}
 
 		return nil
 	})
 	if err != nil {
-		str := "failed to update database: %v"
-		return log.Errorf(str, err)
+		return fmt.Errorf("failed to update database: %s", err)
 	}
 
 	return nil

@@ -250,7 +250,7 @@ func (sp *serverPeer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) {
 // OnBlock is invoked when a peer receives a block bitcoin message. It
 // blocks until the bitcoin block has been fully processed.
 func (sp *serverPeer) OnBlock(_ *peer.Peer, msg *wire.MsgBlock, buf []byte) {
-	log.Tracef("got block %v", msg.BlockHash())
+	log.Tracef("got block %s", msg.BlockHash())
 	// Convert the raw MsgBlock to a btcutil.Block which provides some
 	// convenience methods and things such as hash caching.
 	block := btcutil.NewBlockFromBlockAndBytes(msg, buf)

@@ -283,7 +283,7 @@ func (sp *serverPeer) OnInv(p *peer.Peer, msg *wire.MsgInv) {
 	newInv := wire.NewMsgInvSizeHint(uint(len(msg.InvList)))
 	for _, invVect := range msg.InvList {
 		if invVect.Type == wire.InvTypeTx {
-			log.Tracef("Ignoring tx %v in inv from %v -- "+
+			log.Tracef("Ignoring tx %s in inv from %v -- "+
 				"SPV mode", invVect.Hash, sp)
 			if sp.ProtocolVersion() >= wire.BIP0037Version {
 				log.Infof("Peer %v is announcing "+

@@ -295,7 +295,7 @@ func (sp *serverPeer) OnInv(p *peer.Peer, msg *wire.MsgInv) {
 		}
 		err := newInv.AddInvVect(invVect)
 		if err != nil {
-			log.Errorf("Failed to add inventory vector: %v", err)
+			log.Errorf("Failed to add inventory vector: %s", err)
 			break
 		}
 	}

@@ -882,7 +882,7 @@ func (s *ChainService) handleAddPeerMsg(state *peerState, sp *serverPeer) bool {
 	// Disconnect banned peers.
 	host, _, err := net.SplitHostPort(sp.Addr())
 	if err != nil {
-		log.Debugf("can't split hostport %v", err)
+		log.Debugf("can't split host/port: %s", err)
 		sp.Disconnect()
 		return false
 	}

@@ -962,7 +962,7 @@ func (s *ChainService) handleDonePeerMsg(state *peerState, sp *serverPeer) {
 func (s *ChainService) handleBanPeerMsg(state *peerState, sp *serverPeer) {
 	host, _, err := net.SplitHostPort(sp.Addr())
 	if err != nil {
-		log.Debugf("can't split ban peer %s %v", sp.Addr(), err)
+		log.Debugf("can't split ban peer %s: %s", sp.Addr(), err)
 		return
 	}
 	log.Infof("Banned peer %s for %v", host, BanDuration)

@@ -1096,7 +1096,7 @@ func (s *ChainService) outboundPeerConnected(c *connmgr.ConnReq, conn net.Conn) {
 	sp := newServerPeer(s, c.Permanent)
 	p, err := peer.NewOutboundPeer(newPeerConfig(sp), c.Addr.String())
 	if err != nil {
-		log.Debugf("Cannot create outbound peer %s: %v", c.Addr, err)
+		log.Debugf("Cannot create outbound peer %s: %s", c.Addr, err)
 		s.connManager.Disconnect(c.ID())
 	}
 	sp.Peer = p

@@ -7,11 +7,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/aakselrod/btctestlog"
 	"github.com/btcsuite/btcd/chaincfg"
 	"github.com/btcsuite/btcd/rpctest"
 	"github.com/btcsuite/btcd/wire"
-	"github.com/btcsuite/btclog"
 	"github.com/btcsuite/btcwallet/spvsvc/spvchain"
 	"github.com/btcsuite/btcwallet/waddrmgr"
 	"github.com/btcsuite/btcwallet/walletdb"

@@ -22,58 +20,58 @@ func TestSetup(t *testing.T) {
 	// Create a btcd SimNet node and generate 500 blocks
 	h1, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
 	if err != nil {
-		t.Fatalf("Couldn't create harness: %v", err)
+		t.Fatalf("Couldn't create harness: %s", err)
 	}
 	defer h1.TearDown()
 	err = h1.SetUp(false, 0)
 	if err != nil {
-		t.Fatalf("Couldn't set up harness: %v", err)
+		t.Fatalf("Couldn't set up harness: %s", err)
 	}
 	_, err = h1.Node.Generate(500)
 	if err != nil {
-		t.Fatalf("Couldn't generate blocks: %v", err)
+		t.Fatalf("Couldn't generate blocks: %s", err)
 	}
 
 	// Create a second btcd SimNet node
 	h2, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
 	if err != nil {
-		t.Fatalf("Couldn't create harness: %v", err)
+		t.Fatalf("Couldn't create harness: %s", err)
 	}
 	defer h2.TearDown()
 	err = h2.SetUp(false, 0)
 	if err != nil {
-		t.Fatalf("Couldn't set up harness: %v", err)
+		t.Fatalf("Couldn't set up harness: %s", err)
 	}
 
 	// Create a third btcd SimNet node and generate 900 blocks
 	h3, err := rpctest.New(&chaincfg.SimNetParams, nil, nil)
 	if err != nil {
-		t.Fatalf("Couldn't create harness: %v", err)
+		t.Fatalf("Couldn't create harness: %s", err)
 	}
 	defer h3.TearDown()
 	err = h3.SetUp(false, 0)
 	if err != nil {
-		t.Fatalf("Couldn't set up harness: %v", err)
+		t.Fatalf("Couldn't set up harness: %s", err)
 	}
 	_, err = h3.Node.Generate(900)
 	if err != nil {
-		t.Fatalf("Couldn't generate blocks: %v", err)
+		t.Fatalf("Couldn't generate blocks: %s", err)
 	}
 
 	// Connect, sync, and disconnect h1 and h2
 	err = csd([]*rpctest.Harness{h1, h2})
 	if err != nil {
-		t.Fatalf("Couldn't connect/sync/disconnect h1 and h2: %v", err)
+		t.Fatalf("Couldn't connect/sync/disconnect h1 and h2: %s", err)
 	}
 
 	// Generate 300 blocks on the first node and 350 on the second
 	_, err = h1.Node.Generate(300)
 	if err != nil {
-		t.Fatalf("Couldn't generate blocks: %v", err)
+		t.Fatalf("Couldn't generate blocks: %s", err)
 	}
 	_, err = h2.Node.Generate(350)
 	if err != nil {
-		t.Fatalf("Couldn't generate blocks: %v", err)
+		t.Fatalf("Couldn't generate blocks: %s", err)
 	}
 
 	// Now we have a node with 800 blocks (h1), 850 blocks (h2), and

@@ -92,7 +90,7 @@ func TestSetup(t *testing.T) {
 	for _, height := range []int64{111, 333, 555, 777} {
 		hash, err := h1.Node.GetBlockHash(height)
 		if err != nil {
-			t.Fatalf("Couldn't get block hash for height %v: %v",
+			t.Fatalf("Couldn't get block hash for height %d: %s",
 				height, err)
 		}
 		modParams.Checkpoints = append(modParams.Checkpoints,

@@ -106,17 +104,17 @@ func TestSetup(t *testing.T) {
 	// SPV chain namespace, and create a configuration for the ChainService.
 	tempDir, err := ioutil.TempDir("", "spvchain")
 	if err != nil {
-		t.Fatalf("Failed to create temporary directory: %v", err)
+		t.Fatalf("Failed to create temporary directory: %s", err)
 	}
 	defer os.RemoveAll(tempDir)
 	db, err := walletdb.Create("bdb", tempDir+"/weks.db")
 	defer db.Close()
 	if err != nil {
-		t.Fatalf("Error opening DB: %v\n", err)
+		t.Fatalf("Error opening DB: %s\n", err)
 	}
 	ns, err := db.Namespace([]byte("weks"))
 	if err != nil {
-		t.Fatalf("Error geting namespace: %v\n", err)
+		t.Fatalf("Error geting namespace: %s\n", err)
 	}
 	config := spvchain.Config{
 		DataDir: tempDir,

@@ -132,16 +130,16 @@ func TestSetup(t *testing.T) {
 	spvchain.Services = 0
 	spvchain.MaxPeers = 3
 	spvchain.RequiredServices = wire.SFNodeNetwork
-	logger, err := btctestlog.NewTestLogger(t)
+	/*logger, err := btctestlog.NewTestLogger(t)
 	if err != nil {
-		t.Fatalf("Could not set up logger: %v", err)
+		t.Fatalf("Could not set up logger: %s", err)
 	}
 	chainLogger := btclog.NewSubsystemLogger(logger, "CHAIN: ")
-	chainLogger.SetLevel(btclog.TraceLvl)
-	spvchain.UseLogger(chainLogger)
+	chainLogger.SetLevel(btclog.InfoLvl)
+	spvchain.UseLogger(chainLogger) //*/
 	svc, err := spvchain.NewChainService(config)
 	if err != nil {
-		t.Fatalf("Error creating ChainService: %v", err)
+		t.Fatalf("Error creating ChainService: %s", err)
 	}
 	svc.Start()
 	defer svc.Stop()

@@ -149,7 +147,7 @@ func TestSetup(t *testing.T) {
 	// Make sure the client synchronizes with the correct node
 	err = waitForSync(t, svc, h1, time.Second, 30*time.Second)
 	if err != nil {
-		t.Fatalf("Couldn't sync ChainService: %v", err)
+		t.Fatalf("Couldn't sync ChainService: %s", err)
 	}
 
 	// Generate 125 blocks on h1 to make sure it reorgs the other nodes.

@@ -157,13 +155,13 @@ func TestSetup(t *testing.T) {
 	h1.Node.Generate(125)
 	err = waitForSync(t, svc, h1, time.Second, 30*time.Second)
 	if err != nil {
-		t.Fatalf("Couldn't sync ChainService: %v", err)
+		t.Fatalf("Couldn't sync ChainService: %s", err)
 	}
 
 	// Connect/sync/disconnect h2 to make it reorg to the h1 chain.
 	err = csd([]*rpctest.Harness{h1, h2})
 	if err != nil {
-		t.Fatalf("Couldn't sync h2 to h1: %v", err)
+		t.Fatalf("Couldn't sync h2 to h1: %s", err)
 	}
 
 	// Generate 3 blocks on h1, one at a time, to make sure the

@@ -172,17 +170,24 @@ func TestSetup(t *testing.T) {
 		h1.Node.Generate(1)
 		err = waitForSync(t, svc, h1, time.Second, 30*time.Second)
 		if err != nil {
-			t.Fatalf("Couldn't sync ChainService: %v", err)
+			t.Fatalf("Couldn't sync ChainService: %s", err)
 		}
 	}
 
-	// Generate 5 blocks on h2.
+	// Generate 5 blocks on h2 and wait for ChainService to sync to the
+	// newly-best chain on h2.
 	h2.Node.Generate(5)
-
-	// Wait for ChainService to sync to the newly-best chain on h12
 	err = waitForSync(t, svc, h2, time.Second, 30*time.Second)
 	if err != nil {
-		t.Fatalf("Couldn't sync ChainService: %v", err)
+		t.Fatalf("Couldn't sync ChainService: %s", err)
 	}
+
+	// Generate 7 blocks on h1 and wait for ChainService to sync to the
+	// newly-best chain on h1.
+	h1.Node.Generate(7)
+	err = waitForSync(t, svc, h1, time.Second, 30*time.Second)
+	if err != nil {
+		t.Fatalf("Couldn't sync ChainService: %s", err)
+	}
 }

@@ -218,7 +223,7 @@ func waitForSync(t *testing.T, svc *spvchain.ChainService,
 	if err != nil {
 		return err
 	}
-	t.Logf("Syncing to %v (%v)", knownBestHeight, knownBestHash)
+	//t.Logf("Syncing to %d (%s)", knownBestHeight, knownBestHash)
 	var haveBest *waddrmgr.BlockStamp
 	haveBest, err = svc.BestSnapshot()
 	if err != nil {

@@ -237,9 +242,9 @@ func waitForSync(t *testing.T, svc *spvchain.ChainService,
 		haveBest, err = svc.BestSnapshot()
 		if err != nil {
 			return fmt.Errorf("Couldn't get best snapshot from "+
-				"ChainService: %v", err)
+				"ChainService: %s", err)
 		}
-		t.Logf("Synced to %v (%v)", haveBest.Height, haveBest.Hash)
+		//t.Logf("Synced to %d (%s)", haveBest.Height, haveBest.Hash)
 	}
 	// Check if we're current
 	if !svc.IsCurrent() {

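Aside (illustration only, not part of this commit): the waitForSync loop above is a plain poll of svc.BestSnapshot against the harness's best block. A self-contained sketch of that polling pattern; the helper and its arguments are assumptions, only BestSnapshot and the BlockStamp fields come from the code above.

package sketch

import (
	"fmt"
	"time"

	"github.com/btcsuite/btcwallet/spvsvc/spvchain"
)

// waitForHeight polls the ChainService until its best snapshot reaches the
// wanted height or the timeout elapses (hypothetical helper).
func waitForHeight(svc *spvchain.ChainService, want int32,
	checkInterval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		best, err := svc.BestSnapshot()
		if err != nil {
			return err
		}
		if best.Height >= want {
			return nil
		}
		time.Sleep(checkInterval)
	}
	return fmt.Errorf("timed out waiting for height %d", want)
}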