// NOTE: THIS API IS UNSTABLE RIGHT NOW AND WILL GO MOSTLY PRIVATE SOON.

package spvchain

import (
	"container/list"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
)

const (
	// minInFlightBlocks is the minimum number of blocks that should be
	// in the request queue for headers-first mode before requesting
	// more.
	minInFlightBlocks = 10

	// blockDbNamePrefix is the prefix for the block database name. The
	// database type is appended to this value to form the full block
	// database name.
	blockDbNamePrefix = "blocks"

	// maxRequestedBlocks is the maximum number of requested block
	// hashes to store in memory.
	maxRequestedBlocks = wire.MaxInvPerMsg

	// maxTimeOffset is the maximum duration a block time is allowed to be
	// ahead of the current time. This is currently 2 hours.
	maxTimeOffset = 2 * time.Hour
)

// TODO: Redo this using query API.
var (
	// WaitForMoreCFHeaders is a configurable time to wait for CFHeaders
	// messages from peers. It defaults to 3 seconds but can be increased
	// for higher security and decreased for faster synchronization.
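	// For example, a caller that wants more corroboration from peers
	// before committing filter headers could raise it before starting the
	// service (a tuning sketch, not a recommendation):
	//
	//	WaitForMoreCFHeaders = 10 * time.Second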
	WaitForMoreCFHeaders = 3 * time.Second
)

// zeroHash is the zero value hash (all zeros). It is defined as a convenience.
var zeroHash chainhash.Hash

// newPeerMsg signifies a newly connected peer to the block handler.
type newPeerMsg struct {
	peer *serverPeer
}

// blockMsg packages a bitcoin block message and the peer it came from together
// so the block handler has access to that information.
type blockMsg struct {
	block *btcutil.Block
	peer  *serverPeer
}

// invMsg packages a bitcoin inv message and the peer it came from together
// so the block handler has access to that information.
type invMsg struct {
	inv  *wire.MsgInv
	peer *serverPeer
}

// headersMsg packages a bitcoin headers message and the peer it came from
// together so the block handler has access to that information.
type headersMsg struct {
	headers *wire.MsgHeaders
	peer    *serverPeer
}

// cfheadersMsg packages a bitcoin cfheaders message and the peer it came from
// together so the block handler has access to that information.
type cfheadersMsg struct {
	cfheaders *wire.MsgCFHeaders
	peer      *serverPeer
}

// processCFHeadersMsg tells the block manager to try to see if there are
// enough samples of cfheaders messages to process the committed filter header
// chain. This is kind of a hack until these get soft-forked in, but we do
// verification to avoid getting bamboozled by malicious nodes.
type processCFHeadersMsg struct {
	earliestNode *headerNode
	stopHash     chainhash.Hash
	extended     bool
}

// donePeerMsg signifies a newly disconnected peer to the block handler.
type donePeerMsg struct {
	peer *serverPeer
}

// txMsg packages a bitcoin tx message and the peer it came from together
// so the block handler has access to that information.
type txMsg struct {
	tx   *btcutil.Tx
	peer *serverPeer
}

// isCurrentMsg is a message type to be sent across the message channel for
// requesting whether or not the block manager believes it is synced with
// the currently connected peers.
type isCurrentMsg struct {
	reply chan bool
}

// headerNode is used as a node in a list of headers that are linked together
// between checkpoints.
type headerNode struct {
	height int32
	header *wire.BlockHeader
}

// blockManager provides a concurrency safe block manager for handling all
// incoming blocks.
type blockManager struct {
	server          *ChainService
	started         int32
	shutdown        int32
	requestedBlocks map[chainhash.Hash]struct{}
	progressLogger  *blockProgressLogger
	syncPeer        *serverPeer
	syncPeerMutex   sync.Mutex

	// Channel for messages that come from peers
	peerChan chan interface{}

	// Channel for messages that come from internal commands
	intChan chan interface{}

	wg   sync.WaitGroup
	quit chan struct{}

	headerList     *list.List
	reorgList      *list.List
	startHeader    *list.Element
	nextCheckpoint *chaincfg.Checkpoint
	lastRequested  chainhash.Hash
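
	// basicHeaders and extendedHeaders map a block hash to the filter
	// header hashes advertised for that block, and each advertised filter
	// header hash to the peers that sent it.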
	basicHeaders            map[chainhash.Hash]map[chainhash.Hash][]*serverPeer
	lastBasicCFHeaderHeight int32
	numBasicCFHeadersMsgs   int32
	extendedHeaders         map[chainhash.Hash]map[chainhash.Hash][]*serverPeer
	lastExtCFHeaderHeight   int32
	numExtCFHeadersMsgs     int32
	mapMutex                sync.Mutex

	minRetargetTimespan int64 // target timespan / adjustment factor
	maxRetargetTimespan int64 // target timespan * adjustment factor
	blocksPerRetarget   int32 // target timespan / target time per block
}

// newBlockManager returns a new bitcoin block manager.
// Use Start to begin processing asynchronous block and inv updates.
func newBlockManager(s *ChainService) (*blockManager, error) {
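	// Derive the difficulty retarget parameters from the chain params. For
	// the main network parameters these work out to 2016 blocks per
	// retarget, with the measured timespan clamped to within a factor of 4
	// of two weeks.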
	targetTimespan := int64(s.chainParams.TargetTimespan / time.Second)
	targetTimePerBlock := int64(s.chainParams.TargetTimePerBlock / time.Second)
	adjustmentFactor := s.chainParams.RetargetAdjustmentFactor

	bm := blockManager{
		server:              s,
		requestedBlocks:     make(map[chainhash.Hash]struct{}),
		progressLogger:      newBlockProgressLogger("Processed", log),
		peerChan:            make(chan interface{}, MaxPeers*3),
		intChan:             make(chan interface{}, 1),
		headerList:          list.New(),
		reorgList:           list.New(),
		quit:                make(chan struct{}),
		blocksPerRetarget:   int32(targetTimespan / targetTimePerBlock),
		minRetargetTimespan: targetTimespan / adjustmentFactor,
		maxRetargetTimespan: targetTimespan * adjustmentFactor,
		basicHeaders: make(
			map[chainhash.Hash]map[chainhash.Hash][]*serverPeer,
		),
		extendedHeaders: make(
			map[chainhash.Hash]map[chainhash.Hash][]*serverPeer,
		),
	}

	// Initialize the next checkpoint based on the current height.
	header, height, err := s.LatestBlock()
	if err != nil {
		return nil, err
	}
	bm.nextCheckpoint = bm.findNextHeaderCheckpoint(int32(height))
	bm.resetHeaderState(&header, int32(height))

	return &bm, nil
}

// Start begins the core block handler which processes block and inv messages.
func (b *blockManager) Start() {
	// Already started?
	if atomic.AddInt32(&b.started, 1) != 1 {
		return
	}

	log.Trace("Starting block manager")
	b.wg.Add(1)
	go b.blockHandler()
}

// Stop gracefully shuts down the block manager by stopping all asynchronous
// handlers and waiting for them to finish.
func (b *blockManager) Stop() error {
	if atomic.AddInt32(&b.shutdown, 1) != 1 {
		log.Warnf("Block manager is already in the process of " +
			"shutting down")
		return nil
	}

	log.Infof("Block manager shutting down")
	close(b.quit)
	b.wg.Wait()
	return nil
}

// NewPeer informs the block manager of a newly active peer.
func (b *blockManager) NewPeer(sp *serverPeer) {
	// Ignore if we are shutting down.
	if atomic.LoadInt32(&b.shutdown) != 0 {
		return
	}
	b.peerChan <- &newPeerMsg{peer: sp}
}

// handleNewPeerMsg deals with new peers that have signalled they may
// be considered as a sync peer (they have already successfully negotiated). It
// also starts syncing if needed. It is invoked from the syncHandler goroutine.
func (b *blockManager) handleNewPeerMsg(peers *list.List, sp *serverPeer) {
	// Ignore if in the process of shutting down.
	if atomic.LoadInt32(&b.shutdown) != 0 {
		return
	}

	log.Infof("New valid peer %s (%s)", sp, sp.UserAgent())

	// Ignore the peer if it's not a sync candidate.
	if !b.isSyncCandidate(sp) {
		return
	}

	// Add the peer as a candidate to sync from.
	peers.PushBack(sp)

	// Start syncing by choosing the best candidate if needed.
	b.startSync(peers)
}

// DonePeer informs the block manager that a peer has disconnected.
func (b *blockManager) DonePeer(sp *serverPeer) {
	// Ignore if we are shutting down.
	if atomic.LoadInt32(&b.shutdown) != 0 {
		return
	}

	b.peerChan <- &donePeerMsg{peer: sp}
}

// handleDonePeerMsg deals with peers that have signalled they are done. It
// removes the peer as a candidate for syncing and in the case where it was
// the current sync peer, attempts to select a new best peer to sync from. It
// is invoked from the syncHandler goroutine.
func (b *blockManager) handleDonePeerMsg(peers *list.List, sp *serverPeer) {
	// Remove the peer from the list of candidate peers.
	for e := peers.Front(); e != nil; e = e.Next() {
		if e.Value == sp {
			peers.Remove(e)
			break
		}
	}

	log.Infof("Lost peer %s", sp)

	// Attempt to find a new peer to sync from if the quitting peer is the
	// sync peer. Also, reset the header state.
	if b.syncPeer != nil && b.syncPeer == sp {
		b.syncPeerMutex.Lock()
		b.syncPeer = nil
		b.syncPeerMutex.Unlock()
		header, height, err := b.server.LatestBlock()
		if err != nil {
			return
		}
		b.resetHeaderState(&header, int32(height))
		b.startSync(peers)
	}
}

// blockHandler is the main handler for the block manager. It must be run
// as a goroutine. It processes block and inv messages in a separate goroutine
// from the peer handlers so the block (MsgBlock) messages are handled by a
// single thread without needing to lock memory data structures. This is
// important because the block manager controls which blocks are needed and how
// the fetching should proceed.
func (b *blockManager) blockHandler() {
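	// candidatePeers tracks the peers eligible to be chosen as the sync
	// peer. It is only ever touched from this goroutine, so it needs no
	// locking.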
	candidatePeers := list.New()
out:
	for {
		// Check internal messages channel first and continue if there's
		// nothing to process.
		select {
		case m := <-b.intChan:
			switch msg := m.(type) {
			case *processCFHeadersMsg:
				b.handleProcessCFHeadersMsg(msg)

			default:
				log.Warnf("Invalid message type in block "+
					"handler: %T", msg)
			}
		default:
		}

		// Now check peer messages and quit channels.
		select {
		case m := <-b.peerChan:
			switch msg := m.(type) {
			case *newPeerMsg:
				b.handleNewPeerMsg(candidatePeers, msg.peer)

			case *invMsg:
				b.handleInvMsg(msg)

			case *headersMsg:
				b.handleHeadersMsg(msg)

			case *cfheadersMsg:
				b.handleCFHeadersMsg(msg)

			case *donePeerMsg:
				b.handleDonePeerMsg(candidatePeers, msg.peer)

			case isCurrentMsg:
				msg.reply <- b.current()

			default:
				log.Warnf("Invalid message type in block "+
					"handler: %T", msg)
			}

		case <-b.quit:
			break out
		}
	}

	b.wg.Done()
	log.Trace("Block handler done")
}

// SyncPeer returns the current sync peer.
func (b *blockManager) SyncPeer() *serverPeer {
	b.syncPeerMutex.Lock()
	defer b.syncPeerMutex.Unlock()
	return b.syncPeer
}

// isSyncCandidate returns whether or not the peer is a candidate to consider
// syncing from.
func (b *blockManager) isSyncCandidate(sp *serverPeer) bool {
	// The peer is not a candidate for sync if it's not a full node.
	return sp.Services()&wire.SFNodeNetwork == wire.SFNodeNetwork
}

// findNextHeaderCheckpoint returns the next checkpoint after the passed height.
// It returns nil when there is not one either because the height is already
// later than the final checkpoint or there are none for the current network.
func (b *blockManager) findNextHeaderCheckpoint(height int32) *chaincfg.Checkpoint {
	// There is no next checkpoint if there are none for this current
	// network.
	checkpoints := b.server.chainParams.Checkpoints
	if len(checkpoints) == 0 {
		return nil
	}

	// There is no next checkpoint if the height is already after the final
	// checkpoint.
	finalCheckpoint := &checkpoints[len(checkpoints)-1]
	if height >= finalCheckpoint.Height {
		return nil
	}

	// Find the next checkpoint.
	nextCheckpoint := finalCheckpoint
	for i := len(checkpoints) - 2; i >= 0; i-- {
		if height >= checkpoints[i].Height {
			break
		}
		nextCheckpoint = &checkpoints[i]
	}
	return nextCheckpoint
}

// findPreviousHeaderCheckpoint returns the last checkpoint before the passed
// height. It returns a checkpoint matching the genesis block when the height
// is earlier than the first checkpoint or there are no checkpoints for the
// current network. This is used for resetting state when a malicious peer
// sends us headers that don't lead up to a known checkpoint.
func (b *blockManager) findPreviousHeaderCheckpoint(height int32) *chaincfg.Checkpoint {
	// Start with the genesis block - earliest checkpoint to which our
	// code will want to reset.
	prevCheckpoint := &chaincfg.Checkpoint{
		Height: 0,
		Hash:   b.server.chainParams.GenesisHash,
	}

	// Find the latest checkpoint lower than height or return genesis block
	// if there are none.
	checkpoints := b.server.chainParams.Checkpoints
	for i := 0; i < len(checkpoints); i++ {
		if height <= checkpoints[i].Height {
			break
		}
		prevCheckpoint = &checkpoints[i]
	}
	return prevCheckpoint
}

// resetHeaderState sets the headers-first mode state to values appropriate for
// syncing from a new peer.
func (b *blockManager) resetHeaderState(newestHeader *wire.BlockHeader,
	newestHeight int32) {
	b.headerList.Init()
	b.startHeader = nil
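
	// Clear the per-block cfheaders tracking maps as well, since they are
	// keyed by block headers that are no longer in the in-memory list.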
	b.mapMutex.Lock()
	b.basicHeaders = make(
		map[chainhash.Hash]map[chainhash.Hash][]*serverPeer,
	)
	b.extendedHeaders = make(
		map[chainhash.Hash]map[chainhash.Hash][]*serverPeer,
	)
	b.mapMutex.Unlock()

	// Add an entry for the latest known block into the header pool.
	// This allows the next downloaded header to prove it links to the chain
	// properly.
	node := headerNode{header: newestHeader, height: newestHeight}
	b.headerList.PushBack(&node)
	b.mapMutex.Lock()
	b.basicHeaders[newestHeader.BlockHash()] = make(
		map[chainhash.Hash][]*serverPeer,
	)
	b.extendedHeaders[newestHeader.BlockHash()] = make(
		map[chainhash.Hash][]*serverPeer,
	)
	b.mapMutex.Unlock()
}

// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from. When syncing is already running, it
// simply returns. It also examines the candidates for any which are no longer
// candidates and removes them as needed.
func (b *blockManager) startSync(peers *list.List) {
	// Return now if we're already syncing.
	if b.syncPeer != nil {
		return
	}

	best, err := b.server.BestSnapshot()
	if err != nil {
		log.Errorf("Failed to get hash and height for the "+
			"latest block: %s", err)
		return
	}
	var bestPeer *serverPeer
	var enext *list.Element
	for e := peers.Front(); e != nil; e = enext {
		enext = e.Next()
		sp := e.Value.(*serverPeer)

		// Remove sync candidate peers that are no longer candidates due
		// to passing their latest known block. NOTE: The < is
		// intentional as opposed to <=. While technically the peer
		// doesn't have a later block when it's equal, it will likely
		// have one soon so it is a reasonable choice. It also allows
		// the case where both are at 0 such as during regression test.
		if sp.LastBlock() < best.Height {
			peers.Remove(e)
			continue
		}

		// TODO: Use a better algorithm to choose the best peer.
		// For now, just pick the candidate with the highest last block.
		if bestPeer == nil || sp.LastBlock() > bestPeer.LastBlock() {
			bestPeer = sp
		}
	}

	// Start syncing from the best peer if one was selected.
	if bestPeer != nil {
		// Clear the requestedBlocks if the sync peer changes, otherwise
		// we may ignore blocks we need that the last sync peer failed
		// to send.
		b.requestedBlocks = make(map[chainhash.Hash]struct{})

		locator, err := b.server.LatestBlockLocator()
		if err != nil {
			log.Errorf("Failed to get block locator for the "+
				"latest block: %s", err)
			return
		}

		log.Infof("Syncing to block height %d from peer %s",
			bestPeer.LastBlock(), bestPeer.Addr())

		// When the current height is less than a known checkpoint we
		// can use block headers to learn about which blocks comprise
		// the chain up to the checkpoint and perform less validation
		// for them. This is possible since each header contains the
		// hash of the previous header and a merkle root. Therefore if
		// we validate all of the received headers link together
		// properly and the checkpoint hashes match, we can be sure the
		// hashes for the blocks in between are accurate. Further, once
		// the full blocks are downloaded, the merkle root is computed
		// and compared against the value in the header which proves the
		// full block hasn't been tampered with.
		//
		// Once we have passed the final checkpoint, or checkpoints are
		// disabled, use standard inv messages to learn about the blocks
		// and fully validate them. Finally, regression test mode does
		// not support the headers-first approach so do normal block
		// downloads when in regression test mode.
		b.syncPeerMutex.Lock()
		b.syncPeer = bestPeer
		b.syncPeerMutex.Unlock()
		if b.nextCheckpoint != nil &&
			best.Height < b.nextCheckpoint.Height {

			b.syncPeer.PushGetHeadersMsg(locator, b.nextCheckpoint.Hash)
			log.Infof("Downloading headers for blocks %d to "+
				"%d from peer %s", best.Height+1,
				b.nextCheckpoint.Height, bestPeer.Addr())
			// This will get adjusted when we process headers if
			// we request more headers than the peer is willing to
			// give us in one message.
		} else {
			b.syncPeer.PushGetBlocksMsg(locator, &zeroHash)
		}
	} else {
		log.Warnf("No sync peer candidates available")
	}
}

// current returns true if we believe we are synced with our peers, false if we
// still have blocks to check.
func (b *blockManager) current() bool {
	// Figure out the latest block we know.
	header, height, err := b.server.LatestBlock()
	if err != nil {
		return false
	}

	// There is no last checkpoint if checkpoints are disabled or there are
	// none for this current network.
	checkpoints := b.server.chainParams.Checkpoints
	if len(checkpoints) != 0 {
		// We aren't current if the newest block we know of isn't ahead
		// of all checkpoints.
		if checkpoints[len(checkpoints)-1].Height >= int32(height) {
			return false
		}
	}

	// If we have a syncPeer and are below the block we are syncing to, we
	// are not current.
	if b.syncPeer != nil && int32(height) < b.syncPeer.LastBlock() {
		return false
	}

	// If our time source (median times of all the connected peers) is at
	// least 24 hours ahead of our best known block, we aren't current.
	minus24Hours := b.server.timeSource.AdjustedTime().Add(-24 * time.Hour)
	return !header.Timestamp.Before(minus24Hours)
}

// IsCurrent returns whether or not the block manager believes it is synced with
// the connected peers.
func (b *blockManager) IsCurrent() bool {
	reply := make(chan bool)
	b.peerChan <- isCurrentMsg{reply: reply}
	return <-reply
}

// QueueInv adds the passed inv message and peer to the block handling queue.
func (b *blockManager) QueueInv(inv *wire.MsgInv, sp *serverPeer) {
	// No channel handling here because peers do not need to block on inv
	// messages.
	if atomic.LoadInt32(&b.shutdown) != 0 {
		return
	}

	b.peerChan <- &invMsg{inv: inv, peer: sp}
}

// handleInvMsg handles inv messages from all peers.
// We examine the inventory advertised by the remote peer and act accordingly.
func (b *blockManager) handleInvMsg(imsg *invMsg) {
	// Attempt to find the final block in the inventory list. There may
	// not be one.
	lastBlock := -1
	invVects := imsg.inv.InvList
	for i := len(invVects) - 1; i >= 0; i-- {
		if invVects[i].Type == wire.InvTypeBlock {
			lastBlock = i
			break
		}
	}

	// If this inv contains a block announcement, and this isn't coming from
	// our current sync peer or we're current, then update the last
	// announced block for this peer. We'll use this information later to
	// update the heights of peers based on blocks we've accepted that they
	// previously announced.
	if lastBlock != -1 && (imsg.peer != b.syncPeer || b.current()) {
		imsg.peer.UpdateLastAnnouncedBlock(&invVects[lastBlock].Hash)
	}

	// Ignore invs from peers that aren't the sync peer if we are not
	// current. Helps prevent dealing with orphans.
	if imsg.peer != b.syncPeer && !b.current() {
		return
	}

	// If our chain is current and a peer announces a block we already
	// know of, then update their current block height.
	if lastBlock != -1 && b.current() {
		_, blkHeight, err := b.server.GetBlockByHash(invVects[lastBlock].Hash)
		if err == nil {
			imsg.peer.UpdateLastBlockHeight(int32(blkHeight))
		}
	}

	// Add blocks to the cache of known inventory for the peer.
	for _, iv := range invVects {
		if iv.Type == wire.InvTypeBlock {
			imsg.peer.AddKnownInventory(iv)
		}
	}

	// If this is the sync peer or we're current, get the headers
	// for the announced blocks and update the last announced block.
	if lastBlock != -1 && (imsg.peer == b.syncPeer || b.current()) {
		lastEl := b.headerList.Back()
		var lastHash chainhash.Hash
		if lastEl != nil {
			lastHash = lastEl.Value.(*headerNode).header.BlockHash()
		}
		// Only send getheaders if we don't already know about the last
		// block hash being announced.
		if lastHash != invVects[lastBlock].Hash && lastEl != nil &&
			b.lastRequested != invVects[lastBlock].Hash {

			// Make a locator starting from the latest known header
			// we've processed.
			locator := make(blockchain.BlockLocator, 0,
				wire.MaxBlockLocatorsPerMsg)
			locator = append(locator, &lastHash)
			// Add locator from the database as backup.
			knownLocator, err := b.server.LatestBlockLocator()
			if err == nil {
				locator = append(locator, knownLocator...)
			}
			// Get headers based on locator.
			err = imsg.peer.PushGetHeadersMsg(locator,
				&invVects[lastBlock].Hash)
			if err != nil {
				log.Warnf("Failed to send getheaders message "+
					"to peer %s: %s", imsg.peer.Addr(), err)
				return
			}
			b.lastRequested = invVects[lastBlock].Hash
		}
	}
}

// QueueHeaders adds the passed headers message and peer to the block handling
// queue.
func (b *blockManager) QueueHeaders(headers *wire.MsgHeaders, sp *serverPeer) {
	// No channel handling here because peers do not need to block on
	// headers messages.
	if atomic.LoadInt32(&b.shutdown) != 0 {
		return
	}

	b.peerChan <- &headersMsg{headers: headers, peer: sp}
}

// handleHeadersMsg handles headers messages from all peers.
func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
	msg := hmsg.headers
	numHeaders := len(msg.Headers)

	// Nothing to do for an empty headers message.
	if numHeaders == 0 {
		return
	}

	// For checking to make sure blocks aren't too far in the
	// future as of the time we receive the headers message.
	maxTimestamp := b.server.timeSource.AdjustedTime().
		Add(maxTimeOffset)

	// Process all of the received headers ensuring each one connects to the
	// previous and that checkpoints match.
	receivedCheckpoint := false
	var finalHash *chainhash.Hash
	var finalHeight int32
	for i, blockHeader := range msg.Headers {
		blockHash := blockHeader.BlockHash()
		finalHash = &blockHash

		// Ensure there is a previous header to compare against.
		prevNodeEl := b.headerList.Back()
		if prevNodeEl == nil {
			log.Warnf("Header list does not contain a previous " +
				"element as expected -- disconnecting peer")
			hmsg.peer.Disconnect()
			return
		}

		// Ensure the header properly connects to the previous one,
		// that the proof of work is good, and that the header's
		// timestamp isn't too far in the future, and add it to the
		// list of headers.
		node := headerNode{header: blockHeader}
		prevNode := prevNodeEl.Value.(*headerNode)
		prevHash := prevNode.header.BlockHash()
		if prevHash.IsEqual(&blockHeader.PrevBlock) {
			err := b.checkHeaderSanity(blockHeader, maxTimestamp,
				false)
			if err != nil {
				log.Warnf("Header doesn't pass sanity check: "+
					"%s -- disconnecting peer", err)
				hmsg.peer.Disconnect()
				return
			}
			node.height = prevNode.height + 1
			finalHeight = node.height
			err = b.server.putBlock(*blockHeader,
				uint32(node.height))
			if err != nil {
				log.Criticalf("Couldn't write block to "+
					"database: %s", err)
				// Should we panic here?
			}
			err = b.server.putMaxBlockHeight(uint32(node.height))
			if err != nil {
				log.Criticalf("Couldn't write max block height"+
					" to database: %s", err)
				// Should we panic here?
			}
			hmsg.peer.UpdateLastBlockHeight(node.height)
			e := b.headerList.PushBack(&node)
			b.mapMutex.Lock()
			b.basicHeaders[node.header.BlockHash()] = make(
				map[chainhash.Hash][]*serverPeer,
			)
			b.extendedHeaders[node.header.BlockHash()] = make(
				map[chainhash.Hash][]*serverPeer,
			)
			b.mapMutex.Unlock()
			if b.startHeader == nil {
				b.startHeader = e
			}
		} else {
			// The block doesn't connect to the last block we know.
			// We will need to do some additional checks to process
			// possible reorganizations or incorrect chain on either
			// our or the peer's side.
			// If we got these headers from a peer that's not our
			// sync peer, they might not be aligned correctly or
			// even on the right chain. Just ignore the rest of the
			// message. However, if we're current, this might be a
			// reorg, in which case we'll either change our sync
			// peer or disconnect the peer that sent us these
			// bad headers.
			if hmsg.peer != b.syncPeer && !b.current() {
				return
			}

			// Check if this is the last block we know of. This is
			// a shortcut for sendheaders so that each redundant
			// header doesn't cause a disk read.
			if blockHash == prevHash {
				continue
			}

			// Check if this block is known. If so, we continue to
			// the next one.
			_, _, err := b.server.GetBlockByHash(blockHash)
			if err == nil {
				continue
			}

			// Check if the previous block is known. If it is, this
			// is probably a reorg based on the estimated latest
			// block that matches between us and the peer as
			// derived from the block locator we sent to request
			// these headers. Otherwise, the headers don't connect
			// to anything we know and we should disconnect the
			// peer.
			backHead, backHeight, err := b.server.GetBlockByHash(
				blockHeader.PrevBlock)
			if err != nil {
				log.Warnf("Received block header that does not"+
					" properly connect to the chain from"+
					" peer %s (%s) -- disconnecting",
					hmsg.peer.Addr(), err)
				hmsg.peer.Disconnect()
				return
			}

			// We've found a branch we weren't aware of. If the
			// branch is earlier than the latest synchronized
			// checkpoint, it's invalid and we need to disconnect
			// the reporting peer.
			prevCheckpoint := b.findPreviousHeaderCheckpoint(
				prevNode.height)
			if backHeight < uint32(prevCheckpoint.Height) {
				log.Errorf("Attempt at a reorg earlier than a "+
					"checkpoint past which we've already "+
					"synchronized -- disconnecting peer "+
					"%s", hmsg.peer.Addr())
				hmsg.peer.Disconnect()
				return
			}

			// Check the sanity of the new branch. If any of the
			// blocks don't pass sanity checks, disconnect the peer.
			// We also keep track of the work represented by these
			// headers so we can compare it to the work in the known
			// good chain.
			b.reorgList.Init()
			b.reorgList.PushBack(&headerNode{
				header: &backHead,
				height: int32(backHeight),
			})
			totalWork := big.NewInt(0)
			for j, reorgHeader := range msg.Headers[i:] {
				err = b.checkHeaderSanity(reorgHeader,
					maxTimestamp, true)
				if err != nil {
					log.Warnf("Header doesn't pass sanity"+
						" check: %s -- disconnecting "+
						"peer", err)
					hmsg.peer.Disconnect()
					return
				}
				totalWork.Add(totalWork,
					blockchain.CalcWork(reorgHeader.Bits))
				b.reorgList.PushBack(&headerNode{
					header: reorgHeader,
					height: int32(backHeight+1) + int32(j),
				})
			}
			log.Tracef("Sane reorg attempted. Total work from "+
				"reorg chain: %v", totalWork)

			// All the headers pass sanity checks. Now we calculate
			// the total work for the known chain.
			knownWork := big.NewInt(0)

			// This should NEVER be nil because the most recent
			// block is always pushed back by resetHeaderState
			knownEl := b.headerList.Back()
			var knownHead wire.BlockHeader
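			// Walk backwards from our current tip to the fork
			// point, preferring headers still in the in-memory
			// list and falling back to the database once the list
			// is exhausted, summing up the work along the way.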
			for j := uint32(prevNode.height); j > backHeight; j-- {
				if knownEl != nil {
					knownHead = *knownEl.Value.(*headerNode).header
					knownEl = knownEl.Prev()
				} else {
					knownHead, _, err = b.server.GetBlockByHash(
						knownHead.PrevBlock)
					if err != nil {
						log.Criticalf("Can't get block "+
							"header for hash %s: "+
							"%v",
							knownHead.PrevBlock,
							err)
						// Should we panic here?
					}
				}
				knownWork.Add(knownWork,
					blockchain.CalcWork(knownHead.Bits))
			}
			log.Tracef("Total work from known chain: %v", knownWork)

			// Compare the two work totals and reject the new chain
			// if it doesn't have more work than the previously
			// known chain. Disconnect if it's actually less than
			// the known chain.
			switch knownWork.Cmp(totalWork) {
			case 1:
				log.Warnf("Reorg attempt that has less work "+
					"than known chain from peer %s -- "+
					"disconnecting", hmsg.peer.Addr())
				hmsg.peer.Disconnect()
				fallthrough
			case 0:
				return
			default:
			}

			// At this point, we have a valid reorg, so we roll
			// back the existing chain and add the new block header.
			// We also change the sync peer. Then we can continue
			// with the rest of the headers in the message as if
			// nothing has happened.
			b.syncPeerMutex.Lock()
			b.syncPeer = hmsg.peer
			b.syncPeerMutex.Unlock()
			_, err = b.server.rollBackToHeight(backHeight)
			if err != nil {
				log.Criticalf("Rollback failed: %s",
					err)
				// Should we panic here?
			}
			err = b.server.putBlock(*blockHeader, backHeight+1)
			if err != nil {
				log.Criticalf("Couldn't write block to "+
					"database: %s", err)
				// Should we panic here?
			}
			err = b.server.putMaxBlockHeight(backHeight + 1)
			if err != nil {
				log.Criticalf("Couldn't write max block height"+
					" to database: %s", err)
				// Should we panic here?
			}
			b.resetHeaderState(&backHead, int32(backHeight))
			b.headerList.PushBack(&headerNode{
				header: blockHeader,
				height: int32(backHeight + 1),
			})
			b.mapMutex.Lock()
			b.basicHeaders[blockHeader.BlockHash()] = make(
				map[chainhash.Hash][]*serverPeer,
			)
			b.extendedHeaders[blockHeader.BlockHash()] = make(
				map[chainhash.Hash][]*serverPeer,
			)
			b.mapMutex.Unlock()
			if b.lastBasicCFHeaderHeight > int32(backHeight) {
				b.lastBasicCFHeaderHeight = int32(backHeight)
			}
			if b.lastExtCFHeaderHeight > int32(backHeight) {
				b.lastExtCFHeaderHeight = int32(backHeight)
			}
		}

		// Verify the header at the next checkpoint height matches.
		if b.nextCheckpoint != nil &&
			node.height == b.nextCheckpoint.Height {
			nodeHash := node.header.BlockHash()
			if nodeHash.IsEqual(b.nextCheckpoint.Hash) {
				receivedCheckpoint = true
				log.Infof("Verified downloaded block "+
					"header against checkpoint at height "+
					"%d/hash %s", node.height, nodeHash)
			} else {
				log.Warnf("Block header at height %d/hash "+
					"%s from peer %s does NOT match "+
					"expected checkpoint hash of %s -- "+
					"disconnecting", node.height,
					nodeHash, hmsg.peer.Addr(),
					b.nextCheckpoint.Hash)
				prevCheckpoint :=
					b.findPreviousHeaderCheckpoint(
						node.height)
				log.Infof("Rolling back to previous validated "+
					"checkpoint at height %d/hash %s",
					prevCheckpoint.Height,
					prevCheckpoint.Hash)
				_, err := b.server.rollBackToHeight(uint32(
					prevCheckpoint.Height))
				if err != nil {
					log.Criticalf("Rollback failed: %s",
						err)
					// Should we panic here?
				}
				hmsg.peer.Disconnect()
				return
			}
			break
		}
	}

	// When this header is a checkpoint, switch to fetching the blocks for
	// all of the headers since the last checkpoint.
	if receivedCheckpoint {
		b.nextCheckpoint = b.findNextHeaderCheckpoint(finalHeight)
	}

	// Send getcfheaders to each peer based on these headers.
	cfhLocator := blockchain.BlockLocator([]*chainhash.Hash{
		&msg.Headers[0].PrevBlock,
	})
	cfhStopHash := msg.Headers[len(msg.Headers)-1].BlockHash()
	cfhCount := len(msg.Headers)
	cfhReqB := cfhRequest{
		extended: false,
		stopHash: cfhStopHash,
	}
	cfhReqE := cfhRequest{
		extended: true,
		stopHash: cfhStopHash,
	}
	b.server.ForAllPeers(func(sp *serverPeer) {
		// Should probably use better isolation for this but we're in
		// the same package. One of the things to clean up when we do
		// more general cleanup.
		sp.mtxReqCFH.Lock()
		sp.requestedCFHeaders[cfhReqB] = cfhCount
		sp.requestedCFHeaders[cfhReqE] = cfhCount
		sp.mtxReqCFH.Unlock()
		sp.pushGetCFHeadersMsg(cfhLocator, &cfhStopHash, false)
		sp.pushGetCFHeadersMsg(cfhLocator, &cfhStopHash, true)
	})

	// If not current, request the next batch of headers starting from the
	// latest known header and ending with the next checkpoint.
	if !b.current() || b.server.chainParams.Net ==
		chaincfg.SimNetParams.Net {
		locator := blockchain.BlockLocator([]*chainhash.Hash{finalHash})
		nextHash := zeroHash
		if b.nextCheckpoint != nil {
			nextHash = *b.nextCheckpoint.Hash
		}
		err := hmsg.peer.PushGetHeadersMsg(locator, &nextHash)
		if err != nil {
			log.Warnf("Failed to send getheaders message to "+
				"peer %s: %s", hmsg.peer.Addr(), err)
			// Unnecessary but we might put other code after this
			// eventually.
			return
		}
	}
}

// QueueCFHeaders adds the passed headers message and peer to the block handling
// queue.
func (b *blockManager) QueueCFHeaders(cfheaders *wire.MsgCFHeaders,
	sp *serverPeer) {
	// No channel handling here because peers do not need to block on
	// cfheaders messages.
	if atomic.LoadInt32(&b.shutdown) != 0 {
		return
	}

	// Ignore messages with 0 headers.
	if len(cfheaders.HeaderHashes) == 0 {
		return
	}

	// Check that the count is correct. This works even when the map lookup
	// fails as it returns 0 in that case.
	req := cfhRequest{
		extended: cfheaders.Extended,
		stopHash: cfheaders.StopHash,
	}
	// TODO: Get rid of this by refactoring all of this using the query API
	sp.mtxReqCFH.Lock()
	expLen := sp.requestedCFHeaders[req]
	sp.mtxReqCFH.Unlock()
	if expLen != len(cfheaders.HeaderHashes) {
		log.Warnf("Received cfheaders message doesn't match any "+
			"getcfheaders request. Peer %s is probably on a "+
			"different chain -- ignoring", sp.Addr())
		return
	}
	// TODO: Remove this by refactoring this section into a query client.
	sp.mtxReqCFH.Lock()
	delete(sp.requestedCFHeaders, req)
	sp.mtxReqCFH.Unlock()

	// Track the number of pending cfheaders messages for both basic and
	// extended filters.
	pendingMsgs := &b.numBasicCFHeadersMsgs
	if cfheaders.Extended {
		pendingMsgs = &b.numExtCFHeadersMsgs
	}
	atomic.AddInt32(pendingMsgs, 1)
	b.peerChan <- &cfheadersMsg{cfheaders: cfheaders, peer: sp}
}

// handleCFHeadersMsg handles cfheaders messages from all peers.
// TODO: Refactor this using query API.
func (b *blockManager) handleCFHeadersMsg(cfhmsg *cfheadersMsg) {
	// Determine whether this message is for the basic or the extended
	// filter headers, and note that one fewer cfheaders message of that
	// type is pending.
	headerMap := b.basicHeaders
	pendingMsgs := &b.numBasicCFHeadersMsgs
	if cfhmsg.cfheaders.Extended {
		headerMap = b.extendedHeaders
		pendingMsgs = &b.numExtCFHeadersMsgs
	}
	atomic.AddInt32(pendingMsgs, -1)
	headerList := cfhmsg.cfheaders.HeaderHashes
	respLen := len(headerList)

	// Find the block header matching the last filter header, if any.
	el := b.headerList.Back()
	for el != nil {
		if el.Value.(*headerNode).header.BlockHash() ==
			cfhmsg.cfheaders.StopHash {
			break
		}
		el = el.Prev()
	}

	// If nothing matched, there's nothing more to do.
	if el == nil {
		return
	}

	// Cycle through the filter header hashes and process them.
	var node *headerNode
	var hash chainhash.Hash
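	// The stop hash corresponds to the last entry in headerList, so walk
	// the block header list and the filter header hashes backwards in
	// step.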
	for i := respLen - 1; i >= 0 && el != nil; i-- {
		// If there's no map for this header, the header is either no
		// longer valid or has already been processed and committed to
		// the database. Either way, break processing.
		node = el.Value.(*headerNode)
		hash = node.header.BlockHash()
		b.mapMutex.Lock()
		if _, ok := headerMap[hash]; !ok {
			b.mapMutex.Unlock()
			log.Tracef("Breaking at %d (%s)", node.height, hash)
			break
		}

		// Process this header and set up the next iteration.
		headerMap[hash][*headerList[i]] = append(
			headerMap[hash][*headerList[i]], cfhmsg.peer,
		)
		b.mapMutex.Unlock()
		el = el.Prev()
	}

	b.intChan <- &processCFHeadersMsg{
		earliestNode: node,
		stopHash:     cfhmsg.cfheaders.StopHash,
		extended:     cfhmsg.cfheaders.Extended,
	}
	log.Tracef("Processed cfheaders starting at %d(%s), ending at %s, from"+
		" peer %s, extended: %t", node.height, node.header.BlockHash(),
		cfhmsg.cfheaders.StopHash, cfhmsg.peer.Addr(),
		cfhmsg.cfheaders.Extended)
}

// handleProcessCFHeadersMsg checks to see if we have enough cfheaders to make
// a decision about what the correct headers are, makes that decision if
// possible, and downloads any cfilters and blocks necessary to make that
// decision.
// TODO: Refactor this using query API.
func (b *blockManager) handleProcessCFHeadersMsg(msg *processCFHeadersMsg) {
	// Assume we aren't ready to make a decision about correct headers yet.
	ready := false
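
	// Select the state for the filter type (basic or extended) this
	// message refers to, so the rest of the function can operate on either
	// generically.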
	headerMap := b.basicHeaders
	writeFunc := b.server.putBasicHeader
	readFunc := b.server.GetBasicHeader
	lastCFHeaderHeight := &b.lastBasicCFHeaderHeight
	pendingMsgs := &b.numBasicCFHeadersMsgs
	if msg.extended {
		headerMap = b.extendedHeaders
		writeFunc = b.server.putExtHeader
		readFunc = b.server.GetExtHeader
		lastCFHeaderHeight = &b.lastExtCFHeaderHeight
		pendingMsgs = &b.numExtCFHeadersMsgs
	}

	stopHash := msg.earliestNode.header.PrevBlock

	// If we have started receiving cfheaders messages for blocks farther
	// than the last set we haven't made a decision on, it's time to make
	// a decision.
	if msg.earliestNode.height > *lastCFHeaderHeight+1 {
		ready = true
	}

	// If we have fewer processed cfheaders messages for the earliest node
	// than the number of connected peers, give the other peers some time to
	// catch up before checking if we've processed all of the queued
	// cfheaders messages.
	numHeaders := 0
	blockMap := headerMap[msg.earliestNode.header.BlockHash()]
	for headerHash := range blockMap {
		numHeaders += len(blockMap[headerHash])
	}
	// Sleep for a bit if we have more peers than cfheaders messages for the
	// earliest node for which we're trying to get cfheaders. This lets us
	// wait for other peers to send cfheaders messages before making any
	// decisions about whether we should write the headers in this message.
	connCount := int(b.server.ConnectedCount())
	log.Tracef("Number of peers for which we've processed a cfheaders "+
		"message for block %s: %d of %d",
		msg.earliestNode.header.BlockHash(), numHeaders, connCount)
	if numHeaders <= connCount {
		time.Sleep(WaitForMoreCFHeaders)
	}

	// If there are no other cfheaders messages left for this type (basic vs
	// extended), we should go ahead and make a decision because we have all
	// the info we're going to get.
	if atomic.LoadInt32(pendingMsgs) == 0 {
		ready = true
		stopHash = msg.stopHash
	}

	// Do nothing if we're not ready to make a decision yet.
	if !ready {
		return
	}

	// At this point, we've got all the cfheaders messages we're going to
	// get for the range of headers described by the passed message. We now
	// iterate through all of those headers, looking for conflicts. If we
	// find a conflict, we have to do additional checks; otherwise, we write
	// the filter header to the database.
	el := b.headerList.Front()
	for el != nil {
		node := el.Value.(*headerNode)
		header := node.header
		hash := header.BlockHash()
		if node.height > *lastCFHeaderHeight {
			b.mapMutex.Lock()
			blockMap := headerMap[hash]
			switch len(blockMap) {
			// This should only happen if the filter has already
			// been written to the database.
			case 0:
				if _, err := readFunc(hash); err != nil {
					// We don't have the filter stored in
					// the DB, there's something wrong.
					log.Warnf("Somehow we have 0 cfheaders"+
						" for block %d (%s)",
						node.height, hash)
					b.mapMutex.Unlock()
					return
				}

			// This is the normal case when nobody's trying to
			// bamboozle us (or ALL our peers are).
			case 1:
				// This will only cycle once
				for headerHash := range blockMap {
					writeFunc(hash, headerHash)
					log.Tracef("Wrote header for block %d "+
						"with %d cfheaders messages, "+
						"extended: %t", node.height,
						len(blockMap[headerHash]),
						msg.extended)

					// Notify subscribers of a connected
					// block.
					// TODO: Rethink this so we're not
					// interrupting block processing for
					// notifications if the client messes
					// up channel handling.
					b.server.mtxSubscribers.RLock()
					for sub := range b.server.blockSubscribers {
						channel := sub.onConnectBasic
						if msg.extended {
							channel =
								sub.onConnectExt
						}
						if channel != nil {
							select {
							case channel <- *header:
							case <-sub.quit:
							}
						}
					}
					b.server.mtxSubscribers.RUnlock()
				}
				*lastCFHeaderHeight = node.height

			// This is when we have conflicting information from
			// multiple peers.
			// TODO: Handle this case as an adversarial condition.
			default:
				log.Warnf("Got more than 1 possible filter "+
					"header for block %d (%s)", node.height,
					node.header.BlockHash())
			}
			b.mapMutex.Unlock()
		}

		//elToRemove := el
		el = el.Next()
		//b.headerList.Remove(elToRemove)
		//b.startHeader = el

		// If we've reached the end, we can return
		if hash == stopHash {
			log.Tracef("Finished processing cfheaders messages up "+
				"to height %d/hash %s, extended: %t",
				node.height, hash, msg.extended)
			return
		}
	}
}

// checkHeaderSanity checks the PoW and timestamp of a block header.
func (b *blockManager) checkHeaderSanity(blockHeader *wire.BlockHeader,
	maxTimestamp time.Time, reorgAttempt bool) error {
	diff, err := b.calcNextRequiredDifficulty(
		blockHeader.Timestamp, reorgAttempt)
	if err != nil {
		return err
	}
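	// CheckProofOfWork operates on a full block, so wrap the bare header
	// in a stub block with no transactions before checking its PoW against
	// the required difficulty.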
	stubBlock := btcutil.NewBlock(&wire.MsgBlock{
		Header: *blockHeader,
	})
	err = blockchain.CheckProofOfWork(stubBlock,
		blockchain.CompactToBig(diff))
	if err != nil {
		return err
	}

	// Ensure the block time is not too far in the future.
	if blockHeader.Timestamp.After(maxTimestamp) {
		return fmt.Errorf("block timestamp of %v is too far in the "+
			"future", blockHeader.Timestamp)
	}
	return nil
}

// calcNextRequiredDifficulty calculates the required difficulty for the block
// after the passed previous block node based on the difficulty retarget rules.
func (b *blockManager) calcNextRequiredDifficulty(newBlockTime time.Time,
	reorgAttempt bool) (uint32, error) {

	hList := b.headerList
	if reorgAttempt {
		hList = b.reorgList
	}

	lastNodeEl := hList.Back()

	// Genesis block.
	if lastNodeEl == nil {
		return b.server.chainParams.PowLimitBits, nil
	}

	lastNode := lastNodeEl.Value.(*headerNode)

	// Return the previous block's difficulty requirements if this block
	// is not at a difficulty retarget interval.
	if (lastNode.height+1)%b.blocksPerRetarget != 0 {
		// For networks that support it, allow special reduction of the
		// required difficulty once too much time has elapsed without
		// mining a block.
		if b.server.chainParams.ReduceMinDifficulty {
			// Return minimum difficulty when more than the desired
			// amount of time has elapsed without mining a block.
			reductionTime := int64(
				b.server.chainParams.MinDiffReductionTime /
					time.Second)
			allowMinTime := lastNode.header.Timestamp.Unix() +
				reductionTime
			if newBlockTime.Unix() > allowMinTime {
				return b.server.chainParams.PowLimitBits, nil
			}

			// The block was mined within the desired timeframe, so
			// return the difficulty for the last block which did
			// not have the special minimum difficulty rule applied.
			prevBits, err := b.findPrevTestNetDifficulty(hList)
			if err != nil {
				return 0, err
			}
			return prevBits, nil
		}

		// For the main network (or any unrecognized networks), simply
		// return the previous block's difficulty requirements.
		return lastNode.header.Bits, nil
	}

	// Get the block node at the previous retarget (targetTimespan days
	// worth of blocks).
	firstNode, err := b.server.GetBlockByHeight(
		uint32(lastNode.height + 1 - b.blocksPerRetarget))
	if err != nil {
		return 0, err
	}

	// Limit the amount of adjustment that can occur to the previous
	// difficulty.
	actualTimespan := lastNode.header.Timestamp.Unix() -
		firstNode.Timestamp.Unix()
	adjustedTimespan := actualTimespan
	if actualTimespan < b.minRetargetTimespan {
		adjustedTimespan = b.minRetargetTimespan
	} else if actualTimespan > b.maxRetargetTimespan {
		adjustedTimespan = b.maxRetargetTimespan
	}

	// Calculate new target difficulty as:
	//  currentDifficulty * (adjustedTimespan / targetTimespan)
	// The result uses integer division which means it will be slightly
	// rounded down. Bitcoind also uses integer division to calculate this
	// result.
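	// For example, if the blocks in the retarget window arrived in half
	// the target timespan, the new target is half the old one, which
	// doubles the difficulty.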
	oldTarget := blockchain.CompactToBig(lastNode.header.Bits)
	newTarget := new(big.Int).Mul(oldTarget, big.NewInt(adjustedTimespan))
	targetTimeSpan := int64(b.server.chainParams.TargetTimespan /
		time.Second)
	newTarget.Div(newTarget, big.NewInt(targetTimeSpan))

	// Limit new value to the proof of work limit.
	if newTarget.Cmp(b.server.chainParams.PowLimit) > 0 {
		newTarget.Set(b.server.chainParams.PowLimit)
	}

	// Log new target difficulty and return it. The new target logging is
	// intentionally converting the bits back to a number instead of using
	// newTarget since conversion to the compact representation loses
	// precision.
	newTargetBits := blockchain.BigToCompact(newTarget)
	log.Debugf("Difficulty retarget at block height %d", lastNode.height+1)
	log.Debugf("Old target %08x (%064x)", lastNode.header.Bits, oldTarget)
	log.Debugf("New target %08x (%064x)", newTargetBits,
		blockchain.CompactToBig(newTargetBits))
	log.Debugf("Actual timespan %v, adjusted timespan %v, target timespan %v",
		time.Duration(actualTimespan)*time.Second,
		time.Duration(adjustedTimespan)*time.Second,
		b.server.chainParams.TargetTimespan)

	return newTargetBits, nil
}

// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
func (b *blockManager) findPrevTestNetDifficulty(hList *list.List) (uint32, error) {
	startNodeEl := hList.Back()

	// Genesis block.
	if startNodeEl == nil {
		return b.server.chainParams.PowLimitBits, nil
	}

	startNode := startNodeEl.Value.(*headerNode)

	// Search backwards through the chain for the last block without
	// the special rule applied.
	iterEl := startNodeEl
	iterNode := startNode.header
	iterHeight := startNode.height
	for iterNode != nil && iterHeight%b.blocksPerRetarget != 0 &&
		iterNode.Bits == b.server.chainParams.PowLimitBits {

		// Get the previous block node. This function is used over
		// simply accessing iterNode.parent directly as it will
		// dynamically create previous block nodes as needed. This
		// helps allow only the pieces of the chain that are needed
		// to remain in memory.
		iterHeight--
		el := iterEl.Prev()
		if el != nil {
			iterNode = el.Value.(*headerNode).header
		} else {
			node, err := b.server.GetBlockByHeight(
				uint32(iterHeight))
			if err != nil {
				log.Errorf("GetBlockByHeight: %s", err)
				return 0, err
			}
			iterNode = &node
		}
	}

	// Return the found difficulty or the minimum difficulty if no
	// appropriate block was found.
	lastBits := b.server.chainParams.PowLimitBits
	if iterNode != nil {
		lastBits = iterNode.Bits
	}
	return lastBits, nil
}