// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
    "container/list"
    "fmt"
    "github.com/conformal/btcchain"
    "github.com/conformal/btcdb"
    "github.com/conformal/btcutil"
    "github.com/conformal/btcwire"
    "net"
    "os"
    "path/filepath"
    "sync"
    "sync/atomic"
    "time"
)

const (
    chanBufferSize = 50

    // blockDbNamePrefix is the prefix for the block database name. The
    // database type is appended to this value to form the full block
    // database name.
    blockDbNamePrefix = "blocks"
)

// newPeerMsg signifies a newly connected peer to the block handler.
type newPeerMsg struct {
    peer *peer
}

// blockMsg packages a bitcoin block message and the peer it came from together
// so the block handler has access to that information.
type blockMsg struct {
    block *btcutil.Block
    peer  *peer
}

// invMsg packages a bitcoin inv message and the peer it came from together
// so the block handler has access to that information.
type invMsg struct {
    inv  *btcwire.MsgInv
    peer *peer
}

// headersMsg packages a bitcoin headers message and the peer it came from
// together so the block handler has access to that information.
type headersMsg struct {
    headers *btcwire.MsgHeaders
    peer    *peer
}

// donePeerMsg signifies a newly disconnected peer to the block handler.
type donePeerMsg struct {
    peer *peer
}

// txMsg packages a bitcoin tx message and the peer it came from together
// so the block handler has access to that information.
type txMsg struct {
    tx   *btcutil.Tx
    peer *peer
}

// blockManager provides a concurrency safe block manager for handling all
// incoming blocks.
type blockManager struct {
    server            *server
    started           int32
    shutdown          int32
    blockChain        *btcchain.BlockChain
    blockPeer         map[btcwire.ShaHash]*peer
    requestedTxns     map[btcwire.ShaHash]bool
    requestedBlocks   map[btcwire.ShaHash]bool
    receivedLogBlocks int64
    receivedLogTx     int64
    lastBlockLogTime  time.Time
    processingReqs    bool
    syncPeer          *peer
    msgChan           chan interface{}
    wg                sync.WaitGroup
    quit              chan bool

    headerPool       map[btcwire.ShaHash]*headerstr
    headerOrphan     map[btcwire.ShaHash]*headerstr
    fetchingHeaders  bool
    startBlock       *btcwire.ShaHash
    fetchBlock       *btcwire.ShaHash
    lastBlock        *btcwire.ShaHash
    latestCheckpoint *btcchain.Checkpoint
}

// headerstr describes a block header used during a headers-first download,
// along with a link to the next header in the chain, its height, and its hash.
type headerstr struct {
    header *btcwire.BlockHeader
    next   *headerstr
    height int
    sha    btcwire.ShaHash
}
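
// During a headers-first download, headerPool maps each announced block hash
// to its headerstr, and the next pointers form a chain from startBlock to
// lastBlock. A minimal sketch of walking that chain (illustrative only,
// assuming b.startBlock is set):
//
//    for h := b.headerPool[*b.startBlock]; h != nil; h = h.next {
//        // h.header is the downloaded header at height h.height
//    }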

// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from. When syncing is already running, it
// simply returns. It also examines the candidates for any which are no longer
// candidates and removes them as needed.
func (b *blockManager) startSync(peers *list.List) {
    // Return now if we're already syncing.
    if b.syncPeer != nil {
        return
    }

    // Find the height of the current known best block.
    _, height, err := b.server.db.NewestSha()
    if err != nil {
        bmgrLog.Errorf("%v", err)
        return
    }

    var bestPeer *peer
    var enext *list.Element
    for e := peers.Front(); e != nil; e = enext {
        enext = e.Next()
        p := e.Value.(*peer)

        // Remove sync candidate peers that are no longer candidates due
        // to passing their latest known block. NOTE: The < is
        // intentional as opposed to <=. While technically the peer
        // doesn't have a later block when it's equal, it will likely
        // have one soon so it is a reasonable choice. It also allows
        // the case where both are at 0 such as during regression test.
        if p.lastBlock < int32(height) {
            peers.Remove(e)
            continue
        }

        // TODO(davec): Use a better algorithm to choose the best peer.
        // For now, just pick the first available candidate.
        bestPeer = p
    }

    // Start syncing from the best peer if one was selected.
    if bestPeer != nil {
        locator, err := b.blockChain.LatestBlockLocator()
        if err != nil {
            bmgrLog.Errorf("Failed to get block locator for the "+
                "latest block: %v", err)
            return
        }

        bmgrLog.Infof("Syncing to block height %d from peer %v",
            bestPeer.lastBlock, bestPeer.addr)

        // If starting from the beginning, fetch headers and download
        // blocks based on them; otherwise, compute the block download
        // via inv messages. Regression test mode does not support the
        // headers-first approach, so do normal block downloads when in
        // regression test mode.
        if height == 0 && !cfg.RegressionTest && !cfg.DisableCheckpoints {
            bestPeer.PushGetHeadersMsg(locator)
            b.fetchingHeaders = true
        } else {
            bestPeer.PushGetBlocksMsg(locator, &zeroHash)
        }
        b.syncPeer = bestPeer
    } else {
        bmgrLog.Warnf("No sync peer candidates available")
    }
}
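
// A small illustration of the candidate filter in startSync: with our best
// height at 100, a peer advertising lastBlock 100 is kept (it will likely
// have a newer block soon), while one at 99 is dropped (hypothetical values):
//
//    height := int32(100)
//    for _, last := range []int32{99, 100, 150} {
//        fmt.Println(last, "keep:", !(last < height)) // false, true, true
//    }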

// isSyncCandidate returns whether or not the peer is a candidate to consider
// syncing from.
func (b *blockManager) isSyncCandidate(p *peer) bool {
    // Typically a peer is not a candidate for sync if it's not a full node,
    // however regression test is special in that the regression tool is
    // not a full node and still needs to be considered a sync candidate.
    if cfg.RegressionTest {
        // The peer is not a candidate if it's not coming from localhost
        // or the hostname can't be determined for some reason.
        host, _, err := net.SplitHostPort(p.addr)
        if err != nil {
            return false
        }

        if host != "127.0.0.1" && host != "localhost" {
            return false
        }
    } else {
        // The peer is not a candidate for sync if it's not a full node.
        if p.services&btcwire.SFNodeNetwork != btcwire.SFNodeNetwork {
            return false
        }
    }

    // Candidate if all checks passed.
    return true
}

// handleNewPeerMsg deals with new peers that have signalled they may
// be considered as a sync peer (they have already successfully negotiated). It
// also starts syncing if needed. It is invoked from the blockHandler goroutine.
func (b *blockManager) handleNewPeerMsg(peers *list.List, p *peer) {
    // Ignore if in the process of shutting down.
    if atomic.LoadInt32(&b.shutdown) != 0 {
        return
    }

    bmgrLog.Infof("New valid peer %s (%s)", p, p.userAgent)

    // Ignore the peer if it's not a sync candidate.
    if !b.isSyncCandidate(p) {
        return
    }

    // Add the peer as a candidate to sync from.
    peers.PushBack(p)

    // Start syncing by choosing the best candidate if needed.
    b.startSync(peers)
}

// handleDonePeerMsg deals with peers that have signalled they are done. It
// removes the peer as a candidate for syncing and in the case where it was
// the current sync peer, attempts to select a new best peer to sync from. It
// is invoked from the blockHandler goroutine.
func (b *blockManager) handleDonePeerMsg(peers *list.List, p *peer) {
    // Remove the peer from the list of candidate peers.
    for e := peers.Front(); e != nil; e = e.Next() {
        if e.Value == p {
            peers.Remove(e)
            break
        }
    }

    bmgrLog.Infof("Lost peer %s", p)

    // Remove requested transactions from the global map so that they will
    // be fetched from elsewhere next time we get an inv.
    for k := range p.requestedTxns {
        delete(b.requestedTxns, k)
    }

    // Remove requested blocks from the global map so that they will be
    // fetched from elsewhere next time we get an inv.
    // TODO(oga) we could possibly here check which peers have these blocks
    // and request them now to speed things up a little.
    for k := range p.requestedBlocks {
        delete(b.requestedBlocks, k)
    }

    // Attempt to find a new peer to sync from if the quitting peer is the
    // sync peer.
    if b.syncPeer != nil && b.syncPeer == p {
        // If the lost peer was serving us headers, reset the
        // headers-first state so a new sync can start cleanly.
        if b.fetchingHeaders {
            b.fetchingHeaders = false
            b.startBlock = nil
            b.fetchBlock = nil
            b.lastBlock = nil
            b.headerPool = make(map[btcwire.ShaHash]*headerstr)
            b.headerOrphan = make(map[btcwire.ShaHash]*headerstr)
        }
        b.syncPeer = nil
        b.startSync(peers)
    }
}

// logBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockManager) logBlockHeight(numTx, height int64, latestHash *btcwire.ShaHash) {
    b.receivedLogBlocks++
    b.receivedLogTx += numTx

    now := time.Now()
    duration := now.Sub(b.lastBlockLogTime)
    if duration < time.Second*10 {
        return
    }

    // Truncate the duration to 10s of milliseconds.
    durationMillis := int64(duration / time.Millisecond)
    tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)

    // Attempt to get the timestamp of the latest block.
    blockTimeStr := ""
    block, err := b.server.db.FetchBlockBySha(latestHash)
    if err == nil {
        blockTimeStr = fmt.Sprintf(", %s", block.MsgBlock().Header.Timestamp)
    }

    // Log information about new block height.
    blockStr := "blocks"
    if b.receivedLogBlocks == 1 {
        blockStr = "block"
    }
    txStr := "transactions"
    if b.receivedLogTx == 1 {
        txStr = "transaction"
    }
    bmgrLog.Infof("Processed %d %s in the last %s (%d %s, height %d%s)",
        b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
        txStr, height, blockTimeStr)

    b.receivedLogBlocks = 0
    b.receivedLogTx = 0
    b.lastBlockLogTime = now
}
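
// A quick sketch of the truncation arithmetic used above: a duration of
// 12.3456s becomes 12.34s once truncated to 10s of milliseconds.
//
//    d := 12345600 * time.Microsecond
//    ms := int64(d / time.Millisecond)                 // 12345
//    t := 10 * time.Millisecond * time.Duration(ms/10) // 12340ms
//    fmt.Println(t)                                    // prints "12.34s"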

// handleTxMsg handles transaction messages from all peers.
func (b *blockManager) handleTxMsg(tmsg *txMsg) {
    // Keep track of which peer the tx was sent from.
    txHash := tmsg.tx.Sha()

    // If we didn't ask for this transaction then the peer is misbehaving.
    if _, ok := tmsg.peer.requestedTxns[*txHash]; !ok {
        bmgrLog.Warnf("Got unrequested transaction %v from %s -- "+
            "disconnecting", txHash, tmsg.peer.addr)
        tmsg.peer.Disconnect()
        return
    }

    // Process the transaction to include validation, insertion in the
    // memory pool, orphan handling, etc.
    err := tmsg.peer.server.txMemPool.ProcessTransaction(tmsg.tx)

    // Remove transaction from request maps. Either the mempool/chain
    // already knows about it and as such we shouldn't have any more
    // instances of trying to fetch it, or we failed to insert and thus
    // we'll retry next time we get an inv.
    delete(tmsg.peer.requestedTxns, *txHash)
    delete(b.requestedTxns, *txHash)

    if err != nil {
        // When the error is a rule error, it means the transaction was
        // simply rejected as opposed to something actually going wrong,
        // so log it as such. Otherwise, something really did go wrong,
        // so log it as an actual error.
        if _, ok := err.(TxRuleError); ok {
            bmgrLog.Debugf("Rejected transaction %v: %v", txHash, err)
        } else {
            bmgrLog.Errorf("Failed to process transaction %v: %v", txHash, err)
        }
        return
    }
}

// current returns true if we believe we are synced with our peers, false if we
// still have blocks to check.
func (b *blockManager) current() bool {
    if !b.blockChain.IsCurrent() {
        return false
    }

    // If blockChain thinks we are current and we have no syncPeer it
    // is probably right.
    if b.syncPeer == nil {
        return true
    }

    _, height, err := b.server.db.NewestSha()
    // No matter what chain thinks, if we are below the block we are
    // syncing to we are not current.
    // TODO(oga) we can get chain to return the height of each block when we
    // parse an orphan, which would allow us to update the height of peers
    // from what it was at initial handshake.
    if err != nil || height < int64(b.syncPeer.lastBlock) {
        return false
    }
    return true
}

// handleBlockMsg handles block messages from all peers.
func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
    defer func() {
        if b.startBlock != nil &&
            len(bmsg.peer.requestedBlocks) < 10 {

            // Block queue getting short, ask for more.
            b.fetchHeaderBlocks()
        }
    }()

    // If we didn't ask for this block then the peer is misbehaving.
    blockSha, _ := bmsg.block.Sha()
    if _, ok := bmsg.peer.requestedBlocks[*blockSha]; !ok {
        // The regression test intentionally sends some blocks twice
        // to test duplicate block insertion fails. Don't disconnect
        // the peer or ignore the block when we're in regression test
        // mode in this case so the chain code is actually fed the
        // duplicate blocks.
        if !cfg.RegressionTest {
            bmgrLog.Warnf("Got unrequested block %v from %s -- "+
                "disconnecting", blockSha, bmsg.peer.addr)
            bmsg.peer.Disconnect()
            return
        }
    }

    // Keep track of which peer the block was sent from so the notification
    // handler can request the parent blocks from the appropriate peer.
    b.blockPeer[*blockSha] = bmsg.peer

    // During a headers-first download, blocks at or below the latest
    // checkpoint height have already had their headers verified, so they
    // can be added with fastAdd. Also advance the fetch pointer to the
    // next expected block.
    fastAdd := false
    if b.fetchBlock != nil && blockSha.IsEqual(b.fetchBlock) {
        firstblock, ok := b.headerPool[*blockSha]
        if ok {
            if b.latestCheckpoint == nil {
                b.latestCheckpoint =
                    b.blockChain.LatestCheckpoint()
            }
            if int64(firstblock.height) <=
                b.latestCheckpoint.Height {

                fastAdd = true
            }
            if firstblock.next != nil {
                b.fetchBlock = &firstblock.next.sha
            }
        }
    }

    // Remove block from request maps. Either chain will know about it and
    // so we shouldn't have any more instances of trying to fetch it, or we
    // will fail the insert and thus we'll retry next time we get an inv.
    delete(bmsg.peer.requestedBlocks, *blockSha)
    delete(b.requestedBlocks, *blockSha)

    if fastAdd && blockSha.IsEqual(b.lastBlock) {
        // We have processed all headers-first blocks, so switch back
        // to normal inv-based handling and release the header pool.
        b.fetchingHeaders = false
        b.startBlock = nil
        b.fetchBlock = nil
        b.lastBlock = nil
        b.headerPool = make(map[btcwire.ShaHash]*headerstr)
        b.headerOrphan = make(map[btcwire.ShaHash]*headerstr)
    }

    // Process the block to include validation, best chain selection, orphan
    // handling, etc.
    err := b.blockChain.ProcessBlock(bmsg.block, fastAdd)
    if err != nil {
        delete(b.blockPeer, *blockSha)

        // When the error is a rule error, it means the block was simply
        // rejected as opposed to something actually going wrong, so log
        // it as such. Otherwise, something really did go wrong, so log
        // it as an actual error.
        if _, ok := err.(btcchain.RuleError); ok {
            bmgrLog.Infof("Rejected block %v: %v", blockSha, err)
        } else {
            bmgrLog.Errorf("Failed to process block %v: %v", blockSha, err)
        }
        return
    }

    // Don't keep track of the peer that sent the block any longer if it's
    // not an orphan.
    if !b.blockChain.IsKnownOrphan(blockSha) {
        delete(b.blockPeer, *blockSha)
    }

    // Log info about the new block height.
    latestHash, height, err := b.server.db.NewestSha()
    if err != nil {
        bmgrLog.Warnf("Failed to obtain latest sha - %v", err)
        return
    }
    b.logBlockHeight(int64(len(bmsg.block.MsgBlock().Transactions)), height,
        latestHash)

    // Sync the db to disk.
    b.server.db.Sync()
}

// fetchHeaderBlocks creates and sends a request to the syncPeer for
// the next list of blocks to be downloaded.
func (b *blockManager) fetchHeaderBlocks() {
    gdmsg := btcwire.NewMsgGetDataSizeHint(btcwire.MaxInvPerMsg)
    numRequested := 0
    startBlock := b.startBlock
    for {
        if b.startBlock == nil {
            break
        }
        blockHash := b.startBlock
        firstblock, ok := b.headerPool[*blockHash]
        if !ok {
            bmgrLog.Warnf("current fetch block %v missing from headerPool", blockHash)
            break
        }
        iv := btcwire.NewInvVect(btcwire.InvTypeBlock, blockHash)
        if !b.haveInventory(iv) {
            b.requestedBlocks[*blockHash] = true
            b.syncPeer.requestedBlocks[*blockHash] = true
            gdmsg.AddInvVect(iv)
            numRequested++
        }

        if b.fetchBlock == nil {
            b.fetchBlock = b.startBlock
        }
        if firstblock.next == nil {
            b.startBlock = nil
            break
        } else {
            b.startBlock = &firstblock.next.sha
        }

        if numRequested >= btcwire.MaxInvPerMsg {
            break
        }
    }
    if len(gdmsg.InvList) > 0 {
        bmgrLog.Debugf("Requesting %d blocks starting at %v",
            len(gdmsg.InvList), startBlock)
        b.syncPeer.QueueMessage(gdmsg, nil)
    }
}

// handleHeadersMsg handles headers messages from all peers.
func (b *blockManager) handleHeadersMsg(bmsg *headersMsg) {
    msg := bmsg.headers

    nheaders := len(msg.Headers)
    if nheaders == 0 {
        bmgrLog.Infof("Received %v block headers: Fetching blocks",
            len(b.headerPool))
        b.fetchHeaderBlocks()
        return
    }
    var blockhash btcwire.ShaHash

    if b.latestCheckpoint == nil {
        b.latestCheckpoint = b.blockChain.LatestCheckpoint()
    }

    for hdridx := range msg.Headers {
        blockhash, _ = msg.Headers[hdridx].BlockSha()
        var headerst headerstr
        headerst.header = msg.Headers[hdridx]
        headerst.sha = blockhash
        prev, ok := b.headerPool[headerst.header.PrevBlock]
        if ok {
            if prev.next == nil {
                prev.next = &headerst
            } else {
                bmgrLog.Infof("unexpected second child of block %v "+
                    "(existing %v, new %v)", prev.sha, prev.next.sha,
                    blockhash)
            }
            headerst.height = prev.height + 1
        } else if headerst.header.PrevBlock.IsEqual(activeNetParams.genesisHash) {
            ok = true
            headerst.height = 1
            b.startBlock = &headerst.sha
        }
        if int64(headerst.height) == b.latestCheckpoint.Height {
            if headerst.sha.IsEqual(b.latestCheckpoint.Hash) {
                // The header chain matches the checkpoint, so the
                // headers downloaded first can be trusted.
                // TODO: flag this?
            } else {
                // XXX: The marker does not match, so the headers
                // must be thrown away!
                // XXX: Don't trust the peer?
            }
        }
        if ok {
            b.headerPool[blockhash] = &headerst
            b.lastBlock = &headerst.sha
        } else {
            bmgrLog.Infof("found orphan block %v", blockhash)
            b.headerOrphan[headerst.header.PrevBlock] = &headerst
        }
    }

    // Construct the getheaders request and queue it to be sent.
    ghmsg := btcwire.NewMsgGetHeaders()
    err := ghmsg.AddBlockLocatorHash(&blockhash)
    if err != nil {
        bmgrLog.Infof("msgheaders bad addheaders: %v", blockhash)
        return
    }

    b.syncPeer.QueueMessage(ghmsg, nil)
}
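
// A condensed sketch of the header-linking rule above, with toy types
// (strings instead of ShaHashes; illustrative only): a header is accepted
// when its previous hash is already known, taking height parent+1, and is
// otherwise parked as an orphan.
//
//    pool := map[string]int{"genesisChild": 1} // hash -> height (toy values)
//    link := func(sha, prev string) bool {
//        parentHeight, ok := pool[prev]
//        if !ok {
//            return false // orphan: parent not yet known
//        }
//        pool[sha] = parentHeight + 1
//        return true
//    }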

// haveInventory returns whether or not the inventory represented by the passed
// inventory vector is known. This includes checking all of the various places
// inventory can be when it is in different states such as blocks that are part
// of the main chain, on a side chain, in the orphan pool, and transactions that
// are in the memory pool (either the main pool or orphan pool).
func (b *blockManager) haveInventory(invVect *btcwire.InvVect) bool {
    switch invVect.Type {
    case btcwire.InvTypeBlock:
        // Ask chain if the block is known to it in any form (main
        // chain, side chain, or orphan).
        return b.blockChain.HaveBlock(&invVect.Hash)

    case btcwire.InvTypeTx:
        // Ask the transaction memory pool if the transaction is known
        // to it in any form (main pool or orphan).
        if b.server.txMemPool.HaveTransaction(&invVect.Hash) {
            return true
        }

        // Check if the transaction exists from the point of view of the
        // end of the main chain.
        return b.server.db.ExistsTxSha(&invVect.Hash)
    }

    // The requested inventory is an unsupported type, so just claim
    // it is known to avoid requesting it.
    return true
}
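
// The manager consults haveInventory before asking a peer for data, as in
// fetchHeaderBlocks and handleInvMsg. A short sketch of that usage pattern
// (blockHash is a hypothetical *btcwire.ShaHash):
//
//    iv := btcwire.NewInvVect(btcwire.InvTypeBlock, blockHash)
//    if !b.haveInventory(iv) {
//        // not known anywhere locally; safe to request via getdata
//    }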

// handleInvMsg handles inv messages from all peers.
// We examine the inventory advertised by the remote peer and act accordingly.
func (b *blockManager) handleInvMsg(imsg *invMsg) {
    // Ignore invs from peers that aren't the sync if we are not current.
    // Helps prevent fetching a mass of orphans.
    if imsg.peer != b.syncPeer && !b.current() {
        return
    }

    // Attempt to find the final block in the inventory list. There may
    // not be one.
    lastBlock := -1
    invVects := imsg.inv.InvList
    for i := len(invVects) - 1; i >= 0; i-- {
        if invVects[i].Type == btcwire.InvTypeBlock {
            lastBlock = i
            break
        }
    }

    // Request the advertised inventory if we don't already have it. Also,
    // request parent blocks of orphans if we receive one we already have.
    // Finally, attempt to detect potential stalls due to long side chains
    // we already have and request more blocks to prevent them.
    chain := b.blockChain
    for i, iv := range invVects {
        // Ignore unsupported inventory types.
        if iv.Type != btcwire.InvTypeBlock && iv.Type != btcwire.InvTypeTx {
            continue
        }

        // Add the inventory to the cache of known inventory
        // for the peer.
        imsg.peer.AddKnownInventory(iv)

        if b.fetchingHeaders {
            // If we are fetching headers and already know about a
            // block, do not process it again.
            if _, ok := b.headerPool[iv.Hash]; ok {
                continue
            }
        }

        // Request the inventory if we don't already have it.
        if !b.haveInventory(iv) {
            // Add it to the request queue.
            imsg.peer.requestQueue.PushBack(iv)
            continue
        }

        if iv.Type == btcwire.InvTypeBlock {
            // The block is an orphan block that we already have.
            // When the existing orphan was processed, it requested
            // the missing parent blocks. When this scenario
            // happens, it means there were more blocks missing
            // than are allowed into a single inventory message. As
            // a result, once this peer requested the final
            // advertised block, the remote peer noticed and is now
            // resending the orphan block as an available block
            // to signal there are more missing blocks that need to
            // be requested.
            if chain.IsKnownOrphan(&iv.Hash) {
                // Request blocks starting at the latest known
                // up to the root of the orphan that just came
                // in.
                orphanRoot := chain.GetOrphanRoot(&iv.Hash)
                locator, err := chain.LatestBlockLocator()
                if err != nil {
                    bmgrLog.Errorf("PEER: Failed to get block "+
                        "locator for the latest block: "+
                        "%v", err)
                    continue
                }
                imsg.peer.PushGetBlocksMsg(locator, orphanRoot)
                continue
            }

            // We already have the final block advertised by this
            // inventory message, so force a request for more. This
            // should only happen if we're on a really long side
            // chain.
            if i == lastBlock {
                // Request blocks after this one up to the
                // final one the remote peer knows about (zero
                // stop hash).
                locator := chain.BlockLocatorFromHash(&iv.Hash)
                imsg.peer.PushGetBlocksMsg(locator, &zeroHash)
            }
        }
    }

    // Request as much as possible at once. Anything that won't fit into
    // the request will be requested on the next inv message.
    numRequested := 0
    gdmsg := btcwire.NewMsgGetData()
    requestQueue := imsg.peer.requestQueue
    for e := requestQueue.Front(); e != nil; e = requestQueue.Front() {
        iv := e.Value.(*btcwire.InvVect)
        imsg.peer.requestQueue.Remove(e)

        switch iv.Type {
        case btcwire.InvTypeBlock:
            // Request the block if there is not already a pending
            // request.
            if _, exists := b.requestedBlocks[iv.Hash]; !exists {
                b.requestedBlocks[iv.Hash] = true
                imsg.peer.requestedBlocks[iv.Hash] = true
                gdmsg.AddInvVect(iv)
                numRequested++
            }

        case btcwire.InvTypeTx:
            // Request the transaction if there is not already a
            // pending request.
            if _, exists := b.requestedTxns[iv.Hash]; !exists {
                b.requestedTxns[iv.Hash] = true
                imsg.peer.requestedTxns[iv.Hash] = true
                gdmsg.AddInvVect(iv)
                numRequested++
            }
        }

        if numRequested >= btcwire.MaxInvPerMsg {
            break
        }
    }
    if len(gdmsg.InvList) > 0 {
        imsg.peer.QueueMessage(gdmsg, nil)
    }
}

// blockHandler is the main handler for the block manager. It must be run
// as a goroutine. It processes block and inv messages in a separate goroutine
// from the peer handlers so the block (MsgBlock) messages are handled by a
// single thread without needing to lock memory data structures. This is
// important because the block manager controls which blocks are needed and how
// the fetching should proceed.
func (b *blockManager) blockHandler() {
    candidatePeers := list.New()
out:
    for {
        select {
        case m := <-b.msgChan:
            switch msg := m.(type) {
            case *newPeerMsg:
                b.handleNewPeerMsg(candidatePeers, msg.peer)

            case *txMsg:
                b.handleTxMsg(msg)
                msg.peer.txProcessed <- true

            case *blockMsg:
                b.handleBlockMsg(msg)
                msg.peer.blockProcessed <- true

            case *invMsg:
                b.handleInvMsg(msg)

            case *headersMsg:
                b.handleHeadersMsg(msg)

            case *donePeerMsg:
                b.handleDonePeerMsg(candidatePeers, msg.peer)

            default:
                bmgrLog.Warnf("Invalid message type in block "+
                    "handler: %T", msg)
            }

        case <-b.quit:
            break out
        }
    }
    b.wg.Done()
    bmgrLog.Trace("Block handler done")
}
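
// The pattern above is a single-goroutine "actor": every mutation of the
// block manager state happens on the blockHandler goroutine, so no mutexes
// are needed. A minimal sketch of the same pattern, independent of btcd
// (all names here are illustrative):
//
//    type counterMsg struct{ delta int }
//    msgc := make(chan interface{}, 50)
//    go func() {
//        count := 0
//        for m := range msgc {
//            switch msg := m.(type) {
//            case *counterMsg:
//                count += msg.delta // only this goroutine touches count
//            }
//        }
//    }()
//    msgc <- &counterMsg{delta: 1}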

// handleNotifyMsg handles notifications from btcchain. It does things such
// as request orphan block parents and relay accepted blocks to connected peers.
func (b *blockManager) handleNotifyMsg(notification *btcchain.Notification) {
    switch notification.Type {
    // An orphan block has been accepted by the block chain. Request
    // its parents from the peer that sent it.
    case btcchain.NTOrphanBlock:
        orphanHash := notification.Data.(*btcwire.ShaHash)
        if peer, exists := b.blockPeer[*orphanHash]; exists {
            orphanRoot := b.blockChain.GetOrphanRoot(orphanHash)
            locator, err := b.blockChain.LatestBlockLocator()
            if err != nil {
                bmgrLog.Errorf("Failed to get block locator "+
                    "for the latest block: %v", err)
                break
            }
            peer.PushGetBlocksMsg(locator, orphanRoot)
            delete(b.blockPeer, *orphanRoot)
        } else {
            bmgrLog.Warnf("Notification for orphan %v with no peer",
                orphanHash)
        }

    // A block has been accepted into the block chain. Relay it to other
    // peers.
    case btcchain.NTBlockAccepted:
        // Don't relay if we are not current. Other peers that are
        // current should already know about it.
        if !b.current() {
            return
        }

        block, ok := notification.Data.(*btcutil.Block)
        if !ok {
            bmgrLog.Warnf("Chain accepted notification is not a block.")
            break
        }

        // It's ok to ignore the error here since the notification is
        // coming from the chain code which has already cached the hash.
        hash, _ := block.Sha()

        // Generate the inventory vector and relay it.
        iv := btcwire.NewInvVect(btcwire.InvTypeBlock, hash)
        b.server.RelayInventory(iv)

    // A block has been connected to the main block chain.
    case btcchain.NTBlockConnected:
        block, ok := notification.Data.(*btcutil.Block)
        if !ok {
            bmgrLog.Warnf("Chain connected notification is not a block.")
            break
        }

        // Remove all of the transactions (except the coinbase) in the
        // connected block from the transaction pool. Also, remove any
        // transactions which are now double spends as a result of these
        // new transactions. Note that removing a transaction from
        // pool also removes any transactions which depend on it,
        // recursively.
        for _, tx := range block.Transactions()[1:] {
            b.server.txMemPool.RemoveTransaction(tx)
            b.server.txMemPool.RemoveDoubleSpends(tx)
        }

        // Notify frontends
        if r := b.server.rpcServer; r != nil {
            go func() {
                r.NotifyBlockTXs(b.server.db, block)
                r.NotifyBlockConnected(block)
            }()
        }

    // A block has been disconnected from the main block chain.
    case btcchain.NTBlockDisconnected:
        block, ok := notification.Data.(*btcutil.Block)
        if !ok {
            bmgrLog.Warnf("Chain disconnected notification is not a block.")
            break
        }

        // Reinsert all of the transactions (except the coinbase) into
        // the transaction pool.
        for _, tx := range block.Transactions()[1:] {
            err := b.server.txMemPool.MaybeAcceptTransaction(tx, nil)
            if err != nil {
                // Remove the transaction and all transactions
                // that depend on it if it wasn't accepted into
                // the transaction pool.
                b.server.txMemPool.RemoveTransaction(tx)
            }
        }

        // Notify frontends
        if r := b.server.rpcServer; r != nil {
            go r.NotifyBlockDisconnected(block)
        }
    }
}

// NewPeer informs the block manager of a newly active peer.
func (b *blockManager) NewPeer(p *peer) {
    // Ignore if we are shutting down.
    if atomic.LoadInt32(&b.shutdown) != 0 {
        return
    }

    b.msgChan <- &newPeerMsg{peer: p}
}

// QueueTx adds the passed transaction message and peer to the block handling
// queue.
func (b *blockManager) QueueTx(tx *btcutil.Tx, p *peer) {
    // Don't accept more transactions if we're shutting down.
    if atomic.LoadInt32(&b.shutdown) != 0 {
        p.txProcessed <- false
        return
    }

    b.msgChan <- &txMsg{tx: tx, peer: p}
}

// QueueBlock adds the passed block message and peer to the block handling queue.
func (b *blockManager) QueueBlock(block *btcutil.Block, p *peer) {
    // Don't accept more blocks if we're shutting down.
    if atomic.LoadInt32(&b.shutdown) != 0 {
        p.blockProcessed <- false
        return
    }

    b.msgChan <- &blockMsg{block: block, peer: p}
}
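
// A sketch of typical caller-side usage: the peer code hands a block to the
// manager and then blocks until the handler signals completion on the peer's
// blockProcessed channel (the send comes from the blockHandler goroutine).
// This assumes a block manager bm, a deserialized block, and the receiving
// peer p:
//
//    bm.QueueBlock(block, p)
//    <-p.blockProcessed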

// QueueInv adds the passed inv message and peer to the block handling queue.
func (b *blockManager) QueueInv(inv *btcwire.MsgInv, p *peer) {
    // No channel handling here because peers do not need to block on inv
    // messages.
    if atomic.LoadInt32(&b.shutdown) != 0 {
        return
    }

    b.msgChan <- &invMsg{inv: inv, peer: p}
}

// QueueHeaders adds the passed headers message and peer to the block handling
// queue.
func (b *blockManager) QueueHeaders(headers *btcwire.MsgHeaders, p *peer) {
    // No channel handling here because peers do not need to block on
    // headers messages.
    if atomic.LoadInt32(&b.shutdown) != 0 {
        return
    }

    b.msgChan <- &headersMsg{headers: headers, peer: p}
}

// DonePeer informs the blockmanager that a peer has disconnected.
func (b *blockManager) DonePeer(p *peer) {
    // Ignore if we are shutting down.
    if atomic.LoadInt32(&b.shutdown) != 0 {
        return
    }

    b.msgChan <- &donePeerMsg{peer: p}
}

// Start begins the core block handler which processes block and inv messages.
func (b *blockManager) Start() {
    // Already started?
    if atomic.AddInt32(&b.started, 1) != 1 {
        return
    }

    bmgrLog.Trace("Starting block manager")
    b.wg.Add(1)
    go b.blockHandler()
}

// Stop gracefully shuts down the block manager by stopping all asynchronous
// handlers and waiting for them to finish.
func (b *blockManager) Stop() error {
    if atomic.AddInt32(&b.shutdown, 1) != 1 {
        bmgrLog.Warnf("Block manager is already in the process of " +
            "shutting down")
        return nil
    }

    bmgrLog.Infof("Block manager shutting down")
    close(b.quit)
    b.wg.Wait()
    return nil
}
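
// The Start/Stop guards above rely on atomic.AddInt32 returning the new
// value, so only the first caller proceeds and later calls are no-ops.
// A minimal sketch of the idiom:
//
//    var started int32
//    if atomic.AddInt32(&started, 1) != 1 {
//        return // already started by another caller
//    }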

// newBlockManager returns a new bitcoin block manager.
// Use Start to begin processing asynchronous block and inv updates.
func newBlockManager(s *server) (*blockManager, error) {
    bm := blockManager{
        server:           s,
        blockPeer:        make(map[btcwire.ShaHash]*peer),
        requestedTxns:    make(map[btcwire.ShaHash]bool),
        requestedBlocks:  make(map[btcwire.ShaHash]bool),
        lastBlockLogTime: time.Now(),
        msgChan:          make(chan interface{}, cfg.MaxPeers*3),
        headerPool:       make(map[btcwire.ShaHash]*headerstr),
        headerOrphan:     make(map[btcwire.ShaHash]*headerstr),
        quit:             make(chan bool),
    }
    bm.blockChain = btcchain.New(s.db, s.btcnet, bm.handleNotifyMsg)
    bm.blockChain.DisableCheckpoints(cfg.DisableCheckpoints)
    if cfg.DisableCheckpoints {
        bmgrLog.Info("Checkpoints are disabled")
    }

    bmgrLog.Infof("Generating initial block node index. This may " +
        "take a while...")
    err := bm.blockChain.GenerateInitialIndex()
    if err != nil {
        return nil, err
    }
    bmgrLog.Infof("Block index generation complete")

    return &bm, nil
}
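
// A minimal sketch of wiring the manager into the server, assuming a
// partially constructed *server s:
//
//    bm, err := newBlockManager(s)
//    if err != nil {
//        return err
//    }
//    bm.Start()
//    defer bm.Stop()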

// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
func removeRegressionDB(dbPath string) error {
    // Don't do anything if not in regression test mode.
    if !cfg.RegressionTest {
        return nil
    }

    // Remove the old regression test database if it already exists.
    fi, err := os.Stat(dbPath)
    if err == nil {
        btcdLog.Infof("Removing regression test database from '%s'", dbPath)
        if fi.IsDir() {
            err := os.RemoveAll(dbPath)
            if err != nil {
                return err
            }
        } else {
            err := os.Remove(dbPath)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// blockDbPath returns the path to the block database given a database type.
func blockDbPath(dbType string) string {
    // The database name is based on the database type.
    dbName := blockDbNamePrefix + "_" + dbType
    if dbType == "sqlite" {
        dbName = dbName + ".db"
    }
    dbPath := filepath.Join(cfg.DataDir, dbName)
    return dbPath
}
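
// For example, with cfg.DataDir set to "/home/user/.btcd/data" (an
// illustrative value), the resulting paths would be:
//
//    blockDbPath("leveldb") // /home/user/.btcd/data/blocks_leveldb
//    blockDbPath("sqlite")  // /home/user/.btcd/data/blocks_sqlite.db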

// warnMultipleDBs shows a warning if multiple block database types are detected.
// This is not a situation most users want. It is handy for development however
// to support multiple side-by-side databases.
func warnMultipleDBs() {
    // This is intentionally not using the known db types which depend
    // on the database types compiled into the binary since we want to
    // detect legacy db types as well.
    dbTypes := []string{"leveldb", "sqlite"}
    duplicateDbPaths := make([]string, 0, len(dbTypes)-1)
    for _, dbType := range dbTypes {
        if dbType == cfg.DbType {
            continue
        }

        // Store db path as a duplicate db if it exists.
        dbPath := blockDbPath(dbType)
        if fileExists(dbPath) {
            duplicateDbPaths = append(duplicateDbPaths, dbPath)
        }
    }

    // Warn if there are extra databases.
    if len(duplicateDbPaths) > 0 {
        selectedDbPath := blockDbPath(cfg.DbType)
        btcdLog.Warnf("WARNING: There are multiple block chain databases "+
            "using different database types.\nYou probably don't "+
            "want to waste disk space by having more than one.\n"+
            "Your current database is located at [%v].\nThe "+
            "additional database is located at %v", selectedDbPath,
            duplicateDbPaths)
    }
}

// setupBlockDB loads (or creates when needed) the block database taking into
// account the selected database backend. It also contains additional logic
// such as warning the user if there are multiple databases which consume space
// on the file system and ensuring the regression test database is clean when in
// regression test mode.
func setupBlockDB() (btcdb.Db, error) {
    // The memdb backend does not have a file path associated with it, so
    // handle it uniquely. We also don't want to worry about the multiple
    // database type warnings when running with the memory database.
    if cfg.DbType == "memdb" {
        btcdLog.Infof("Creating block database in memory.")
        db, err := btcdb.CreateDB(cfg.DbType)
        if err != nil {
            return nil, err
        }
        return db, nil
    }

    warnMultipleDBs()

    // The database name is based on the database type.
    dbPath := blockDbPath(cfg.DbType)

    // The regression test is special in that it needs a clean database for
    // each run, so remove it now if it already exists.
    removeRegressionDB(dbPath)

    btcdLog.Infof("Loading block database from '%s'", dbPath)
    db, err := btcdb.OpenDB(cfg.DbType, dbPath)
    if err != nil {
        // Return the error if it's not because the database
        // doesn't exist.
        if err != btcdb.DbDoesNotExist {
            return nil, err
        }

        // Create the db if it does not exist.
        err = os.MkdirAll(cfg.DataDir, 0700)
        if err != nil {
            return nil, err
        }
        db, err = btcdb.CreateDB(cfg.DbType, dbPath)
        if err != nil {
            return nil, err
        }
    }

    return db, nil
}

// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (btcdb.Db, error) {
    db, err := setupBlockDB()
    if err != nil {
        return nil, err
    }

    // Get the latest block height from the database.
    _, height, err := db.NewestSha()
    if err != nil {
        db.Close()
        return nil, err
    }

    // Insert the appropriate genesis block for the bitcoin network being
    // connected to if needed.
    if height == -1 {
        genesis := btcutil.NewBlock(activeNetParams.genesisBlock)
        _, err := db.InsertBlock(genesis)
        if err != nil {
            db.Close()
            return nil, err
        }
        btcdLog.Infof("Inserted genesis block %v",
            activeNetParams.genesisHash)
        height = 0
    }

    btcdLog.Infof("Block database loaded with block height %d", height)
    return db, nil
}
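
// A sketch of typical startup usage, assuming the caller owns the returned
// handle and closes it on exit:
//
//    db, err := loadBlockDB()
//    if err != nil {
//        return err
//    }
//    defer db.Close()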