// Copyright (c) 2013 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package main

import (
	"container/list"
	"github.com/conformal/btcchain"
	"github.com/conformal/btcdb"
	"github.com/conformal/btcutil"
	"github.com/conformal/btcwire"
	"os"
	"path/filepath"
	"sync"
	"time"
)

const (
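	// chanBufferSize is the buffer size used for the block manager's block
	// and inv message channels (blockQueue and invQueue).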
	chanBufferSize = 50

	// blockDbNamePrefix is the prefix for the block database name. The
	// database type is appended to this value to form the full block
	// database name.
	blockDbNamePrefix = "blocks"
)

// blockMsg packages a bitcoin block message and the peer it came from together
// so the block handler has access to that information.
type blockMsg struct {
	block *btcutil.Block
	peer  *peer
}

// invMsg packages a bitcoin inv message and the peer it came from together
// so the block handler has access to that information.
type invMsg struct {
	inv  *btcwire.MsgInv
	peer *peer
}

// txMsg packages a bitcoin tx message and the peer it came from together
// so the block handler has access to that information.
type txMsg struct {
	msg  *btcwire.MsgTx
	peer *peer
}

// blockManager provides a concurrency safe block manager for handling all
// incoming blocks.
type blockManager struct {
	server            *server
	started           bool
	shutdown          bool
	blockChain        *btcchain.BlockChain
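	// blockPeer maps the hash of each block that might still be an orphan
	// to the peer which sent it so the missing parent blocks can be
	// requested from that peer. It is protected by blockPeerMutex since it
	// is accessed from both the block and chain notification handlers.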
	blockPeer         map[btcwire.ShaHash]*peer
	blockPeerMutex    sync.Mutex
	receivedLogBlocks int64
	receivedLogTx     int64
	lastBlockLogTime  time.Time
	processingReqs    bool
	syncPeer          *peer
	newBlocks         chan bool
	newCandidates     chan *peer
	donePeers         chan *peer
	blockQueue        chan *blockMsg
	invQueue          chan *invMsg
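	// chainNotify receives notifications directly from btcchain, while
	// chainNotifySink is fed those notifications, in order, by the chain
	// notification handler so processing them never blocks btcchain.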
	chainNotify       chan *btcchain.Notification
	chainNotifySink   chan *btcchain.Notification
	wg                sync.WaitGroup
	quit              chan bool
}

// startSync will choose the best peer among the available candidate peers to
// download/sync the blockchain from. When syncing is already running, it
// simply returns. It also examines the candidates for any which are no longer
// candidates and removes them as needed.
func (b *blockManager) startSync(peers *list.List) {
	// Return now if we're already syncing.
	if b.syncPeer != nil {
		return
	}

	// Find the height of the current known best block.
	_, height, err := b.server.db.NewestSha()
	if err != nil {
		log.Errorf("[BMGR] %v", err)
		return
	}

	var bestPeer *peer
	var enext *list.Element
	for e := peers.Front(); e != nil; e = enext {
		// Save the next element now since removing the current one from
		// the list below invalidates e.Next.
		enext = e.Next()
		p := e.Value.(*peer)

		// Remove sync candidate peers that are no longer candidates due
		// to passing their latest known block.
		if p.lastBlock <= int32(height) {
			peers.Remove(e)
			continue
		}

		// TODO(davec): Use a better algorithm to choose the best peer.
		// For now, just pick the first available candidate.
		bestPeer = p
	}

	// Start syncing from the best peer if one was selected.
	if bestPeer != nil {
		locator, err := b.blockChain.LatestBlockLocator()
		if err != nil {
			log.Errorf("[BMGR] Failed to get block locator for the "+
				"latest block: %v", err)
			return
		}

		log.Infof("[BMGR] Syncing to block height %d from peer %v",
			bestPeer.lastBlock, bestPeer.conn.RemoteAddr())
		bestPeer.PushGetBlocksMsg(locator, &zeroHash)
		b.syncPeer = bestPeer
	}
}

// handleNewCandidateMsg deals with new peers that have signalled they may
// be considered as a sync peer (they have already successfully negotiated). It
// also starts syncing if needed. It is invoked from the syncHandler goroutine.
func (b *blockManager) handleNewCandidateMsg(peers *list.List, p *peer) {
	// Ignore if in the process of shutting down.
	if b.shutdown {
		return
	}

	// The peer is not a candidate for sync if it's not a full node.
	if p.services&btcwire.SFNodeNetwork != btcwire.SFNodeNetwork {
		return
	}

	// Add the peer as a candidate to sync from.
	peers.PushBack(p)

	// Start syncing by choosing the best candidate if needed.
	b.startSync(peers)
}

// handleDonePeerMsg deals with peers that have signalled they are done. It
// removes the peer as a candidate for syncing and in the case where it was
// the current sync peer, attempts to select a new best peer to sync from. It
// is invoked from the syncHandler goroutine.
func (b *blockManager) handleDonePeerMsg(peers *list.List, p *peer) {
	// Remove the peer from the list of candidate peers.
	for e := peers.Front(); e != nil; e = e.Next() {
		if e.Value == p {
			peers.Remove(e)
			break
		}
	}

	// Attempt to find a new peer to sync from if the quitting peer is the
	// sync peer.
	if b.syncPeer != nil && b.syncPeer == p {
		b.syncPeer = nil
		b.startSync(peers)
	}
}

// syncHandler deals with handling downloading (syncing) the block chain from
// other peers as they connect and disconnect. It must be run as a goroutine.
func (b *blockManager) syncHandler() {
	log.Tracef("[BMGR] Starting sync handler")
	candidatePeers := list.New()
out:
	// Live while we're not shutting down.
	for !b.shutdown {
		select {
		case peer := <-b.newCandidates:
			b.handleNewCandidateMsg(candidatePeers, peer)

		case peer := <-b.donePeers:
			b.handleDonePeerMsg(candidatePeers, peer)

		case <-b.quit:
			break out
		}
	}
	b.wg.Done()
	log.Trace("[BMGR] Sync handler done")
}

// logBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockManager) logBlockHeight(numTx, height int64) {
	b.receivedLogBlocks++
	b.receivedLogTx += numTx

	now := time.Now()
	duration := now.Sub(b.lastBlockLogTime)
	if duration < time.Second*10 {
		return
	}

	// Log information about new block height.
	blockStr := "blocks"
	if b.receivedLogBlocks == 1 {
		blockStr = "block"
	}
	txStr := "transactions"
	if b.receivedLogTx == 1 {
		txStr = "transaction"
	}
	log.Infof("[BMGR] Processed %d %s (%d %s) in the last %s - Block "+
		"height %d", b.receivedLogBlocks, blockStr, b.receivedLogTx,
		txStr, duration, height)

	b.receivedLogBlocks = 0
	b.receivedLogTx = 0
	b.lastBlockLogTime = now
}

// handleBlockMsg handles block messages from all peers.
func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
	// Keep track of which peer the block was sent from so the notification
	// handler can request the parent blocks from the appropriate peer.
	blockSha, _ := bmsg.block.Sha()
	b.blockPeerMutex.Lock()
	b.blockPeer[*blockSha] = bmsg.peer
	b.blockPeerMutex.Unlock()

	// Process the block to include validation, best chain selection, orphan
	// handling, etc.
	err := b.blockChain.ProcessBlock(bmsg.block)
	if err != nil {
		b.blockPeerMutex.Lock()
		delete(b.blockPeer, *blockSha)
		b.blockPeerMutex.Unlock()
		log.Warnf("[BMGR] Failed to process block %v: %v", blockSha, err)
		return
	}

	// Don't keep track of the peer that sent the block any longer if it's
	// not an orphan.
	if !b.blockChain.IsKnownOrphan(blockSha) {
		b.blockPeerMutex.Lock()
		delete(b.blockPeer, *blockSha)
		b.blockPeerMutex.Unlock()
	}

	// Log info about the new block height.
	_, height, err := b.server.db.NewestSha()
	if err != nil {
		log.Warnf("[BMGR] Failed to obtain latest sha - %v", err)
		return
	}
	b.logBlockHeight(int64(len(bmsg.block.MsgBlock().Transactions)), height)

	// Sync the db to disk.
	b.server.db.Sync()
}

// handleInvMsg handles inv messages from all peers.
// We examine the inventory advertised by the remote peer and act accordingly.
//
// NOTE: This will need to have tx handling added as well when they are
// supported.
func (b *blockManager) handleInvMsg(imsg *invMsg) {
	// Attempt to find the final block in the inventory list. There may
	// not be one.
	lastBlock := -1
	invVects := imsg.inv.InvList
	for i := len(invVects) - 1; i >= 0; i-- {
		if invVects[i].Type == btcwire.InvVect_Block {
			lastBlock = i
			break
		}
	}

	// Request the advertised inventory if we don't already have it. Also,
	// request parent blocks of orphans if we receive one we already have.
	// Finally, attempt to detect potential stalls due to long side chains
	// we already have and request more blocks to prevent them.
	for i, iv := range invVects {
		switch iv.Type {
		case btcwire.InvVect_Block:
			// Add the inventory to the cache of known inventory
			// for the peer.
			imsg.peer.addKnownInventory(iv)

			// Request the inventory if we don't already have it.
			if !b.blockChain.HaveInventory(iv) {
				// Add it to the request queue.
				imsg.peer.requestQueue.PushBack(iv)
				continue
			}

			// The block is an orphan block that we already have.
			// When the existing orphan was processed, it requested
			// the missing parent blocks. When this scenario
			// happens, it means there were more blocks missing
			// than are allowed into a single inventory message. As
			// a result, once this peer requested the final
			// advertised block, the remote peer noticed and is now
			// resending the orphan block as an available block
			// to signal there are more missing blocks that need to
			// be requested.
			if b.blockChain.IsKnownOrphan(&iv.Hash) {
				// Request blocks starting at the latest known
				// up to the root of the orphan that just came
				// in.
				orphanRoot := b.blockChain.GetOrphanRoot(
					&iv.Hash)
				locator, err := b.blockChain.LatestBlockLocator()
				if err != nil {
					log.Errorf("[BMGR] Failed to get block "+
						"locator for the latest block: "+
						"%v", err)
					continue
				}
				imsg.peer.PushGetBlocksMsg(locator, orphanRoot)
				continue
			}

			// We already have the final block advertised by this
			// inventory message, so force a request for more. This
			// should only happen if we're on a really long side
			// chain.
			if i == lastBlock {
				// Request blocks after this one up to the
				// final one the remote peer knows about (zero
				// stop hash).
				locator := b.blockChain.BlockLocatorFromHash(
					&iv.Hash)
				imsg.peer.PushGetBlocksMsg(locator, &zeroHash)
			}

		// Ignore unsupported inventory types.
		default:
			continue
		}
	}

	// Request as much as possible at once. Anything that won't fit into
	// the request will be requested on the next inv message.
	numRequested := 0
	gdmsg := btcwire.NewMsgGetData()
	for e := imsg.peer.requestQueue.Front(); e != nil; e = imsg.peer.requestQueue.Front() {
		iv := e.Value.(*btcwire.InvVect)
		gdmsg.AddInvVect(iv)
		imsg.peer.requestQueue.Remove(e)

		// check that no one else has asked for this
		// put on global "requested" map
		// put on local "requested" map

		numRequested++
		if numRequested >= btcwire.MaxInvPerMsg {
			break
		}
	}
	if len(gdmsg.InvList) > 0 {
		imsg.peer.QueueMessage(gdmsg)
	}
}

// blockHandler is the main handler for the block manager. It must be run
// as a goroutine. It processes block and inv messages in a separate goroutine
// from the peer handlers so the block (MsgBlock) and tx (MsgTx) messages are
// handled by a single thread without needing to lock memory data structures.
// This is important because the block manager controls which blocks are needed
// and how the fetching should proceed.
//
// NOTE: Tx messages need to be handled here too.
// (either that or block and tx need to be handled in separate threads)
func (b *blockManager) blockHandler() {
out:
	for !b.shutdown {
		select {
		// Handle new block messages.
		case bmsg := <-b.blockQueue:
			b.handleBlockMsg(bmsg)
			bmsg.peer.blockProcessed <- true

		// Handle new inventory messages.
		case imsg := <-b.invQueue:
			b.handleInvMsg(imsg)

		case <-b.quit:
			break out
		}
	}
	b.wg.Done()
	log.Trace("[BMGR] Block handler done")
}

// handleNotifyMsg handles notifications from btcchain. It does things such
// as request orphan block parents and relay accepted blocks to connected peers.
func (b *blockManager) handleNotifyMsg(notification *btcchain.Notification) {
	switch notification.Type {
	// An orphan block has been accepted by the block chain. Request
	// its parents from the peer that sent it.
	case btcchain.NTOrphanBlock:
		b.blockPeerMutex.Lock()
		defer b.blockPeerMutex.Unlock()

		orphanHash := notification.Data.(*btcwire.ShaHash)
		if peer, exists := b.blockPeer[*orphanHash]; exists {
			orphanRoot := b.blockChain.GetOrphanRoot(orphanHash)
			locator, err := b.blockChain.LatestBlockLocator()
			if err != nil {
				log.Errorf("[BMGR] Failed to get block locator "+
					"for the latest block: %v", err)
				break
			}
			peer.PushGetBlocksMsg(locator, orphanRoot)
			delete(b.blockPeer, *orphanRoot)
		} else {
			log.Warnf("[BMGR] Notification for orphan %v with no peer",
				orphanHash)
		}

	// A block has been accepted into the block chain. Relay it to other
	// peers.
	case btcchain.NTBlockAccepted:
		block, ok := notification.Data.(*btcutil.Block)
		if !ok {
			log.Warnf("[BMGR] Chain notification type not a block.")
			break
		}

		// It's ok to ignore the error here since the notification is
		// coming from the chain code which has already cached the hash.
		hash, _ := block.Sha()

		// Generate the inventory vector and relay it.
		iv := btcwire.NewInvVect(btcwire.InvVect_Block, hash)
		b.server.RelayInventory(iv)
	}
}

// chainNotificationSinkHandler is the sink for the chain notification handler.
// It actually responds to the notifications so the main chain notification
// handler does not block chain while processing notifications. It must be run
// as a goroutine.
func (b *blockManager) chainNotificationSinkHandler() {
out:
	for {
		select {
		case notification := <-b.chainNotifySink:
			b.handleNotifyMsg(notification)

		case <-b.quit:
			break out
		}
	}
	b.wg.Done()
	log.Trace("[BMGR] Chain notification sink done")
}

// chainNotificationHandler is the handler for asynchronous notifications from
// btcchain. It must be run as a goroutine.
func (b *blockManager) chainNotificationHandler() {
	// pending is a list to queue notifications in order until they can
	// be processed by the sink. This is used to prevent blocking chain
	// when it sends notifications while retaining order.
	pending := list.New()
out:
	for !b.shutdown {
		// Sending on a nil channel always blocks and hence is ignored
		// by select. Thus enable send only when the list is non-empty.
		var firstItem *btcchain.Notification
		var chainNotifySink chan *btcchain.Notification
		if pending.Len() > 0 {
			firstItem = pending.Front().Value.(*btcchain.Notification)
			chainNotifySink = b.chainNotifySink
		}

		select {
		case notification := <-b.chainNotify:
			pending.PushBack(notification)

		case chainNotifySink <- firstItem:
			pending.Remove(pending.Front())

		case <-b.quit:
			break out
		}
	}
	b.wg.Done()
	log.Trace("[BMGR] Chain notification handler done")
}

// QueueBlock adds the passed block message and peer to the block handling queue.
func (b *blockManager) QueueBlock(block *btcutil.Block, p *peer) {
	// Don't accept more blocks if we're shutting down.
	if b.shutdown {
		p.blockProcessed <- false
		return
	}

	bmsg := blockMsg{block: block, peer: p}
	b.blockQueue <- &bmsg
}

// QueueInv adds the passed inv message and peer to the block handling queue.
func (b *blockManager) QueueInv(inv *btcwire.MsgInv, p *peer) {
	// No channel handling here because peers do not need to block on inv
	// messages.
	if b.shutdown {
		return
	}

	imsg := invMsg{inv: inv, peer: p}
	b.invQueue <- &imsg
}

// Start begins the core block handler which processes block and inv messages.
func (b *blockManager) Start() {
	// Already started?
	if b.started {
		return
	}

	log.Trace("[BMGR] Starting block manager")
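	// Launch the sync, block, and chain notification handler goroutines.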
	b.wg.Add(4)
	go b.syncHandler()
	go b.blockHandler()
	go b.chainNotificationSinkHandler()
	go b.chainNotificationHandler()
	b.started = true
}

// Stop gracefully shuts down the block manager by stopping all asynchronous
// handlers and waiting for them to finish.
func (b *blockManager) Stop() error {
	if b.shutdown {
		log.Warnf("[BMGR] Block manager is already in the process of " +
			"shutting down")
		return nil
	}

	log.Infof("[BMGR] Block manager shutting down")
	b.shutdown = true
	close(b.quit)
	b.wg.Wait()
	return nil
}

// newBlockManager returns a new bitcoin block manager.
// Use Start to begin processing asynchronous block and inv updates.
func newBlockManager(s *server) *blockManager {
	chainNotify := make(chan *btcchain.Notification)
	bm := blockManager{
		server:           s,
		blockChain:       btcchain.New(s.db, s.btcnet, chainNotify),
		blockPeer:        make(map[btcwire.ShaHash]*peer),
		lastBlockLogTime: time.Now(),
		newBlocks:        make(chan bool, 1),
		newCandidates:    make(chan *peer, cfg.MaxPeers),
		donePeers:        make(chan *peer, cfg.MaxPeers),
		blockQueue:       make(chan *blockMsg, chanBufferSize),
		invQueue:         make(chan *invMsg, chanBufferSize),
		chainNotify:      chainNotify,
		chainNotifySink:  make(chan *btcchain.Notification),
		quit:             make(chan bool),
	}
	bm.blockChain.DisableVerify(cfg.VerifyDisabled)
	return &bm
}

// removeRegressionDB removes the existing regression test database if running
// in regression test mode and it already exists.
func removeRegressionDB(dbPath string) error {
	// Don't do anything if not in regression test mode.
	if !cfg.RegressionTest {
		return nil
	}

	// Remove the old regression test database if it already exists.
	fi, err := os.Stat(dbPath)
	if err == nil {
		log.Infof("[BMGR] Removing regression test database from '%s'", dbPath)
		if fi.IsDir() {
			err := os.RemoveAll(dbPath)
			if err != nil {
				return err
			}
		} else {
			err := os.Remove(dbPath)
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// loadBlockDB opens the block database and returns a handle to it.
func loadBlockDB() (btcdb.Db, error) {
	// The database name is based on the database type.
	dbName := blockDbNamePrefix + "_" + cfg.DbType
	if cfg.DbType == "sqlite" {
		dbName = dbName + ".db"
	}
	dbPath := filepath.Join(cfg.DataDir, dbName)

	// The regression test is special in that it needs a clean database for
	// each run, so remove it now if it already exists.
	removeRegressionDB(dbPath)

	log.Infof("[BMGR] Loading block database from '%s'", dbPath)
	db, err := btcdb.OpenDB(cfg.DbType, dbPath)
	if err != nil {
		// Return the error if it's not because the database doesn't
		// exist.
		if err != btcdb.DbDoesNotExist {
			return nil, err
		}

		// Create the db if it does not exist.
		err = os.MkdirAll(cfg.DataDir, 0700)
		if err != nil {
			return nil, err
		}
		db, err = btcdb.CreateDB(cfg.DbType, dbPath)
		if err != nil {
			return nil, err
		}
	}

	// Get the latest block height from the database.
	_, height, err := db.NewestSha()
	if err != nil {
		db.Close()
		return nil, err
	}

	// Insert the appropriate genesis block for the bitcoin network being
	// connected to if needed.
	if height == -1 {
		genesis := btcutil.NewBlock(activeNetParams.genesisBlock)
		_, err := db.InsertBlock(genesis)
		if err != nil {
			db.Close()
			return nil, err
		}
		log.Infof("[BMGR] Inserted genesis block %v",
			activeNetParams.genesisHash)
		height = 0
	}

	log.Infof("[BMGR] Block database loaded with block height %d", height)
	return db, nil
}