blockchain: Use hash values in structs.

This modifies the blockNode and BestState structs in the blockchain
package to store hashes directly instead of pointers to them, and updates
callers to deal with the API change in the exported BestState struct.

In general, the preferred approach for hashes moving forward is to store
hash values in complex data structures, particularly those that will be
used for cache entries, and accept pointers to hashes in arguments to
functions.

Some of the reasoning behind making this change is:

- It is generally preferred to avoid storing pointers to data in cache
  objects since doing so can easily lead to storing interior pointers
  into other structs that then can't be GC'd
- Keeping the hash values directly in the block node provides better
  cache locality
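
For illustration, a minimal sketch of the pattern (simplified stand-in
types and names, not code from this change):

package main

import "fmt"

// Hash is a simplified stand-in for chainhash.Hash: a fixed-size array,
// so assignment copies the full value.
type Hash [32]byte

// header stands in for a full block header that should stay collectible
// by the GC once a node has been built from it.
type header struct {
	prevBlock Hash
}

// node follows the preferred pattern: hashes are stored by value, so the
// node keeps no interior pointer into the header and the hashes sit
// contiguously with the rest of the node for better cache locality.
type node struct {
	hash       Hash
	parentHash Hash
}

// newNode accepts a pointer argument (cheap to pass around) but copies the
// pointed-to value into the struct.
func newNode(blockHash *Hash, hdr *header) *node {
	return &node{
		hash:       *blockHash,
		parentHash: hdr.prevBlock, // array assignment copies the value
	}
}

func main() {
	blockHash := Hash{0x01}
	hdr := &header{prevBlock: Hash{0x02}}
	n := newNode(&blockHash, hdr)
	fmt.Println(n.hash[0], n.parentHash[0]) // 1 2
}
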
Dave Collins 2017-02-01 15:25:25 -06:00
parent 05c7c14023
commit d06c0bb181
15 changed files with 69 additions and 73 deletions

@@ -31,13 +31,13 @@ type blockNode struct {
 	children []*blockNode
 
 	// hash is the double sha 256 of the block.
-	hash *chainhash.Hash
+	hash chainhash.Hash
 
 	// parentHash is the double sha 256 of the parent block. This is kept
 	// here over simply relying on parent.hash directly since block nodes
 	// are sparse and the parent node might not be in memory when its hash
 	// is needed.
-	parentHash *chainhash.Hash
+	parentHash chainhash.Hash
 
 	// height is the position in the block chain.
 	height int32
@@ -67,13 +67,9 @@ type blockNode struct {
 // for the passed block. The work sum is updated accordingly when the node is
 // inserted into a chain.
 func newBlockNode(blockHeader *wire.BlockHeader, blockHash *chainhash.Hash, height int32) *blockNode {
-	// Make a copy of the hash so the node doesn't keep a reference to part
-	// of the full block/block header preventing it from being garbage
-	// collected.
-	prevHash := blockHeader.PrevBlock
 	node := blockNode{
-		hash:       blockHash,
-		parentHash: &prevHash,
+		hash:       *blockHash,
+		parentHash: blockHeader.PrevBlock,
 		workSum:    CalcWork(blockHeader.Bits),
 		height:     height,
 		version:    blockHeader.Version,
@@ -92,7 +88,7 @@ func (node *blockNode) Header() wire.BlockHeader {
 	// No lock is needed because all accessed fields are immutable.
 	return wire.BlockHeader{
 		Version:    node.version,
-		PrevBlock:  *node.parentHash,
+		PrevBlock:  node.parentHash,
 		MerkleRoot: node.merkleRoot,
 		Timestamp:  time.Unix(node.timestamp, 0),
 		Bits:       node.bits,
@@ -115,7 +111,7 @@ func removeChildNode(children []*blockNode, node *blockNode) []*blockNode {
 	// does not reevaluate the slice on each iteration nor does it adjust
 	// the index for the modified slice.
 	for i := 0; i < len(children); i++ {
-		if children[i].hash.IsEqual(node.hash) {
+		if children[i].hash.IsEqual(&node.hash) {
 			copy(children[i:], children[i+1:])
 			children[len(children)-1] = nil
 			return children[:len(children)-1]
@@ -286,7 +282,7 @@ func (bi *blockIndex) prevNodeFromNode(node *blockNode) (*blockNode, error) {
 	var prevBlockNode *blockNode
 	err := bi.db.View(func(dbTx database.Tx) error {
 		var err error
-		prevBlockNode, err = bi.loadBlockNode(dbTx, node.parentHash)
+		prevBlockNode, err = bi.loadBlockNode(dbTx, &node.parentHash)
 		return err
 	})
 	return prevBlockNode, err
@@ -339,7 +335,7 @@ func (bi *blockIndex) RelativeNode(anchor *blockNode, distance uint32) (*blockNo
 		// pulling it into the memory cache in the processes.
 		default:
 			iterNode, err = bi.loadBlockNode(dbTx,
-				iterNode.parentHash)
+				&iterNode.parentHash)
 			if err != nil {
 				return err
 			}
@@ -389,8 +385,8 @@ func (bi *blockIndex) AncestorNode(node *blockNode, height int32) (*blockNode, e
 // This function is safe for concurrent access.
 func (bi *blockIndex) AddNode(node *blockNode) {
 	bi.Lock()
-	bi.index[*node.hash] = node
-	if prevHash := node.parentHash; prevHash != nil && *prevHash != *zeroHash {
+	bi.index[node.hash] = node
+	if prevHash := &node.parentHash; *prevHash != *zeroHash {
 		bi.depNodes[*prevHash] = append(bi.depNodes[*prevHash], node)
 	}
 	bi.Unlock()

@@ -121,7 +121,7 @@ func (bi *blockIndex) blockLocatorFromHash(hash *chainhash.Hash) BlockLocator {
 				iterNode = iterNode.parent
 			}
 			if iterNode != nil && iterNode.height == blockHeight {
-				locator = append(locator, iterNode.hash)
+				locator = append(locator, &iterNode.hash)
 			}
 			continue
 		}
@@ -176,7 +176,7 @@ func (b *BlockChain) BlockLocatorFromHash(hash *chainhash.Hash) BlockLocator {
 func (b *BlockChain) LatestBlockLocator() (BlockLocator, error) {
 	b.chainLock.RLock()
 	b.index.RLock()
-	locator := b.index.blockLocatorFromHash(b.bestNode.hash)
+	locator := b.index.blockLocatorFromHash(&b.bestNode.hash)
 	b.index.RUnlock()
 	b.chainLock.RUnlock()
 	return locator, nil

@@ -42,13 +42,13 @@ type orphanBlock struct {
 // However, the returned snapshot must be treated as immutable since it is
 // shared by all callers.
 type BestState struct {
-	Hash       *chainhash.Hash // The hash of the block.
+	Hash       chainhash.Hash  // The hash of the block.
 	Height     int32           // The height of the block.
 	Bits       uint32          // The difficulty bits of the block.
 	BlockSize  uint64          // The size of the block.
 	NumTxns    uint64          // The number of txns in the block.
 	TotalTxns  uint64          // The total number of txns in the chain.
 	MedianTime time.Time       // Median time as per CalcPastMedianTime.
 }
 
 // newBestState returns a new best stats instance for the given parameters.
@@ -522,7 +522,7 @@ func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List
 	// common ancestor adding each block to the list of nodes to detach from
 	// the main chain.
 	for n := b.bestNode; n != nil && n.parent != nil; n = n.parent {
-		if n.hash.IsEqual(ancestor.hash) {
+		if n.hash.IsEqual(&ancestor.hash) {
 			break
 		}
 		detachNodes.PushBack(n)
@@ -559,7 +559,7 @@ func dbMaybeStoreBlock(dbTx database.Tx, block *btcutil.Block) error {
 func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint, stxos []spentTxOut) error {
 	// Make sure it's extending the end of the best chain.
 	prevHash := &block.MsgBlock().Header.PrevBlock
-	if !prevHash.IsEqual(b.bestNode.hash) {
+	if !prevHash.IsEqual(&b.bestNode.hash) {
 		return AssertError("connectBlock must be called with a block " +
 			"that extends the main chain")
 	}
@@ -689,7 +689,7 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, view *U
 // This function MUST be called with the chain state lock held (for writes).
 func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error {
 	// Make sure the node being disconnected is the end of the best chain.
-	if !node.hash.IsEqual(b.bestNode.hash) {
+	if !node.hash.IsEqual(&b.bestNode.hash) {
 		return AssertError("disconnectBlock must be called with the " +
 			"block at the end of the main chain")
 	}
@@ -713,7 +713,7 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view
 	var prevBlock *btcutil.Block
 	err = b.db.View(func(dbTx database.Tx) error {
 		var err error
-		prevBlock, err = dbFetchBlockByHash(dbTx, prevNode.hash)
+		prevBlock, err = dbFetchBlockByHash(dbTx, &prevNode.hash)
 		return err
 	})
 	if err != nil {
@@ -843,13 +843,13 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags
 	// database and using that information to unspend all of the spent txos
 	// and remove the utxos created by the blocks.
 	view := NewUtxoViewpoint()
-	view.SetBestHash(b.bestNode.hash)
+	view.SetBestHash(&b.bestNode.hash)
 	for e := detachNodes.Front(); e != nil; e = e.Next() {
 		n := e.Value.(*blockNode)
 		var block *btcutil.Block
 		err := b.db.View(func(dbTx database.Tx) error {
 			var err error
-			block, err = dbFetchBlockByHash(dbTx, n.hash)
+			block, err = dbFetchBlockByHash(dbTx, &n.hash)
 			return err
 		})
 		if err != nil {
@@ -903,7 +903,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags
 			// NOTE: This block is not in the main chain, so the
 			// block has to be loaded directly from the database
 			// instead of using the dbFetchBlockByHash function.
-			blockBytes, err := dbTx.FetchBlock(n.hash)
+			blockBytes, err := dbTx.FetchBlock(&n.hash)
 			if err != nil {
 				return err
 			}
@@ -944,7 +944,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List, flags
 	// view to be valid from the viewpoint of each block being connected or
 	// disconnected.
 	view = NewUtxoViewpoint()
-	view.SetBestHash(b.bestNode.hash)
+	view.SetBestHash(&b.bestNode.hash)
 
 	// Disconnect blocks from the main chain.
 	for i, e := 0, detachNodes.Front(); e != nil; i, e = i+1, e.Next() {
@@ -1040,12 +1040,12 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
 
 	// We are extending the main (best) chain with a new block. This is the
 	// most common case.
-	if node.parentHash.IsEqual(b.bestNode.hash) {
+	if node.parentHash.IsEqual(&b.bestNode.hash) {
 		// Perform several checks to verify the block can be connected
 		// to the main chain without violating any rules and without
 		// actually connecting the block.
 		view := NewUtxoViewpoint()
-		view.SetBestHash(node.parentHash)
+		view.SetBestHash(&node.parentHash)
 		stxos := make([]spentTxOut, 0, countSpentOutputs(block))
 		if !fastAdd {
 			err := b.checkConnectBlock(node, block, view, &stxos)
@@ -1096,7 +1096,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
 	// become the main chain, but in either case the entry is needed in the
 	// index for future processing.
 	b.index.Lock()
-	b.index.index[*node.hash] = node
+	b.index.index[node.hash] = node
 	b.index.Unlock()
 
 	// Connect the parent node to this node.
@@ -1112,7 +1112,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
 			node.parent.children = children
 
 			b.index.Lock()
-			delete(b.index.index, *node.hash)
+			delete(b.index.index, node.hash)
 			b.index.Unlock()
 		}()
 	}
@@ -1134,7 +1134,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla
 	}
 
 	// Log information about how the block is forking the chain.
-	if fork.hash.IsEqual(node.parent.hash) {
+	if fork.hash.IsEqual(&node.parent.hash) {
 		log.Infof("FORK: Block %v forks the chain at height %d"+
 			"/block %v, but does not cause a reorganize",
 			node.hash, fork.height, fork.hash)

@@ -1064,7 +1064,7 @@ func deserializeBestChainState(serializedData []byte) (bestChainState, error) {
 func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error {
 	// Serialize the current best chain state.
 	serializedData := serializeBestChainState(bestChainState{
-		hash:      *snapshot.Hash,
+		hash:      snapshot.Hash,
 		height:    uint32(snapshot.Height),
 		totalTxns: snapshot.TotalTxns,
 		workSum:   workSum,
@@ -1129,7 +1129,7 @@ func (b *BlockChain) createChainState() error {
 
 		// Add the genesis block hash to height and height to hash
 		// mappings to the index.
-		err = dbPutBlockIndex(dbTx, b.bestNode.hash, b.bestNode.height)
+		err = dbPutBlockIndex(dbTx, &b.bestNode.hash, b.bestNode.height)
 		if err != nil {
 			return err
 		}

@@ -165,7 +165,7 @@ func TestFullBlocks(t *testing.T) {
 
 		// Ensure hash and height match.
 		best := chain.BestSnapshot()
-		if *best.Hash != item.Block.BlockHash() ||
+		if best.Hash != item.Block.BlockHash() ||
 			best.Height != blockHeight {
 
 			t.Fatalf("block %q (hash %s, height %d) should be "+

@@ -103,20 +103,20 @@ type thresholdStateCache struct {
 
 // Lookup returns the threshold state associated with the given hash along with
 // a boolean that indicates whether or not it is valid.
-func (c *thresholdStateCache) Lookup(hash chainhash.Hash) (ThresholdState, bool) {
-	state, ok := c.entries[hash]
+func (c *thresholdStateCache) Lookup(hash *chainhash.Hash) (ThresholdState, bool) {
+	state, ok := c.entries[*hash]
 	return state, ok
 }
 
 // Update updates the cache to contain the provided hash to threshold state
 // mapping while properly tracking needed updates flush changes to the database.
-func (c *thresholdStateCache) Update(hash chainhash.Hash, state ThresholdState) {
-	if existing, ok := c.entries[hash]; ok && existing == state {
+func (c *thresholdStateCache) Update(hash *chainhash.Hash, state ThresholdState) {
+	if existing, ok := c.entries[*hash]; ok && existing == state {
 		return
 	}
 
-	c.dbUpdates[hash] = state
-	c.entries[hash] = state
+	c.dbUpdates[*hash] = state
+	c.entries[*hash] = state
 }
 
 // MarkFlushed marks all of the current udpates as flushed to the database.
@@ -170,7 +170,7 @@ func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdCondit
 
 	for prevNode != nil {
 		// Nothing more to do if the state of the block is already
 		// cached.
-		if _, ok := cache.Lookup(*prevNode.hash); ok {
+		if _, ok := cache.Lookup(&prevNode.hash); ok {
 			break
 		}
@@ -184,7 +184,7 @@ func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdCondit
 
 		// The state is simply defined if the start time hasn't been
 		// been reached yet.
 		if uint64(medianTime.Unix()) < checker.BeginTime() {
-			cache.Update(*prevNode.hash, ThresholdDefined)
+			cache.Update(&prevNode.hash, ThresholdDefined)
 			break
 		}
@@ -206,7 +206,7 @@ func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdCondit
 	state := ThresholdDefined
 	if prevNode != nil {
 		var ok bool
-		state, ok = cache.Lookup(*prevNode.hash)
+		state, ok = cache.Lookup(&prevNode.hash)
 		if !ok {
 			return ThresholdFailed, AssertError(fmt.Sprintf(
 				"thresholdState: cache lookup failed for %v",
@@ -298,7 +298,7 @@ func (b *BlockChain) thresholdState(prevNode *blockNode, checker thresholdCondit
 
 		// Update the cache to avoid recalculating the state in the
 		// future.
-		cache.Update(*prevNode.hash, state)
+		cache.Update(&prevNode.hash, state)
 	}
 
 	return state, nil

@@ -69,7 +69,7 @@ nextTest:
 			hash[0] = uint8(i + 1)
 
 			// Ensure the hash isn't available in the cache already.
-			_, ok := cache.Lookup(hash)
+			_, ok := cache.Lookup(&hash)
 			if ok {
 				t.Errorf("Lookup (%s): has entry for hash %v",
 					test.name, hash)
@@ -78,8 +78,8 @@ nextTest:
 
 			// Ensure hash that was added to the cache reports it's
 			// available and the state is the expected value.
-			cache.Update(hash, test.state)
-			state, ok := cache.Lookup(hash)
+			cache.Update(&hash, test.state)
+			state, ok := cache.Lookup(&hash)
 			if !ok {
 				t.Errorf("Lookup (%s): missing entry for hash "+
 					"%v", test.name, hash)
@@ -118,7 +118,7 @@ nextTest:
 
 			// Ensure hash is still available in the cache and the
 			// state is the expected value.
-			state, ok = cache.Lookup(hash)
+			state, ok = cache.Lookup(&hash)
 			if !ok {
 				t.Errorf("Lookup (%s): missing entry after "+
 					"flush for hash %v", test.name, hash)
@@ -134,8 +134,8 @@ nextTest:
 			// Ensure adding an existing hash with the same state
 			// doesn't break the existing entry and it is NOT added
 			// to the database updates map.
-			cache.Update(hash, test.state)
-			state, ok = cache.Lookup(hash)
+			cache.Update(&hash, test.state)
+			state, ok = cache.Lookup(&hash)
 			if !ok {
 				t.Errorf("Lookup (%s): missing entry after "+
 					"second add for hash %v", test.name,
@@ -160,8 +160,8 @@ nextTest:
 			if newState == test.state {
 				newState = ThresholdStarted
 			}
-			cache.Update(hash, newState)
-			state, ok = cache.Lookup(hash)
+			cache.Update(&hash, newState)
+			state, ok = cache.Lookup(&hash)
 			if !ok {
 				t.Errorf("Lookup (%s): missing entry after "+
 					"state change for hash %v", test.name,

@@ -968,7 +968,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
 	}
 
 	// Ensure the view is for the node being checked.
-	if !view.BestHash().IsEqual(node.parentHash) {
+	if !view.BestHash().IsEqual(&node.parentHash) {
 		return AssertError(fmt.Sprintf("inconsistent view when "+
 			"checking block connection: best hash is %v instead "+
 			"of expected %v", view.BestHash(), node.hash))
@@ -1146,7 +1146,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi
 
 	// Update the best hash for view to include this block since all of its
 	// transactions have been connected.
-	view.SetBestHash(node.hash)
+	view.SetBestHash(&node.hash)
 
 	return nil
 }
@@ -1173,6 +1173,6 @@ func (b *BlockChain) CheckConnectBlock(block *btcutil.Block) error {
 	// Leave the spent txouts entry nil in the state since the information
 	// is not needed and thus extra work can be avoided.
 	view := NewUtxoViewpoint()
-	view.SetBestHash(prevNode.hash)
+	view.SetBestHash(&prevNode.hash)
 	return b.checkConnectBlock(newNode, block, view, nil)
 }

@@ -378,7 +378,7 @@ func (b *blockManager) handleDonePeerMsg(peers *list.List, sp *serverPeer) {
 		b.syncPeer = nil
 		if b.headersFirstMode {
 			best := b.chain.BestSnapshot()
-			b.resetHeaderState(best.Hash, best.Height)
+			b.resetHeaderState(&best.Hash, best.Height)
 		}
 		b.startSync(peers)
 	}
@@ -595,7 +595,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
 		// potential sync node candidacy.
 		best := b.chain.BestSnapshot()
 		heightUpdate = best.Height
-		blkHashUpdate = best.Hash
+		blkHashUpdate = &best.Hash
 
 		// Clear the rejected transactions.
 		b.rejectedTxns = make(map[chainhash.Hash]struct{})
@@ -1417,7 +1417,7 @@ func newBlockManager(s *server, indexManager blockchain.IndexManager) (*blockMan
 		// Initialize the next checkpoint based on the current height.
 		bm.nextCheckpoint = bm.findNextHeaderCheckpoint(best.Height)
 		if bm.nextCheckpoint != nil {
-			bm.resetHeaderState(best.Hash, best.Height)
+			bm.resetHeaderState(&best.Hash, best.Height)
 		}
 	} else {
 		bmgrLog.Info("Checkpoints are disabled")

@@ -166,7 +166,7 @@ func main() {
 	fmt.Printf("Block database loaded with block height %d\n", best.Height)
 
 	// Find checkpoint candidates.
-	candidates, err := findCandidates(chain, best.Hash)
+	candidates, err := findCandidates(chain, &best.Hash)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "Unable to identify candidates:", err)
 		return

@@ -162,7 +162,7 @@ func (m *CPUMiner) submitBlock(block *btcutil.Block) bool {
 	// a new block, but the check only happens periodically, so it is
 	// possible a block was found and submitted in between.
 	msgBlock := block.MsgBlock()
-	if !msgBlock.Header.PrevBlock.IsEqual(m.g.BestSnapshot().Hash) {
+	if !msgBlock.Header.PrevBlock.IsEqual(&m.g.BestSnapshot().Hash) {
 		log.Debugf("Block submitted via CPU miner with previous "+
 			"block %s is stale", msgBlock.Header.PrevBlock)
 		return false
@@ -249,7 +249,7 @@ func (m *CPUMiner) solveBlock(msgBlock *wire.MsgBlock, blockHeight int32,
 
 			// The current block is stale if the best block
 			// has changed.
 			best := m.g.BestSnapshot()
-			if !header.PrevBlock.IsEqual(best.Hash) {
+			if !header.PrevBlock.IsEqual(&best.Hash) {
 				return false
 			}

@@ -435,7 +435,7 @@ func NewBlkTmplGenerator(policy *Policy, params *chaincfg.Params,
 func (g *BlkTmplGenerator) NewBlockTemplate(payToAddress btcutil.Address) (*BlockTemplate, error) {
 	// Extend the most recently known best block.
 	best := g.chain.BestSnapshot()
-	prevHash := best.Hash
+	prevHash := &best.Hash
 	nextBlockHeight := best.Height + 1
 
 	// Create a standard coinbase transaction paying to the provided

@@ -1470,7 +1470,7 @@ func (state *gbtWorkState) updateBlockTemplate(s *rpcServer, useCoinbaseValue bo
 	// generated.
 	var msgBlock *wire.MsgBlock
 	var targetDifficulty string
-	latestHash := s.server.blockManager.chain.BestSnapshot().Hash
+	latestHash := &s.server.blockManager.chain.BestSnapshot().Hash
 	template := state.template
 	if template == nil || state.prevHash == nil ||
 		!state.prevHash.IsEqual(latestHash) ||
@@ -2026,9 +2026,9 @@ func handleGetBlockTemplateProposal(s *rpcServer, request *btcjson.TemplateReque
 
 	block := btcutil.NewBlock(&msgBlock)
 
 	// Ensure the block is building from the expected previous block.
-	expectedPrevHash := s.server.blockManager.chain.BestSnapshot().Hash
+	expectedPrevHash := &s.server.blockManager.chain.BestSnapshot().Hash
 	prevHash := &block.MsgBlock().Header.PrevBlock
-	if expectedPrevHash == nil || !expectedPrevHash.IsEqual(prevHash) {
+	if !expectedPrevHash.IsEqual(prevHash) {
 		return "bad-prevblk", nil
 	}

@@ -2471,7 +2471,7 @@ fetchRange:
 		blockManager := wsc.server.server.blockManager
 		pauseGuard := blockManager.Pause()
 		best := blockManager.chain.BestSnapshot()
-		curHash := best.Hash
+		curHash := &best.Hash
 		again := true
 		if lastBlockHash == nil || *lastBlockHash == *curHash {
 			again = false

@@ -246,7 +246,7 @@ func newServerPeer(s *server, isPersistent bool) *serverPeer {
 // required by the configuration for the peer package.
 func (sp *serverPeer) newestBlock() (*chainhash.Hash, int32, error) {
 	best := sp.server.blockManager.chain.BestSnapshot()
-	return best.Hash, best.Height, nil
+	return &best.Hash, best.Height, nil
 }
 
 // addKnownAddresses adds the given addresses to the set of known addresses to
@@ -1130,7 +1130,7 @@ func (s *server) pushBlockMsg(sp *serverPeer, hash *chainhash.Hash, doneChan cha
 	if sendInv {
 		best := sp.server.blockManager.chain.BestSnapshot()
 		invMsg := wire.NewMsgInvSizeHint(1)
-		iv := wire.NewInvVect(wire.InvTypeBlock, best.Hash)
+		iv := wire.NewInvVect(wire.InvTypeBlock, &best.Hash)
 		invMsg.AddInvVect(iv)
 		sp.QueueMessage(invMsg, doneChan)
 		sp.continueHash = nil