waddrmgr: maintain a maximum of MaxReorgDepth block hashes stored

In this commit, we modify the wallet's block hash index to only store up
to MaxReorgDepth blocks. This allows us to reduce consumed storage, as
we'd be mostly storing duplicate data. We choose to store up to
MaxReorgDepth to ensure we can recover from a potential long reorg.
This commit is contained in:
Wilmer Paulino 2019-05-14 13:14:58 -07:00
parent 66a95921c0
commit e548e76684
No known key found for this signature in database
GPG key ID: 6DF57B9F9514972F
2 changed files with 219 additions and 20 deletions

View file

@ -16,6 +16,13 @@ import (
"github.com/btcsuite/btcwallet/walletdb"
)
const (
	// MaxReorgDepth represents the maximum number of block hashes we'll
	// keep within the wallet at any given point in order to recover from
	// long reorgs.
	//
	// NOTE: PutSyncedTo enforces this bound by deleting the hash entry
	// MaxReorgDepth blocks below each newly stored tip.
	MaxReorgDepth = 10000
)
var (
// LatestMgrVersion is the most recent manager version.
LatestMgrVersion = getLatestVersion()
@ -1832,40 +1839,45 @@ func fetchSyncedTo(ns walletdb.ReadBucket) (*BlockStamp, error) {
// PutSyncedTo stores the provided synced to blockstamp to the database.
func PutSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error {
bucket := ns.NestedReadWriteBucket(syncBucketName)
errStr := fmt.Sprintf("failed to store sync information %v", bs.Hash)
// If the block height is greater than zero, check that the previous
// block height exists. This prevents reorg issues in the future.
// We use BigEndian so that keys/values are added to the bucket in
// order, making writes more efficient for some database backends.
// block height exists. This prevents reorg issues in the future. We use
// BigEndian so that keys/values are added to the bucket in order,
// making writes more efficient for some database backends.
if bs.Height > 0 {
if _, err := fetchBlockHash(ns, bs.Height-1); err != nil {
return managerError(ErrDatabase, errStr, err)
// We'll only check the previous block height exists if we've
// determined our birthday block. This is needed as we'll no
// longer store _all_ block hashes of the chain, so we only
// expect the previous block to exist once our initial sync has
// completed, which is dictated by our birthday block being set.
if _, err := FetchBirthdayBlock(ns); err == nil {
_, err := fetchBlockHash(ns, bs.Height-1)
if err != nil {
return managerError(ErrBlockNotFound, errStr, err)
}
}
}
// Store the block hash by block height.
height := make([]byte, 4)
binary.BigEndian.PutUint32(height, uint32(bs.Height))
err := bucket.Put(height, bs.Hash[0:32])
if err != nil {
if err := addBlockHash(ns, bs.Height, bs.Hash); err != nil {
return managerError(ErrDatabase, errStr, err)
}
// The serialized synced to format is:
// <blockheight><blockhash><timestamp>
//
// 4 bytes block height + 32 bytes hash length + 4 byte timestamp length
buf := make([]byte, 40)
binary.LittleEndian.PutUint32(buf[0:4], uint32(bs.Height))
copy(buf[4:36], bs.Hash[0:32])
binary.LittleEndian.PutUint32(buf[36:], uint32(bs.Timestamp.Unix()))
// Remove the stale height if any, as we should only store MaxReorgDepth
// block hashes at any given point.
staleHeight := staleHeight(bs.Height)
if staleHeight > 0 {
if err := deleteBlockHash(ns, staleHeight); err != nil {
return managerError(ErrDatabase, errStr, err)
}
}
err = bucket.Put(syncedToName, buf)
if err != nil {
// Finally, we can update the syncedTo value.
if err := updateSyncedTo(ns, bs); err != nil {
return managerError(ErrDatabase, errStr, err)
}
return nil
}
@ -1893,6 +1905,62 @@ func fetchBlockHash(ns walletdb.ReadBucket, height int32) (*chainhash.Hash, erro
return &hash, nil
}
// addBlockHash adds a block hash entry to the index within the syncBucket,
// keyed by the block's height.
func addBlockHash(ns walletdb.ReadWriteBucket, height int32, hash chainhash.Hash) error {
	// Heights are serialized as big-endian keys so entries sort in height
	// order within the bucket.
	heightKey := make([]byte, 4)
	binary.BigEndian.PutUint32(heightKey, uint32(height))

	syncBucket := ns.NestedReadWriteBucket(syncBucketName)
	err := syncBucket.Put(heightKey, hash[:])
	if err != nil {
		errStr := fmt.Sprintf("failed to add hash %v", hash)
		return managerError(ErrDatabase, errStr, err)
	}

	return nil
}
// deleteBlockHash deletes the block hash entry within the syncBucket for the
// given height.
func deleteBlockHash(ns walletdb.ReadWriteBucket, height int32) error {
	// The entry is keyed by the big-endian serialization of its height.
	heightKey := make([]byte, 4)
	binary.BigEndian.PutUint32(heightKey, uint32(height))

	syncBucket := ns.NestedReadWriteBucket(syncBucketName)
	err := syncBucket.Delete(heightKey)
	if err != nil {
		errStr := fmt.Sprintf("failed to delete hash for height %v",
			height)
		return managerError(ErrDatabase, errStr, err)
	}

	return nil
}
// updateSyncedTo updates the value behind the syncedToName key to the given
// block.
func updateSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error {
	// The serialized synced to format is:
	//   <blockheight><blockhash><timestamp>
	//
	// 4 bytes block height + 32 bytes hash length + 4 byte timestamp length
	buf := make([]byte, 40)
	binary.LittleEndian.PutUint32(buf[:4], uint32(bs.Height))
	copy(buf[4:36], bs.Hash[:])
	binary.LittleEndian.PutUint32(buf[36:], uint32(bs.Timestamp.Unix()))

	syncBucket := ns.NestedReadWriteBucket(syncBucketName)
	err := syncBucket.Put(syncedToName, buf)
	if err != nil {
		errStr := "failed to update synced to value"
		return managerError(ErrDatabase, errStr, err)
	}

	return nil
}
// staleHeight returns the stale height for the given height. The stale height
// indicates the height we should remove in order to maintain a maximum of
// MaxReorgDepth block hashes.
//
// A non-positive result means no height has gone stale yet; callers (see
// PutSyncedTo) only delete when the returned height is greater than zero.
func staleHeight(height int32) int32 {
	return height - MaxReorgDepth
}
// FetchStartBlock loads the start block stamp for the manager from the
// database.
func FetchStartBlock(ns walletdb.ReadBucket) (*BlockStamp, error) {

131
waddrmgr/db_test.go Normal file
View file

@ -0,0 +1,131 @@
package waddrmgr
import (
"encoding/binary"
"fmt"
"testing"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcwallet/walletdb"
)
// TestStoreMaxReorgDepth ensures that we can only store up to MaxReorgDepth
// blocks at any given time.
func TestStoreMaxReorgDepth(t *testing.T) {
	t.Parallel()

	teardown, db, _ := setupManager(t)
	defer teardown()

	// We'll start the test by simulating a synced chain where we start from
	// 1000 and end at 10999, i.e., exactly MaxReorgDepth (10000) blocks.
	const (
		startHeight = 1000
		numBlocks   = MaxReorgDepth - 1
	)

	// Each simulated block hash is simply the block height serialized into
	// the first four bytes of an otherwise-zero hash, making hashes unique
	// and easy to derive from heights.
	blocks := make([]*BlockStamp, 0, numBlocks)
	for i := int32(startHeight); i <= startHeight+numBlocks; i++ {
		var hash chainhash.Hash
		binary.BigEndian.PutUint32(hash[:], uint32(i))
		blocks = append(blocks, &BlockStamp{
			Hash:   hash,
			Height: i,
		})
	}

	// We'll write all of the blocks to the database.
	err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error {
		ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
		for _, block := range blocks {
			if err := PutSyncedTo(ns, block); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// We should be able to retrieve them all as we have MaxReorgDepth
	// blocks.
	err = walletdb.View(db, func(tx walletdb.ReadTx) error {
		ns := tx.ReadBucket(waddrmgrNamespaceKey)

		// The synced-to entry should reflect the last block written.
		syncedTo, err := fetchSyncedTo(ns)
		if err != nil {
			return err
		}
		lastBlock := blocks[len(blocks)-1]
		if syncedTo.Height != lastBlock.Height {
			return fmt.Errorf("expected synced to block height "+
				"%v, got %v", lastBlock.Height, syncedTo.Height)
		}
		if syncedTo.Hash != lastBlock.Hash {
			return fmt.Errorf("expected synced to block hash %v, "+
				"got %v", lastBlock.Hash, syncedTo.Hash)
		}

		// The very first block must still be present since we are at
		// exactly MaxReorgDepth stored hashes — nothing has been
		// pruned yet.
		firstBlock := blocks[0]
		hash, err := fetchBlockHash(ns, firstBlock.Height)
		if err != nil {
			return err
		}
		if *hash != firstBlock.Hash {
			return fmt.Errorf("expected hash %v for height %v, "+
				"got %v", firstBlock.Hash, firstBlock.Height,
				hash)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Then, we'll create a new block which we'll use to extend the chain.
	lastBlock := blocks[len(blocks)-1]
	newBlockHeight := lastBlock.Height + 1
	var newBlockHash chainhash.Hash
	binary.BigEndian.PutUint32(newBlockHash[:], uint32(newBlockHeight))
	newBlock := &BlockStamp{Height: newBlockHeight, Hash: newBlockHash}
	err = walletdb.Update(db, func(tx walletdb.ReadWriteTx) error {
		ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
		return PutSyncedTo(ns, newBlock)
	})
	if err != nil {
		t.Fatal(err)
	}

	// Extending the chain would cause us to exceed our MaxReorgDepth blocks
	// stored, so we should see the first block we ever added to now be
	// removed.
	err = walletdb.View(db, func(tx walletdb.ReadTx) error {
		ns := tx.ReadBucket(waddrmgrNamespaceKey)

		// The synced-to entry should now point at the new tip.
		syncedTo, err := fetchSyncedTo(ns)
		if err != nil {
			return err
		}
		if syncedTo.Height != newBlock.Height {
			return fmt.Errorf("expected synced to block height "+
				"%v, got %v", newBlock.Height, syncedTo.Height)
		}
		if syncedTo.Hash != newBlock.Hash {
			return fmt.Errorf("expected synced to block hash %v, "+
				"got %v", newBlock.Hash, syncedTo.Hash)
		}

		// The oldest block should have been pruned and its lookup
		// should fail with ErrBlockNotFound.
		firstBlock := blocks[0]
		_, err = fetchBlockHash(ns, firstBlock.Height)
		if !IsError(err, ErrBlockNotFound) {
			return fmt.Errorf("expected ErrBlockNotFound, got %v",
				err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}