waddrmgr: add migration to maintain MaxReorgDepth block hashes stored
In this commit, we add a migration that will be used by existing wallets to ensure they can adhere to the new requirement of storing up to MaxReorgDepth entries within the block hash index.
parent e548e76684
commit f2f46b674d
2 changed files with 191 additions and 0 deletions
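The migration added below leans on a staleHeight helper that is not part of this diff. As a rough sketch of the rule the commit message describes, and assuming that helper simply subtracts MaxReorgDepth from the wallet's tip height, the cutoff it computes would look like the following (pruneCutoff and its parameters are illustrative names, not identifiers from the repository):

// Minimal sketch of the pruning rule, not code from this commit: every block
// hash at or below the cutoff, except the genesis hash, can be removed while
// still allowing recovery from a reorg up to MaxReorgDepth blocks deep.
func pruneCutoff(tipHeight, maxReorgDepth int32) int32 {
	// A result below 1 means fewer than MaxReorgDepth blocks are stored,
	// so there is nothing to remove.
	return tipHeight - maxReorgDepth
}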
@@ -31,6 +31,10 @@ var versions = []migration.Version{
		Number:    7,
		Migration: resetSyncedBlockToBirthday,
	},
	{
		Number:    8,
		Migration: storeMaxReorgDepth,
	},
}

// getLatestVersion returns the version number of the latest database version.
@@ -372,3 +376,37 @@ func resetSyncedBlockToBirthday(ns walletdb.ReadWriteBucket) error {

	return PutSyncedTo(ns, &birthdayBlock)
}

// storeMaxReorgDepth is a migration responsible for allowing the wallet to only
// maintain MaxReorgDepth block hashes stored in order to recover from long
// reorgs.
func storeMaxReorgDepth(ns walletdb.ReadWriteBucket) error {
	// Retrieve the current tip of the wallet. We'll use this to determine
	// the highest stale height we currently have stored within it.
	syncedTo, err := fetchSyncedTo(ns)
	if err != nil {
		return err
	}
	maxStaleHeight := staleHeight(syncedTo.Height)

	// It's possible for this height to be non-sensical if we have less than
	// MaxReorgDepth blocks stored, so we can end the migration now.
	if maxStaleHeight < 1 {
		return nil
	}

	log.Infof("Removing block hash entries beyond maximum reorg depth of "+
		"%v from current tip %v", MaxReorgDepth, syncedTo.Height)

	// Otherwise, since we currently store all block hashes of the chain
	// before this migration, we'll remove all stale block hash entries
	// above the genesis block. This would leave us with only MaxReorgDepth
	// blocks stored.
	for height := maxStaleHeight; height > 0; height-- {
		if err := deleteBlockHash(ns, height); err != nil {
			return err
		}
	}

	return nil
}
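For a concrete sense of the migration's effect, assuming a MaxReorgDepth of 250: with the wallet synced to height 1,000, staleHeight would return 750, so the loop above deletes the block hash entries for heights 750 down to 1, leaving the genesis hash plus the hashes for heights 751 through 1,000.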
@@ -2,12 +2,14 @@ package waddrmgr

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"testing"
	"time"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcwallet/walletdb"
)

@@ -296,3 +298,154 @@ func TestMigrationResetSyncedBlockToBirthdayWithNoBirthdayBlock(t *testing.T) {
		true,
	)
}

// TestMigrationStoreMaxReorgDepth ensures that the storeMaxReorgDepth migration
// works as expected under different sync scenarios.
func TestMigrationStoreMaxReorgDepth(t *testing.T) {
	t.Parallel()

	testCases := []struct {
		name      string
		numBlocks int32
	}{
		{
			name:      "genesis only",
			numBlocks: 0,
		},
		{
			name:      "below max reorg depth",
			numBlocks: MaxReorgDepth - 1,
		},
		{
			name:      "above max reorg depth",
			numBlocks: MaxReorgDepth + 1,
		},
		{
			name:      "double max reorg depth",
			numBlocks: MaxReorgDepth * 2,
		},
	}

	for _, testCase := range testCases {
		success := t.Run(testCase.name, func(t *testing.T) {
			// We'll start the test by creating the number of blocks
			// we'll add to the chain. We start from height 1 as the
			// genesis block (height 0) is already included when the
			// address manager is created.
			blocks := make([]*BlockStamp, 0, testCase.numBlocks)
			for i := int32(1); i <= testCase.numBlocks; i++ {
				var hash chainhash.Hash
				binary.BigEndian.PutUint32(hash[:], uint32(i))
				blocks = append(blocks, &BlockStamp{
					Hash:   hash,
					Height: i,
				})
			}

			// Before the migration, we'll go ahead and add all of
			// the blocks created. This simulates the behavior of an
			// existing synced chain. We won't use PutSyncedTo as
			// that would remove the stale entries on its own.
			beforeMigration := func(ns walletdb.ReadWriteBucket) error {
				if testCase.numBlocks == 0 {
					return nil
				}

				// Write all the block hash entries.
				for _, block := range blocks {
					err := addBlockHash(
						ns, block.Height, block.Hash,
					)
					if err != nil {
						return err
					}
					err = updateSyncedTo(ns, block)
					if err != nil {
						return err
					}
				}

				// Check to make sure they've been added
				// properly.
				for _, block := range blocks {
					hash, err := fetchBlockHash(
						ns, block.Height,
					)
					if err != nil {
						return err
					}
					if *hash != block.Hash {
						return fmt.Errorf("expected "+
							"hash %v for height "+
							"%v, got %v",
							block.Hash,
							block.Height, hash)
					}
				}
				block, err := fetchSyncedTo(ns)
				if err != nil {
					return err
				}
				expectedBlock := blocks[len(blocks)-1]
				if block.Height != expectedBlock.Height {
					return fmt.Errorf("expected synced to "+
						"block height %v, got %v",
						expectedBlock.Height,
						block.Height)
				}
				if block.Hash != expectedBlock.Hash {
					return fmt.Errorf("expected synced to "+
						"block hash %v, got %v",
						expectedBlock.Hash,
						block.Hash)
				}

				return nil
			}

			// After the migration, we'll ensure we're unable to
			// find all the block hashes that should have been
			// removed.
			afterMigration := func(ns walletdb.ReadWriteBucket) error {
				maxStaleHeight := staleHeight(testCase.numBlocks)
				for _, block := range blocks {
					if block.Height <= maxStaleHeight {
						_, err := fetchBlockHash(
							ns, block.Height,
						)
						if IsError(err, ErrBlockNotFound) {
							continue
						}
						return fmt.Errorf("expected "+
							"ErrBlockNotFound for "+
							"height %v, got %v",
							block.Height, err)
					}

					hash, err := fetchBlockHash(
						ns, block.Height,
					)
					if err != nil {
						return err
					}
					if *hash != block.Hash {
						return fmt.Errorf("expected "+
							"hash %v for height "+
							"%v, got %v",
							block.Hash,
							block.Height, hash)
					}
				}
				return nil
			}

			applyMigration(
				t, beforeMigration, afterMigration,
				storeMaxReorgDepth, false,
			)
		})
		if !success {
			return
		}
	}
}
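Note that the "genesis only" and "below max reorg depth" cases exercise the migration's early-return path: with fewer than MaxReorgDepth blocks stored, staleHeight yields a non-positive value, storeMaxReorgDepth deletes nothing, and afterMigration only verifies that every previously stored hash is still present.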
|
||||
|
|
Loading…
Reference in a new issue