chain+waddrmgr+wallet: store all hashes for better reorg handling (#5)
parent 4ac75773d7
commit c85893de1a

8 changed files with 217 additions and 314 deletions
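In broad strokes, the change drops the fixed-size recent-blocks list from the address manager and instead records every synced block's hash in the sync bucket, keyed by its height. The wallet can then ask the manager for the hash it recorded at any height and compare it against the chain backend when checking for a reorg. Below is a minimal, self-contained sketch of that keying scheme; the bucket type is a hypothetical in-memory stand-in for the walletdb sync bucket, not the real API.

package main

import (
	"encoding/binary"
	"fmt"
)

// bucket is a hypothetical stand-in for a walletdb key/value bucket; the
// real code writes into the address manager's sync bucket instead.
type bucket map[[4]byte][32]byte

// putHash records a block hash under its height. The key is the height
// encoded big-endian, so lexicographic key order equals height order and
// sequential writes stay append-like for ordered backends (the reason the
// new putSyncedTo comment gives for choosing BigEndian).
func putHash(b bucket, height uint32, hash [32]byte) {
	var key [4]byte
	binary.BigEndian.PutUint32(key[:], height)
	b[key] = hash
}

// getHash looks up the hash recorded for a height.
func getHash(b bucket, height uint32) ([32]byte, bool) {
	var key [4]byte
	binary.BigEndian.PutUint32(key[:], height)
	h, ok := b[key]
	return h, ok
}

func main() {
	b := bucket{}
	putHash(b, 100, [32]byte{0xab})
	if h, ok := getHash(b, 100); ok {
		fmt.Printf("height 100 -> %x...\n", h[:4])
	}
}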
@@ -19,6 +19,7 @@ type Interface interface {
WaitForShutdown()
GetBestBlock() (*chainhash.Hash, int32, error)
GetBlock(*chainhash.Hash) (*wire.MsgBlock, error)
GetBlockHash(int64) (*chainhash.Hash, error)
BlockStamp() (*waddrmgr.BlockStamp, error)
SendRawTransaction(*wire.MsgTx, bool) (*chainhash.Hash, error)
Rescan(*chainhash.Hash, []btcutil.Address, []*wire.OutPoint) error
@@ -128,6 +128,18 @@ func (s *NeutrinoClient) BlockStamp() (*waddrmgr.BlockStamp, error) {
}
}

// GetBlockHash returns the block hash for the given height, or an error if the
// client has been shut down or the hash at the block height doesn't exist or
// is unknown.
func (s *NeutrinoClient) GetBlockHash(height int64) (*chainhash.Hash, error) {
header, err := s.CS.BlockHeaders.FetchHeaderByHeight(uint32(height))
if err != nil {
return nil, err
}
hash := header.BlockHash()
return &hash, nil
}

// SendRawTransaction replicates the RPC client's SendRawTransaction command.
func (s *NeutrinoClient) SendRawTransaction(tx *wire.MsgTx, allowHighFees bool) (
*chainhash.Hash, error) {
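The new NeutrinoClient.GetBlockHash serves hashes from headers the client already stores: header.BlockHash() simply hashes the serialized header, so no network round trip is needed. As a point of reference, a Bitcoin block hash is the double SHA-256 of the 80-byte serialized header; the sketch below shows that computation with a placeholder byte slice rather than a real wire.BlockHeader.

package main

import (
	"crypto/sha256"
	"fmt"
)

// headerHash shows what a block hash boils down to: the double SHA-256 of
// the 80-byte serialized block header. Real code serializes the
// wire.BlockHeader fields; the input here is just a placeholder.
func headerHash(serializedHeader []byte) [32]byte {
	first := sha256.Sum256(serializedHeader)
	return sha256.Sum256(first[:])
}

func main() {
	var hdr [80]byte // placeholder 80-byte header
	fmt.Printf("hash: %x\n", headerHash(hdr[:]))
}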
waddrmgr/db.go (107 changed lines)
@@ -192,9 +192,8 @@ var (
watchingOnlyName = []byte("watchonly")

// Sync related key names (sync bucket).
syncedToName = []byte("syncedto")
startBlockName = []byte("startblock")
recentBlocksName = []byte("recentblocks")
syncedToName = []byte("syncedto")
startBlockName = []byte("startblock")

// Account related key names (account bucket).
acctNumAcctsName = []byte("numaccts")
@@ -1394,6 +1393,25 @@ func fetchSyncedTo(ns walletdb.ReadBucket) (*BlockStamp, error) {
// putSyncedTo stores the provided synced to blockstamp to the database.
func putSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error {
bucket := ns.NestedReadWriteBucket(syncBucketName)
errStr := fmt.Sprintf("failed to store sync information %v", bs.Hash)

// If the block height is greater than zero, check that the previous
// block height exists. This prevents reorg issues in the future.
// We use BigEndian so that keys/values are added to the bucket in
// order, making writes more efficient for some database backends.
if bs.Height > 0 {
if _, err := fetchBlockHash(ns, bs.Height-1); err != nil {
return managerError(ErrDatabase, errStr, err)
}
}

// Store the block hash by block height.
height := make([]byte, 4)
binary.BigEndian.PutUint32(height, uint32(bs.Height))
err := bucket.Put(height, bs.Hash[0:32])
if err != nil {
return managerError(ErrDatabase, errStr, err)
}

// The serialized synced to format is:
// <blockheight><blockhash>

@@ -1403,14 +1421,33 @@ func putSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error {
binary.LittleEndian.PutUint32(buf[0:4], uint32(bs.Height))
copy(buf[4:36], bs.Hash[0:32])

err := bucket.Put(syncedToName, buf)
err = bucket.Put(syncedToName, buf)
if err != nil {
str := fmt.Sprintf("failed to store sync information %v", bs.Hash)
return managerError(ErrDatabase, str, err)
return managerError(ErrDatabase, errStr, err)
}
return nil
}

// fetchBlockHash loads the block hash for the provided height from the
// database.
func fetchBlockHash(ns walletdb.ReadBucket, height int32) (*chainhash.Hash, error) {
bucket := ns.NestedReadBucket(syncBucketName)
errStr := fmt.Sprintf("failed to fetch block hash for height %d", height)

heightBytes := make([]byte, 4)
binary.BigEndian.PutUint32(heightBytes, uint32(height))
hashBytes := bucket.Get(heightBytes)
if len(hashBytes) != 32 {
err := fmt.Errorf("couldn't get hash from database")
return nil, managerError(ErrDatabase, errStr, err)
}
var hash chainhash.Hash
if err := hash.SetBytes(hashBytes); err != nil {
return nil, managerError(ErrDatabase, errStr, err)
}
return &hash, nil
}

// fetchStartBlock loads the start block stamp for the manager from the
// database.
func fetchStartBlock(ns walletdb.ReadBucket) (*BlockStamp, error) {
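The check at the top of putSyncedTo is what keeps the new height-to-hash index usable for reorg handling: a hash for height h is only accepted once height h-1 is already recorded (for h > 0), so the index can never develop gaps that would break a later walk back through history. A small in-memory sketch of that invariant, with a map standing in for the sync bucket:

package main

import (
	"errors"
	"fmt"
)

// syncIndex is an in-memory stand-in for the sync bucket's height->hash
// entries; the real code stores these through walletdb.
type syncIndex map[int32][32]byte

// setSyncedTo mirrors the invariant the new putSyncedTo enforces: a hash
// for height h may only be written when height h-1 is already present
// (for h > 0), keeping the index contiguous.
func setSyncedTo(idx syncIndex, height int32, hash [32]byte) error {
	if height > 0 {
		if _, ok := idx[height-1]; !ok {
			return errors.New("previous block height not recorded")
		}
	}
	idx[height] = hash
	return nil
}

func main() {
	idx := syncIndex{}
	fmt.Println(setSyncedTo(idx, 0, [32]byte{1})) // genesis: <nil>
	fmt.Println(setSyncedTo(idx, 1, [32]byte{2})) // <nil>
	fmt.Println(setSyncedTo(idx, 5, [32]byte{3})) // error: gap at height 4
}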
@@ -1452,64 +1489,6 @@ func putStartBlock(ns walletdb.ReadWriteBucket, bs *BlockStamp) error {
return nil
}

// fetchRecentBlocks returns the height of the most recent block height and
// hashes of the most recent blocks.
func fetchRecentBlocks(ns walletdb.ReadBucket) (int32, []chainhash.Hash, error) {
bucket := ns.NestedReadBucket(syncBucketName)

// The serialized recent blocks format is:
// <blockheight><numhashes><blockhashes>
//
// 4 bytes recent block height + 4 bytes number of hashes + raw hashes
// at 32 bytes each.

// Given the above, the length of the entry must be at a minimum
// the constant value sizes.
buf := bucket.Get(recentBlocksName)
if len(buf) < 8 {
str := "malformed recent blocks stored in database"
return 0, nil, managerError(ErrDatabase, str, nil)
}

recentHeight := int32(binary.LittleEndian.Uint32(buf[0:4]))
numHashes := binary.LittleEndian.Uint32(buf[4:8])
recentHashes := make([]chainhash.Hash, numHashes)
offset := 8
for i := uint32(0); i < numHashes; i++ {
copy(recentHashes[i][:], buf[offset:offset+32])
offset += 32
}

return recentHeight, recentHashes, nil
}

// putRecentBlocks stores the provided start block stamp to the database.
func putRecentBlocks(ns walletdb.ReadWriteBucket, recentHeight int32, recentHashes []chainhash.Hash) error {
bucket := ns.NestedReadWriteBucket(syncBucketName)

// The serialized recent blocks format is:
// <blockheight><numhashes><blockhashes>
//
// 4 bytes recent block height + 4 bytes number of hashes + raw hashes
// at 32 bytes each.
numHashes := uint32(len(recentHashes))
buf := make([]byte, 8+(numHashes*32))
binary.LittleEndian.PutUint32(buf[0:4], uint32(recentHeight))
binary.LittleEndian.PutUint32(buf[4:8], numHashes)
offset := 8
for i := uint32(0); i < numHashes; i++ {
copy(buf[offset:offset+32], recentHashes[i][:])
offset += 32
}

err := bucket.Put(recentBlocksName, buf)
if err != nil {
str := "failed to store recent blocks"
return managerError(ErrDatabase, str, err)
}
return nil
}

// managerExists returns whether or not the manager has already been created
// in the given database namespace.
func managerExists(ns walletdb.ReadBucket) bool {
@@ -12,7 +12,6 @@ import (

"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcd/chaincfg"
"github.com/roasbeef/btcd/chaincfg/chainhash"
"github.com/roasbeef/btcutil"
"github.com/roasbeef/btcutil/hdkeychain"
"github.com/roasbeef/btcwallet/internal/zero"

@@ -2126,11 +2125,6 @@ func loadManager(ns walletdb.ReadBucket, pubPassphrase []byte, chainParams *chai
return nil, maybeConvertDbError(err)
}

recentHeight, recentHashes, err := fetchRecentBlocks(ns)
if err != nil {
return nil, maybeConvertDbError(err)
}

// When not a watching-only manager, set the master private key params,
// but don't derive it now since the manager starts off locked.
var masterKeyPriv snacl.SecretKey

@@ -2165,7 +2159,7 @@ func loadManager(ns walletdb.ReadBucket, pubPassphrase []byte, chainParams *chai
zero.Bytes(cryptoKeyPubCT)

// Create the sync state struct.
syncInfo := newSyncState(startBlock, syncedTo, recentHeight, recentHashes)
syncInfo := newSyncState(startBlock, syncedTo)

// Generate private passphrase salt.
var privPassphraseSalt [saltSize]byte

@@ -2405,9 +2399,7 @@ func Create(ns walletdb.ReadWriteBucket, seed, pubPassphrase, privPassphrase []b
createdAt := &BlockStamp{Hash: *chainParams.GenesisHash, Height: 0}

// Create the initial sync state.
recentHashes := []chainhash.Hash{createdAt.Hash}
recentHeight := createdAt.Height
syncInfo := newSyncState(createdAt, createdAt, recentHeight, recentHashes)
syncInfo := newSyncState(createdAt, createdAt)

// Save the master key params to the database.
pubParams := masterKeyPub.Marshal()

@@ -2447,12 +2439,6 @@ func Create(ns walletdb.ReadWriteBucket, seed, pubPassphrase, privPassphrase []b
return err
}

// Save the initial recent blocks state.
err = putRecentBlocks(ns, recentHeight, recentHashes)
if err != nil {
return err
}

// Save the information for the imported account to the database.
err = putAccountInfo(ns, ImportedAddrAccount, nil,
nil, 0, 0, ImportedAddrAccountName)
waddrmgr/sync.go (170 changed lines)
@@ -5,18 +5,10 @@
package waddrmgr

import (
"sync"

"github.com/roasbeef/btcd/chaincfg/chainhash"
"github.com/roasbeef/btcwallet/walletdb"
)

const (
// maxRecentHashes is the maximum number of hashes to keep in history
// for the purposes of rollbacks.
maxRecentHashes = 20
)

// BlockStamp defines a block (by height and a unique hash) and is
// used to mark a point in the blockchain that an address manager element is
// synced to.

@@ -36,101 +28,17 @@ type syncState struct {
// syncedTo is the current block the addresses in the manager are known
// to be synced against.
syncedTo BlockStamp

// recentHeight is the most recently seen sync height.
recentHeight int32

// recentHashes is a list of the last several seen block hashes.
recentHashes []chainhash.Hash
}

// iter returns a BlockIterator that can be used to iterate over the recently
// seen blocks in the sync state.
func (s *syncState) iter(mtx *sync.RWMutex) *BlockIterator {
if s.recentHeight == -1 || len(s.recentHashes) == 0 {
return nil
}
return &BlockIterator{
mtx: mtx,
height: s.recentHeight,
index: len(s.recentHashes) - 1,
syncInfo: s,
}
}

// newSyncState returns a new sync state with the provided parameters.
func newSyncState(startBlock, syncedTo *BlockStamp, recentHeight int32,
recentHashes []chainhash.Hash) *syncState {
func newSyncState(startBlock, syncedTo *BlockStamp) *syncState {

return &syncState{
startBlock: *startBlock,
syncedTo: *syncedTo,
recentHeight: recentHeight,
recentHashes: recentHashes,
startBlock: *startBlock,
syncedTo: *syncedTo,
}
}

// BlockIterator allows for the forwards and backwards iteration of recently
// seen blocks.
type BlockIterator struct {
mtx *sync.RWMutex
height int32
index int
syncInfo *syncState
}

// Next returns the next recently seen block or false if there is not one.
func (it *BlockIterator) Next() bool {
it.mtx.RLock()
defer it.mtx.RUnlock()

if it.index+1 >= len(it.syncInfo.recentHashes) {
return false
}
it.index++
return true
}

// Prev returns the previous recently seen block or false if there is not one.
func (it *BlockIterator) Prev() bool {
it.mtx.RLock()
defer it.mtx.RUnlock()

if it.index-1 < 0 {
return false
}
it.index--
return true
}

// BlockStamp returns the block stamp associated with the recently seen block
// the iterator is currently pointing to.
func (it *BlockIterator) BlockStamp() BlockStamp {
it.mtx.RLock()
defer it.mtx.RUnlock()

return BlockStamp{
Height: it.syncInfo.recentHeight -
int32(len(it.syncInfo.recentHashes)-1-it.index),
Hash: it.syncInfo.recentHashes[it.index],
}
}

// NewIterateRecentBlocks returns an iterator for recently-seen blocks.
// The iterator starts at the most recently-added block, and Prev should
// be used to access earlier blocks.
//
// NOTE: Ideally this should not really be a part of the address manager as it
// is intended for syncing purposes. It is being exposed here for now to go
// with the other syncing code. Ultimately, all syncing code should probably
// go into its own package and share the data store.
func (m *Manager) NewIterateRecentBlocks() *BlockIterator {
m.mtx.RLock()
defer m.mtx.RUnlock()

return m.syncState.iter(&m.mtx)
}

// SetSyncedTo marks the address manager to be in sync with the recently-seen
// block described by the blockstamp. When the provided blockstamp is nil,
// the oldest blockstamp of the block the manager was created at and of all

@@ -141,61 +49,10 @@ func (m *Manager) SetSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error
m.mtx.Lock()
defer m.mtx.Unlock()

// Update the recent history.
//
// NOTE: The values in the memory sync state aren't directly modified
// here in case the forthcoming db update fails. The memory sync state
// is updated with these values as needed after the db updates.
recentHeight := m.syncState.recentHeight
recentHashes := m.syncState.recentHashes
// Use the stored start blockstamp and reset recent hashes and height
// when the provided blockstamp is nil.
if bs == nil {
// Use the stored start blockstamp and reset recent hashes and
// height when the provided blockstamp is nil.
bs = &m.syncState.startBlock
recentHeight = m.syncState.startBlock.Height
recentHashes = nil

} else if bs.Height < recentHeight {
// When the new block stamp height is prior to the most recently
// seen height, a rollback is being performed. Thus, when the
// previous block stamp is already saved, remove anything after
// it. Otherwise, the rollback must be too far in history, so
// clear the recent hashes and set the recent height to the
// current block stamp height.
numHashes := len(recentHashes)
idx := numHashes - 1 - int(recentHeight-bs.Height)
if idx >= 0 && idx < numHashes && recentHashes[idx] == bs.Hash {
// subslice out the removed hashes.
recentHeight = bs.Height
recentHashes = recentHashes[:idx]
} else {
recentHeight = bs.Height
recentHashes = nil
}

} else if bs.Height != recentHeight+1 {
// At this point the new block stamp height is after the most
// recently seen block stamp, so it should be the next height in
// sequence. When this is not the case, the recent history is
// no longer valid, so clear the recent hashes and set the
// recent height to the current block stamp height.
recentHeight = bs.Height
recentHashes = nil
} else {
// The only case left is when the new block stamp height is the
// next height in sequence after the most recently seen block
// stamp, so update it accordingly.
recentHeight = bs.Height
}

// Enforce maximum number of recent hashes.
if len(recentHashes) == maxRecentHashes {
// Shift everything down one position and add the new hash in
// the last position.
copy(recentHashes, recentHashes[1:])
recentHashes[maxRecentHashes-1] = bs.Hash
} else {
recentHashes = append(recentHashes, bs.Hash)
}

// Update the database.

@@ -203,15 +60,9 @@ func (m *Manager) SetSyncedTo(ns walletdb.ReadWriteBucket, bs *BlockStamp) error
if err != nil {
return err
}
err = putRecentBlocks(ns, recentHeight, recentHashes)
if err != nil {
return err
}

// Update memory now that the database is updated.
m.syncState.syncedTo = *bs
m.syncState.recentHashes = recentHashes
m.syncState.recentHeight = recentHeight
return nil
}

@@ -225,3 +76,14 @@ func (m *Manager) SyncedTo() BlockStamp {

return m.syncState.syncedTo
}

// BlockHash returns the block hash at a particular block height. This
// information is useful for comparing against the chain back-end to see if a
// reorg is taking place and how far back it goes.
func (m *Manager) BlockHash(ns walletdb.ReadBucket, height int32) (
*chainhash.Hash, error) {
m.mtx.Lock()
defer m.mtx.Unlock()

return fetchBlockHash(ns, height)
}
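Manager.BlockHash is the read side of the new index: a caller can walk back from the synced-to height, compare each stored hash against the chain backend's hash at the same height, and treat the first matching height as the point to roll back to. This mirrors the loop added to wallet.syncWithChain further down; the sketch below is a simplified, in-memory version using maps as stand-ins for the manager's index and the chain client, not the wallet's actual transaction handling.

package main

import "fmt"

// findForkPoint walks back from the synced-to height, comparing the locally
// recorded hash with the chain's hash at each height, and stops at the first
// height where they match. It also reports whether any mismatch was seen,
// i.e. whether a rollback is needed.
func findForkPoint(local, chain map[int32][32]byte, syncedTo int32) (int32, bool) {
	rollback := false
	for height := syncedTo; height >= 0; height-- {
		if local[height] == chain[height] {
			return height, rollback
		}
		rollback = true
	}
	return 0, rollback
}

func main() {
	local := map[int32][32]byte{0: {1}, 1: {2}, 2: {3}}
	chain := map[int32][32]byte{0: {1}, 1: {2}, 2: {9}} // reorg at height 2
	fork, rollback := findForkPoint(local, chain, 2)
	fmt.Println(fork, rollback) // 1 true: roll back everything above height 1
}

In the real code, finding the fork point is followed by resetting the manager's synced-to block to that height and rolling the transaction store back past it.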
@@ -5,6 +5,8 @@
package wallet

import (
"bytes"

"github.com/roasbeef/btcd/txscript"
"github.com/roasbeef/btcwallet/chain"
"github.com/roasbeef/btcwallet/waddrmgr"

@@ -111,29 +113,27 @@ func (w *Wallet) disconnectBlock(dbtx walletdb.ReadWriteTx, b wtxmgr.BlockMeta)
return nil
}

// Disconnect the last seen block from the manager if it matches the
// removed block.
iter := w.Manager.NewIterateRecentBlocks()
if iter != nil && iter.BlockStamp().Hash == b.Hash {
if iter.Prev() {
prev := iter.BlockStamp()
w.Manager.SetSyncedTo(addrmgrNs, &prev)
err := w.TxStore.Rollback(txmgrNs, prev.Height+1)
// Disconnect the removed block and all blocks after it if we know about
// the disconnected block. Otherwise, the block is in the future.
if b.Height <= w.Manager.SyncedTo().Height {
hash, err := w.Manager.BlockHash(addrmgrNs, b.Height)
if err != nil {
return err
}
if bytes.Equal(hash[:], b.Hash[:]) {
bs := waddrmgr.BlockStamp{
Height: b.Height - 1,
}
hash, err = w.Manager.BlockHash(addrmgrNs, bs.Height)
if err != nil {
return err
}
} else {
// The reorg is farther back than the recently-seen list
// of blocks has recorded, so set it to unsynced which
// will in turn lead to a rescan from either the
// earliest blockstamp the addresses in the manager are
// known to have been created.
w.Manager.SetSyncedTo(addrmgrNs, nil)
// Rollback everything but the genesis block.
err := w.TxStore.Rollback(txmgrNs, 1)
b.Hash = *hash
err = w.Manager.SetSyncedTo(addrmgrNs, &bs)
if err != nil {
return err
}
err = w.TxStore.Rollback(txmgrNs, b.Height)
}
}
@@ -175,13 +175,41 @@ out:
log.Infof("Rescanned through block %v (height %d)",
n.Hash, n.Height)

bs := waddrmgr.BlockStamp{
Hash: *n.Hash,
Height: n.Height,
}
client := w.ChainClient()
// Since btcd rescans don't send blockconnected
// notifications, we need to cycle through all of the
// rescanned blocks and write the hashes to the
// database. Neutrino rescans do send the notifications,
// which means this loop won't actually cycle.
//
// TODO(aakselrod): There's a race condition here, which
// happens when a reorg occurs between the
// rescanProgress notification and the last GetBlockHash
// call. The solution when using btcd is to make btcd
// send blockconnected notifications with each block
// the way Neutrino does, and get rid of the loop. The
// other alternative is to check the final hash and,
// if it doesn't match the original hash returned by
// the notification, to roll back and restart the
// rescan.
err := walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
return w.Manager.SetSyncedTo(ns, &bs)
startBlock := w.Manager.SyncedTo()
for i := startBlock.Height + 1; i <= n.Height; i++ {
hash, err := client.GetBlockHash(int64(i))
if err != nil {
return err
}
bs := waddrmgr.BlockStamp{
Height: i,
Hash: *hash,
}
err = w.Manager.SetSyncedTo(ns, &bs)
if err != nil {
return err
}
}
return nil
})
if err != nil {
log.Errorf("Failed to update address manager "+
@@ -197,13 +225,41 @@ out:
"%s, height %d)", len(addrs), noun, n.Hash,
n.Height)

bs := waddrmgr.BlockStamp{
Height: n.Height,
Hash: *n.Hash,
}
client := w.ChainClient()
// Since btcd rescans don't send blockconnected
// notifications, we need to cycle through all of the
// rescanned blocks and write the hashes to the
// database. Neutrino rescans do send the notifications,
// which means this loop won't actually cycle.
//
// TODO(aakselrod): There's a race condition here, which
// happens when a reorg occurs between the
// rescanFinished notification and the last GetBlockHash
// call. The solution when using btcd is to make btcd
// send blockconnected notifications with each block
// the way Neutrino does, and get rid of the loop. The
// other alternative is to check the final hash and,
// if it doesn't match the original hash returned by
// the notification, to roll back and restart the
// rescan.
err := walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
return w.Manager.SetSyncedTo(ns, &bs)
startBlock := w.Manager.SyncedTo()
for i := startBlock.Height + 1; i <= n.Height; i++ {
hash, err := client.GetBlockHash(int64(i))
if err != nil {
return err
}
bs := waddrmgr.BlockStamp{
Height: i,
Hash: *hash,
}
err = w.Manager.SetSyncedTo(ns, &bs)
if err != nil {
return err
}
}
return nil
})
if err != nil {
log.Errorf("Failed to update address manager "+
wallet/wallet.go (113 changed lines)
@@ -6,6 +6,7 @@
package wallet

import (
"bytes"
"encoding/hex"
"errors"
"fmt"

@@ -325,19 +326,6 @@ func (w *Wallet) syncWithChain() error {
return err
}

// Request notifications for connected and disconnected blocks.
//
// TODO(jrick): Either request this notification only once, or when
// btcrpcclient is modified to allow some notification request to not
// automatically resent on reconnect, include the notifyblocks request
// as well. I am leaning towards allowing off all btcrpcclient
// notification re-registrations, in which case the code here should be
// left as is.
err = chainClient.NotifyBlocks()
if err != nil {
return err
}

// Request notifications for transactions sending to all wallet
// addresses.
var (

@@ -363,64 +351,83 @@ func (w *Wallet) syncWithChain() error {
// addresses ever created, including those that don't need to be watched
// anymore. This code should be updated when this assumption is no
// longer true, but worst case would result in an unnecessary rescan.
if len(addrs) == 0 && len(unspent) == 0 {
// TODO: It would be ideal if on initial sync wallet saved the
// last several recent blocks rather than just one. This would
// avoid a full rescan for a one block reorg of the current
// chain tip.
hash, height, err := chainClient.GetBestBlock()
if len(addrs) == 0 && len(unspent) == 0 && w.Manager.SyncedTo().Height == 0 {
_, bestHeight, err := chainClient.GetBestBlock()
if err != nil {
return err
}
return walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
return w.Manager.SetSyncedTo(ns, &waddrmgr.BlockStamp{
Hash: *hash,
Height: height,
for height := int32(1); height <= bestHeight; height++ {
hash, err := chainClient.GetBlockHash(int64(height))
if err != nil {
return err
}
err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
ns := tx.ReadWriteBucket(waddrmgrNamespaceKey)
return w.Manager.SetSyncedTo(ns, &waddrmgr.BlockStamp{
Hash: *hash,
Height: height,
})
})
})
if err != nil {
return err
}
}
}

// Compare previously-seen blocks against the chain server. If any of
// these blocks no longer exist, rollback all of the missing blocks
// before catching up with the rescan.
iter := w.Manager.NewIterateRecentBlocks()
rollback := iter == nil
syncBlock := waddrmgr.BlockStamp{
Hash: *w.chainParams.GenesisHash,
Height: 0,
}
for cont := iter != nil; cont; cont = iter.Prev() {
bs := iter.BlockStamp()
log.Debugf("Checking for previous saved block with height %v hash %v",
bs.Height, bs.Hash)
_, err = chainClient.GetBlock(&bs.Hash)
if err != nil {
err = walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadWriteBucket(wtxmgrNamespaceKey)
rollback := false
rollbackStamp := w.Manager.SyncedTo()
for height := rollbackStamp.Height; true; height-- {
hash, err := w.Manager.BlockHash(addrmgrNs, height)
if err != nil {
return err
}
chainHash, err := chainClient.GetBlockHash(int64(height))
if err != nil {
return err
}
rollbackStamp.Hash = *chainHash
rollbackStamp.Height = height
if bytes.Equal(hash[:], chainHash[:]) {
break
}
rollback = true
continue
}

log.Debug("Found matching block.")
syncBlock = bs
break
}

if rollback {
err := walletdb.Update(w.db, func(tx walletdb.ReadWriteTx) error {
addrmgrNs := tx.ReadWriteBucket(waddrmgrNamespaceKey)
txmgrNs := tx.ReadWriteBucket(wtxmgrNamespaceKey)
err := w.Manager.SetSyncedTo(addrmgrNs, &syncBlock)
if rollback {
err := w.Manager.SetSyncedTo(addrmgrNs, &rollbackStamp)
if err != nil {
return err
}
// Rollback unconfirms transactions at and beyond the passed
// height, so add one to the new synced-to height to prevent
// unconfirming txs from the synced-to block.
return w.TxStore.Rollback(txmgrNs, syncBlock.Height+1)
})
if err != nil {
return err
err = w.TxStore.Rollback(txmgrNs, rollbackStamp.Height+1)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}

// Request notifications for connected and disconnected blocks.
//
// TODO(jrick): Either request this notification only once, or when
// btcrpcclient is modified to allow some notification request to not
// automatically resent on reconnect, include the notifyblocks request
// as well. I am leaning towards allowing off all btcrpcclient
// notification re-registrations, in which case the code here should be
// left as is.
err = chainClient.NotifyBlocks()
if err != nil {
return err
}

return w.Rescan(addrs, unspent)