package chain

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcutil/gcs"
	"github.com/btcsuite/btcutil/gcs/builder"
	"github.com/btcsuite/btcwallet/waddrmgr"
	"github.com/btcsuite/btcwallet/wtxmgr"
	"github.com/lightninglabs/neutrino"
	"github.com/lightninglabs/neutrino/headerfs"
)

// NeutrinoClient is an implementation of the btcwallet chain.Interface
// interface.
type NeutrinoClient struct {
	CS *neutrino.ChainService

	chainParams *chaincfg.Params

	// We currently support one rescan/notification goroutine per client
	rescan *neutrino.Rescan

	enqueueNotification     chan interface{}
	dequeueNotification     chan interface{}
	startTime               time.Time
	lastProgressSent        bool
	lastFilteredBlockHeader *wire.BlockHeader
	currentBlock            chan *waddrmgr.BlockStamp

	quit       chan struct{}
	rescanQuit chan struct{}
	rescanErr  <-chan error
	wg         sync.WaitGroup
	started    bool
	scanning   bool
	finished   bool
	isRescan   bool

	clientMtx sync.Mutex
}

// NewNeutrinoClient creates a new NeutrinoClient struct with a backing
// ChainService.
func NewNeutrinoClient(chainParams *chaincfg.Params,
	chainService *neutrino.ChainService) *NeutrinoClient {

	return &NeutrinoClient{
		CS:          chainService,
		chainParams: chainParams,
	}
}

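// A minimal usage sketch (illustrative only, not part of the API surface):
// it assumes a caller outside this package with an already configured and
// running *neutrino.ChainService named cs.
//
//	client := chain.NewNeutrinoClient(&chaincfg.MainNetParams, cs)
//	if err := client.Start(); err != nil {
//		// handle error
//	}
//	if err := client.NotifyBlocks(); err != nil {
//		// handle error
//	}
//	go func() {
//		for ntfn := range client.Notifications() {
//			// handle ClientConnected, BlockConnected,
//			// FilteredBlockConnected, etc.
//		}
//	}()
//	// ... later, on shutdown:
//	client.Stop()
//	client.WaitForShutdown()
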
// BackEnd returns the name of the driver.
func (s *NeutrinoClient) BackEnd() string {
	return "neutrino"
}

// Start replicates the RPC client's Start method.
func (s *NeutrinoClient) Start() error {
	if err := s.CS.Start(); err != nil {
		return fmt.Errorf("error starting chain service: %v", err)
	}

	s.clientMtx.Lock()
	defer s.clientMtx.Unlock()
	if !s.started {
		s.enqueueNotification = make(chan interface{})
		s.dequeueNotification = make(chan interface{})
		s.currentBlock = make(chan *waddrmgr.BlockStamp)
		s.quit = make(chan struct{})
		s.started = true
		s.wg.Add(1)
		go func() {
			select {
			case s.enqueueNotification <- ClientConnected{}:
			case <-s.quit:
			}
		}()
		go s.notificationHandler()
	}
	return nil
}

// Stop replicates the RPC client's Stop method.
func (s *NeutrinoClient) Stop() {
	s.clientMtx.Lock()
	defer s.clientMtx.Unlock()
	if !s.started {
		return
	}
	close(s.quit)
	s.started = false
}

// WaitForShutdown replicates the RPC client's WaitForShutdown method.
func (s *NeutrinoClient) WaitForShutdown() {
	s.wg.Wait()
}

// GetBlock replicates the RPC client's GetBlock command.
func (s *NeutrinoClient) GetBlock(hash *chainhash.Hash) (*wire.MsgBlock, error) {
	// TODO(roasbeef): add a block cache?
	//  * which eviction strategy? depends on use case
	//  Should the block cache be INSIDE neutrino instead of in btcwallet?
	block, err := s.CS.GetBlock(*hash)
	if err != nil {
		return nil, err
	}
	return block.MsgBlock(), nil
}

// GetBlockHeight gets the height of a block by its hash. It serves as a
// replacement for the use of GetBlockVerboseTxAsync for the wallet package
// since we can't actually return a FutureGetBlockVerboseResult because the
// underlying type is private to rpcclient.
func (s *NeutrinoClient) GetBlockHeight(hash *chainhash.Hash) (int32, error) {
	return s.CS.GetBlockHeight(hash)
}

// GetBestBlock replicates the RPC client's GetBestBlock command.
func (s *NeutrinoClient) GetBestBlock() (*chainhash.Hash, int32, error) {
	chainTip, err := s.CS.BestBlock()
	if err != nil {
		return nil, 0, err
	}

	return &chainTip.Hash, chainTip.Height, nil
}

// BlockStamp returns the latest block notified by the client, or an error
// if the client has been shut down.
func (s *NeutrinoClient) BlockStamp() (*waddrmgr.BlockStamp, error) {
	select {
	case bs := <-s.currentBlock:
		return bs, nil
	case <-s.quit:
		return nil, errors.New("disconnected")
	}
}

// GetBlockHash returns the block hash for the given height, or an error if the
// client has been shut down or the hash at the block height doesn't exist or
// is unknown.
func (s *NeutrinoClient) GetBlockHash(height int64) (*chainhash.Hash, error) {
	return s.CS.GetBlockHash(height)
}

// GetBlockHeader returns the block header for the given block hash, or an error
// if the client has been shut down or the hash doesn't exist or is unknown.
func (s *NeutrinoClient) GetBlockHeader(
	blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
	return s.CS.GetBlockHeader(blockHash)
}

// IsCurrent returns whether the chain backend considers its view of the network
// as "current".
func (s *NeutrinoClient) IsCurrent() bool {
	return s.CS.IsCurrent()
}

// SendRawTransaction replicates the RPC client's SendRawTransaction command.
func (s *NeutrinoClient) SendRawTransaction(tx *wire.MsgTx, allowHighFees bool) (
	*chainhash.Hash, error) {
	err := s.CS.SendTransaction(tx)
	if err != nil {
		return nil, err
	}
	hash := tx.TxHash()
	return &hash, nil
}

// FilterBlocks scans the blocks contained in the FilterBlocksRequest for any
// addresses of interest. For each requested block, the corresponding compact
// filter will first be checked for matches, skipping those that do not report
// anything. If the filter returns a positive match, the full block will be
// fetched and filtered. This method returns a FilterBlocksResponse for the
// first block containing a matching address. If no matches are found in the
// range of blocks requested, the returned response will be nil.
func (s *NeutrinoClient) FilterBlocks(
	req *FilterBlocksRequest) (*FilterBlocksResponse, error) {

	blockFilterer := NewBlockFilterer(s.chainParams, req)

	// Construct the watchlist using the addresses and outpoints contained
	// in the filter blocks request.
	watchList, err := buildFilterBlocksWatchList(req)
	if err != nil {
		return nil, err
	}

	// Iterate over the requested blocks, fetching the compact filter for
	// each one, and matching it against the watchlist generated above. If
	// the filter returns a positive match, the full block is then requested
	// and scanned for addresses using the block filterer.
	for i, blk := range req.Blocks {
		// TODO(wilmer): Investigate why polling is still necessary
		// here. While testing, I ran into a few instances where the
		// filter was not retrieved, leading to a panic. This should not
		// happen in most cases thanks to the query logic revamp within
		// Neutrino, but it seems there's still an uncovered edge case.
		filter, err := s.pollCFilter(&blk.Hash)
		if err != nil {
			return nil, err
		}

		// Skip any empty filters.
		if filter == nil || filter.N() == 0 {
			continue
		}

		key := builder.DeriveKey(&blk.Hash)
		matched, err := filter.MatchAny(key, watchList)
		if err != nil {
			return nil, err
		} else if !matched {
			continue
		}

		log.Infof("Fetching block height=%d hash=%v",
			blk.Height, blk.Hash)

		// TODO(conner): can optimize bandwidth by only fetching
		// stripped blocks
		rawBlock, err := s.GetBlock(&blk.Hash)
		if err != nil {
			return nil, err
		}

		if !blockFilterer.FilterBlock(rawBlock) {
			continue
		}

		// If any external or internal addresses were detected in this
		// block, we return them to the caller so that the rescan
		// windows can be widened with subsequent addresses. The
		// `BatchIndex` is returned so that the caller can compute the
		// *next* block from which to begin again.
		resp := &FilterBlocksResponse{
			BatchIndex:         uint32(i),
			BlockMeta:          blk,
			FoundExternalAddrs: blockFilterer.FoundExternal,
			FoundInternalAddrs: blockFilterer.FoundInternal,
			FoundOutPoints:     blockFilterer.FoundOutPoints,
			RelevantTxns:       blockFilterer.RelevantTxns,
		}

		return resp, nil
	}

	// No addresses were found for this range.
	return nil, nil
}

// buildFilterBlocksWatchList constructs a watchlist used for matching against a
// cfilter from a FilterBlocksRequest. The watchlist will be populated with all
// external addresses, internal addresses, and outpoints contained in the
// request.
func buildFilterBlocksWatchList(req *FilterBlocksRequest) ([][]byte, error) {
	// Construct a watch list containing the script addresses of all
	// internal and external addresses that were requested, in addition to
	// the set of outpoints currently being watched.
	watchListSize := len(req.ExternalAddrs) +
		len(req.InternalAddrs) +
		len(req.WatchedOutPoints)

	watchList := make([][]byte, 0, watchListSize)

	for _, addr := range req.ExternalAddrs {
		p2shAddr, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return nil, err
		}

		watchList = append(watchList, p2shAddr)
	}

	for _, addr := range req.InternalAddrs {
		p2shAddr, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return nil, err
		}

		watchList = append(watchList, p2shAddr)
	}

	for _, addr := range req.WatchedOutPoints {
		addr, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return nil, err
		}

		watchList = append(watchList, addr)
	}

	return watchList, nil
}

// pollCFilter attempts to fetch a CFilter from the neutrino client. This is
// used to get around the fact that the filter headers may lag behind the
// highest known block header.
func (s *NeutrinoClient) pollCFilter(hash *chainhash.Hash) (*gcs.Filter, error) {
	var (
		filter *gcs.Filter
		err    error
		count  int
	)

	const maxFilterRetries = 50
	for count < maxFilterRetries {
		if count > 0 {
			time.Sleep(100 * time.Millisecond)
		}

		filter, err = s.CS.GetCFilter(
			*hash, wire.GCSFilterRegular, neutrino.OptimisticBatch(),
		)
		if err != nil {
			count++
			continue
		}

		return filter, nil
	}

	return nil, err
}

// Rescan replicates the RPC client's Rescan command.
func (s *NeutrinoClient) Rescan(startHash *chainhash.Hash, addrs []btcutil.Address,
	outPoints map[wire.OutPoint]btcutil.Address) error {

	s.clientMtx.Lock()
	if !s.started {
		s.clientMtx.Unlock()
		return fmt.Errorf("can't do a rescan when the chain client " +
			"is not started")
	}
	if s.scanning {
		// Restart the rescan by killing the existing rescan.
		close(s.rescanQuit)
		rescan := s.rescan
		s.clientMtx.Unlock()
		rescan.WaitForShutdown()
		s.clientMtx.Lock()
		s.rescan = nil
		s.rescanErr = nil
	}
	s.rescanQuit = make(chan struct{})
	s.scanning = true
	s.finished = false
	s.lastProgressSent = false
	s.lastFilteredBlockHeader = nil
	s.isRescan = true
	s.clientMtx.Unlock()

	bestBlock, err := s.CS.BestBlock()
	if err != nil {
		return fmt.Errorf("can't get chain service's best block: %s", err)
	}
	header, err := s.CS.GetBlockHeader(&bestBlock.Hash)
	if err != nil {
		return fmt.Errorf("can't get block header for hash %v: %s",
			bestBlock.Hash, err)
	}

	// If the wallet is already fully caught up, or the rescan has started
	// with state that indicates a "fresh" wallet, we'll send a
	// notification indicating the rescan has "finished".
	if header.BlockHash() == *startHash {
		s.clientMtx.Lock()
		s.finished = true
		rescanQuit := s.rescanQuit
		s.clientMtx.Unlock()

		// Release the lock while dispatching the notification since
		// it's possible for the notificationHandler to be waiting to
		// acquire it before receiving the notification.
		select {
		case s.enqueueNotification <- &RescanFinished{
			Hash:   startHash,
			Height: bestBlock.Height,
			Time:   header.Timestamp,
		}:
		case <-s.quit:
			return nil
		case <-rescanQuit:
			return nil
		}
	}

	var inputsToWatch []neutrino.InputWithScript
	for op, addr := range outPoints {
		addrScript, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return err
		}

		inputsToWatch = append(inputsToWatch, neutrino.InputWithScript{
			OutPoint: op,
			PkScript: addrScript,
		})
	}

	s.clientMtx.Lock()
	newRescan := neutrino.NewRescan(
		&neutrino.RescanChainSource{
			ChainService: s.CS,
		},
		neutrino.NotificationHandlers(rpcclient.NotificationHandlers{
			OnBlockConnected:         s.onBlockConnected,
			OnFilteredBlockConnected: s.onFilteredBlockConnected,
			OnBlockDisconnected:      s.onBlockDisconnected,
		}),
		neutrino.StartBlock(&headerfs.BlockStamp{Hash: *startHash}),
		neutrino.StartTime(s.startTime),
		neutrino.QuitChan(s.rescanQuit),
		neutrino.WatchAddrs(addrs...),
		neutrino.WatchInputs(inputsToWatch...),
	)
	s.rescan = newRescan
	s.rescanErr = s.rescan.Start()
	s.clientMtx.Unlock()

	return nil
}

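// A minimal rescan sketch (illustrative only): assuming client is a started
// *NeutrinoClient, birthdayHash is the hash of the wallet's birthday block,
// and addrs holds the wallet's addresses, a caller could kick off a rescan
// with no watched outpoints like so:
//
//	if err := client.Rescan(&birthdayHash, addrs, nil); err != nil {
//		// handle error
//	}
//
// Progress is then reported through the Notifications channel as
// RescanProgress and RescanFinished notifications.
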
// NotifyBlocks replicates the RPC client's NotifyBlocks command.
func (s *NeutrinoClient) NotifyBlocks() error {
	s.clientMtx.Lock()
	// If we're scanning, we're already notifying on blocks. Otherwise,
	// start a rescan without watching any addresses.
	if !s.scanning {
		s.clientMtx.Unlock()
		return s.NotifyReceived([]btcutil.Address{})
	}
	s.clientMtx.Unlock()
	return nil
}

// NotifyReceived replicates the RPC client's NotifyReceived command.
func (s *NeutrinoClient) NotifyReceived(addrs []btcutil.Address) error {
	s.clientMtx.Lock()

	// If we have a rescan running, we just need to add the appropriate
	// addresses to the watch list.
	if s.scanning {
		s.clientMtx.Unlock()
		return s.rescan.Update(neutrino.AddAddrs(addrs...))
	}

	s.rescanQuit = make(chan struct{})
	s.scanning = true

	// Don't need RescanFinished or RescanProgress notifications.
	s.finished = true
	s.lastProgressSent = true
	s.lastFilteredBlockHeader = nil

	// Rescan with just the specified addresses.
	newRescan := neutrino.NewRescan(
		&neutrino.RescanChainSource{
			ChainService: s.CS,
		},
		neutrino.NotificationHandlers(rpcclient.NotificationHandlers{
			OnBlockConnected:         s.onBlockConnected,
			OnFilteredBlockConnected: s.onFilteredBlockConnected,
			OnBlockDisconnected:      s.onBlockDisconnected,
		}),
		neutrino.StartTime(s.startTime),
		neutrino.QuitChan(s.rescanQuit),
		neutrino.WatchAddrs(addrs...),
	)
	s.rescan = newRescan
	s.rescanErr = s.rescan.Start()
	s.clientMtx.Unlock()
	return nil
}

// Notifications replicates the RPC client's Notifications method.
func (s *NeutrinoClient) Notifications() <-chan interface{} {
	return s.dequeueNotification
}

// SetStartTime is a non-interface method to set the birthday of the wallet
// using this object. Since only a single rescan at a time is currently
// supported, only one birthday needs to be set. This does not fully restart a
// running rescan, so should not be used to update a rescan while it is running.
// TODO: When factoring out to multiple rescans per Neutrino client, add a
// birthday per client.
func (s *NeutrinoClient) SetStartTime(startTime time.Time) {
	s.clientMtx.Lock()
	defer s.clientMtx.Unlock()

	s.startTime = startTime
}

// onFilteredBlockConnected sends appropriate notifications to the notification
// channel.
func (s *NeutrinoClient) onFilteredBlockConnected(height int32,
	header *wire.BlockHeader, relevantTxs []*btcutil.Tx) {
	ntfn := FilteredBlockConnected{
		Block: &wtxmgr.BlockMeta{
			Block: wtxmgr.Block{
				Hash:   header.BlockHash(),
				Height: height,
			},
			Time: header.Timestamp,
		},
	}
	for _, tx := range relevantTxs {
		rec, err := wtxmgr.NewTxRecordFromMsgTx(tx.MsgTx(),
			header.Timestamp)
		if err != nil {
			log.Errorf("Cannot create transaction record for "+
				"relevant tx: %s", err)
			// TODO(aakselrod): Return?
			continue
		}
		ntfn.RelevantTxs = append(ntfn.RelevantTxs, rec)
	}

	select {
	case s.enqueueNotification <- ntfn:
	case <-s.quit:
		return
	case <-s.rescanQuit:
		return
	}

	s.clientMtx.Lock()
	s.lastFilteredBlockHeader = header
	s.clientMtx.Unlock()

	// Handle RescanFinished notification if required.
	s.dispatchRescanFinished()
}

// onBlockDisconnected sends appropriate notifications to the notification
// channel.
func (s *NeutrinoClient) onBlockDisconnected(hash *chainhash.Hash, height int32,
	t time.Time) {
	select {
	case s.enqueueNotification <- BlockDisconnected{
		Block: wtxmgr.Block{
			Hash:   *hash,
			Height: height,
		},
		Time: t,
	}:
	case <-s.quit:
	case <-s.rescanQuit:
	}
}

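// onBlockConnected sends the appropriate notifications to the notification
// channel for a newly connected block: RescanProgress notifications while
// still processing blocks before the wallet's birthday, and BlockConnected
// notifications once past it.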
func (s *NeutrinoClient) onBlockConnected(hash *chainhash.Hash, height int32,
	time time.Time) {
	// TODO: Move this closure out and parameterize it? Is it useful
	// outside here?
	sendRescanProgress := func() {
		select {
		case s.enqueueNotification <- &RescanProgress{
			Hash:   hash,
			Height: height,
			Time:   time,
		}:
		case <-s.quit:
		case <-s.rescanQuit:
		}
	}
	// Only send RescanProgress notifications while we're processing blocks
	// before the birthday; once we're past it, we send full BlockConnected
	// notifications.
	if time.Before(s.startTime) {
		// Send a RescanProgress notification every 10K blocks.
		if height%10000 == 0 {
			s.clientMtx.Lock()
			shouldSend := s.isRescan && !s.finished
			s.clientMtx.Unlock()
			if shouldSend {
				sendRescanProgress()
			}
		}
	} else {
		// Send a RescanProgress notification if we're just going over
		// the boundary between pre-birthday and post-birthday blocks,
		// and note that we've sent it.
		s.clientMtx.Lock()
		if !s.lastProgressSent {
			shouldSend := s.isRescan && !s.finished
			if shouldSend {
				s.clientMtx.Unlock()
				sendRescanProgress()
				s.clientMtx.Lock()
				s.lastProgressSent = true
			}
		}
		s.clientMtx.Unlock()
		select {
		case s.enqueueNotification <- BlockConnected{
			Block: wtxmgr.Block{
				Hash:   *hash,
				Height: height,
			},
			Time: time,
		}:
		case <-s.quit:
		case <-s.rescanQuit:
		}
	}

	// Check if we're able to dispatch our final RescanFinished notification
	// after processing this block.
	s.dispatchRescanFinished()
}

// dispatchRescanFinished determines whether we're able to dispatch our final
// RescanFinished notification in order to mark the wallet as synced with the
// chain. If the notification has already been dispatched, then it won't be done
// again.
func (s *NeutrinoClient) dispatchRescanFinished() {
	bs, err := s.CS.BestBlock()
	if err != nil {
		log.Errorf("Can't get chain service's best block: %s", err)
		return
	}

	s.clientMtx.Lock()
	// Only send the RescanFinished notification once.
	if s.lastFilteredBlockHeader == nil || s.finished {
		s.clientMtx.Unlock()
		return
	}

	// Only send the RescanFinished notification once the underlying chain
	// service sees itself as current.
	if bs.Hash != s.lastFilteredBlockHeader.BlockHash() {
		s.clientMtx.Unlock()
		return
	}

	s.finished = s.CS.IsCurrent() && s.lastProgressSent
	if !s.finished {
		s.clientMtx.Unlock()
		return
	}

	header := s.lastFilteredBlockHeader
	s.clientMtx.Unlock()

	select {
	case s.enqueueNotification <- &RescanFinished{
		Hash:   &bs.Hash,
		Height: bs.Height,
		Time:   header.Timestamp,
	}:
	case <-s.quit:
		return
	case <-s.rescanQuit:
		return
	}
}

// notificationHandler queues and dequeues notifications. There are currently
// no bounds on the queue, so the dequeue channel should be read continually to
// avoid running out of memory.
func (s *NeutrinoClient) notificationHandler() {
	hash, height, err := s.GetBestBlock()
	if err != nil {
		log.Errorf("Failed to get best block from chain service: %s",
			err)
		s.Stop()
		s.wg.Done()
		return
	}

	bs := &waddrmgr.BlockStamp{Hash: *hash, Height: height}

	// TODO: Rather than leaving this as an unbounded queue for all types of
	// notifications, try dropping ones where a later enqueued notification
	// can fully invalidate one waiting to be processed. For example,
	// blockconnected notifications for greater block heights can remove the
	// need to process earlier blockconnected notifications still waiting
	// here.

	var notifications []interface{}
	enqueue := s.enqueueNotification
	var dequeue chan interface{}
	var next interface{}
out:
	for {
		s.clientMtx.Lock()
		rescanErr := s.rescanErr
		s.clientMtx.Unlock()
		select {
		case n, ok := <-enqueue:
			if !ok {
				// If no notifications are queued for handling,
				// the queue is finished.
				if len(notifications) == 0 {
					break out
				}
				// nil channel so no more reads can occur.
				enqueue = nil
				continue
			}
			if len(notifications) == 0 {
				next = n
				dequeue = s.dequeueNotification
			}
			notifications = append(notifications, n)

		case dequeue <- next:
			if n, ok := next.(BlockConnected); ok {
				bs = &waddrmgr.BlockStamp{
					Height: n.Height,
					Hash:   n.Hash,
				}
			}

			notifications[0] = nil
			notifications = notifications[1:]
			if len(notifications) != 0 {
				next = notifications[0]
			} else {
				// If no more notifications can be enqueued, the
				// queue is finished.
				if enqueue == nil {
					break out
				}
				dequeue = nil
			}

		case err := <-rescanErr:
			if err != nil {
				log.Errorf("Neutrino rescan ended with error: %s", err)
			}

		case s.currentBlock <- bs:

		case <-s.quit:
			break out
		}
	}

	s.Stop()
	close(s.dequeueNotification)
	s.wg.Done()
}