Fixed a couple of bugs and added --dropcfindex option
parent 763842329b
commit 6102e129c5
4 changed files with 68 additions and 24 deletions
blockchain/indexers/cfindex.go

@@ -6,7 +6,9 @@ package indexers

 import (
     "errors"

     "github.com/btcsuite/btcd/blockchain"
+    "github.com/btcsuite/btcd/chaincfg"
     "github.com/btcsuite/btcd/chaincfg/chainhash"
     "github.com/btcsuite/btcd/database"
     "github.com/btcsuite/btcutil"
@@ -37,14 +39,14 @@ var (
     cfExtendedHeaderKey = []byte("cf1headerbyhashidx")
 )

-// dbFetchFilter() retrieves a block's basic or extended filter. A filter's
+// dbFetchFilter retrieves a block's basic or extended filter. A filter's
 // absence is not considered an error.
 func dbFetchFilter(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) {
     idx := dbTx.Metadata().Bucket(key)
     return idx.Get(h[:]), nil
 }

-// dbFetchFilterHeader() retrieves a block's basic or extended filter header.
+// dbFetchFilterHeader retrieves a block's basic or extended filter header.
 // A filter's absence is not considered an error.
 func dbFetchFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) {
     idx := dbTx.Metadata().Bucket(key)
@@ -55,13 +57,13 @@ func dbFetchFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byt
     return fh, nil
 }

-// dbStoreFilter() stores a block's basic or extended filter.
+// dbStoreFilter stores a block's basic or extended filter.
 func dbStoreFilter(dbTx database.Tx, key []byte, h *chainhash.Hash, f []byte) error {
     idx := dbTx.Metadata().Bucket(key)
     return idx.Put(h[:], f)
 }

-// dbStoreFilterHeader() stores a block's basic or extended filter header.
+// dbStoreFilterHeader stores a block's basic or extended filter header.
 func dbStoreFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash, fh []byte) error {
     if len(fh) != fastsha256.Size {
         return errors.New("invalid filter header length")
@@ -70,13 +72,13 @@ func dbStoreFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash, fh []b
     return idx.Put(h[:], fh)
 }

-// dbDeleteFilter() deletes a filter's basic or extended filter.
+// dbDeleteFilter deletes a filter's basic or extended filter.
 func dbDeleteFilter(dbTx database.Tx, key []byte, h *chainhash.Hash) error {
     idx := dbTx.Metadata().Bucket(key)
     return idx.Delete(h[:])
 }

-// dbDeleteFilterHeader() deletes a filter's basic or extended filter header.
+// dbDeleteFilterHeader deletes a filter's basic or extended filter header.
 func dbDeleteFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash) error {
     idx := dbTx.Metadata().Bucket(key)
     return idx.Delete(h[:])
@@ -85,6 +87,7 @@ func dbDeleteFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash) error
 // CfIndex implements a committed filter (cf) by hash index.
 type CfIndex struct {
     db database.DB
+    chainParams *chaincfg.Params
 }

 // Ensure the CfIndex type implements the Indexer interface.
@@ -108,7 +111,7 @@ func (idx *CfIndex) Name() string {
     return cfIndexName
 }

-// Create() is invoked when the indexer manager determines the index needs to
+// Create is invoked when the indexer manager determines the index needs to
 // be created for the first time. It creates buckets for the two hash-based cf
 // indexes (simple, extended).
 func (idx *CfIndex) Create(dbTx database.Tx) error {
@@ -126,10 +129,29 @@ func (idx *CfIndex) Create(dbTx database.Tx) error {
         return err
     }
     _, err = meta.CreateBucket(cfExtendedHeaderKey)
+    if err != nil {
+        return err
+    }
+    firstHeader := make([]byte, chainhash.HashSize)
+    err = dbStoreFilterHeader(
+        dbTx,
+        cfBasicHeaderKey,
+        &idx.chainParams.GenesisBlock.Header.PrevBlock,
+        firstHeader,
+    )
+    if err != nil {
+        return err
+    }
+    err = dbStoreFilterHeader(
+        dbTx,
+        cfExtendedHeaderKey,
+        &idx.chainParams.GenesisBlock.Header.PrevBlock,
+        firstHeader,
+    )
     return err
 }

-// makeBasicFilterForBlock() builds a block's basic filter, which consists of
+// makeBasicFilterForBlock builds a block's basic filter, which consists of
 // all outpoints and pkscript data pushes referenced by transactions within the
 // block.
 func makeBasicFilterForBlock(block *btcutil.Block) ([]byte, error) {
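The block of additions above seeds both header buckets with an all-zero 32-byte header keyed by the genesis block's PrevBlock hash, so the genesis block's own filter header has a previous header to chain from. A small standalone illustration (not part of the commit) that only prints the key and value the added code stores:

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/chaincfg"
    "github.com/btcsuite/btcd/chaincfg/chainhash"
)

func main() {
    // The added Create code keys the seeded entries by the genesis block's
    // PrevBlock hash, which is the all-zero hash.
    prevKey := chaincfg.MainNetParams.GenesisBlock.Header.PrevBlock

    // The seeded value is a zero-filled header of chainhash.HashSize (32)
    // bytes; it is the starting point of the filter header chain.
    firstHeader := make([]byte, chainhash.HashSize)

    fmt.Println("seed key:   ", prevKey.String())
    fmt.Printf("seed header: %x\n", firstHeader)
}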
@@ -153,7 +175,7 @@ func makeBasicFilterForBlock(block *btcutil.Block) ([]byte, error) {
     return f.Bytes(), nil
 }

-// makeExtendedFilterForBlock() builds a block's extended filter, which consists
+// makeExtendedFilterForBlock builds a block's extended filter, which consists
 // of all tx hashes and sigscript data pushes contained in the block.
 func makeExtendedFilterForBlock(block *btcutil.Block) ([]byte, error) {
     b := builder.WithKeyHash(block.Hash())
@@ -174,7 +196,7 @@ func makeExtendedFilterForBlock(block *btcutil.Block) ([]byte, error) {
     return f.Bytes(), nil
 }

-// makeHeaderForFilter() implements the chaining logic between filters, where
+// makeHeaderForFilter implements the chaining logic between filters, where
 // a filter's header is defined as sha256(sha256(filter) + previousFilterHeader).
 func makeHeaderForFilter(f, pfh []byte) []byte {
     fhash := fastsha256.Sum256(f)
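For reference, the chaining rule described in that comment can be sketched outside the indexer. This is a minimal illustration, not the btcd implementation: it uses crypto/sha256 in place of fastsha256 and starts from the 32-byte zero header that Create seeds for the genesis block's PrevBlock hash.

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// makeHeaderForFilter mirrors the rule documented above:
// header = sha256(sha256(filter) || previousFilterHeader).
func makeHeaderForFilter(filter, prevHeader []byte) []byte {
    fhash := sha256.Sum256(filter)
    buf := make([]byte, 0, len(fhash)+len(prevHeader))
    buf = append(buf, fhash[:]...)
    buf = append(buf, prevHeader...)
    fh := sha256.Sum256(buf)
    return fh[:]
}

func main() {
    // The chain starts from 32 zero bytes, matching the value the updated
    // Create stores under the genesis block's PrevBlock hash.
    prev := make([]byte, 32)

    // Chain three made-up filters; each header commits to its filter and,
    // transitively, to every header before it.
    for i, filter := range [][]byte{{0x01}, {0x02, 0x03}, {0x04}} {
        prev = makeHeaderForFilter(filter, prev)
        fmt.Printf("header %d: %s\n", i, hex.EncodeToString(prev))
    }
}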
@@ -185,7 +207,7 @@ func makeHeaderForFilter(f, pfh []byte) []byte {
     return fh[:]
 }

-// storeFilter() stores a given filter, and performs the steps needed to
+// storeFilter stores a given filter, and performs the steps needed to
 // generate the filter's header.
 func storeFilter(dbTx database.Tx, block *btcutil.Block, f []byte, extended bool) error {
     // Figure out which buckets to use.
@@ -209,10 +231,10 @@ func storeFilter(dbTx database.Tx, block *btcutil.Block, f []byte, extended bool
     }
     // Construct the new block's filter header, and store it.
     fh := makeHeaderForFilter(f, pfh)
-    return dbStoreFilterHeader(dbTx, cfBasicHeaderKey, h, fh)
+    return dbStoreFilterHeader(dbTx, hkey, h, fh)
 }

-// ConnectBlock() is invoked by the index manager when a new block has been
+// ConnectBlock is invoked by the index manager when a new block has been
 // connected to the main chain. This indexer adds a hash-to-cf mapping for
 // every passed block. This is part of the Indexer interface.
 func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
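The single-line change in storeFilter above is one of the bug fixes named in the commit message: the freshly built header is now written under hkey, the header bucket selected for the filter type being stored, rather than always under cfBasicHeaderKey, which had routed extended filter headers into the basic header bucket. A runnable sketch of the selection pattern the fix relies on; the placeholder key values and the fkey name are assumptions, since those lines sit outside this hunk:

package main

import "fmt"

func main() {
    // Stand-ins for the package-level bucket keys; only cf1headerbyhashidx
    // appears verbatim in this diff, the other values are placeholders.
    var (
        cfBasicIndexKey     = []byte("cf-basic-filter")
        cfBasicHeaderKey    = []byte("cf-basic-header")
        cfExtendedIndexKey  = []byte("cf-extended-filter")
        cfExtendedHeaderKey = []byte("cf1headerbyhashidx")
    )

    for _, extended := range []bool{false, true} {
        // Pick the filter bucket (fkey) and header bucket (hkey) for the
        // filter type; the fixed line consumes the result via hkey.
        fkey, hkey := cfBasicIndexKey, cfBasicHeaderKey
        if extended {
            fkey, hkey = cfExtendedIndexKey, cfExtendedHeaderKey
        }
        fmt.Printf("extended=%v: filter bucket %q, header bucket %q\n",
            extended, fkey, hkey)
    }
}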
@@ -232,7 +254,7 @@ func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
     return storeFilter(dbTx, block, f, true)
 }

-// DisconnectBlock() is invoked by the index manager when a block has been
+// DisconnectBlock is invoked by the index manager when a block has been
 // disconnected from the main chain. This indexer removes the hash-to-cf
 // mapping for every passed block. This is part of the Indexer interface.
 func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,
@@ -244,7 +266,7 @@ func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,
     return dbDeleteFilter(dbTx, cfExtendedIndexKey, block.Hash())
 }

-// FilterByBlockHash() returns the serialized contents of a block's basic or
+// FilterByBlockHash returns the serialized contents of a block's basic or
 // extended committed filter.
 func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash, extended bool) ([]byte, error) {
     var f []byte
@@ -260,7 +282,7 @@ func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash, extended bool) ([]byte,
     return f, err
 }

-// FilterHeaderByBlockHash() returns the serialized contents of a block's basic
+// FilterHeaderByBlockHash returns the serialized contents of a block's basic
 // or extended committed filter header.
 func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash, extended bool) ([]byte, error) {
     var fh []byte
@@ -283,11 +305,24 @@ func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash, extended bool) ([
 // It implements the Indexer interface which plugs into the IndexManager that in
 // turn is used by the blockchain package. This allows the index to be
 // seamlessly maintained along with the chain.
-func NewCfIndex(db database.DB) *CfIndex {
-    return &CfIndex{db: db}
+func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex {
+    return &CfIndex{db: db, chainParams: chainParams}
 }

 // DropCfIndex drops the CF index from the provided database if exists.
 func DropCfIndex(db database.DB) error {
-    return dropIndex(db, cfBasicIndexKey, cfIndexName)
+    err := dropIndex(db, cfBasicIndexKey, cfIndexName)
+    if err != nil {
+        return err
+    }
+    err = dropIndex(db, cfBasicHeaderKey, cfIndexName)
+    if err != nil {
+        return err
+    }
+    err = dropIndex(db, cfExtendedIndexKey, cfIndexName)
+    if err != nil {
+        return err
+    }
+    err = dropIndex(db, cfExtendedHeaderKey, cfIndexName)
+    return err
 }
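DropCfIndex now removes all four cf buckets (basic and extended filters plus their headers) instead of only the basic filter bucket, which is what makes --dropcfindex a complete clean-up. For comparison only, an equivalent loop-based arrangement inside the indexers package could look like the sketch below; the dropCfBuckets name is hypothetical, and the commit itself ships the four explicit calls shown above:

package indexers

import "github.com/btcsuite/btcd/database"

// dropCfBuckets is a hypothetical loop-based equivalent of the DropCfIndex
// body above; it reuses the same dropIndex helper and bucket key variables.
func dropCfBuckets(db database.DB) error {
    for _, key := range [][]byte{
        cfBasicIndexKey,
        cfBasicHeaderKey,
        cfExtendedIndexKey,
        cfExtendedHeaderKey,
    } {
        if err := dropIndex(db, key, cfIndexName); err != nil {
            return err
        }
    }
    return nil
}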
btcd.go (8 changed lines)

@@ -135,6 +135,14 @@ func btcdMain(serverChan chan<- *server) error {

         return nil
     }
+    if cfg.DropCfIndex {
+        if err := indexers.DropCfIndex(db); err != nil {
+            btcdLog.Errorf("%v", err)
+            return err
+        }
+
+        return nil
+    }

     // Create server and start it.
     server, err := newServer(cfg.Listeners, db, activeNetParams.Params,
config.go

@@ -151,6 +151,7 @@ type config struct {
     UserAgentComments []string `long:"uacomment" description:"Comment to add to the user agent -- See BIP 14 for more information."`
     NoPeerBloomFilters bool `long:"nopeerbloomfilters" description:"Disable bloom filtering support"`
     NoCFilters bool `long:"nocfilters" description:"Disable committed filtering (CF) support"`
+    DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."`
     SigCacheMaxSize uint `long:"sigcachemaxsize" description:"The maximum number of entries in the signature verification cache"`
     BlocksOnly bool `long:"blocksonly" description:"Do not accept transactions from remote peers."`
     TxIndex bool `long:"txindex" description:"Maintain a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"`
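The new DropCfIndex field follows the same convention as the surrounding options: the long tag exposes it as a --dropcfindex command-line switch, with a matching entry available in btcd.conf. A minimal standalone sketch of that tag-to-flag mapping, assuming the go-flags package (github.com/jessevdk/go-flags, or the btcsuite fork of it) that btcd's option parsing is built on:

package main

import (
    "fmt"
    "os"

    flags "github.com/jessevdk/go-flags"
)

// options mirrors just the new field; the rest of btcd's config struct is
// omitted for brevity.
type options struct {
    DropCfIndex bool `long:"dropcfindex" description:"Deletes the index used for committed filtering (CF) support from the database on start up and then exits."`
}

func main() {
    var opts options
    if _, err := flags.Parse(&opts); err != nil {
        os.Exit(1)
    }
    // Invoked as "go run . --dropcfindex", this prints true.
    fmt.Println("dropcfindex:", opts.DropCfIndex)
}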
server.go

@@ -2249,7 +2249,7 @@ func newServer(listenAddrs []string, db database.DB, chainParams *chaincfg.Param
     }
     if !cfg.NoCFilters {
         indxLog.Info("cf index is enabled")
-        s.cfIndex = indexers.NewCfIndex(db)
+        s.cfIndex = indexers.NewCfIndex(db, chainParams)
         indexes = append(indexes, s.cfIndex)
     }
