Merge pull request #1211 from wpaulino/gcs-modifications
multi: update gcs indexing and serving code to match latest BIP 158 instance
commit fdfc19097e

8 changed files with 52 additions and 88 deletions
@@ -22,39 +22,36 @@ const (
     cfIndexName = "committed filter index"
 )
 
-// Committed filters come in two flavours: basic and extended. They are
-// generated and dropped in pairs, and both are indexed by a block's hash.
-// Besides holding different content, they also live in different buckets.
+// Committed filters come in one flavor currently: basic. They are generated
+// and dropped in pairs, and both are indexed by a block's hash. Besides
+// holding different content, they also live in different buckets.
 var (
-    // cfIndexParentBucketKey is the name of the parent bucket used to house
-    // the index. The rest of the buckets live below this bucket.
+    // cfIndexParentBucketKey is the name of the parent bucket used to
+    // house the index. The rest of the buckets live below this bucket.
     cfIndexParentBucketKey = []byte("cfindexparentbucket")
 
     // cfIndexKeys is an array of db bucket names used to house indexes of
     // block hashes to cfilters.
     cfIndexKeys = [][]byte{
         []byte("cf0byhashidx"),
-        []byte("cf1byhashidx"),
     }
 
     // cfHeaderKeys is an array of db bucket names used to house indexes of
     // block hashes to cf headers.
     cfHeaderKeys = [][]byte{
         []byte("cf0headerbyhashidx"),
-        []byte("cf1headerbyhashidx"),
     }
 
     // cfHashKeys is an array of db bucket names used to house indexes of
     // block hashes to cf hashes.
     cfHashKeys = [][]byte{
         []byte("cf0hashbyhashidx"),
-        []byte("cf1hashbyhashidx"),
     }
 
     maxFilterType = uint8(len(cfHeaderKeys) - 1)
 
-    // zeroHash is the chainhash.Hash value of all zero bytes, defined here for
-    // convenience.
+    // zeroHash is the chainhash.Hash value of all zero bytes, defined here
+    // for convenience.
     zeroHash chainhash.Hash
 )
@@ -86,6 +83,17 @@ type CfIndex struct {
 // Ensure the CfIndex type implements the Indexer interface.
 var _ Indexer = (*CfIndex)(nil)
 
+// Ensure the CfIndex type implements the NeedsInputser interface.
+var _ NeedsInputser = (*CfIndex)(nil)
+
+// NeedsInputs signals that the index requires the referenced inputs in order
+// to properly create the index.
+//
+// This implements the NeedsInputser interface.
+func (idx *CfIndex) NeedsInputs() bool {
+    return true
+}
+
 // Init initializes the hash-based cf index. This is part of the Indexer
 // interface.
 func (idx *CfIndex) Init() error {
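The NeedsInputser hook added above is what lets the index manager know it must hand the filter index the spent outputs of each connected block. Below is a minimal, self-contained sketch of that pattern; the Indexer and NeedsInputser interfaces here are trimmed stand-ins for illustration, not btcd's full definitions.

package main

import "fmt"

// Indexer is a trimmed-down stand-in for btcd's indexers.Indexer interface,
// kept here only to make the sketch self-contained.
type Indexer interface {
    Name() string
}

// NeedsInputser mirrors the interface the diff adds: an index implements it
// to signal that it must be handed the spent outputs (the referenced inputs)
// of each connected block.
type NeedsInputser interface {
    NeedsInputs() bool
}

// cfIndexStub plays the role of the filter index: it needs the previous
// output scripts to build BIP 158 basic filters.
type cfIndexStub struct{}

func (c *cfIndexStub) Name() string      { return "committed filter index" }
func (c *cfIndexStub) NeedsInputs() bool { return true }

func main() {
    indexes := []Indexer{&cfIndexStub{}}
    for _, idx := range indexes {
        // The manager only goes to the expense of gathering spent outputs
        // when an index asks for them via the optional interface.
        if ni, ok := idx.(NeedsInputser); ok && ni.NeedsInputs() {
            fmt.Printf("%s requires referenced inputs\n", idx.Name())
        }
    }
}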
@@ -106,7 +114,7 @@ func (idx *CfIndex) Name() string {
 
 // Create is invoked when the indexer manager determines the index needs to
 // be created for the first time. It creates buckets for the two hash-based cf
-// indexes (simple, extended).
+// indexes (regular only currently).
 func (idx *CfIndex) Create(dbTx database.Tx) error {
     meta := dbTx.Metadata()
 
@@ -204,22 +212,17 @@ func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter,
 func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
     stxos []blockchain.SpentTxOut) error {
 
-    f, err := builder.BuildBasicFilter(block.MsgBlock())
+    prevScripts := make([][]byte, len(stxos))
+    for i, stxo := range stxos {
+        prevScripts[i] = stxo.PkScript
+    }
+
+    f, err := builder.BuildBasicFilter(block.MsgBlock(), prevScripts)
     if err != nil {
         return err
     }
 
-    err = storeFilter(dbTx, block, f, wire.GCSFilterRegular)
-    if err != nil {
-        return err
-    }
-
-    f, err = builder.BuildExtFilter(block.MsgBlock())
-    if err != nil {
-        return err
-    }
-
-    return storeFilter(dbTx, block, f, wire.GCSFilterExtended)
+    return storeFilter(dbTx, block, f, wire.GCSFilterRegular)
 }
 
 // DisconnectBlock is invoked by the index manager when a block has been
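With the extended filter gone, ConnectBlock builds only the regular filter, and it now passes the previous output script of every spent input to the builder, since the final BIP 158 basic filter commits to prevout scripts rather than outpoints. The sketch below illustrates which scripts end up in the filter under that rule; it is a self-contained approximation and not btcd's builder.BuildBasicFilter.

package main

import (
    "fmt"

    "github.com/btcsuite/btcd/wire"
)

// basicFilterScripts gathers the script data a BIP 158 basic filter commits
// to: the previous output script of every input spent by the block (supplied
// by the caller, e.g. from the SpentTxOut journal as in the diff) plus every
// output script created in the block. Empty scripts and OP_RETURN outputs
// are skipped.
func basicFilterScripts(block *wire.MsgBlock, prevScripts [][]byte) [][]byte {
    const opReturn = 0x6a

    var scripts [][]byte
    for _, script := range prevScripts {
        if len(script) == 0 {
            continue
        }
        scripts = append(scripts, script)
    }
    for _, tx := range block.Transactions {
        for _, txOut := range tx.TxOut {
            script := txOut.PkScript
            if len(script) == 0 || script[0] == opReturn {
                continue
            }
            scripts = append(scripts, script)
        }
    }
    return scripts
}

func main() {
    block := &wire.MsgBlock{}
    fmt.Println(len(basicFilterScripts(block, nil)))
}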
@@ -296,42 +299,42 @@ func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
 }
 
 // FilterByBlockHash returns the serialized contents of a block's basic or
-// extended committed filter.
+// committed filter.
 func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash,
     filterType wire.FilterType) ([]byte, error) {
     return idx.entryByBlockHash(cfIndexKeys, filterType, h)
 }
 
 // FiltersByBlockHashes returns the serialized contents of a block's basic or
-// extended committed filter for a set of blocks by hash.
+// committed filter for a set of blocks by hash.
 func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*chainhash.Hash,
     filterType wire.FilterType) ([][]byte, error) {
     return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
 }
 
 // FilterHeaderByBlockHash returns the serialized contents of a block's basic
-// or extended committed filter header.
+// committed filter header.
 func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash,
     filterType wire.FilterType) ([]byte, error) {
     return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
 }
 
-// FilterHeadersByBlockHashes returns the serialized contents of a block's basic
-// or extended committed filter header for a set of blocks by hash.
+// FilterHeadersByBlockHashes returns the serialized contents of a block's
+// basic committed filter header for a set of blocks by hash.
 func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*chainhash.Hash,
     filterType wire.FilterType) ([][]byte, error) {
     return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
 }
 
 // FilterHashByBlockHash returns the serialized contents of a block's basic
-// or extended committed filter hash.
+// committed filter hash.
 func (idx *CfIndex) FilterHashByBlockHash(h *chainhash.Hash,
     filterType wire.FilterType) ([]byte, error) {
     return idx.entryByBlockHash(cfHashKeys, filterType, h)
 }
 
 // FilterHashesByBlockHashes returns the serialized contents of a block's basic
-// or extended committed filter hash for a set of blocks by hash.
+// committed filter hash for a set of blocks by hash.
 func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*chainhash.Hash,
     filterType wire.FilterType) ([][]byte, error) {
     return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
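A short usage sketch of the accessors above, assuming a CfIndex has already been created and attached to the chain (how it is constructed is not shown); after this change only wire.GCSFilterRegular is a valid filter type argument.

package example

import (
    "fmt"

    "github.com/btcsuite/btcd/blockchain/indexers"
    "github.com/btcsuite/btcd/chaincfg/chainhash"
    "github.com/btcsuite/btcd/wire"
)

// dumpFilterData fetches the regular filter, its header, and its hash for a
// block, using the accessors shown in the diff.
func dumpFilterData(idx *indexers.CfIndex, blockHash *chainhash.Hash) error {
    filter, err := idx.FilterByBlockHash(blockHash, wire.GCSFilterRegular)
    if err != nil {
        return err
    }
    header, err := idx.FilterHeaderByBlockHash(blockHash, wire.GCSFilterRegular)
    if err != nil {
        return err
    }
    hash, err := idx.FilterHashByBlockHash(blockHash, wire.GCSFilterRegular)
    if err != nil {
        return err
    }
    fmt.Printf("filter: %x\nheader: %x\nhash: %x\n", filter, header, hash)
    return nil
}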
@@ -494,42 +494,6 @@ func dbFetchTx(dbTx database.Tx, hash *chainhash.Hash) (*wire.MsgTx, error) {
     return &msgTx, nil
 }
 
-// makeUtxoView creates a mock unspent transaction output view by using the
-// transaction index in order to look up all inputs referenced by the
-// transactions in the block. This is sometimes needed when catching indexes up
-// because many of the txouts could actually already be spent however the
-// associated scripts are still required to index them.
-func makeUtxoView(dbTx database.Tx, block *btcutil.Block, interrupt <-chan struct{}) (*blockchain.UtxoViewpoint, error) {
-    view := blockchain.NewUtxoViewpoint()
-    for txIdx, tx := range block.Transactions() {
-        // Coinbases do not reference any inputs. Since the block is
-        // required to have already gone through full validation, it has
-        // already been proven on the first transaction in the block is
-        // a coinbase.
-        if txIdx == 0 {
-            continue
-        }
-
-        // Use the transaction index to load all of the referenced
-        // inputs and add their outputs to the view.
-        for _, txIn := range tx.MsgTx().TxIn {
-            originOut := &txIn.PreviousOutPoint
-            originTx, err := dbFetchTx(dbTx, &originOut.Hash)
-            if err != nil {
-                return nil, err
-            }
-
-            view.AddTxOuts(btcutil.NewTx(originTx), 0)
-        }
-
-        if interruptRequested(interrupt) {
-            return nil, errInterruptRequested
-        }
-    }
-
-    return view, nil
-}
-
 // ConnectBlock must be invoked when a block is extending the main chain. It
 // keeps track of the state of each index it is managing, performs some sanity
 // checks, and invokes each indexer.
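makeUtxoView can be dropped because the spent-output journal handed to ConnectBlock already carries every spent script, so no transaction-index lookups are needed. A minimal sketch of that replacement, mirroring the loop added in the ConnectBlock hunk:

package example

import (
    "github.com/btcsuite/btcd/blockchain"
)

// prevOutputScripts shows the replacement for makeUtxoView in the filter
// index's case: each SpentTxOut in the journal already records the script
// that was spent, so the scripts can be collected directly.
func prevOutputScripts(stxos []blockchain.SpentTxOut) [][]byte {
    scripts := make([][]byte, len(stxos))
    for i, stxo := range stxos {
        scripts[i] = stxo.PkScript
    }
    return scripts
}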
@@ -322,32 +322,32 @@ func TestChainSvrCmds(t *testing.T) {
             name: "getcfilter",
             newCmd: func() (interface{}, error) {
                 return btcjson.NewCmd("getcfilter", "123",
-                    wire.GCSFilterExtended)
+                    wire.GCSFilterRegular)
             },
             staticCmd: func() interface{} {
                 return btcjson.NewGetCFilterCmd("123",
-                    wire.GCSFilterExtended)
+                    wire.GCSFilterRegular)
             },
-            marshalled: `{"jsonrpc":"1.0","method":"getcfilter","params":["123",1],"id":1}`,
+            marshalled: `{"jsonrpc":"1.0","method":"getcfilter","params":["123",0],"id":1}`,
             unmarshalled: &btcjson.GetCFilterCmd{
                 Hash:       "123",
-                FilterType: wire.GCSFilterExtended,
+                FilterType: wire.GCSFilterRegular,
             },
         },
         {
             name: "getcfilterheader",
             newCmd: func() (interface{}, error) {
                 return btcjson.NewCmd("getcfilterheader", "123",
-                    wire.GCSFilterExtended)
+                    wire.GCSFilterRegular)
             },
             staticCmd: func() interface{} {
                 return btcjson.NewGetCFilterHeaderCmd("123",
-                    wire.GCSFilterExtended)
+                    wire.GCSFilterRegular)
            },
-            marshalled: `{"jsonrpc":"1.0","method":"getcfilterheader","params":["123",1],"id":1}`,
+            marshalled: `{"jsonrpc":"1.0","method":"getcfilterheader","params":["123",0],"id":1}`,
             unmarshalled: &btcjson.GetCFilterHeaderCmd{
                 Hash:       "123",
-                FilterType: wire.GCSFilterExtended,
+                FilterType: wire.GCSFilterRegular,
             },
         },
         {
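The updated test expectations encode the filter type as 0 on the wire. The small, self-contained sketch below (using plain encoding/json rather than btcjson internals) reproduces the expected request body from the test:

package main

import (
    "encoding/json"
    "fmt"
)

// request mirrors the JSON-RPC 1.0 envelope used in the test's expected
// "marshalled" strings; it is not btcjson's internal type.
type request struct {
    Jsonrpc string        `json:"jsonrpc"`
    Method  string        `json:"method"`
    Params  []interface{} `json:"params"`
    ID      interface{}   `json:"id"`
}

func main() {
    // GCSFilterRegular serializes as 0, which is why the expected params
    // in the updated test read ["123",0].
    req := request{
        Jsonrpc: "1.0",
        Method:  "getcfilter",
        Params:  []interface{}{"123", 0},
        ID:      1,
    }
    out, _ := json.Marshal(req)
    fmt.Println(string(out))
    // {"jsonrpc":"1.0","method":"getcfilter","params":["123",0],"id":1}
}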
glide.lock (generated)
@@ -1,12 +1,12 @@
-hash: c8cc08c3afbedc99feda4fed38a64bb1aa42da128520dcac028cfc7e7de06a6e
-updated: 2018-05-23T20:46:41.891755246-07:00
+hash: 30562f0035d9bb4d4ee531762e4f7f762ede916255e9aa781df58537651f7382
+updated: 2018-07-06T16:07:52.877260592-07:00
 imports:
 - name: github.com/aead/siphash
   version: e404fcfc888570cadd1610538e2dbc89f66af814
 - name: github.com/btcsuite/btclog
   version: 84c8d2346e9fc8c7b947e243b9c24e6df9fd206a
 - name: github.com/btcsuite/btcutil
-  version: d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4
+  version: ab6388e0c60ae4834a1f57511e20c17b5f78be4b
   subpackages:
   - base58
   - bech32
@@ -2,7 +2,7 @@ package: github.com/btcsuite/btcd
 import:
 - package: github.com/btcsuite/btclog
 - package: github.com/btcsuite/btcutil
-  version: d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4
+  version: ab6388e0c60ae4834a1f57511e20c17b5f78be4b
   subpackages:
   - bloom
   - hdkeychain
@@ -334,13 +334,13 @@ var helpDescsEnUS = map[string]string{
 
     // GetCFilterCmd help.
     "getcfilter--synopsis":  "Returns a block's committed filter given its hash.",
-    "getcfilter-filtertype": "The type of filter to return (0=regular, 1=extended)",
+    "getcfilter-filtertype": "The type of filter to return (0=regular)",
     "getcfilter-hash":       "The hash of the block",
     "getcfilter--result0":   "The block's committed filter",
 
     // GetCFilterHeaderCmd help.
     "getcfilterheader--synopsis":  "Returns a block's compact filter header given its hash.",
-    "getcfilterheader-filtertype": "The type of filter header to return (0=regular, 1=extended)",
+    "getcfilterheader-filtertype": "The type of filter header to return (0=regular)",
     "getcfilterheader-hash":       "The hash of the block",
     "getcfilterheader--result0":   "The block's gcs filter header",
 
@@ -69,13 +69,13 @@ func TestMessage(t *testing.T) {
     bh := NewBlockHeader(1, &chainhash.Hash{}, &chainhash.Hash{}, 0, 0)
     msgMerkleBlock := NewMsgMerkleBlock(bh)
     msgReject := NewMsgReject("block", RejectDuplicate, "duplicate block")
-    msgGetCFilters := NewMsgGetCFilters(GCSFilterExtended, 0, &chainhash.Hash{})
-    msgGetCFHeaders := NewMsgGetCFHeaders(GCSFilterExtended, 0, &chainhash.Hash{})
-    msgGetCFCheckpt := NewMsgGetCFCheckpt(GCSFilterExtended, &chainhash.Hash{})
-    msgCFilter := NewMsgCFilter(GCSFilterExtended, &chainhash.Hash{},
+    msgGetCFilters := NewMsgGetCFilters(GCSFilterRegular, 0, &chainhash.Hash{})
+    msgGetCFHeaders := NewMsgGetCFHeaders(GCSFilterRegular, 0, &chainhash.Hash{})
+    msgGetCFCheckpt := NewMsgGetCFCheckpt(GCSFilterRegular, &chainhash.Hash{})
+    msgCFilter := NewMsgCFilter(GCSFilterRegular, &chainhash.Hash{},
         []byte("payload"))
     msgCFHeaders := NewMsgCFHeaders()
-    msgCFCheckpt := NewMsgCFCheckpt(GCSFilterExtended, &chainhash.Hash{}, 0)
+    msgCFCheckpt := NewMsgCFCheckpt(GCSFilterRegular, &chainhash.Hash{}, 0)
 
     tests := []struct {
         in Message // Value to encode
@@ -17,9 +17,6 @@ type FilterType uint8
 const (
     // GCSFilterRegular is the regular filter type.
     GCSFilterRegular FilterType = iota
-
-    // GCSFilterExtended is the extended filter type.
-    GCSFilterExtended
 )
 
 const (
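With GCSFilterExtended removed, GCSFilterRegular (value 0) is the only defined filter type, and anything above the maxFilterType bound from the first hunk would be rejected. The sketch below is a hypothetical illustration of that bounds check; the names mirror the diff but the helper itself is not part of btcd.

package main

import "fmt"

// FilterType mirrors wire.FilterType for the purposes of this sketch.
type FilterType uint8

const (
    // GCSFilterRegular is the regular filter type and, after this change,
    // the only one defined.
    GCSFilterRegular FilterType = iota
)

// supportedFilterBuckets stands in for the per-type bucket lists from the
// first hunk (cfIndexKeys and friends), which now hold a single entry.
var supportedFilterBuckets = [][]byte{
    []byte("cf0byhashidx"),
}

// maxFilterType is derived the same way the index derives it: anything past
// the last supported type is invalid.
var maxFilterType = uint8(len(supportedFilterBuckets) - 1)

func validFilterType(ft FilterType) bool {
    return uint8(ft) <= maxFilterType
}

func main() {
    fmt.Println(validFilterType(GCSFilterRegular)) // true
    fmt.Println(validFilterType(FilterType(1)))    // false: extended no longer exists
}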