// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"errors"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcutil/gcs"
	"github.com/btcsuite/btcutil/gcs/builder"
	"github.com/btcsuite/fastsha256"
)

const (
	// cfIndexName is the human-readable name for the index.
	cfIndexName = "committed filter index"
)

// Committed filters come in two flavours: basic and extended. They are
// generated and dropped in pairs, and both are indexed by a block's hash.
// Besides holding different content, they also live in different buckets.
var (
	// cfIndexParentBucketKey is the name of the parent bucket used to house
	// the index. The rest of the buckets live below this bucket.
	cfIndexParentBucketKey = []byte("cfindexparentbucket")

	// cfIndexKeys is an array of db bucket names used to house indexes of
	// block hashes to cfilters.
	cfIndexKeys = [][]byte{
		[]byte("cf0byhashidx"),
		[]byte("cf1byhashidx"),
	}

	// cfHeaderKeys is an array of db bucket names used to house indexes of
	// block hashes to cf headers.
	cfHeaderKeys = [][]byte{
		[]byte("cf0headerbyhashidx"),
		[]byte("cf1headerbyhashidx"),
	}
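
	// maxFilterType is the highest committed filter type the index stores;
	// requests for types above this value are rejected as unsupported.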
	maxFilterType = uint8(len(cfHeaderKeys) - 1)
)

// dbFetchFilter retrieves a block's basic or extended filter. A filter's
// absence is not considered an error.
func dbFetchFilter(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Get(h[:]), nil
}

// dbFetchFilterHeader retrieves a block's basic or extended filter header. A
// missing or malformed header (anything whose length is not fastsha256.Size)
// is treated as an error.
func dbFetchFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	fh := idx.Get(h[:])
	if len(fh) != fastsha256.Size {
		return nil, errors.New("invalid filter header length")
	}
	return fh, nil
}

// dbStoreFilter stores a block's basic or extended filter.
func dbStoreFilter(dbTx database.Tx, key []byte, h *chainhash.Hash, f []byte) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Put(h[:], f)
}

// dbStoreFilterHeader stores a block's basic or extended filter header.
func dbStoreFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash, fh []byte) error {
	if len(fh) != fastsha256.Size {
		return errors.New("invalid filter header length")
	}
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Put(h[:], fh)
}

// dbDeleteFilter deletes a block's basic or extended filter.
func dbDeleteFilter(dbTx database.Tx, key []byte, h *chainhash.Hash) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Delete(h[:])
}

// dbDeleteFilterHeader deletes a block's basic or extended filter header.
func dbDeleteFilterHeader(dbTx database.Tx, key []byte, h *chainhash.Hash) error {
	idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
	return idx.Delete(h[:])
}

// CfIndex implements a committed filter (cf) by hash index.
type CfIndex struct {
	db          database.DB
	chainParams *chaincfg.Params
}

// Ensure the CfIndex type implements the Indexer interface.
var _ Indexer = (*CfIndex)(nil)

// Init initializes the hash-based cf index. This is part of the Indexer
// interface.
func (idx *CfIndex) Init() error {
	return nil // Nothing to do.
}

// Key returns the database key to use for the index as a byte slice. This is
// part of the Indexer interface.
func (idx *CfIndex) Key() []byte {
	return cfIndexParentBucketKey
}

// Name returns the human-readable name of the index. This is part of the
// Indexer interface.
func (idx *CfIndex) Name() string {
	return cfIndexName
}

// Create is invoked when the indexer manager determines the index needs to
// be created for the first time. It creates buckets for the two hash-based cf
// indexes (basic, extended).
func (idx *CfIndex) Create(dbTx database.Tx) error {
	meta := dbTx.Metadata()
	cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
	if err != nil {
		return err
	}
	for _, bucketName := range cfIndexKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}
	for _, bucketName := range cfHeaderKeys {
		_, err = cfIndexParentBucket.CreateBucket(bucketName)
		if err != nil {
			return err
		}
	}
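
	// Seed the filter header chain with an all-zero header stored under the
	// genesis block's PrevBlock hash. When the genesis block itself is
	// connected, storeFilter finds this entry as the "previous" header to
	// chain from.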
	firstHeader := make([]byte, chainhash.HashSize)
	err = dbStoreFilterHeader(
		dbTx,
		cfHeaderKeys[0],
		&idx.chainParams.GenesisBlock.Header.PrevBlock,
		firstHeader,
	)
	if err != nil {
		return err
	}

	return dbStoreFilterHeader(
		dbTx,
		cfHeaderKeys[1],
		&idx.chainParams.GenesisBlock.Header.PrevBlock,
		firstHeader,
	)
}

// storeFilter stores a given filter, and performs the steps needed to
// generate the filter's header.
func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter,
	filterType uint8) error {

	if filterType > maxFilterType {
		return errors.New("unsupported filter type")
	}

	// Figure out which buckets to use.
	fkey := cfIndexKeys[filterType]
	hkey := cfHeaderKeys[filterType]

	// Start by storing the filter.
	h := block.Hash()
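
	// A nil filter (a block with no data to commit to) is recorded as an
	// empty entry under the block hash rather than being skipped.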
	var basicFilterBytes []byte
	if f != nil {
		basicFilterBytes = f.NBytes()
	}
	err := dbStoreFilter(dbTx, fkey, h, basicFilterBytes)
	if err != nil {
		return err
	}

	// Then fetch the previous block's filter header.
	ph := &block.MsgBlock().Header.PrevBlock
	pfh, err := dbFetchFilterHeader(dbTx, hkey, ph)
	if err != nil {
		return err
	}
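
	// The new header commits to both this block's filter and the previous
	// block's filter header, so filter headers form a chain of commitments
	// reaching back to the genesis block.
	//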
	// Construct the new block's filter header, and store it.
	prevHeader, err := chainhash.NewHash(pfh)
	if err != nil {
		return err
	}
	fh := builder.MakeHeaderForFilter(f, *prevHeader)
	return dbStoreFilterHeader(dbTx, hkey, h, fh[:])
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a hash-to-cf mapping for
// every passed block. This is part of the Indexer interface.
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {
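
	// Build and store the basic filter (type 0). gcs.ErrNoData only means
	// the block contains nothing to put in a filter, so it is tolerated and
	// a nil filter is stored instead.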
	f, err := builder.BuildBasicFilter(block.MsgBlock())
	if err != nil && err != gcs.ErrNoData {
		return err
	}
	if err := storeFilter(dbTx, block, f, 0); err != nil {
		return err
	}

	f, err = builder.BuildExtFilter(block.MsgBlock())
	if err != nil {
		return err
	}
	return storeFilter(dbTx, block, f, 1)
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the hash-to-cf
// mapping for every passed block. This is part of the Indexer interface.
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,
	view *blockchain.UtxoViewpoint) error {

	for _, key := range cfIndexKeys {
		err := dbDeleteFilter(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	for _, key := range cfHeaderKeys {
		err := dbDeleteFilterHeader(dbTx, key, block.Hash())
		if err != nil {
			return err
		}
	}

	return nil
}

// FilterByBlockHash returns the serialized contents of a block's basic or
// extended committed filter.
func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash, filterType uint8) ([]byte, error) {
	var f []byte
	err := idx.db.View(func(dbTx database.Tx) error {
		if filterType > maxFilterType {
			return errors.New("unsupported filter type")
		}

		var err error
		f, err = dbFetchFilter(dbTx, cfIndexKeys[filterType], h)
		return err
	})
	return f, err
}
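
// A caller holding a *CfIndex can look up a block's basic filter by hash; a
// minimal sketch, assuming cfIdx and blockHash are already in scope:
//
//	filterBytes, err := cfIdx.FilterByBlockHash(blockHash, 0)
//	if err != nil {
//		// handle lookup failure
//	}
//	_ = filterBytes // serialized gcs filter; nil if none was stored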

// FilterHeaderByBlockHash returns the serialized contents of a block's basic
// or extended committed filter header.
func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash, filterType uint8) ([]byte, error) {
	var fh []byte
	err := idx.db.View(func(dbTx database.Tx) error {
		if filterType > maxFilterType {
			return errors.New("unsupported filter type")
		}

		var err error
		fh, err = dbFetchFilterHeader(dbTx, cfHeaderKeys[filterType], h)
		return err
	})
	return fh, err
}

// NewCfIndex returns a new instance of an indexer that is used to create a
// mapping of the hashes of all blocks in the blockchain to their respective
// committed filters.
//
// It implements the Indexer interface which plugs into the IndexManager that
// in turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex {
	return &CfIndex{db: db, chainParams: chainParams}
}

// DropCfIndex drops the CF index from the provided database if it exists.
func DropCfIndex(db database.DB) error {
	return dropIndex(db, cfIndexParentBucketKey, cfIndexName)
}