// Copyright (c) 2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package indexers
|
|
|
|
|
|
|
|
import (
	"errors"

	"github.com/lbryio/lbcd/blockchain"
	"github.com/lbryio/lbcd/chaincfg"
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/database"
	"github.com/lbryio/lbcd/wire"
	btcutil "github.com/lbryio/lbcutil"
	"github.com/lbryio/lbcutil/gcs"
	"github.com/lbryio/lbcutil/gcs/builder"
)
|
|
|
|
|
|
|
|
const (
	// cfIndexName is the human-readable name for the index.
	cfIndexName = "committed filter index"
)
|
|
|
|
|
2018-05-31 05:54:56 +02:00
|
|
|
// Committed filters come in one flavor currently: basic. They are generated
|
|
|
|
// and dropped in pairs, and both are indexed by a block's hash. Besides
|
|
|
|
// holding different content, they also live in different buckets.
|
2017-01-10 15:38:15 +01:00
|
|
|
var (
|
2018-05-31 05:54:56 +02:00
|
|
|
// cfIndexParentBucketKey is the name of the parent bucket used to
|
|
|
|
// house the index. The rest of the buckets live below this bucket.
|
2017-03-02 11:20:54 +01:00
|
|
|
cfIndexParentBucketKey = []byte("cfindexparentbucket")
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2017-09-13 14:42:24 +02:00
|
|
|
// cfIndexKeys is an array of db bucket names used to house indexes of
|
|
|
|
// block hashes to cfilters.
|
|
|
|
cfIndexKeys = [][]byte{
|
|
|
|
[]byte("cf0byhashidx"),
|
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2017-09-13 14:42:24 +02:00
|
|
|
// cfHeaderKeys is an array of db bucket names used to house indexes of
|
|
|
|
// block hashes to cf headers.
|
|
|
|
cfHeaderKeys = [][]byte{
|
|
|
|
[]byte("cf0headerbyhashidx"),
|
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2018-01-22 21:59:20 +01:00
|
|
|
// cfHashKeys is an array of db bucket names used to house indexes of
|
|
|
|
// block hashes to cf hashes.
|
|
|
|
cfHashKeys = [][]byte{
|
|
|
|
[]byte("cf0hashbyhashidx"),
|
|
|
|
}
|
|
|
|
|
2017-09-13 14:42:24 +02:00
|
|
|
maxFilterType = uint8(len(cfHeaderKeys) - 1)
|
2018-01-22 21:59:20 +01:00
|
|
|
|
2018-05-31 05:54:56 +02:00
|
|
|
// zeroHash is the chainhash.Hash value of all zero bytes, defined here
|
|
|
|
// for convenience.
|
2018-01-22 21:59:20 +01:00
|
|
|
zeroHash chainhash.Hash
|
2017-01-10 15:38:15 +01:00
|
|
|
)
|
|
|
|
|
2018-02-01 08:38:10 +01:00
|
|
|
// dbFetchFilterIdxEntry retrieves a data blob from the filter index database.
|
|
|
|
// An entry's absence is not considered an error.
|
|
|
|
func dbFetchFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) ([]byte, error) {
|
2017-03-02 11:20:54 +01:00
|
|
|
idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
|
2017-02-01 10:50:36 +01:00
|
|
|
return idx.Get(h[:]), nil
|
|
|
|
}
|
|
|
|
|
2018-02-01 08:38:10 +01:00
|
|
|
// dbStoreFilterIdxEntry stores a data blob in the filter index database.
|
|
|
|
func dbStoreFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash, f []byte) error {
|
2017-03-02 11:20:54 +01:00
|
|
|
idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
|
2017-01-19 15:55:39 +01:00
|
|
|
return idx.Put(h[:], f)
|
|
|
|
}
|
|
|
|
|
2018-02-01 08:38:10 +01:00
|
|
|
// dbDeleteFilterIdxEntry deletes a data blob from the filter index database.
|
|
|
|
func dbDeleteFilterIdxEntry(dbTx database.Tx, key []byte, h *chainhash.Hash) error {
|
2017-03-02 11:20:54 +01:00
|
|
|
idx := dbTx.Metadata().Bucket(cfIndexParentBucketKey).Bucket(key)
|
2017-01-19 15:55:39 +01:00
|
|
|
return idx.Delete(h[:])
|
|
|
|
}
|
|
|
|
|
|
|
|
// CfIndex implements a committed filter (cf) by hash index.
|
|
|
|
type CfIndex struct {
|
2017-02-02 03:51:23 +01:00
|
|
|
db database.DB
|
|
|
|
chainParams *chaincfg.Params
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-01-19 15:55:39 +01:00
|
|
|
// Ensure the CfIndex type implements the Indexer interface.
|
|
|
|
var _ Indexer = (*CfIndex)(nil)
|
2017-01-10 15:38:15 +01:00
|
|
|
|
2018-07-06 21:38:17 +02:00
|
|
|
// Ensure the CfIndex type implements the NeedsInputser interface.
|
|
|
|
var _ NeedsInputser = (*CfIndex)(nil)
|
|
|
|
|
|
|
|
// NeedsInputs signals that the index requires the referenced inputs in order
|
|
|
|
// to properly create the index.
|
|
|
|
//
|
|
|
|
// This implements the NeedsInputser interface.
|
|
|
|
func (idx *CfIndex) NeedsInputs() bool {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2017-01-19 15:55:39 +01:00
|
|
|
// Init initializes the hash-based cf index. This is part of the Indexer
|
|
|
|
// interface.
|
|
|
|
func (idx *CfIndex) Init() error {
|
|
|
|
return nil // Nothing to do.
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-01-19 15:55:39 +01:00
|
|
|
// Key returns the database key to use for the index as a byte slice. This is
|
|
|
|
// part of the Indexer interface.
|
|
|
|
func (idx *CfIndex) Key() []byte {
|
2017-03-02 11:20:54 +01:00
|
|
|
return cfIndexParentBucketKey
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-01-19 15:55:39 +01:00
|
|
|
// Name returns the human-readable name of the index. This is part of the
|
|
|
|
// Indexer interface.
|
|
|
|
func (idx *CfIndex) Name() string {
|
2017-01-18 09:09:05 +01:00
|
|
|
return cfIndexName
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-02-02 03:51:23 +01:00
|
|
|
// Create is invoked when the indexer manager determines the index needs to
|
2017-02-01 13:12:30 +01:00
|
|
|
// be created for the first time. It creates buckets for the two hash-based cf
|
2018-05-31 05:54:56 +02:00
|
|
|
// indexes (regular only currently).
|
2017-01-19 15:55:39 +01:00
|
|
|
func (idx *CfIndex) Create(dbTx database.Tx) error {
|
2017-01-10 15:38:15 +01:00
|
|
|
meta := dbTx.Metadata()
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2017-03-02 11:20:54 +01:00
|
|
|
cfIndexParentBucket, err := meta.CreateBucket(cfIndexParentBucketKey)
|
2017-01-19 14:20:41 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-09-13 14:42:24 +02:00
|
|
|
|
|
|
|
for _, bucketName := range cfIndexKeys {
|
|
|
|
_, err = cfIndexParentBucket.CreateBucket(bucketName)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-03-02 11:20:54 +01:00
|
|
|
}
|
2017-09-13 14:42:24 +02:00
|
|
|
|
|
|
|
for _, bucketName := range cfHeaderKeys {
|
|
|
|
_, err = cfIndexParentBucket.CreateBucket(bucketName)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-02-02 03:51:23 +01:00
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2018-01-22 21:59:20 +01:00
|
|
|
for _, bucketName := range cfHashKeys {
|
|
|
|
_, err = cfIndexParentBucket.CreateBucket(bucketName)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-02-02 03:51:23 +01:00
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2018-01-22 21:59:20 +01:00
|
|
|
return nil
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-02-02 03:51:23 +01:00
|
|
|
// storeFilter stores a given filter, and performs the steps needed to
|
2017-02-01 13:12:30 +01:00
|
|
|
// generate the filter's header.
|
2017-04-27 02:55:24 +02:00
|
|
|
func storeFilter(dbTx database.Tx, block *btcutil.Block, f *gcs.Filter,
|
2017-10-31 07:24:57 +01:00
|
|
|
filterType wire.FilterType) error {
|
|
|
|
if uint8(filterType) > maxFilterType {
|
2017-09-13 14:42:24 +02:00
|
|
|
return errors.New("unsupported filter type")
|
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2017-02-01 13:12:30 +01:00
|
|
|
// Figure out which buckets to use.
|
2017-09-13 14:42:24 +02:00
|
|
|
fkey := cfIndexKeys[filterType]
|
|
|
|
hkey := cfHeaderKeys[filterType]
|
2018-01-22 21:59:20 +01:00
|
|
|
hashkey := cfHashKeys[filterType]
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2017-02-01 13:12:30 +01:00
|
|
|
// Start by storing the filter.
|
|
|
|
h := block.Hash()
|
2018-05-16 06:08:31 +02:00
|
|
|
filterBytes, err := f.NBytes()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-04-28 06:48:53 +02:00
|
|
|
}
|
2018-02-01 08:38:10 +01:00
|
|
|
err = dbStoreFilterIdxEntry(dbTx, fkey, h, filterBytes)
|
2017-02-01 13:12:30 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2018-01-22 21:59:20 +01:00
|
|
|
// Next store the filter hash.
|
|
|
|
filterHash, err := builder.GetFilterHash(f)
|
2017-02-01 13:12:30 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-22 21:59:20 +01:00
|
|
|
err = dbStoreFilterIdxEntry(dbTx, hashkey, h, filterHash[:])
|
2017-04-27 02:55:24 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-22 21:59:20 +01:00
|
|
|
|
|
|
|
// Then fetch the previous block's filter header.
|
|
|
|
var prevHeader *chainhash.Hash
|
|
|
|
ph := &block.MsgBlock().Header.PrevBlock
|
|
|
|
if ph.IsEqual(&zeroHash) {
|
|
|
|
prevHeader = &zeroHash
|
|
|
|
} else {
|
|
|
|
pfh, err := dbFetchFilterIdxEntry(dbTx, hkey, ph)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Construct the new block's filter header, and store it.
|
|
|
|
prevHeader, err = chainhash.NewHash(pfh)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-01 08:38:10 +01:00
|
|
|
fh, err := builder.MakeHeaderForFilter(f, *prevHeader)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return dbStoreFilterIdxEntry(dbTx, hkey, h, fh[:])
|
2017-02-01 13:12:30 +01:00
|
|
|
}
|
|
|
|
|
2017-02-02 03:51:23 +01:00
|
|
|
// ConnectBlock is invoked by the index manager when a new block has been
|
2017-01-19 15:55:39 +01:00
|
|
|
// connected to the main chain. This indexer adds a hash-to-cf mapping for
|
|
|
|
// every passed block. This is part of the Indexer interface.
|
|
|
|
func (idx *CfIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,
|
2018-05-29 06:55:34 +02:00
|
|
|
stxos []blockchain.SpentTxOut) error {
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2018-06-15 02:50:39 +02:00
|
|
|
prevScripts := make([][]byte, len(stxos))
|
|
|
|
for i, stxo := range stxos {
|
|
|
|
prevScripts[i] = stxo.PkScript
|
|
|
|
}
|
|
|
|
|
|
|
|
f, err := builder.BuildBasicFilter(block.MsgBlock(), prevScripts)
|
2018-05-16 06:08:31 +02:00
|
|
|
if err != nil {
|
2017-01-11 17:09:08 +01:00
|
|
|
return err
|
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2018-05-31 05:54:56 +02:00
|
|
|
return storeFilter(dbTx, block, f, wire.GCSFilterRegular)
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-02-02 03:51:23 +01:00
|
|
|
// DisconnectBlock is invoked by the index manager when a block has been
|
2017-01-19 15:55:39 +01:00
|
|
|
// disconnected from the main chain. This indexer removes the hash-to-cf
|
|
|
|
// mapping for every passed block. This is part of the Indexer interface.
|
|
|
|
func (idx *CfIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,
|
2018-05-29 06:55:34 +02:00
|
|
|
_ []blockchain.SpentTxOut) error {
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2017-09-13 14:42:24 +02:00
|
|
|
for _, key := range cfIndexKeys {
|
2018-02-01 08:38:10 +01:00
|
|
|
err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
|
2017-09-13 14:42:24 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-01-31 10:56:07 +01:00
|
|
|
}
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2017-09-13 14:42:24 +02:00
|
|
|
for _, key := range cfHeaderKeys {
|
2018-02-01 08:38:10 +01:00
|
|
|
err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
|
2017-09-13 14:42:24 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-22 21:59:20 +01:00
|
|
|
for _, key := range cfHashKeys {
|
|
|
|
err := dbDeleteFilterIdxEntry(dbTx, key, block.Hash())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-13 14:42:24 +02:00
|
|
|
return nil
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2018-02-01 08:38:10 +01:00
|
|
|
// entryByBlockHash fetches a filter index entry of a particular type
|
|
|
|
// (eg. filter, filter header, etc) for a filter type and block hash.
|
|
|
|
func (idx *CfIndex) entryByBlockHash(filterTypeKeys [][]byte,
|
|
|
|
filterType wire.FilterType, h *chainhash.Hash) ([]byte, error) {
|
|
|
|
|
|
|
|
if uint8(filterType) > maxFilterType {
|
|
|
|
return nil, errors.New("unsupported filter type")
|
|
|
|
}
|
|
|
|
key := filterTypeKeys[filterType]
|
2017-04-28 04:44:43 +02:00
|
|
|
|
2018-02-01 08:38:10 +01:00
|
|
|
var entry []byte
|
|
|
|
err := idx.db.View(func(dbTx database.Tx) error {
|
2017-09-13 14:42:24 +02:00
|
|
|
var err error
|
2018-02-01 08:38:10 +01:00
|
|
|
entry, err = dbFetchFilterIdxEntry(dbTx, key, h)
|
2017-01-12 16:28:27 +01:00
|
|
|
return err
|
|
|
|
})
|
2018-02-01 08:38:10 +01:00
|
|
|
return entry, err
|
|
|
|
}
|
|
|
|
|
2018-02-01 09:00:45 +01:00
|
|
|
// entriesByBlockHashes batch fetches a filter index entry of a particular type
|
|
|
|
// (eg. filter, filter header, etc) for a filter type and slice of block hashes.
|
|
|
|
func (idx *CfIndex) entriesByBlockHashes(filterTypeKeys [][]byte,
|
|
|
|
filterType wire.FilterType, blockHashes []*chainhash.Hash) ([][]byte, error) {
|
|
|
|
|
|
|
|
if uint8(filterType) > maxFilterType {
|
|
|
|
return nil, errors.New("unsupported filter type")
|
|
|
|
}
|
|
|
|
key := filterTypeKeys[filterType]
|
|
|
|
|
|
|
|
entries := make([][]byte, 0, len(blockHashes))
|
|
|
|
err := idx.db.View(func(dbTx database.Tx) error {
|
|
|
|
for _, blockHash := range blockHashes {
|
|
|
|
entry, err := dbFetchFilterIdxEntry(dbTx, key, blockHash)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
entries = append(entries, entry)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
return entries, err
|
|
|
|
}
|
|
|
|
|
2018-02-01 08:38:10 +01:00
|
|
|
// FilterByBlockHash returns the serialized contents of a block's basic or
|
2018-05-31 05:54:56 +02:00
|
|
|
// committed filter.
|
2018-02-01 08:38:10 +01:00
|
|
|
func (idx *CfIndex) FilterByBlockHash(h *chainhash.Hash,
|
|
|
|
filterType wire.FilterType) ([]byte, error) {
|
|
|
|
return idx.entryByBlockHash(cfIndexKeys, filterType, h)
|
2017-02-01 14:43:09 +01:00
|
|
|
}
|
|
|
|
|
2018-02-01 09:00:45 +01:00
|
|
|
// FiltersByBlockHashes returns the serialized contents of a block's basic or
|
2018-05-31 05:54:56 +02:00
|
|
|
// committed filter for a set of blocks by hash.
|
2018-02-01 09:00:45 +01:00
|
|
|
func (idx *CfIndex) FiltersByBlockHashes(blockHashes []*chainhash.Hash,
|
|
|
|
filterType wire.FilterType) ([][]byte, error) {
|
|
|
|
return idx.entriesByBlockHashes(cfIndexKeys, filterType, blockHashes)
|
|
|
|
}
|
|
|
|
|
2017-02-02 03:51:23 +01:00
|
|
|
// FilterHeaderByBlockHash returns the serialized contents of a block's basic
|
2018-05-31 05:54:56 +02:00
|
|
|
// committed filter header.
|
2017-10-31 07:24:57 +01:00
|
|
|
func (idx *CfIndex) FilterHeaderByBlockHash(h *chainhash.Hash,
|
|
|
|
filterType wire.FilterType) ([]byte, error) {
|
2018-02-01 08:38:10 +01:00
|
|
|
return idx.entryByBlockHash(cfHeaderKeys, filterType, h)
|
2017-01-12 16:28:27 +01:00
|
|
|
}
|
|
|
|
|
2018-05-31 05:54:56 +02:00
|
|
|
// FilterHeadersByBlockHashes returns the serialized contents of a block's
|
|
|
|
// basic committed filter header for a set of blocks by hash.
|
2018-02-01 09:00:45 +01:00
|
|
|
func (idx *CfIndex) FilterHeadersByBlockHashes(blockHashes []*chainhash.Hash,
|
|
|
|
filterType wire.FilterType) ([][]byte, error) {
|
|
|
|
return idx.entriesByBlockHashes(cfHeaderKeys, filterType, blockHashes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// FilterHashByBlockHash returns the serialized contents of a block's basic
|
2018-05-31 05:54:56 +02:00
|
|
|
// committed filter hash.
|
2018-01-22 21:59:20 +01:00
|
|
|
func (idx *CfIndex) FilterHashByBlockHash(h *chainhash.Hash,
|
|
|
|
filterType wire.FilterType) ([]byte, error) {
|
|
|
|
return idx.entryByBlockHash(cfHashKeys, filterType, h)
|
|
|
|
}
|
|
|
|
|
2018-02-01 09:00:45 +01:00
|
|
|
// FilterHashesByBlockHashes returns the serialized contents of a block's basic
|
2018-05-31 05:54:56 +02:00
|
|
|
// committed filter hash for a set of blocks by hash.
|
2018-02-01 09:00:45 +01:00
|
|
|
func (idx *CfIndex) FilterHashesByBlockHashes(blockHashes []*chainhash.Hash,
|
|
|
|
filterType wire.FilterType) ([][]byte, error) {
|
|
|
|
return idx.entriesByBlockHashes(cfHashKeys, filterType, blockHashes)
|
|
|
|
}
|
|
|
|
|
2017-01-19 15:55:39 +01:00
|
|
|
// NewCfIndex returns a new instance of an indexer that is used to create a
|
2017-01-10 15:38:15 +01:00
|
|
|
// mapping of the hashes of all blocks in the blockchain to their respective
|
2017-01-19 15:55:39 +01:00
|
|
|
// committed filters.
|
2017-01-10 15:38:15 +01:00
|
|
|
//
|
2017-04-28 04:44:43 +02:00
|
|
|
// It implements the Indexer interface which plugs into the IndexManager that
|
|
|
|
// in turn is used by the blockchain package. This allows the index to be
|
2017-01-10 15:38:15 +01:00
|
|
|
// seamlessly maintained along with the chain.
|
2017-02-02 03:51:23 +01:00
|
|
|
func NewCfIndex(db database.DB, chainParams *chaincfg.Params) *CfIndex {
|
|
|
|
return &CfIndex{db: db, chainParams: chainParams}
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|
|
|
|
|
2017-01-19 15:55:39 +01:00
|
|
|
// DropCfIndex drops the CF index from the provided database if exists.
|
2017-11-18 00:51:08 +01:00
|
|
|
func DropCfIndex(db database.DB, interrupt <-chan struct{}) error {
|
|
|
|
return dropIndex(db, cfIndexParentBucketKey, cfIndexName, interrupt)
|
2017-01-10 15:38:15 +01:00
|
|
|
}
|