diff --git a/database2/README.md b/database2/README.md new file mode 100644 index 00000000..e200238b --- /dev/null +++ b/database2/README.md @@ -0,0 +1,77 @@ +database +======== + +[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)](https://travis-ci.org/btcsuite/btcd) + +Package database provides a block and metadata storage database. + +Please note that this package is intended to enable btcd to support different +database backends and is not something that a client can directly access, as only +one entity can have the database open at a time (for most database backends), +and that entity will be btcd. + +When a client wants programmatic access to the data provided by btcd, they'll +likely want to use the [btcrpcclient](https://github.com/btcsuite/btcrpcclient) +package, which makes use of the [JSON-RPC API](https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md). + +However, this package could be extremely useful for any application requiring +Bitcoin block storage capabilities. + +As of July 2015, there are over 365,000 blocks in the Bitcoin block chain and +over 76 million transactions (which turns out to be over 35GB of data). +This package provides a database layer to store and retrieve this data in a +simple and efficient manner. + +The default backend, ffldb, has a strong focus on speed, efficiency, and +robustness. It makes use of leveldb for the metadata, flat files for block +storage, and strict checksums in key areas to ensure data integrity. + +## Feature Overview + +- Key/value metadata store +- Bitcoin block storage +- Efficient retrieval of block headers and regions (transactions, scripts, etc.) +- Read-only and read-write transactions with both manual and managed modes +- Nested buckets +- Iteration support including cursors with seek capability +- Supports registration of backend databases +- Comprehensive test coverage + +## Documentation + +[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/database?status.png)](http://godoc.org/github.com/btcsuite/btcd/database) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/btcsuite/btcd/database + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/btcsuite/btcd/database + +## Installation + +```bash +$ go get github.com/btcsuite/btcd/database +``` + +## Examples + +* [Basic Usage Example](http://godoc.org/github.com/btcsuite/btcd/database#example-package--BasicUsage) + Demonstrates creating a new database and using a managed read-write + transaction to store and retrieve metadata. + +* [Block Storage and Retrieval Example](http://godoc.org/github.com/btcsuite/btcd/database#example-package--BlockStorageAndRetrieval) + Demonstrates creating a new database, using a managed read-write transaction + to store a block, and then using a managed read-only transaction to fetch the + block. + +## License + +Package database is licensed under the [copyfree](http://copyfree.org) ISC +License. 
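For a quick feel of the API described above, the following is a minimal sketch of the managed-transaction flow. It mirrors this package's own example tests; the database path and key name are arbitrary placeholders, and the ffldb driver import path assumes the database2/ffldb package added in this change:

```Go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	database "github.com/btcsuite/btcd/database2"
	_ "github.com/btcsuite/btcd/database2/ffldb"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Create an ffldb-backed database for mainnet block data. The temp
	// directory is only used so the sketch cleans up after itself.
	dbPath := filepath.Join(os.TempDir(), "quickstart-db")
	db, err := database.Create("ffldb", dbPath, wire.MainNet)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()

	// Managed read-write transaction: store a key/value pair in the root
	// metadata bucket. The transaction commits automatically when the
	// closure returns nil and rolls back on a non-nil error.
	err = db.Update(func(tx database.Tx) error {
		return tx.Metadata().Put([]byte("mykey"), []byte("myvalue"))
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Managed read-only transaction: read the value back. Fetched data is
	// only valid for the life of the transaction, so copy it if it needs
	// to outlive the closure.
	err = db.View(func(tx database.Tx) error {
		fmt.Printf("mykey = %s\n", tx.Metadata().Get([]byte("mykey")))
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
}
```

The same pattern extends to block storage: inside an Update transaction, tx.StoreBlock persists a btcutil.Block, and inside a View transaction, tx.FetchBlock or tx.FetchBlockRegion retrieves it, as the linked examples above demonstrate.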
diff --git a/database2/cmd/dbtool/fetchblock.go b/database2/cmd/dbtool/fetchblock.go new file mode 100644 index 00000000..789c507d --- /dev/null +++ b/database2/cmd/dbtool/fetchblock.go @@ -0,0 +1,62 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "encoding/hex" + "errors" + "time" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" +) + +// fetchBlockCmd defines the configuration options for the fetchblock command. +type fetchBlockCmd struct{} + +var ( + // fetchBlockCfg defines the configuration options for the command. + fetchBlockCfg = fetchBlockCmd{} +) + +// Execute is the main entry point for the command. It's invoked by the parser. +func (cmd *fetchBlockCmd) Execute(args []string) error { + // Setup the global config options and ensure they are valid. + if err := setupGlobalConfig(); err != nil { + return err + } + + if len(args) < 1 { + return errors.New("required block hash parameter not specified") + } + blockHash, err := wire.NewShaHashFromStr(args[0]) + if err != nil { + return err + } + + // Load the block database. + db, err := loadBlockDB() + if err != nil { + return err + } + defer db.Close() + + return db.View(func(tx database.Tx) error { + log.Infof("Fetching block %s", blockHash) + startTime := time.Now() + blockBytes, err := tx.FetchBlock(blockHash) + if err != nil { + return err + } + log.Infof("Loaded block in %v", time.Now().Sub(startTime)) + log.Infof("Block Hex: %s", hex.EncodeToString(blockBytes)) + return nil + }) +} + +// Usage overrides the usage display for the command. +func (cmd *fetchBlockCmd) Usage() string { + return "" +} diff --git a/database2/cmd/dbtool/fetchblockregion.go b/database2/cmd/dbtool/fetchblockregion.go new file mode 100644 index 00000000..c98ac4c1 --- /dev/null +++ b/database2/cmd/dbtool/fetchblockregion.go @@ -0,0 +1,90 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "encoding/hex" + "errors" + "strconv" + "time" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" +) + +// blockRegionCmd defines the configuration options for the fetchblockregion +// command. +type blockRegionCmd struct{} + +var ( + // blockRegionCfg defines the configuration options for the command. + blockRegionCfg = blockRegionCmd{} +) + +// Execute is the main entry point for the command. It's invoked by the parser. +func (cmd *blockRegionCmd) Execute(args []string) error { + // Setup the global config options and ensure they are valid. + if err := setupGlobalConfig(); err != nil { + return err + } + + // Ensure expected arguments. + if len(args) < 1 { + return errors.New("required block hash parameter not specified") + } + if len(args) < 2 { + return errors.New("required start offset parameter not " + + "specified") + } + if len(args) < 3 { + return errors.New("required region length parameter not " + + "specified") + } + + // Parse arguments. + blockHash, err := wire.NewShaHashFromStr(args[0]) + if err != nil { + return err + } + startOffset, err := strconv.ParseUint(args[1], 10, 32) + if err != nil { + return err + } + regionLen, err := strconv.ParseUint(args[2], 10, 32) + if err != nil { + return err + } + + // Load the block database. 
+ db, err := loadBlockDB() + if err != nil { + return err + } + defer db.Close() + + return db.View(func(tx database.Tx) error { + log.Infof("Fetching block region %s<%d:%d>", blockHash, + startOffset, startOffset+regionLen-1) + region := database.BlockRegion{ + Hash: blockHash, + Offset: uint32(startOffset), + Len: uint32(regionLen), + } + startTime := time.Now() + regionBytes, err := tx.FetchBlockRegion(®ion) + if err != nil { + return err + } + log.Infof("Loaded block region in %v", time.Now().Sub(startTime)) + log.Infof("Double SHA256: %s", wire.DoubleSha256SH(regionBytes)) + log.Infof("Region Hex: %s", hex.EncodeToString(regionBytes)) + return nil + }) +} + +// Usage overrides the usage display for the command. +func (cmd *blockRegionCmd) Usage() string { + return " " +} diff --git a/database2/cmd/dbtool/globalconfig.go b/database2/cmd/dbtool/globalconfig.go new file mode 100644 index 00000000..72f7e716 --- /dev/null +++ b/database2/cmd/dbtool/globalconfig.go @@ -0,0 +1,121 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/btcsuite/btcd/chaincfg" + database "github.com/btcsuite/btcd/database2" + _ "github.com/btcsuite/btcd/database2/ffldb" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +var ( + btcdHomeDir = btcutil.AppDataDir("btcd", false) + knownDbTypes = database.SupportedDrivers() + activeNetParams = &chaincfg.MainNetParams + + // Default global config. + cfg = &config{ + DataDir: filepath.Join(btcdHomeDir, "data"), + DbType: "ffldb", + } +) + +// config defines the global configuration options. +type config struct { + DataDir string `short:"b" long:"datadir" description:"Location of the btcd data directory"` + DbType string `long:"dbtype" description:"Database backend to use for the Block Chain"` + TestNet3 bool `long:"testnet" description:"Use the test network"` + RegressionTest bool `long:"regtest" description:"Use the regression test network"` + SimNet bool `long:"simnet" description:"Use the simulation test network"` +} + +// fileExists reports whether the named file or directory exists. +func fileExists(name string) bool { + if _, err := os.Stat(name); err != nil { + if os.IsNotExist(err) { + return false + } + } + return true +} + +// validDbType returns whether or not dbType is a supported database type. +func validDbType(dbType string) bool { + for _, knownType := range knownDbTypes { + if dbType == knownType { + return true + } + } + + return false +} + +// netName returns the name used when referring to a bitcoin network. At the +// time of writing, btcd currently places blocks for testnet version 3 in the +// data and log directory "testnet", which does not match the Name field of the +// chaincfg parameters. This function can be used to override this directory name +// as "testnet" when the passed active network matches wire.TestNet3. +// +// A proper upgrade to move the data and log directories for this network to +// "testnet3" is planned for the future, at which point this function can be +// removed and the network parameter's name used instead. 
+func netName(chainParams *chaincfg.Params) string { + switch chainParams.Net { + case wire.TestNet3: + return "testnet" + default: + return chainParams.Name + } +} + +// setupGlobalConfig examines the global configuration options for any conditions +// which are invalid and performs any additional setup necessary after the +// initial parse. +func setupGlobalConfig() error { + // Multiple networks can't be selected simultaneously. + // Count number of network flags passed; assign active network params + // while we're at it + numNets := 0 + if cfg.TestNet3 { + numNets++ + activeNetParams = &chaincfg.TestNet3Params + } + if cfg.RegressionTest { + numNets++ + activeNetParams = &chaincfg.RegressionNetParams + } + if cfg.SimNet { + numNets++ + activeNetParams = &chaincfg.SimNetParams + } + if numNets > 1 { + return errors.New("The testnet, regtest, and simnet params " + + "can't be used together -- choose one of the three") + } + + // Validate database type. + if !validDbType(cfg.DbType) { + str := "The specified database type [%v] is invalid -- " + + "supported types %v" + return fmt.Errorf(str, cfg.DbType, knownDbTypes) + } + + // Append the network type to the data directory so it is "namespaced" + // per network. In addition to the block database, there are other + // pieces of data that are saved to disk such as address manager state. + // All data is specific to a network, so namespacing the data directory + // means each individual piece of serialized data does not have to + // worry about changing names per network and such. + cfg.DataDir = filepath.Join(cfg.DataDir, netName(activeNetParams)) + + return nil +} diff --git a/database2/cmd/dbtool/insecureimport.go b/database2/cmd/dbtool/insecureimport.go new file mode 100644 index 00000000..a99903f4 --- /dev/null +++ b/database2/cmd/dbtool/insecureimport.go @@ -0,0 +1,401 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "encoding/binary" + "fmt" + "io" + "os" + "sync" + "time" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +// importCmd defines the configuration options for the insecureimport command. +type importCmd struct { + InFile string `short:"i" long:"infile" description:"File containing the block(s)"` + Progress int `short:"p" long:"progress" description:"Show a progress message each time this number of seconds have passed -- Use 0 to disable progress announcements"` +} + +var ( + // importCfg defines the configuration options for the command. + importCfg = importCmd{ + InFile: "bootstrap.dat", + Progress: 10, + } + + // zeroHash is simply a hash with all zeros. It is defined here to + // avoid creating it multiple times. + zeroHash = wire.ShaHash{} +) + +// importResults houses the stats and result of an import operation. +type importResults struct { + blocksProcessed int64 + blocksImported int64 + err error +} + +// blockImporter houses information about an ongoing import from a block data +// file to the block database. +type blockImporter struct { + db database.DB + r io.ReadSeeker + processQueue chan []byte + doneChan chan bool + errChan chan error + quit chan struct{} + wg sync.WaitGroup + blocksProcessed int64 + blocksImported int64 + receivedLogBlocks int64 + receivedLogTx int64 + lastHeight int64 + lastBlockTime time.Time + lastLogTime time.Time +} + +// readBlock reads the next block from the input file. 
+func (bi *blockImporter) readBlock() ([]byte, error) { + // The block file format is: + // + var net uint32 + err := binary.Read(bi.r, binary.LittleEndian, &net) + if err != nil { + if err != io.EOF { + return nil, err + } + + // No block and no error means there are no more blocks to read. + return nil, nil + } + if net != uint32(activeNetParams.Net) { + return nil, fmt.Errorf("network mismatch -- got %x, want %x", + net, uint32(activeNetParams.Net)) + } + + // Read the block length and ensure it is sane. + var blockLen uint32 + if err := binary.Read(bi.r, binary.LittleEndian, &blockLen); err != nil { + return nil, err + } + if blockLen > wire.MaxBlockPayload { + return nil, fmt.Errorf("block payload of %d bytes is larger "+ + "than the max allowed %d bytes", blockLen, + wire.MaxBlockPayload) + } + + serializedBlock := make([]byte, blockLen) + if _, err := io.ReadFull(bi.r, serializedBlock); err != nil { + return nil, err + } + + return serializedBlock, nil +} + +// processBlock potentially imports the block into the database. It first +// deserializes the raw block while checking for errors. Already known blocks +// are skipped and orphan blocks are considered errors. Returns whether the +// block was imported along with any potential errors. +// +// NOTE: This is not a safe import as it does not verify chain rules. +func (bi *blockImporter) processBlock(serializedBlock []byte) (bool, error) { + // Deserialize the block which includes checks for malformed blocks. + block, err := btcutil.NewBlockFromBytes(serializedBlock) + if err != nil { + return false, err + } + + // update progress statistics + bi.lastBlockTime = block.MsgBlock().Header.Timestamp + bi.receivedLogTx += int64(len(block.MsgBlock().Transactions)) + + // Skip blocks that already exist. + var exists bool + err = bi.db.View(func(tx database.Tx) error { + exists, err = tx.HasBlock(block.Sha()) + if err != nil { + return err + } + return nil + }) + if err != nil { + return false, err + } + if exists { + return false, nil + } + + // Don't bother trying to process orphans. + prevHash := &block.MsgBlock().Header.PrevBlock + if !prevHash.IsEqual(&zeroHash) { + var exists bool + err := bi.db.View(func(tx database.Tx) error { + exists, err = tx.HasBlock(prevHash) + if err != nil { + return err + } + return nil + }) + if err != nil { + return false, err + } + if !exists { + return false, fmt.Errorf("import file contains block "+ + "%v which does not link to the available "+ + "block chain", prevHash) + } + } + + // Put the blocks into the database with no checking of chain rules. + err = bi.db.Update(func(tx database.Tx) error { + return tx.StoreBlock(block) + }) + if err != nil { + return false, err + } + + return true, nil +} + +// readHandler is the main handler for reading blocks from the import file. +// This allows block processing to take place in parallel with block reads. +// It must be run as a goroutine. +func (bi *blockImporter) readHandler() { +out: + for { + // Read the next block from the file and if anything goes wrong + // notify the status handler with the error and bail. + serializedBlock, err := bi.readBlock() + if err != nil { + bi.errChan <- fmt.Errorf("Error reading from input "+ + "file: %v", err.Error()) + break out + } + + // A nil block with no error means we're done. + if serializedBlock == nil { + break out + } + + // Send the block or quit if we've been signalled to exit by + // the status handler due to an error elsewhere. 
+ select { + case bi.processQueue <- serializedBlock: + case <-bi.quit: + break out + } + } + + // Close the processing channel to signal no more blocks are coming. + close(bi.processQueue) + bi.wg.Done() +} + +// logProgress logs block progress as an information message. In order to +// prevent spam, it limits logging to one message every importCfg.Progress +// seconds with duration and totals included. +func (bi *blockImporter) logProgress() { + bi.receivedLogBlocks++ + + now := time.Now() + duration := now.Sub(bi.lastLogTime) + if duration < time.Second*time.Duration(importCfg.Progress) { + return + } + + // Truncate the duration to 10s of milliseconds. + durationMillis := int64(duration / time.Millisecond) + tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10) + + // Log information about new block height. + blockStr := "blocks" + if bi.receivedLogBlocks == 1 { + blockStr = "block" + } + txStr := "transactions" + if bi.receivedLogTx == 1 { + txStr = "transaction" + } + log.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)", + bi.receivedLogBlocks, blockStr, tDuration, bi.receivedLogTx, + txStr, bi.lastHeight, bi.lastBlockTime) + + bi.receivedLogBlocks = 0 + bi.receivedLogTx = 0 + bi.lastLogTime = now +} + +// processHandler is the main handler for processing blocks. This allows block +// processing to take place in parallel with block reads from the import file. +// It must be run as a goroutine. +func (bi *blockImporter) processHandler() { +out: + for { + select { + case serializedBlock, ok := <-bi.processQueue: + // We're done when the channel is closed. + if !ok { + break out + } + + bi.blocksProcessed++ + bi.lastHeight++ + imported, err := bi.processBlock(serializedBlock) + if err != nil { + bi.errChan <- err + break out + } + + if imported { + bi.blocksImported++ + } + + bi.logProgress() + + case <-bi.quit: + break out + } + } + bi.wg.Done() +} + +// statusHandler waits for updates from the import operation and notifies +// the passed doneChan with the results of the import. It also causes all +// goroutines to exit if an error is reported from any of them. +func (bi *blockImporter) statusHandler(resultsChan chan *importResults) { + select { + // An error from either of the goroutines means we're done so signal + // caller with the error and signal all goroutines to quit. + case err := <-bi.errChan: + resultsChan <- &importResults{ + blocksProcessed: bi.blocksProcessed, + blocksImported: bi.blocksImported, + err: err, + } + close(bi.quit) + + // The import finished normally. + case <-bi.doneChan: + resultsChan <- &importResults{ + blocksProcessed: bi.blocksProcessed, + blocksImported: bi.blocksImported, + err: nil, + } + } +} + +// Import is the core function which handles importing the blocks from the file +// associated with the block importer to the database. It returns a channel +// on which the results will be returned when the operation has completed. +func (bi *blockImporter) Import() chan *importResults { + // Start up the read and process handling goroutines. This setup allows + // blocks to be read from disk in parallel while being processed. + bi.wg.Add(2) + go bi.readHandler() + go bi.processHandler() + + // Wait for the import to finish in a separate goroutine and signal + // the status handler when done. + go func() { + bi.wg.Wait() + bi.doneChan <- true + }() + + // Start the status handler and return the result channel that it will + // send the results on when the import is done. 
+ resultChan := make(chan *importResults) + go bi.statusHandler(resultChan) + return resultChan +} + +// newBlockImporter returns a new importer for the provided file reader seeker +// and database. +func newBlockImporter(db database.DB, r io.ReadSeeker) *blockImporter { + return &blockImporter{ + db: db, + r: r, + processQueue: make(chan []byte, 2), + doneChan: make(chan bool), + errChan: make(chan error), + quit: make(chan struct{}), + lastLogTime: time.Now(), + } +} + +// Execute is the main entry point for the command. It's invoked by the parser. +func (cmd *importCmd) Execute(args []string) error { + // Setup the global config options and ensure they are valid. + if err := setupGlobalConfig(); err != nil { + return err + } + + // Ensure the specified block file exists. + if !fileExists(cmd.InFile) { + str := "The specified block file [%v] does not exist" + return fmt.Errorf(str, cmd.InFile) + } + + // Load the block database. + db, err := loadBlockDB() + if err != nil { + return err + } + defer db.Close() + + // Ensure the database is sync'd and closed on Ctrl+C. + addInterruptHandler(func() { + log.Infof("Gracefully shutting down the database...") + db.Close() + }) + + fi, err := os.Open(importCfg.InFile) + if err != nil { + return err + } + defer fi.Close() + + // Create a block importer for the database and input file and start it. + // The results channel returned from start will contain an error if + // anything went wrong. + importer := newBlockImporter(db, fi) + + // Perform the import asynchronously and signal the main goroutine when + // done. This allows blocks to be processed and read in parallel. The + // results channel returned from Import contains the statistics about + // the import including an error if something went wrong. This is done + // in a separate goroutine rather than waiting directly so the main + // goroutine can be signaled for shutdown by either completion, error, + // or from the main interrupt handler. This is necessary since the main + // goroutine must be kept running long enough for the interrupt handler + // goroutine to finish. + go func() { + log.Info("Starting import") + resultsChan := importer.Import() + results := <-resultsChan + if results.err != nil { + dbErr, ok := results.err.(database.Error) + if !ok || ok && dbErr.ErrorCode != database.ErrDbNotOpen { + shutdownChannel <- results.err + return + } + } + + log.Infof("Processed a total of %d blocks (%d imported, %d "+ + "already known)", results.blocksProcessed, + results.blocksImported, + results.blocksProcessed-results.blocksImported) + shutdownChannel <- nil + }() + + // Wait for shutdown signal from either a normal completion or from the + // interrupt handler. + err = <-shutdownChannel + return err +} diff --git a/database2/cmd/dbtool/loadheaders.go b/database2/cmd/dbtool/loadheaders.go new file mode 100644 index 00000000..f03506bc --- /dev/null +++ b/database2/cmd/dbtool/loadheaders.go @@ -0,0 +1,101 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "time" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" +) + +// headersCmd defines the configuration options for the loadheaders command. +type headersCmd struct { + Bulk bool `long:"bulk" description:"Use bulk loading of headers instead of one at a time"` +} + +var ( + // headersCfg defines the configuration options for the command. 
+ headersCfg = headersCmd{ + Bulk: false, + } +) + +// Execute is the main entry point for the command. It's invoked by the parser. +func (cmd *headersCmd) Execute(args []string) error { + // Setup the global config options and ensure they are valid. + if err := setupGlobalConfig(); err != nil { + return err + } + + // Load the block database. + db, err := loadBlockDB() + if err != nil { + return err + } + defer db.Close() + + // NOTE: This code will only work for ffldb. Ideally the package using + // the database would keep a metadata index of its own. + blockIdxName := []byte("ffldb-blockidx") + if !headersCfg.Bulk { + err = db.View(func(tx database.Tx) error { + totalHdrs := 0 + blockIdxBucket := tx.Metadata().Bucket(blockIdxName) + blockIdxBucket.ForEach(func(k, v []byte) error { + totalHdrs++ + return nil + }) + log.Infof("Loading headers for %d blocks...", totalHdrs) + numLoaded := 0 + startTime := time.Now() + blockIdxBucket.ForEach(func(k, v []byte) error { + var hash wire.ShaHash + copy(hash[:], k) + _, err := tx.FetchBlockHeader(&hash) + if err != nil { + return err + } + numLoaded++ + return nil + }) + log.Infof("Loaded %d headers in %v", numLoaded, + time.Now().Sub(startTime)) + return nil + }) + if err != nil { + return err + } + + return nil + } + + // Bulk load headers. + err = db.View(func(tx database.Tx) error { + blockIdxBucket := tx.Metadata().Bucket(blockIdxName) + hashes := make([]wire.ShaHash, 0, 500000) + blockIdxBucket.ForEach(func(k, v []byte) error { + var hash wire.ShaHash + copy(hash[:], k) + hashes = append(hashes, hash) + return nil + }) + + log.Infof("Loading headers for %d blocks...", len(hashes)) + startTime := time.Now() + hdrs, err := tx.FetchBlockHeaders(hashes) + if err != nil { + return err + } + log.Infof("Loaded %d headers in %v", len(hdrs), + time.Now().Sub(startTime)) + return nil + }) + if err != nil { + return err + } + + return nil +} diff --git a/database2/cmd/dbtool/main.go b/database2/cmd/dbtool/main.go new file mode 100644 index 00000000..eedb642e --- /dev/null +++ b/database2/cmd/dbtool/main.go @@ -0,0 +1,116 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "path/filepath" + "runtime" + "strings" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btclog" + flags "github.com/btcsuite/go-flags" +) + +const ( + // blockDbNamePrefix is the prefix for the btcd block database. + blockDbNamePrefix = "blocks" +) + +var ( + log btclog.Logger + shutdownChannel = make(chan error) +) + +// loadBlockDB opens the block database and returns a handle to it. +func loadBlockDB() (database.DB, error) { + // The database name is based on the database type. + dbName := blockDbNamePrefix + "_" + cfg.DbType + dbPath := filepath.Join(cfg.DataDir, dbName) + + log.Infof("Loading block database from '%s'", dbPath) + db, err := database.Open(cfg.DbType, dbPath, activeNetParams.Net) + if err != nil { + // Return the error if it's not because the database doesn't + // exist. + if dbErr, ok := err.(database.Error); !ok || dbErr.ErrorCode != + database.ErrDbDoesNotExist { + + return nil, err + } + + // Create the db if it does not exist. 
+ err = os.MkdirAll(cfg.DataDir, 0700) + if err != nil { + return nil, err + } + db, err = database.Create(cfg.DbType, dbPath, activeNetParams.Net) + if err != nil { + return nil, err + } + } + + log.Info("Block database loaded") + return db, nil +} + +// realMain is the real main function for the utility. It is necessary to work +// around the fact that deferred functions do not run when os.Exit() is called. +func realMain() error { + // Setup logging. + backendLogger := btclog.NewDefaultBackendLogger() + defer backendLogger.Flush() + log = btclog.NewSubsystemLogger(backendLogger, "") + dbLog := btclog.NewSubsystemLogger(backendLogger, "BCDB: ") + dbLog.SetLevel(btclog.DebugLvl) + database.UseLogger(dbLog) + + // Setup the parser options and commands. + appName := filepath.Base(os.Args[0]) + appName = strings.TrimSuffix(appName, filepath.Ext(appName)) + parserFlags := flags.Options(flags.HelpFlag | flags.PassDoubleDash) + parser := flags.NewNamedParser(appName, parserFlags) + parser.AddGroup("Global Options", "", cfg) + parser.AddCommand("insecureimport", + "Insecurely import bulk block data from bootstrap.dat", + "Insecurely import bulk block data from bootstrap.dat. "+ + "WARNING: This is NOT secure because it does NOT "+ + "verify chain rules. It is only provided for testing "+ + "purposes.", &importCfg) + parser.AddCommand("loadheaders", + "Time how long to load headers for all blocks in the database", + "", &headersCfg) + parser.AddCommand("fetchblock", + "Fetch the specific block hash from the database", "", + &fetchBlockCfg) + parser.AddCommand("fetchblockregion", + "Fetch the specified block region from the database", "", + &blockRegionCfg) + + // Parse command line and invoke the Execute function for the specified + // command. + if _, err := parser.Parse(); err != nil { + if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp { + parser.WriteHelp(os.Stderr) + } else { + log.Error(err) + } + + return err + } + + return nil +} + +func main() { + // Use all processor cores. + runtime.GOMAXPROCS(runtime.NumCPU()) + + // Work around defer not working after os.Exit() + if err := realMain(); err != nil { + os.Exit(1) + } +} diff --git a/database2/cmd/dbtool/signal.go b/database2/cmd/dbtool/signal.go new file mode 100644 index 00000000..a32f1b37 --- /dev/null +++ b/database2/cmd/dbtool/signal.go @@ -0,0 +1,82 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "os/signal" +) + +// interruptChannel is used to receive SIGINT (Ctrl+C) signals. +var interruptChannel chan os.Signal + +// addHandlerChannel is used to add an interrupt handler to the list of handlers +// to be invoked on SIGINT (Ctrl+C) signals. +var addHandlerChannel = make(chan func()) + +// mainInterruptHandler listens for SIGINT (Ctrl+C) signals on the +// interruptChannel and invokes the registered interruptCallbacks accordingly. +// It also listens for callback registration. It must be run as a goroutine. +func mainInterruptHandler() { + // interruptCallbacks is a list of callbacks to invoke when a + // SIGINT (Ctrl+C) is received. + var interruptCallbacks []func() + + // isShutdown is a flag which is used to indicate whether or not + // the shutdown signal has already been received and hence any future + // attempts to add a new interrupt handler should invoke them + // immediately. 
+ var isShutdown bool + + for { + select { + case <-interruptChannel: + // Ignore more than one shutdown signal. + if isShutdown { + log.Infof("Received SIGINT (Ctrl+C). " + + "Already shutting down...") + continue + } + + isShutdown = true + log.Infof("Received SIGINT (Ctrl+C). Shutting down...") + + // Run handlers in LIFO order. + for i := range interruptCallbacks { + idx := len(interruptCallbacks) - 1 - i + callback := interruptCallbacks[idx] + callback() + } + + // Signal the main goroutine to shutdown. + go func() { + shutdownChannel <- nil + }() + + case handler := <-addHandlerChannel: + // The shutdown signal has already been received, so + // just invoke any new handlers immediately. + if isShutdown { + handler() + } + + interruptCallbacks = append(interruptCallbacks, handler) + } + } +} + +// addInterruptHandler adds a handler to call when a SIGINT (Ctrl+C) is +// received. +func addInterruptHandler(handler func()) { + // Create the channel and start the main interrupt handler which invokes + // all other callbacks and exits if not already done. + if interruptChannel == nil { + interruptChannel = make(chan os.Signal, 1) + signal.Notify(interruptChannel, os.Interrupt) + go mainInterruptHandler() + } + + addHandlerChannel <- handler +} diff --git a/database2/doc.go b/database2/doc.go new file mode 100644 index 00000000..5d8dd17f --- /dev/null +++ b/database2/doc.go @@ -0,0 +1,94 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package database2 provides a block and metadata storage database. + +Overview + +As of July 2015, there are over 365,000 blocks in the Bitcoin block chain and +over 76 million transactions (which turns out to be over 35GB of data). +This package provides a database layer to store and retrieve this data in a +simple and efficient manner. + +The default backend, ffldb, has a strong focus on speed, efficiency, and +robustness. It makes use of leveldb for the metadata, flat files for block +storage, and strict checksums in key areas to ensure data integrity. + +A quick overview of the features database provides is as follows: + + - Key/value metadata store + - Bitcoin block storage + - Efficient retrieval of block headers and regions (transactions, scripts, etc.) + - Read-only and read-write transactions with both manual and managed modes + - Nested buckets + - Supports registration of backend databases + - Comprehensive test coverage + +Database + +The main entry point is the DB interface. It exposes functionality for +transaction-based access and storage of metadata and block data. It is +obtained via the Create and Open functions which take a database type string +that identifies the specific database driver (backend) to use as well as +arguments specific to the specified driver. + +Namespaces + +The Namespace interface is an abstraction that provides facilities for obtaining +transactions (the Tx interface) that are the basis of all database reads and +writes. Unlike some database interfaces that support reading and writing +without transactions, this interface requires transactions even when only +reading or writing a single key. + +The Begin function provides an unmanaged transaction while the View and Update +functions provide a managed transaction. These are described in more detail +below. + +Transactions + +The Tx interface provides facilities for rolling back or committing changes that +took place while the transaction was active. 
It also provides the root metadata +bucket under which all keys, values, and nested buckets are stored. A +transaction can either be read-only or read-write and managed or unmanaged. + +Managed versus Unmanaged Transactions + +A managed transaction is one where the caller provides a function to execute +within the context of the transaction and the commit or rollback is handled +automatically depending on whether or not the provided function returns an +error. Attempting to manually call Rollback or Commit on the managed +transaction will result in a panic. + +An unmanaged transaction, on the other hand, requires the caller to manually +call Commit or Rollback when they are finished with it. Leaving transactions +open for long periods of time can have several adverse effects, so it is +recommended that managed transactions are used instead. + +Buckets + +The Bucket interface provides the ability to manipulate key/value pairs and +nested buckets as well as iterate through them. + +The Get, Put, and Delete functions work with key/value pairs, while the Bucket, +CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with +buckets. The ForEach function allows the caller to provide a function to be +called with each key/value pair and nested bucket in the current bucket. + +Metadata Bucket + +As discussed above, all of the functions which are used to manipulate key/value +pairs and nested buckets exist on the Bucket interface. The root metadata +bucket is the upper-most bucket in which data is stored and is created at the +same time as the database. Use the Metadata function on the Tx interface +to retrieve it. + +Nested Buckets + +The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface +provide the ability to create an arbitrary number of nested buckets. It is +a good idea to avoid a lot of buckets with little data in them as it could lead +to poor page utilization depending on the specific driver in use. +*/ +package database2 diff --git a/database2/driver.go b/database2/driver.go new file mode 100644 index 00000000..b71e0c1d --- /dev/null +++ b/database2/driver.go @@ -0,0 +1,89 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package database2 + +import ( + "fmt" + + "github.com/btcsuite/btclog" +) + +// Driver defines a structure for backend drivers to use when they registered +// themselves as a backend which implements the DB interface. +type Driver struct { + // DbType is the identifier used to uniquely identify a specific + // database driver. There can be only one driver with the same name. + DbType string + + // Create is the function that will be invoked with all user-specified + // arguments to create the database. This function must return + // ErrDbExists if the database already exists. + Create func(args ...interface{}) (DB, error) + + // Open is the function that will be invoked with all user-specified + // arguments to open the database. This function must return + // ErrDbDoesNotExist if the database has not already been created. + Open func(args ...interface{}) (DB, error) + + // UseLogger uses a specified Logger to output package logging info. + UseLogger func(logger btclog.Logger) +} + +// driverList holds all of the registered database backends. +var drivers = make(map[string]*Driver) + +// RegisterDriver adds a backend database driver to available interfaces. 
+// ErrDbTypeRegistered will be returned if the database type for the driver has +// already been registered. +func RegisterDriver(driver Driver) error { + if _, exists := drivers[driver.DbType]; exists { + str := fmt.Sprintf("driver %q is already registered", + driver.DbType) + return makeError(ErrDbTypeRegistered, str, nil) + } + + drivers[driver.DbType] = &driver + return nil +} + +// SupportedDrivers returns a slice of strings that represent the database +// drivers that have been registered and are therefore supported. +func SupportedDrivers() []string { + supportedDBs := make([]string, 0, len(drivers)) + for _, drv := range drivers { + supportedDBs = append(supportedDBs, drv.DbType) + } + return supportedDBs +} + +// Create initializes and opens a database for the specified type. The arguments +// are specific to the database type driver. See the documentation for the +// database driver for further details. +// +// ErrDbUnknownType will be returned if the database type is not registered. +func Create(dbType string, args ...interface{}) (DB, error) { + drv, exists := drivers[dbType] + if !exists { + str := fmt.Sprintf("driver %q is not registered", dbType) + return nil, makeError(ErrDbUnknownType, str, nil) + } + + return drv.Create(args...) +} + +// Open opens an existing database for the specified type. The arguments are +// specific to the database type driver. See the documentation for the database +// driver for further details. +// +// ErrDbUnknownType will be returned if the database type is not registered. +func Open(dbType string, args ...interface{}) (DB, error) { + drv, exists := drivers[dbType] + if !exists { + str := fmt.Sprintf("driver %q is not registered", dbType) + return nil, makeError(ErrDbUnknownType, str, nil) + } + + return drv.Open(args...) +} diff --git a/database2/driver_test.go b/database2/driver_test.go new file mode 100644 index 00000000..aa7d15d1 --- /dev/null +++ b/database2/driver_test.go @@ -0,0 +1,136 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package database2_test + +import ( + "fmt" + "testing" + + database "github.com/btcsuite/btcd/database2" + _ "github.com/btcsuite/btcd/database2/ffldb" +) + +var ( + // ignoreDbTypes are types which should be ignored when running tests + // that iterate all supported DB types. This allows some tests to add + // bogus drivers for testing purposes while still allowing other tests + // to easily iterate all supported drivers. + ignoreDbTypes = map[string]bool{"createopenfail": true} +) + +// checkDbError ensures the passed error is a database.Error with an error code +// that matches the passed error code. +func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool { + dbErr, ok := gotErr.(database.Error) + if !ok { + t.Errorf("%s: unexpected error type - got %T, want %T", + testName, gotErr, database.Error{}) + return false + } + if dbErr.ErrorCode != wantErrCode { + t.Errorf("%s: unexpected error code - got %s (%s), want %s", + testName, dbErr.ErrorCode, dbErr.Description, + wantErrCode) + return false + } + + return true +} + +// TestAddDuplicateDriver ensures that adding a duplicate driver does not +// overwrite an existing one. 
+func TestAddDuplicateDriver(t *testing.T) { + supportedDrivers := database.SupportedDrivers() + if len(supportedDrivers) == 0 { + t.Errorf("no backends to test") + return + } + dbType := supportedDrivers[0] + + // bogusCreateDB is a function which acts as a bogus create and open + // driver function and intentionally returns a failure that can be + // detected if the interface allows a duplicate driver to overwrite an + // existing one. + bogusCreateDB := func(args ...interface{}) (database.DB, error) { + return nil, fmt.Errorf("duplicate driver allowed for database "+ + "type [%v]", dbType) + } + + // Create a driver that tries to replace an existing one. Set its + // create and open functions to a function that causes a test failure if + // they are invoked. + driver := database.Driver{ + DbType: dbType, + Create: bogusCreateDB, + Open: bogusCreateDB, + } + testName := "duplicate driver registration" + err := database.RegisterDriver(driver) + if !checkDbError(t, testName, err, database.ErrDbTypeRegistered) { + return + } +} + +// TestCreateOpenFail ensures that errors which occur while opening or closing +// a database are handled properly. +func TestCreateOpenFail(t *testing.T) { + // bogusCreateDB is a function which acts as a bogus create and open + // driver function that intentionally returns a failure which can be + // detected. + dbType := "createopenfail" + openError := fmt.Errorf("failed to create or open database for "+ + "database type [%v]", dbType) + bogusCreateDB := func(args ...interface{}) (database.DB, error) { + return nil, openError + } + + // Create and add driver that intentionally fails when created or opened + // to ensure errors on database open and create are handled properly. + driver := database.Driver{ + DbType: dbType, + Create: bogusCreateDB, + Open: bogusCreateDB, + } + database.RegisterDriver(driver) + + // Ensure creating a database with the new type fails with the expected + // error. + _, err := database.Create(dbType) + if err != openError { + t.Errorf("expected error not received - got: %v, want %v", err, + openError) + return + } + + // Ensure opening a database with the new type fails with the expected + // error. + _, err = database.Open(dbType) + if err != openError { + t.Errorf("expected error not received - got: %v, want %v", err, + openError) + return + } +} + +// TestCreateOpenUnsupported ensures that attempting to create or open an +// unsupported database type is handled properly. +func TestCreateOpenUnsupported(t *testing.T) { + // Ensure creating a database with an unsupported type fails with the + // expected error. + testName := "create with unsupported database type" + dbType := "unsupported" + _, err := database.Create(dbType) + if !checkDbError(t, testName, err, database.ErrDbUnknownType) { + return + } + + // Ensure opening a database with the an unsupported type fails with the + // expected error. + testName = "open with unsupported database type" + _, err = database.Open(dbType) + if !checkDbError(t, testName, err, database.ErrDbUnknownType) { + return + } +} diff --git a/database2/error.go b/database2/error.go new file mode 100644 index 00000000..fd4b0f5b --- /dev/null +++ b/database2/error.go @@ -0,0 +1,197 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package database2 + +import "fmt" + +// ErrorCode identifies a kind of error. +type ErrorCode int + +// These constants are used to identify a specific database Error. 
+const ( + // ************************************** + // Errors related to driver registration. + // ************************************** + + // ErrDbTypeRegistered indicates two different database drivers + // attempt to register with the same database type. + ErrDbTypeRegistered ErrorCode = iota + + // ************************************* + // Errors related to database functions. + // ************************************* + + // ErrDbUnknownType indicates there is no driver registered for + // the specified database type. + ErrDbUnknownType + + // ErrDbDoesNotExist indicates open is called for a database that + // does not exist. + ErrDbDoesNotExist + + // ErrDbExists indicates create is called for a database that + // already exists. + ErrDbExists + + // ErrDbNotOpen indicates a database instance is accessed before + // it is opened or after it is closed. + ErrDbNotOpen + + // ErrDbAlreadyOpen indicates open was called on a database that + // is already open. + ErrDbAlreadyOpen + + // ErrInvalid indicates the specified database is not valid. + ErrInvalid + + // ErrCorruption indicates a checksum failure occurred which invariably + // means the database is corrupt. + ErrCorruption + + // **************************************** + // Errors related to database transactions. + // **************************************** + + // ErrTxClosed indicates an attempt was made to commit or rollback a + // transaction that has already had one of those operations performed. + ErrTxClosed + + // ErrTxNotWritable indicates an operation that requires write access to + // the database was attempted against a read-only transaction. + ErrTxNotWritable + + // ************************************** + // Errors related to metadata operations. + // ************************************** + + // ErrBucketNotFound indicates an attempt to access a bucket that has + // not been created yet. + ErrBucketNotFound + + // ErrBucketExists indicates an attempt to create a bucket that already + // exists. + ErrBucketExists + + // ErrBucketNameRequired indicates an attempt to create a bucket with a + // blank name. + ErrBucketNameRequired + + // ErrKeyRequired indicates an attempt to insert a zero-length key. + ErrKeyRequired + + // ErrKeyTooLarge indicates an attempt to insert a key that is larger + // than the max allowed key size. The max key size depends on the + // specific backend driver being used. As a general rule, key sizes + // should be relatively small, so this should rarely be an issue. + ErrKeyTooLarge + + // ErrValueTooLarge indicates an attempt to insert a value that is larger + // than the max allowed value size. The max value size depends on the + // specific backend driver being used. + ErrValueTooLarge + + // ErrIncompatibleValue indicates the value in question is invalid for + // the specific requested operation. For example, trying to create or + // delete a bucket with an existing non-bucket key, attempting to create + // or delete a non-bucket key with an existing bucket key, or trying to + // delete a value via a cursor when it points to a nested bucket. + ErrIncompatibleValue + + // *************************************** + // Errors related to block I/O operations. + // *************************************** + + // ErrBlockNotFound indicates a block with the provided hash does not + // exist in the database. + ErrBlockNotFound + + // ErrBlockExists indicates a block with the provided hash already + // exists in the database. 
+ ErrBlockExists + + // ErrBlockRegionInvalid indicates a region that exceeds the bounds of + // the specified block was requested. When the hash provided by the + // region does not correspond to an existing block, the error will be + // ErrBlockNotFound instead. + ErrBlockRegionInvalid + + // *********************************** + // Support for driver-specific errors. + // *********************************** + + // ErrDriverSpecific indicates the Err field is a driver-specific error. + // This provides a mechanism for drivers to plug-in their own custom + // errors for any situations which aren't already covered by the error + // codes provided by this package. + ErrDriverSpecific + + // numErrorCodes is the maximum error code number used in tests. + numErrorCodes +) + +// Map of ErrorCode values back to their constant names for pretty printing. +var errorCodeStrings = map[ErrorCode]string{ + ErrDbTypeRegistered: "ErrDbTypeRegistered", + ErrDbUnknownType: "ErrDbUnknownType", + ErrDbDoesNotExist: "ErrDbDoesNotExist", + ErrDbExists: "ErrDbExists", + ErrDbNotOpen: "ErrDbNotOpen", + ErrDbAlreadyOpen: "ErrDbAlreadyOpen", + ErrInvalid: "ErrInvalid", + ErrCorruption: "ErrCorruption", + ErrTxClosed: "ErrTxClosed", + ErrTxNotWritable: "ErrTxNotWritable", + ErrBucketNotFound: "ErrBucketNotFound", + ErrBucketExists: "ErrBucketExists", + ErrBucketNameRequired: "ErrBucketNameRequired", + ErrKeyRequired: "ErrKeyRequired", + ErrKeyTooLarge: "ErrKeyTooLarge", + ErrValueTooLarge: "ErrValueTooLarge", + ErrIncompatibleValue: "ErrIncompatibleValue", + ErrBlockNotFound: "ErrBlockNotFound", + ErrBlockExists: "ErrBlockExists", + ErrBlockRegionInvalid: "ErrBlockRegionInvalid", + ErrDriverSpecific: "ErrDriverSpecific", +} + +// String returns the ErrorCode as a human-readable name. +func (e ErrorCode) String() string { + if s := errorCodeStrings[e]; s != "" { + return s + } + return fmt.Sprintf("Unknown ErrorCode (%d)", int(e)) +} + +// Error provides a single type for errors that can happen during database +// operation. It is used to indicate several types of failures including errors +// with caller requests such as specifying invalid block regions or attempting +// to access data against closed database transactions, driver errors, errors +// retrieving data, and errors communicating with database servers. +// +// The caller can use type assertions to determine if an error is an Error and +// access the ErrorCode field to ascertain the specific reason for the failure. +// +// The ErrDriverSpecific error code will also have the Err field set with the +// underlying error. Depending on the backend driver, the Err field might be +// set to the underlying error for other error codes as well. +type Error struct { + ErrorCode ErrorCode // Describes the kind of error + Description string // Human readable description of the issue + Err error // Underlying error +} + +// Error satisfies the error interface and prints human-readable errors. +func (e Error) Error() string { + if e.Err != nil { + return e.Description + ": " + e.Err.Error() + } + return e.Description +} + +// makeError creates an Error given a set of arguments. The error code must +// be one of the error codes provided by this package. 
+func makeError(c ErrorCode, desc string, err error) Error { + return Error{ErrorCode: c, Description: desc, Err: err} +} diff --git a/database2/error_test.go b/database2/error_test.go new file mode 100644 index 00000000..1ca2a393 --- /dev/null +++ b/database2/error_test.go @@ -0,0 +1,97 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package database2_test + +import ( + "errors" + "testing" + + database "github.com/btcsuite/btcd/database2" +) + +// TestErrorCodeStringer tests the stringized output for the ErrorCode type. +func TestErrorCodeStringer(t *testing.T) { + tests := []struct { + in database.ErrorCode + want string + }{ + {database.ErrDbTypeRegistered, "ErrDbTypeRegistered"}, + {database.ErrDbUnknownType, "ErrDbUnknownType"}, + {database.ErrDbDoesNotExist, "ErrDbDoesNotExist"}, + {database.ErrDbExists, "ErrDbExists"}, + {database.ErrDbNotOpen, "ErrDbNotOpen"}, + {database.ErrDbAlreadyOpen, "ErrDbAlreadyOpen"}, + {database.ErrInvalid, "ErrInvalid"}, + {database.ErrCorruption, "ErrCorruption"}, + {database.ErrTxClosed, "ErrTxClosed"}, + {database.ErrTxNotWritable, "ErrTxNotWritable"}, + {database.ErrBucketNotFound, "ErrBucketNotFound"}, + {database.ErrBucketExists, "ErrBucketExists"}, + {database.ErrBucketNameRequired, "ErrBucketNameRequired"}, + {database.ErrKeyRequired, "ErrKeyRequired"}, + {database.ErrKeyTooLarge, "ErrKeyTooLarge"}, + {database.ErrValueTooLarge, "ErrValueTooLarge"}, + {database.ErrIncompatibleValue, "ErrIncompatibleValue"}, + {database.ErrBlockNotFound, "ErrBlockNotFound"}, + {database.ErrBlockExists, "ErrBlockExists"}, + {database.ErrBlockRegionInvalid, "ErrBlockRegionInvalid"}, + {database.ErrDriverSpecific, "ErrDriverSpecific"}, + + {0xffff, "Unknown ErrorCode (65535)"}, + } + + // Detect additional error codes that don't have the stringer added. + if len(tests)-1 != int(database.TstNumErrorCodes) { + t.Errorf("It appears an error code was added without adding " + + "an associated stringer test") + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.String() + if result != test.want { + t.Errorf("String #%d\ngot: %s\nwant: %s", i, result, + test.want) + continue + } + } +} + +// TestError tests the error output for the Error type. +func TestError(t *testing.T) { + t.Parallel() + + tests := []struct { + in database.Error + want string + }{ + { + database.Error{Description: "some error"}, + "some error", + }, + { + database.Error{Description: "human-readable error"}, + "human-readable error", + }, + { + database.Error{ + ErrorCode: database.ErrDriverSpecific, + Description: "some error", + Err: errors.New("driver-specific error"), + }, + "some error: driver-specific error", + }, + } + + t.Logf("Running %d tests", len(tests)) + for i, test := range tests { + result := test.in.Error() + if result != test.want { + t.Errorf("Error #%d\n got: %s want: %s", i, result, + test.want) + continue + } + } +} diff --git a/database2/example_test.go b/database2/example_test.go new file mode 100644 index 00000000..79f4a85c --- /dev/null +++ b/database2/example_test.go @@ -0,0 +1,177 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +package database2_test + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + + "github.com/btcsuite/btcd/chaincfg" + database "github.com/btcsuite/btcd/database2" + _ "github.com/btcsuite/btcd/database2/ffldb" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +// This example demonstrates creating a new database. +func ExampleCreate() { + // This example assumes the ffldb driver is imported. + // + // import ( + // "github.com/btcsuite/btcd/database" + // _ "github.com/btcsuite/btcd/database/ffldb" + // ) + + // Create a database and schedule it to be closed and removed on exit. + // Typically you wouldn't want to remove the database right away like + // this, nor put it in the temp directory, but it's done here to ensure + // the example cleans up after itself. + dbPath := filepath.Join(os.TempDir(), "examplecreate") + db, err := database.Create("ffldb", dbPath, wire.MainNet) + if err != nil { + fmt.Println(err) + return + } + defer os.RemoveAll(dbPath) + defer db.Close() + + // Output: +} + +// This example demonstrates creating a new database and using a managed +// read-write transaction to store and retrieve metadata. +func Example_basicUsage() { + // This example assumes the ffldb driver is imported. + // + // import ( + // "github.com/btcsuite/btcd/database" + // _ "github.com/btcsuite/btcd/database/ffldb" + // ) + + // Create a database and schedule it to be closed and removed on exit. + // Typically you wouldn't want to remove the database right away like + // this, nor put it in the temp directory, but it's done here to ensure + // the example cleans up after itself. + dbPath := filepath.Join(os.TempDir(), "exampleusage") + db, err := database.Create("ffldb", dbPath, wire.MainNet) + if err != nil { + fmt.Println(err) + return + } + defer os.RemoveAll(dbPath) + defer db.Close() + + // Use the Update function of the database to perform a managed + // read-write transaction. The transaction will automatically be rolled + // back if the supplied inner function returns a non-nil error. + err = db.Update(func(tx database.Tx) error { + // Store a key/value pair directly in the metadata bucket. + // Typically a nested bucket would be used for a given feature, + // but this example is using the metadata bucket directly for + // simplicity. + key := []byte("mykey") + value := []byte("myvalue") + if err := tx.Metadata().Put(key, value); err != nil { + return err + } + + // Read the key back and ensure it matches. + if !bytes.Equal(tx.Metadata().Get(key), value) { + return fmt.Errorf("unexpected value for key '%s'", key) + } + + // Create a new nested bucket under the metadata bucket. + nestedBucketKey := []byte("mybucket") + nestedBucket, err := tx.Metadata().CreateBucket(nestedBucketKey) + if err != nil { + return err + } + + // The key from above that was set in the metadata bucket does + // not exist in this new nested bucket. + if nestedBucket.Get(key) != nil { + return fmt.Errorf("key '%s' is not expected nil", key) + } + + return nil + }) + if err != nil { + fmt.Println(err) + return + } + + // Output: +} + +// This example demonstrates creating a new database, using a managed read-write +// transaction to store a block, and using a managed read-only transaction to +// fetch the block. +func Example_blockStorageAndRetrieval() { + // This example assumes the ffldb driver is imported. 
+	//
+	// import (
+	// 	"github.com/btcsuite/btcd/database"
+	// 	_ "github.com/btcsuite/btcd/database/ffldb"
+	// )
+
+	// Create a database and schedule it to be closed and removed on exit.
+	// Typically you wouldn't want to remove the database right away like
+	// this, nor put it in the temp directory, but it's done here to ensure
+	// the example cleans up after itself.
+	dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
+	db, err := database.Create("ffldb", dbPath, wire.MainNet)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+
+	// Use the Update function of the database to perform a managed
+	// read-write transaction and store a genesis block in the database as
+	// an example.
+	err = db.Update(func(tx database.Tx) error {
+		genesisBlock := chaincfg.MainNetParams.GenesisBlock
+		return tx.StoreBlock(btcutil.NewBlock(genesisBlock))
+	})
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Use the View function of the database to perform a managed read-only
+	// transaction and fetch the block stored above.
+	var loadedBlockBytes []byte
+	err = db.View(func(tx database.Tx) error {
+		genesisHash := chaincfg.MainNetParams.GenesisHash
+		blockBytes, err := tx.FetchBlock(genesisHash)
+		if err != nil {
+			return err
+		}
+
+		// As documented, all data fetched from the database is only
+		// valid during a database transaction in order to support
+		// zero-copy backends.  Thus, make a copy of the data so it
+		// can be used outside of the transaction.
+		loadedBlockBytes = make([]byte, len(blockBytes))
+		copy(loadedBlockBytes, blockBytes)
+		return nil
+	})
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	// Typically at this point, the block could be deserialized via the
+	// wire.MsgBlock.Deserialize function or used in its serialized form
+	// depending on need.  However, for this example, just display the
+	// number of serialized bytes to show it was loaded as expected.
+	fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))
+
+	// Output:
+	// Serialized block size: 285 bytes
+}
diff --git a/database2/export_test.go b/database2/export_test.go
new file mode 100644
index 00000000..08f6c9e9
--- /dev/null
+++ b/database2/export_test.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2015-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+/*
+This test file is part of the database package rather than the database_test
+package so it can bridge access to the internals to properly test cases which
+are either not possible or can't reliably be tested via the public interface.
+The functions, constants, and variables are only exported while the tests are
+being run.
+*/
+
+package database2
+
+// TstNumErrorCodes makes the internal numErrorCodes parameter available to the
+// test package.
+const TstNumErrorCodes = numErrorCodes
diff --git a/database2/ffldb/README.md b/database2/ffldb/README.md
new file mode 100644
index 00000000..dcb8308c
--- /dev/null
+++ b/database2/ffldb/README.md
@@ -0,0 +1,52 @@
+ffldb
+=====
+
+[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)]
+(https://travis-ci.org/btcsuite/btcd)
+
+Package ffldb implements a driver for the database package that uses leveldb for
+the backing metadata and flat files for block storage.
+
+This is the recommended driver for use with btcd.
It makes use of leveldb
+for the metadata, flat files for block storage, and checksums in key areas to
+ensure data integrity.
+
+Package ffldb is licensed under the copyfree ISC license.
+
+## Usage
+
+This package is a driver for the database package and provides the database
+type of "ffldb".  The parameters the Open and Create functions take are the
+database path as a string and the block network.
+
+```Go
+db, err := database.Open("ffldb", "path/to/database", wire.MainNet)
+if err != nil {
+	// Handle error
+}
+```
+
+```Go
+db, err := database.Create("ffldb", "path/to/database", wire.MainNet)
+if err != nil {
+	// Handle error
+}
+```
+
+## Documentation
+
+[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/database/ffldb?status.png)]
+(http://godoc.org/github.com/btcsuite/btcd/database/ffldb)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the GoDoc site here:
+http://godoc.org/github.com/btcsuite/btcd/database/ffldb
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/btcsuite/btcd/database/ffldb
+
+## License
+
+Package ffldb is licensed under the [copyfree](http://copyfree.org) ISC
+License.
diff --git a/database2/ffldb/bench_test.go b/database2/ffldb/bench_test.go
new file mode 100644
index 00000000..c3362f6c
--- /dev/null
+++ b/database2/ffldb/bench_test.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2015-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package ffldb
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/btcsuite/btcd/chaincfg"
+	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcutil"
+)
+
+// BenchmarkBlockHeader benchmarks how long it takes to load the mainnet genesis
+// block header.
+func BenchmarkBlockHeader(b *testing.B) {
+	// Start by creating a new database and populating it with the mainnet
+	// genesis block.
+	dbPath := filepath.Join(os.TempDir(), "ffldb-benchblkhdr")
+	_ = os.RemoveAll(dbPath)
+	db, err := database.Create("ffldb", dbPath, blockDataNet)
+	if err != nil {
+		b.Fatal(err)
+	}
+	defer os.RemoveAll(dbPath)
+	defer db.Close()
+	err = db.Update(func(tx database.Tx) error {
+		block := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
+		if err := tx.StoreBlock(block); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	err = db.View(func(tx database.Tx) error {
+		blockHash := chaincfg.MainNetParams.GenesisHash
+		for i := 0; i < b.N; i++ {
+			_, err := tx.FetchBlockHeader(blockHash)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	// Don't benchmark teardown.
+	b.StopTimer()
+}
+
+// BenchmarkBlock benchmarks how long it takes to load the mainnet genesis
+// block.
+func BenchmarkBlock(b *testing.B) {
+	// Start by creating a new database and populating it with the mainnet
+	// genesis block.
+ dbPath := filepath.Join(os.TempDir(), "ffldb-benchblk") + _ = os.RemoveAll(dbPath) + db, err := database.Create("ffldb", dbPath, blockDataNet) + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(dbPath) + defer db.Close() + err = db.Update(func(tx database.Tx) error { + block := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + if err := tx.StoreBlock(block); err != nil { + return err + } + return nil + }) + if err != nil { + b.Fatal(err) + } + + b.ReportAllocs() + b.ResetTimer() + err = db.View(func(tx database.Tx) error { + blockHash := chaincfg.MainNetParams.GenesisHash + for i := 0; i < b.N; i++ { + _, err := tx.FetchBlock(blockHash) + if err != nil { + return err + } + } + return nil + }) + if err != nil { + b.Fatal(err) + } + + // Don't benchmark teardown. + b.StopTimer() +} diff --git a/database2/ffldb/blockio.go b/database2/ffldb/blockio.go new file mode 100644 index 00000000..d9a30cff --- /dev/null +++ b/database2/ffldb/blockio.go @@ -0,0 +1,747 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// This file contains the implementation functions for reading, writing, and +// otherwise working with the flat files that house the actual blocks. + +package ffldb + +import ( + "container/list" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" +) + +const ( + // The Bitcoin protocol encodes block height as int32, so max number of + // blocks is 2^31. Max block size per the protocol is 32MiB per block. + // So the theoretical max at the time this comment was written is 64PiB + // (pebibytes). With files @ 512MiB each, this would require a maximum + // of 134,217,728 files. Thus, choose 9 digits of precision for the + // filenames. An additional benefit is 9 digits provides 10^9 files @ + // 512MiB each for a total of ~476.84PiB (roughly 7.4 times the current + // theoretical max), so there is room for the max block size to grow in + // the future. + blockFilenameTemplate = "%09d.fdb" + + // maxOpenFiles is the max number of open files to maintain in the + // open blocks cache. Note that this does not include the current + // write file, so there will typically be one more than this value open. + maxOpenFiles = 25 + + // maxBlockFileSize is the maximum size for each file used to store + // blocks. + // + // NOTE: The current code uses uint32 for all offsets, so this value + // must be less than 2^32 (4 GiB). This is also why it's a typed + // constant. + maxBlockFileSize uint32 = 512 * 1024 * 1024 // 512 MiB + + // blockLocSize is the number of bytes the serialized block location + // data that is stored in the block index. + // + // The serialized block location format is: + // + // [0:4] Block file (4 bytes) + // [4:8] File offset (4 bytes) + // [8:12] Block length (4 bytes) + blockLocSize = 12 +) + +var ( + // castagnoli houses the Catagnoli polynomial used for CRC-32 checksums. + castagnoli = crc32.MakeTable(crc32.Castagnoli) +) + +// filer is an interface which acts very similar to a *os.File and is typically +// implemented by it. It exists so the test code can provide mock files for +// properly testing corruption and file system issues. 
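+//
+// For example, a test double might wrap an in-memory buffer along these lines
+// (an illustrative sketch only; the actual mock types live in the package's
+// test code):
+//
+//	type mockFile struct{ data []byte }
+//
+//	func (f *mockFile) ReadAt(p []byte, off int64) (int, error) {
+//		return copy(p, f.data[off:]), nil
+//	}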
+type filer interface { + io.Closer + io.WriterAt + io.ReaderAt + Truncate(size int64) error + Sync() error +} + +// lockableFile represents a block file on disk that has been opened for either +// read or read/write access. It also contains a read-write mutex to support +// multiple concurrent readers. +type lockableFile struct { + sync.RWMutex + file filer +} + +// writeCursor represents the current file and offset of the block file on disk +// for performing all writes. It also contains a read-write mutex to support +// multiple concurrent readers which can reuse the file handle. +type writeCursor struct { + sync.RWMutex + + // curFile is the current block file that will be appended to when + // writing new blocks. + curFile *lockableFile + + // curFileNum is the current block file number and is used to allow + // readers to use the same open file handle. + curFileNum uint32 + + // curOffset is the offset in the current write block file where the + // next new block will be written. + curOffset uint32 +} + +// blockStore houses information used to handle reading and writing blocks (and +// part of blocks) into flat files with support for multiple concurrent readers. +type blockStore struct { + // network is the specific network to use in the flat files for each + // block. + network wire.BitcoinNet + + // basePath is the base path used for the flat block files and metadata. + basePath string + + // maxBlockFileSize is the maximum size for each file used to store + // blocks. It is defined on the store so the whitebox tests can + // override the value. + maxBlockFileSize uint32 + + // The following fields are related to the flat files which hold the + // actual blocks. The number of open files is limited by maxOpenFiles. + // + // obfMutex protects concurrent access to the openBlockFiles map. It is + // a RWMutex so multiple readers can simultaneously access open files. + // + // openBlockFiles houses the open file handles for existing block files + // which have been opened read-only along with an individual RWMutex. + // This scheme allows multiple concurrent readers to the same file while + // preventing the file from being closed out from under them. + // + // lruMutex protects concurrent access to the least recently used list + // and lookup map. + // + // openBlocksLRU tracks how the open files are refenced by pushing the + // most recently used files to the front of the list thereby trickling + // the least recently used files to end of the list. When a file needs + // to be closed due to exceeding the the max number of allowed open + // files, the one at the end of the list is closed. + // + // fileNumToLRUElem is a mapping between a specific block file number + // and the associated list element on the least recently used list. + // + // Thus, with the combination of these fields, the database supports + // concurrent non-blocking reads across multiple and individual files + // along with intelligently limiting the number of open file handles by + // closing the least recently used files as needed. + // + // NOTE: The locking order used throughout is well-defined and MUST be + // followed. Failure to do so could lead to deadlocks. In particular, + // the locking order is as follows: + // 1) obfMutex + // 2) lruMutex + // 3) writeCursor mutex + // 4) specific file mutexes + // + // None of the mutexes are required to be locked at the same time, and + // often aren't. However, if they are to be locked simultaneously, they + // MUST be locked in the order previously specified. 
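+	//
+	// For example, a hypothetical operation that needed both the open
+	// block files map and the LRU list would acquire the locks as follows:
+	//
+	//	s.obfMutex.Lock()
+	//	s.lruMutex.Lock()
+	//	// ... update openBlockFiles and openBlocksLRU ...
+	//	s.lruMutex.Unlock()
+	//	s.obfMutex.Unlock()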
+ // + // Due to the high performance and multi-read concurrency requirements, + // write locks should only be held for the minimum time necessary. + obfMutex sync.RWMutex + lruMutex sync.Mutex + openBlocksLRU *list.List // Contains uint32 block file numbers. + fileNumToLRUElem map[uint32]*list.Element + openBlockFiles map[uint32]*lockableFile + + // writeCursor houses the state for the current file and location that + // new blocks are written to. + writeCursor *writeCursor + + // These functions are set to openFile, openWriteFile, and deleteFile by + // default, but are exposed here to allow the whitebox tests to replace + // them when working with mock files. + openFileFunc func(fileNum uint32) (*lockableFile, error) + openWriteFileFunc func(fileNum uint32) (filer, error) + deleteFileFunc func(fileNum uint32) error +} + +// blockLocation identifies a particular block file and location. +type blockLocation struct { + blockFileNum uint32 + fileOffset uint32 + blockLen uint32 +} + +// deserializeBlockLoc deserializes the passed serialized block location +// information. This is data stored into the block index metadata for each +// block. The serialized data passed to this function MUST be at least +// blockLocSize bytes or it will panic. The error check is avoided here because +// this information will always be coming from the block index which includes a +// checksum to detect corruption. Thus it is safe to use this unchecked here. +func deserializeBlockLoc(serializedLoc []byte) blockLocation { + // The serialized block location format is: + // + // [0:4] Block file (4 bytes) + // [4:8] File offset (4 bytes) + // [8:12] Block length (4 bytes) + return blockLocation{ + blockFileNum: byteOrder.Uint32(serializedLoc[0:4]), + fileOffset: byteOrder.Uint32(serializedLoc[4:8]), + blockLen: byteOrder.Uint32(serializedLoc[8:12]), + } +} + +// serializeBlockLoc returns the serialization of the passed block location. +// This is data to be stored into the block index metadata for each block. +func serializeBlockLoc(loc blockLocation) []byte { + // The serialized block location format is: + // + // [0:4] Block file (4 bytes) + // [4:8] File offset (4 bytes) + // [8:12] Block length (4 bytes) + var serializedData [12]byte + byteOrder.PutUint32(serializedData[0:4], loc.blockFileNum) + byteOrder.PutUint32(serializedData[4:8], loc.fileOffset) + byteOrder.PutUint32(serializedData[8:12], loc.blockLen) + return serializedData[:] +} + +// blockFilePath return the file path for the provided block file number. +func blockFilePath(dbPath string, fileNum uint32) string { + fileName := fmt.Sprintf(blockFilenameTemplate, fileNum) + return filepath.Join(dbPath, fileName) +} + +// openWriteFile returns a file handle for the passed flat file number in +// read/write mode. The file will be created if needed. It is typically used +// for the current file that will have all new data appended. Unlike openFile, +// this function does not keep track of the open file and it is not subject to +// the maxOpenFiles limit. +func (s *blockStore) openWriteFile(fileNum uint32) (filer, error) { + // The current block file needs to be read-write so it is possible to + // append to it. Also, it shouldn't be part of the least recently used + // file. 
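+	//
+	// The name of the file comes from blockFilenameTemplate, so, for
+	// example, file number 12 under a base path of "/db" maps to
+	// "/db/000000012.fdb".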
+ filePath := blockFilePath(s.basePath, fileNum) + file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + str := fmt.Sprintf("failed to open file %q: %v", filePath, err) + return nil, makeDbErr(database.ErrDriverSpecific, str, err) + } + + return file, nil +} + +// openFile returns a read-only file handle for the passed flat file number. +// The function also keeps track of the open files, performs least recently +// used tracking, and limits the number of open files to maxOpenFiles by closing +// the least recently used file as needed. +// +// This function MUST be called with the overall files mutex (s.obfMutex) locked +// for WRITES. +func (s *blockStore) openFile(fileNum uint32) (*lockableFile, error) { + // Open the appropriate file as read-only. + filePath := blockFilePath(s.basePath, fileNum) + file, err := os.Open(filePath) + if err != nil { + return nil, makeDbErr(database.ErrDriverSpecific, err.Error(), + err) + } + blockFile := &lockableFile{file: file} + + // Close the least recently used file if the file exceeds the max + // allowed open files. This is not done until after the file open in + // case the file fails to open, there is no need to close any files. + // + // A write lock is required on the LRU list here to protect against + // modifications happening as already open files are read from and + // shuffled to the front of the list. + // + // Also, add the file that was just opened to the front of the least + // recently used list to indicate it is the most recently used file and + // therefore should be closed last. + s.lruMutex.Lock() + lruList := s.openBlocksLRU + if lruList.Len() >= maxOpenFiles { + lruFileNum := lruList.Remove(lruList.Back()).(uint32) + oldBlockFile := s.openBlockFiles[lruFileNum] + + // Close the old file under the write lock for the file in case + // any readers are currently reading from it so it's not closed + // out from under them. + oldBlockFile.Lock() + _ = oldBlockFile.file.Close() + oldBlockFile.Unlock() + + delete(s.openBlockFiles, lruFileNum) + delete(s.fileNumToLRUElem, lruFileNum) + } + s.fileNumToLRUElem[fileNum] = lruList.PushFront(fileNum) + s.lruMutex.Unlock() + + // Store a reference to it in the open block files map. + s.openBlockFiles[fileNum] = blockFile + + return blockFile, nil +} + +// deleteFile removes the block file for the passed flat file number. The file +// must already be closed and it is the responsibility of the caller to do any +// other state cleanup necessary. +func (s *blockStore) deleteFile(fileNum uint32) error { + filePath := blockFilePath(s.basePath, fileNum) + if err := os.Remove(filePath); err != nil { + return makeDbErr(database.ErrDriverSpecific, err.Error(), err) + } + + return nil +} + +// blockFile attempts to return an existing file handle for the passed flat file +// number if it is already open as well as marking it as most recently used. It +// will also open the file when it's not already open subject to the rules +// described in openFile. +// +// NOTE: The returned block file will already have the read lock acquired and +// the caller MUST call .RUnlock() to release it once it has finished all read +// operations. This is necessary because otherwise it would be possible for a +// separate goroutine to close the file after it is returned from here, but +// before the caller has acquired a read lock. +func (s *blockStore) blockFile(fileNum uint32) (*lockableFile, error) { + // When the requested block file is open for writes, return it. 
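+	// (The write cursor keeps its own handle which is not tracked by the
+	// open files map or the LRU list, so it can be returned directly.)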
+ wc := s.writeCursor + wc.RLock() + if fileNum == wc.curFileNum && wc.curFile.file != nil { + obf := wc.curFile + obf.RLock() + wc.RUnlock() + return obf, nil + } + wc.RUnlock() + + // Try to return an open file under the overall files read lock. + s.obfMutex.RLock() + if obf, ok := s.openBlockFiles[fileNum]; ok { + s.lruMutex.Lock() + s.openBlocksLRU.MoveToFront(s.fileNumToLRUElem[fileNum]) + s.lruMutex.Unlock() + + obf.RLock() + s.obfMutex.RUnlock() + return obf, nil + } + s.obfMutex.RUnlock() + + // Since the file isn't open already, need to check the open block files + // map again under write lock in case multiple readers got here and a + // separate one is already opening the file. + s.obfMutex.Lock() + if obf, ok := s.openBlockFiles[fileNum]; ok { + obf.RLock() + s.obfMutex.Unlock() + return obf, nil + } + + // The file isn't open, so open it while potentially closing the least + // recently used one as needed. + obf, err := s.openFileFunc(fileNum) + if err != nil { + s.obfMutex.Unlock() + return nil, err + } + obf.RLock() + s.obfMutex.Unlock() + return obf, nil +} + +// writeData is a helper function for writeBlock which writes the provided data +// at the current write offset and updates the write cursor accordingly. The +// field name parameter is only used when there is an error to provide a nicer +// error message. +// +// The write cursor will be advanced the number of bytes actually written in the +// event of failure. +// +// NOTE: This function MUST be called with the write cursor current file lock +// held and must only be called during a write transaction so it is effectively +// locked for writes. Also, the write cursor current file must NOT be nil. +func (s *blockStore) writeData(data []byte, fieldName string) error { + wc := s.writeCursor + n, err := wc.curFile.file.WriteAt(data, int64(wc.curOffset)) + wc.curOffset += uint32(n) + if err != nil { + str := fmt.Sprintf("failed to write %s to file %d at "+ + "offset %d: %v", fieldName, wc.curFileNum, + wc.curOffset-uint32(n), err) + return makeDbErr(database.ErrDriverSpecific, str, err) + } + + return nil +} + +// writeBlock appends the specified raw block bytes to the store's write cursor +// location and increments it accordingly. When the block would exceed the max +// file size for the current flat file, this function will close the current +// file, create the next file, update the write cursor, and write the block to +// the new file. +// +// The write cursor will also be advanced the number of bytes actually written +// in the event of failure. +// +// Format: +func (s *blockStore) writeBlock(rawBlock []byte) (blockLocation, error) { + // Compute how many bytes will be written. + // 4 bytes each for block network + 4 bytes for block length + + // length of raw block + 4 bytes for checksum. + blockLen := uint32(len(rawBlock)) + fullLen := blockLen + 12 + + // Move to the next block file if adding the new block would exceed the + // max allowed size for the current block file. Also detect overflow + // to be paranoid, even though it isn't possible currently, numbers + // might change in the future to make it possible. + // + // NOTE: The writeCursor.offset field isn't protected by the mutex + // since it's only read/changed during this function which can only be + // called during a write transaction, of which there can be only one at + // a time. 
+ wc := s.writeCursor + finalOffset := wc.curOffset + fullLen + if finalOffset < wc.curOffset || finalOffset > s.maxBlockFileSize { + // This is done under the write cursor lock since the fileNum + // field is accessed elsewhere by readers. + // + // Close the current write file to force a read-only reopen + // with LRU tracking. The close is done under the write lock + // for the file to prevent it from being closed out from under + // any readers currently reading from it. + wc.Lock() + wc.curFile.Lock() + if wc.curFile.file != nil { + _ = wc.curFile.file.Close() + wc.curFile.file = nil + } + wc.curFile.Unlock() + + // Start writes into next file. + wc.curFileNum++ + wc.curOffset = 0 + wc.Unlock() + } + + // All writes are done under the write lock for the file to ensure any + // readers are finished and blocked first. + wc.curFile.Lock() + defer wc.curFile.Unlock() + + // Open the current file if needed. This will typically only be the + // case when moving to the next file to write to or on initial database + // load. However, it might also be the case if rollbacks happened after + // file writes started during a transaction commit. + if wc.curFile.file == nil { + file, err := s.openWriteFileFunc(wc.curFileNum) + if err != nil { + return blockLocation{}, err + } + wc.curFile.file = file + } + + // Bitcoin network. + origOffset := wc.curOffset + hasher := crc32.New(castagnoli) + var scratch [4]byte + byteOrder.PutUint32(scratch[:], uint32(s.network)) + if err := s.writeData(scratch[:], "network"); err != nil { + return blockLocation{}, err + } + _, _ = hasher.Write(scratch[:]) + + // Block length. + byteOrder.PutUint32(scratch[:], blockLen) + if err := s.writeData(scratch[:], "block length"); err != nil { + return blockLocation{}, err + } + _, _ = hasher.Write(scratch[:]) + + // Serialized block. + if err := s.writeData(rawBlock[:], "block"); err != nil { + return blockLocation{}, err + } + _, _ = hasher.Write(rawBlock) + + // Castagnoli CRC-32 as a checksum of all the previous. + if err := s.writeData(hasher.Sum(nil), "checksum"); err != nil { + return blockLocation{}, err + } + + // Sync the file to disk. + if err := wc.curFile.file.Sync(); err != nil { + str := fmt.Sprintf("failed to sync file %d: %v", wc.curFileNum, + err) + return blockLocation{}, makeDbErr(database.ErrDriverSpecific, + str, err) + } + + loc := blockLocation{ + blockFileNum: wc.curFileNum, + fileOffset: origOffset, + blockLen: fullLen, + } + return loc, nil +} + +// readBlock reads the specified block record and returns the serialized block. +// It ensures the integrity of the block data by checking that the serialized +// network matches the current network associated with the block store and +// comparing the calculated checksum against the one stored in the flat file. +// This function also automatically handles all file management such as opening +// and closing files as necessary to stay within the maximum allowed open files +// limit. +// +// Returns ErrDriverSpecific if the data fails to read for any reason and +// ErrCorruption if the checksum of the read data doesn't match the checksum +// read from the file. +// +// Format: +func (s *blockStore) readBlock(hash *wire.ShaHash, loc blockLocation) ([]byte, error) { + // Get the referenced block file handle opening the file as needed. The + // function also handles closing files as needed to avoid going over the + // max allowed open files. 
+ blockFile, err := s.blockFile(loc.blockFileNum) + if err != nil { + return nil, err + } + + serializedData := make([]byte, loc.blockLen) + n, err := blockFile.file.ReadAt(serializedData, int64(loc.fileOffset)) + blockFile.RUnlock() + if err != nil { + str := fmt.Sprintf("failed to read block %s from file %d, "+ + "offset %d: %v", hash, loc.blockFileNum, loc.fileOffset, + err) + return nil, makeDbErr(database.ErrDriverSpecific, str, err) + } + + // Calculate the checksum of the read data and ensure it matches the + // serialized checksum. This will detect any data corruption in the + // flat file without having to do much more expensive merkle root + // calculations on the loaded block. + serializedChecksum := binary.BigEndian.Uint32(serializedData[n-4:]) + calculatedChecksum := crc32.Checksum(serializedData[:n-4], castagnoli) + if serializedChecksum != calculatedChecksum { + str := fmt.Sprintf("block data for block %s checksum "+ + "does not match - got %x, want %x", hash, + calculatedChecksum, serializedChecksum) + return nil, makeDbErr(database.ErrCorruption, str, nil) + } + + // The network associated with the block must match the current active + // network, otherwise somebody probably put the block files for the + // wrong network in the directory. + serializedNet := byteOrder.Uint32(serializedData[:4]) + if serializedNet != uint32(s.network) { + str := fmt.Sprintf("block data for block %s is for the "+ + "wrong network - got %d, want %d", hash, serializedNet, + uint32(s.network)) + return nil, makeDbErr(database.ErrDriverSpecific, str, nil) + } + + // The raw block excludes the network, length of the block, and + // checksum. + return serializedData[8 : n-4], nil +} + +// readBlockRegion reads the specified amount of data at the provided offset for +// a given block location. The offset is relative to the start of the +// serialized block (as opposed to the beginning of the block record). This +// function automatically handles all file management such as opening and +// closing files as necessary to stay within the maximum allowed open files +// limit. +// +// Returns ErrDriverSpecific if the data fails to read for any reason. +func (s *blockStore) readBlockRegion(loc blockLocation, offset, numBytes uint32) ([]byte, error) { + // Get the referenced block file handle opening the file as needed. The + // function also handles closing files as needed to avoid going over the + // max allowed open files. + blockFile, err := s.blockFile(loc.blockFileNum) + if err != nil { + return nil, err + } + + // Regions are offsets into the actual block, however the serialized + // data for a block includes an initial 4 bytes for network + 4 bytes + // for block length. Thus, add 8 bytes to adjust. + readOffset := loc.fileOffset + 8 + offset + serializedData := make([]byte, numBytes) + _, err = blockFile.file.ReadAt(serializedData, int64(readOffset)) + blockFile.RUnlock() + if err != nil { + str := fmt.Sprintf("failed to read region from block file %d, "+ + "offset %d, len %d: %v", loc.blockFileNum, readOffset, + numBytes, err) + return nil, makeDbErr(database.ErrDriverSpecific, str, err) + } + + return serializedData, nil +} + +// handleRollback rolls the block files on disk back to the provided file number +// and offset. This involves potentially deleting and truncating the files that +// were partially written. 
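+//
+// For example, if a failed write had advanced the cursor to file 3 at offset
+// 5000 while the last known good position was file 1 at offset 2000, files 3
+// and 2 would be deleted and file 1 truncated back to 2000 bytes.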
+// +// There are effectively two scenarios to consider here: +// 1) Transient write failures from which recovery is possible +// 2) More permanent failures such as hard disk death and/or removal +// +// In either case, the write cursor will be repositioned to the old block file +// offset regardless of any other errors that occur while attempting to undo +// writes. +// +// For the first scenario, this will lead to any data which failed to be undone +// being overwritten and thus behaves as desired as the system continues to run. +// +// For the second scenario, the metadata which stores the current write cursor +// position within the block files will not have been updated yet and thus if +// the system eventually recovers (perhaps the hard drive is reconnected), it +// will also lead to any data which failed to be undone being overwritten and +// thus behaves as desired. +// +// Therefore, any errors are simply logged at a warning level rather than being +// returned since there is nothing more that could be done about it anyways. +func (s *blockStore) handleRollback(oldBlockFileNum, oldBlockOffset uint32) { + // Grab the write cursor mutex since it is modified throughout this + // function. + wc := s.writeCursor + wc.Lock() + defer wc.Unlock() + + // Nothing to do if the rollback point is the same as the current write + // cursor. + if wc.curFileNum == oldBlockFileNum && wc.curOffset == oldBlockOffset { + return + } + + // Regardless of any failures that happen below, reposition the write + // cursor to the old block file and offset. + defer func() { + wc.curFileNum = oldBlockFileNum + wc.curOffset = oldBlockOffset + }() + + log.Debugf("ROLLBACK: Rolling back to file %d, offset %d", + oldBlockFileNum, oldBlockOffset) + + // Close the current write file if it needs to be deleted. Then delete + // all files that are newer than the provided rollback file while + // also moving the write cursor file backwards accordingly. + if wc.curFileNum > oldBlockFileNum { + wc.curFile.Lock() + if wc.curFile.file != nil { + _ = wc.curFile.file.Close() + wc.curFile.file = nil + } + wc.curFile.Unlock() + } + for ; wc.curFileNum > oldBlockFileNum; wc.curFileNum-- { + if err := s.deleteFileFunc(wc.curFileNum); err != nil { + _ = log.Warnf("ROLLBACK: Failed to delete block file "+ + "number %d: %v", wc.curFileNum, err) + return + } + } + + // Open the file for the current write cursor if needed. + wc.curFile.Lock() + if wc.curFile.file == nil { + obf, err := s.openWriteFileFunc(wc.curFileNum) + if err != nil { + wc.curFile.Unlock() + _ = log.Warnf("ROLLBACK: %v", err) + return + } + wc.curFile.file = obf + } + + // Truncate the to the provided rollback offset. + if err := wc.curFile.file.Truncate(int64(oldBlockOffset)); err != nil { + wc.curFile.Unlock() + _ = log.Warnf("ROLLBACK: Failed to truncate file %d: %v", + wc.curFileNum, err) + return + } + + // Sync the file to disk. + err := wc.curFile.file.Sync() + wc.curFile.Unlock() + if err != nil { + _ = log.Warnf("ROLLBACK: Failed to sync file %d: %v", + wc.curFileNum, err) + return + } + return +} + +// scanBlockFiles searches the database directory for all flat block files to +// find the end of the most recent file. This position is considered the +// current write cursor which is also stored in the metadata. Thus, it is used +// to detect unexpected shutdowns in the middle of writes so the block files +// can be reconciled. 
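+//
+// For example, if the directory contains 000000000.fdb and 000000001.fdb and
+// the second file is 1024 bytes long, the scan returns (1, 1024), meaning that
+// new writes resume in file 1 at offset 1024.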
+func scanBlockFiles(dbPath string) (int, uint32) { + lastFile := -1 + fileLen := uint32(0) + for i := 0; ; i++ { + filePath := blockFilePath(dbPath, uint32(i)) + st, err := os.Stat(filePath) + if err != nil { + break + } + lastFile = i + + fileLen = uint32(st.Size()) + } + + log.Tracef("Scan found latest block file #%d with length %d", lastFile, + fileLen) + return lastFile, fileLen +} + +// newBlockStore returns a new block store with the current block file number +// and offset set and all fields initialized. +func newBlockStore(basePath string, network wire.BitcoinNet) *blockStore { + // Look for the end of the latest block to file to determine what the + // write cursor position is from the viewpoing of the block files on + // disk. + fileNum, fileOff := scanBlockFiles(basePath) + if fileNum == -1 { + fileNum = 0 + fileOff = 0 + } + + store := &blockStore{ + network: network, + basePath: basePath, + maxBlockFileSize: maxBlockFileSize, + openBlockFiles: make(map[uint32]*lockableFile), + openBlocksLRU: list.New(), + fileNumToLRUElem: make(map[uint32]*list.Element), + + writeCursor: &writeCursor{ + curFile: &lockableFile{}, + curFileNum: uint32(fileNum), + curOffset: uint32(fileOff), + }, + } + store.openFileFunc = store.openFile + store.openWriteFileFunc = store.openWriteFile + store.deleteFileFunc = store.deleteFile + return store +} diff --git a/database2/ffldb/db.go b/database2/ffldb/db.go new file mode 100644 index 00000000..93bd03dd --- /dev/null +++ b/database2/ffldb/db.go @@ -0,0 +1,2092 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package ffldb + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "path/filepath" + "runtime" + "sort" + "sync" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/database2/internal/treap" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/goleveldb/leveldb" + "github.com/btcsuite/goleveldb/leveldb/comparer" + ldberrors "github.com/btcsuite/goleveldb/leveldb/errors" + "github.com/btcsuite/goleveldb/leveldb/filter" + "github.com/btcsuite/goleveldb/leveldb/iterator" + "github.com/btcsuite/goleveldb/leveldb/opt" + "github.com/btcsuite/goleveldb/leveldb/util" +) + +const ( + // metadataDbName is the name used for the metadata database. + metadataDbName = "metadata" + + // blockHdrSize is the size of a block header. This is simply the + // constant from wire and is only provided here for convenience since + // wire.MaxBlockHeaderPayload is quite long. + blockHdrSize = wire.MaxBlockHeaderPayload + + // blockHdrOffset defines the offsets into a block index row for the + // block header. + // + // The serialized block index row format is: + // + blockHdrOffset = blockLocSize +) + +var ( + // byteOrder is the preferred byte order used through the database and + // block files. Sometimes big endian will be used to allow ordered byte + // sortable integer values. + byteOrder = binary.LittleEndian + + // bucketIndexPrefix is the prefix used for all entries in the bucket + // index. + bucketIndexPrefix = []byte("bidx") + + // curBucketIDKeyName is the name of the key used to keep track of the + // current bucket ID counter. + curBucketIDKeyName = []byte("bidx-cbid") + + // metadataBucketID is the ID of the top-level metadata bucket. + // It is the value 0 encoded as an unsigned big-endian uint32. 
+ metadataBucketID = [4]byte{} + + // blockIdxBucketID is the ID of the internal block metadata bucket. + // It is the value 1 encoded as an unsigned big-endian uint32. + blockIdxBucketID = [4]byte{0x00, 0x00, 0x00, 0x01} + + // blockIdxBucketName is the bucket used internally to track block + // metadata. + blockIdxBucketName = []byte("ffldb-blockidx") + + // writeLocKeyName is the key used to store the current write file + // location. + writeLocKeyName = []byte("ffldb-writeloc") +) + +// Common error strings. +const ( + // errDbNotOpenStr is the text to use for the database.ErrDbNotOpen + // error code. + errDbNotOpenStr = "database is not open" + + // errTxClosedStr is the text to use for the database.ErrTxClosed error + // code. + errTxClosedStr = "database tx is closed" +) + +// bulkFetchData is allows a block location to be specified along with the +// index it was requested from. This in turn allows the bulk data loading +// functions to sort the data accesses based on the location to improve +// performance while keeping track of which result the data is for. +type bulkFetchData struct { + *blockLocation + replyIndex int +} + +// bulkFetchDataSorter implements sort.Interface to allow a slice of +// bulkFetchData to be sorted. In particular it sorts by file and then +// offset so that reads from files are grouped and linear. +type bulkFetchDataSorter []bulkFetchData + +// Len returns the number of items in the slice. It is part of the +// sort.Interface implementation. +func (s bulkFetchDataSorter) Len() int { + return len(s) +} + +// Swap swaps the items at the passed indices. It is part of the +// sort.Interface implementation. +func (s bulkFetchDataSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less returns whether the item with index i should sort before the item with +// index j. It is part of the sort.Interface implementation. +func (s bulkFetchDataSorter) Less(i, j int) bool { + if s[i].blockFileNum < s[j].blockFileNum { + return true + } + if s[i].blockFileNum > s[j].blockFileNum { + return false + } + + return s[i].fileOffset < s[j].fileOffset +} + +// makeDbErr creates a database.Error given a set of arguments. +func makeDbErr(c database.ErrorCode, desc string, err error) database.Error { + return database.Error{ErrorCode: c, Description: desc, Err: err} +} + +// convertErr converts the passed leveldb error into a database error with an +// equivalent error code and the passed description. It also sets the passed +// error as the underlying error. +func convertErr(desc string, ldbErr error) database.Error { + // Use the driver-specific error code by default. The code below will + // update this with the converted error if it's recognized. + var code = database.ErrDriverSpecific + + switch { + // Database corruption errors. + case ldberrors.IsCorrupted(ldbErr): + code = database.ErrCorruption + + // Database open/create errors. + case ldbErr == leveldb.ErrClosed: + code = database.ErrDbNotOpen + + // Transaction errors. + case ldbErr == leveldb.ErrSnapshotReleased: + code = database.ErrTxClosed + case ldbErr == leveldb.ErrIterReleased: + code = database.ErrTxClosed + } + + return database.Error{ErrorCode: code, Description: desc, Err: ldbErr} +} + +// copySlice returns a copy of the passed slice. This is mostly used to copy +// leveldb iterator keys and values since they are only valid until the iterator +// is moved instead of during the entirety of the transaction. 
+func copySlice(slice []byte) []byte { + ret := make([]byte, len(slice)) + copy(ret, slice) + return ret +} + +// cursor is an internal type used to represent a cursor over key/value pairs +// and nested buckets of a bucket and implements the database.Cursor interface. +type cursor struct { + bucket *bucket + dbIter iterator.Iterator + pendingIter iterator.Iterator + currentIter iterator.Iterator +} + +// Enforce cursor implements the database.Cursor interface. +var _ database.Cursor = (*cursor)(nil) + +// Bucket returns the bucket the cursor was created for. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Bucket() database.Bucket { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil + } + + return c.bucket +} + +// Delete removes the current key/value pair the cursor is at without +// invalidating the cursor. +// +// Returns the following errors as required by the interface contract: +// - ErrIncompatibleValue if attempted when the cursor points to a nested +// bucket +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Delete() error { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return err + } + + // Error if the cursor is exhausted. + if c.currentIter == nil { + str := "cursor is exhausted" + return makeDbErr(database.ErrIncompatibleValue, str, nil) + } + + // Do not allow buckets to be deleted via the cursor. + key := c.currentIter.Key() + if bytes.HasPrefix(key, bucketIndexPrefix) { + str := "buckets may not be deleted from a cursor" + return makeDbErr(database.ErrIncompatibleValue, str, nil) + } + + c.bucket.tx.deleteKey(copySlice(key), true) + return nil +} + +// skipPendingUpdates skips any keys at the current database iterator position +// that are being updated by the transaction. The forwards flag indicates the +// direction the cursor is moving moved. +func (c *cursor) skipPendingUpdates(forwards bool) { + for c.dbIter.Valid() { + var skip bool + key := c.dbIter.Key() + if _, ok := c.bucket.tx.pendingRemove[string(key)]; ok { + skip = true + } else if c.bucket.tx.pendingKeys.Has(key) { + skip = true + } + if !skip { + break + } + + if forwards { + c.dbIter.Next() + } else { + c.dbIter.Prev() + } + } +} + +// chooseIterator first skips any entries in the database iterator that are +// being updated by the transaction and sets the current iterator to the +// appropriate iterator depending on their validatidy and the order they compare +// in while taking into account the direction flag. When the cursor is being +// moved forwards and both iterators are valid, the iterator with the smaller +// key is chosen and vice versa when the cursor is being moved backwards. +func (c *cursor) chooseIterator(forwards bool) bool { + // Skip any keys at the current database iterator position that are + // being updated by the transaction. + c.skipPendingUpdates(forwards) + + // When bother iterators are exhausted, the cursor is exhausted too. + if !c.dbIter.Valid() && !c.pendingIter.Valid() { + c.currentIter = nil + return false + } + + // Choose the database iterator when the pending keys iterator is + // exhausted. + if !c.pendingIter.Valid() { + c.currentIter = c.dbIter + return true + } + + // Choose the pending keys iterator when the database iterator is + // exhausted. 
+ if !c.dbIter.Valid() { + c.currentIter = c.pendingIter + return true + } + + // Both iterators are valid, so choose the iterator with either the + // smaller or larger key depending on the forwards flag. + compare := bytes.Compare(c.dbIter.Key(), c.pendingIter.Key()) + if (forwards && compare > 0) || (!forwards && compare < 0) { + c.currentIter = c.pendingIter + } else { + c.currentIter = c.dbIter + } + return true +} + +// First positions the cursor at the first key/value pair and returns whether or +// not the pair exists. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) First() bool { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return false + } + + // Seek to the first key in both the database and pending iterators and + // choose the iterator that is both valid and has the smaller key. + c.dbIter.First() + c.pendingIter.First() + return c.chooseIterator(true) +} + +// Last positions the cursor at the last key/value pair and returns whether or +// not the pair exists. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Last() bool { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return false + } + + // Seek to the last key in both the database and pending iterators and + // choose the iterator that is both valid and has the larger key. + c.dbIter.Last() + c.pendingIter.Last() + return c.chooseIterator(false) +} + +// Next moves the cursor one key/value pair forward and returns whether or not +// the pair exists. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Next() bool { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return false + } + + // Nothing to return if cursor is exhausted. + if c.currentIter == nil { + return false + } + + // Move the current iterator to the next entry and choose the iterator + // that is both valid and has the smaller key. + c.currentIter.Next() + return c.chooseIterator(true) +} + +// Prev moves the cursor one key/value pair backward and returns whether or not +// the pair exists. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Prev() bool { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return false + } + + // Nothing to return if cursor is exhausted. + if c.currentIter == nil { + return false + } + + // Move the current iterator to the previous entry and choose the + // iterator that is both valid and has the larger key. + c.currentIter.Prev() + return c.chooseIterator(false) +} + +// Seek positions the cursor at the first key/value pair that is greater than or +// equal to the passed seek key. Returns false if no suitable key was found. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Seek(seek []byte) bool { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return false + } + + // Seek to the provided key in both the database and pending iterators + // then choose the iterator that is both valid and has the larger key. + seekKey := bucketizedKey(c.bucket.id, seek) + c.dbIter.Seek(seekKey) + c.pendingIter.Seek(seekKey) + return c.chooseIterator(true) +} + +// rawKey returns the current key the cursor is pointing to without stripping +// the current bucket prefix or bucket index prefix. 
+func (c *cursor) rawKey() []byte { + // Nothing to return if cursor is exhausted. + if c.currentIter == nil { + return nil + } + + return copySlice(c.currentIter.Key()) +} + +// Key returns the current key the cursor is pointing to. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Key() []byte { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil + } + + // Nothing to return if cursor is exhausted. + if c.currentIter == nil { + return nil + } + + // Slice out the actual key name and make a copy since it is no longer + // valid after iterating to the next item. + // + // The key is after the bucket index prefix and parent ID when the + // cursor is pointing to a nested bucket. + key := c.currentIter.Key() + if bytes.HasPrefix(key, bucketIndexPrefix) { + key = key[len(bucketIndexPrefix)+4:] + return copySlice(key) + } + + // The key is after the bucket ID when the cursor is pointing to a + // normal entry. + key = key[len(c.bucket.id):] + return copySlice(key) +} + +// rawValue returns the current value the cursor is pointing to without +// stripping without filtering bucket index values. +func (c *cursor) rawValue() []byte { + // Nothing to return if cursor is exhausted. + if c.currentIter == nil { + return nil + } + + return copySlice(c.currentIter.Value()) +} + +// Value returns the current value the cursor is pointing to. This will be nil +// for nested buckets. +// +// This function is part of the database.Cursor interface implementation. +func (c *cursor) Value() []byte { + // Ensure transaction state is valid. + if err := c.bucket.tx.checkClosed(); err != nil { + return nil + } + + // Nothing to return if cursor is exhausted. + if c.currentIter == nil { + return nil + } + + // Return nil for the value when the cursor is pointing to a nested + // bucket. + if bytes.HasPrefix(c.currentIter.Key(), bucketIndexPrefix) { + return nil + } + + return copySlice(c.currentIter.Value()) +} + +// cursorType defines the type of cursor to create. +type cursorType int + +// The following constants define the allowed cursor types. +const ( + // ctKeys iterates through all of the keys in a given bucket. + ctKeys cursorType = iota + + // ctBuckets iterates through all directly nested buckets in a given + // bucket. + ctBuckets + + // ctFull iterates through both the keys and the directly nested buckets + // in a given bucket. + ctFull +) + +// cursorFinalizer is either invoked when a cursor is being garbage collected or +// called manually to ensure the underlying cursor iterators are released. +func cursorFinalizer(c *cursor) { + c.dbIter.Release() + c.pendingIter.Release() +} + +// newCursor returns a new cursor for the given bucket, bucket ID, and cursor +// type. +// +// NOTE: The caller is responsible for calling the cursorFinalizer function on +// the returned cursor. +func newCursor(b *bucket, bucketID []byte, cursorTyp cursorType) *cursor { + var dbIter, pendingIter iterator.Iterator + switch cursorTyp { + case ctKeys: + keyRange := util.BytesPrefix(bucketID) + dbIter = b.tx.snapshot.NewIterator(keyRange, nil) + pendingKeyIter := newLdbTreapIter(b.tx, keyRange) + pendingIter = pendingKeyIter + + case ctBuckets: + // The serialized bucket index key format is: + // + + // Create an iterator for the both the database and the pending + // keys which are prefixed by the bucket index identifier and + // the provided bucket ID. 
+ prefix := make([]byte, len(bucketIndexPrefix)+4) + copy(prefix, bucketIndexPrefix) + copy(prefix[len(bucketIndexPrefix):], bucketID) + bucketRange := util.BytesPrefix(prefix) + + dbIter = b.tx.snapshot.NewIterator(bucketRange, nil) + pendingBucketIter := newLdbTreapIter(b.tx, bucketRange) + pendingIter = pendingBucketIter + + case ctFull: + fallthrough + default: + // The serialized bucket index key format is: + // + prefix := make([]byte, len(bucketIndexPrefix)+4) + copy(prefix, bucketIndexPrefix) + copy(prefix[len(bucketIndexPrefix):], bucketID) + bucketRange := util.BytesPrefix(prefix) + keyRange := util.BytesPrefix(bucketID) + + // Since both keys and buckets are needed from the database, + // create an individual iterator for each prefix and then create + // a merged iterator from them. + dbKeyIter := b.tx.snapshot.NewIterator(keyRange, nil) + dbBucketIter := b.tx.snapshot.NewIterator(bucketRange, nil) + iters := []iterator.Iterator{dbKeyIter, dbBucketIter} + dbIter = iterator.NewMergedIterator(iters, + comparer.DefaultComparer, true) + + // Since both keys and buckets are needed from the pending keys, + // create an individual iterator for each prefix and then create + // a merged iterator from them. + pendingKeyIter := newLdbTreapIter(b.tx, keyRange) + pendingBucketIter := newLdbTreapIter(b.tx, bucketRange) + iters = []iterator.Iterator{pendingKeyIter, pendingBucketIter} + pendingIter = iterator.NewMergedIterator(iters, + comparer.DefaultComparer, true) + } + + // Create the cursor using the iterators. + return &cursor{bucket: b, dbIter: dbIter, pendingIter: pendingIter} +} + +// bucket is an internal type used to represent a collection of key/value pairs +// and implements the database.Bucket interface. +type bucket struct { + tx *transaction + id [4]byte +} + +// Enforce bucket implements the database.Bucket interface. +var _ database.Bucket = (*bucket)(nil) + +// bucketIndexKey returns the actual key to use for storing and retrieving a +// child bucket in the bucket index. This is required because additional +// information is needed to distinguish nested buckets with the same name. +func bucketIndexKey(parentID [4]byte, key []byte) []byte { + // The serialized bucket index key format is: + // + indexKey := make([]byte, len(bucketIndexPrefix)+4+len(key)) + copy(indexKey, bucketIndexPrefix) + copy(indexKey[len(bucketIndexPrefix):], parentID[:]) + copy(indexKey[len(bucketIndexPrefix)+4:], key) + return indexKey +} + +// bucketizedKey returns the actual key to use for storing and retrieving a key +// for the provided bucket ID. This is required because bucketizing is handled +// through the use of a unique prefix per bucket. +func bucketizedKey(bucketID [4]byte, key []byte) []byte { + // The serialized block index key format is: + // + bKey := make([]byte, 4+len(key)) + copy(bKey, bucketID[:]) + copy(bKey[4:], key) + return bKey +} + +// Bucket retrieves a nested bucket with the given key. Returns nil if +// the bucket does not exist. +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) Bucket(key []byte) database.Bucket { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return nil + } + + // Attempt to fetch the ID for the child bucket. The bucket does not + // exist if the bucket index entry does not exist. 
+ childID := b.tx.fetchKey(bucketIndexKey(b.id, key)) + if childID == nil { + return nil + } + + childBucket := &bucket{tx: b.tx} + copy(childBucket.id[:], childID) + return childBucket +} + +// CreateBucket creates and returns a new nested bucket with the given key. +// +// Returns the following errors as required by the interface contract: +// - ErrBucketExists if the bucket already exists +// - ErrBucketNameRequired if the key is empty +// - ErrIncompatibleValue if the key is otherwise invalid for the particular +// implementation +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) CreateBucket(key []byte) (database.Bucket, error) { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return nil, err + } + + // Ensure the transaction is writable. + if !b.tx.writable { + str := "create bucket requires a writable database transaction" + return nil, makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Ensure a key was provided. + if len(key) == 0 { + str := "create bucket requires a key" + return nil, makeDbErr(database.ErrBucketNameRequired, str, nil) + } + + // Ensure bucket does not already exist. + bidxKey := bucketIndexKey(b.id, key) + if b.tx.hasKey(bidxKey) { + str := "bucket already exists" + return nil, makeDbErr(database.ErrBucketExists, str, nil) + } + + // Find the appropriate next bucket ID to use for the new bucket. In + // the case of the special internal block index, keep the fixed ID. + var childID [4]byte + if b.id == metadataBucketID && bytes.Equal(key, blockIdxBucketName) { + childID = blockIdxBucketID + } else { + var err error + childID, err = b.tx.nextBucketID() + if err != nil { + return nil, err + } + } + + // Add the new bucket to the bucket index. + if err := b.tx.putKey(bidxKey, childID[:]); err != nil { + str := fmt.Sprintf("failed to create bucket with key %q", key) + return nil, convertErr(str, err) + } + return &bucket{tx: b.tx, id: childID}, nil +} + +// CreateBucketIfNotExists creates and returns a new nested bucket with the +// given key if it does not already exist. +// +// Returns the following errors as required by the interface contract: +// - ErrBucketNameRequired if the key is empty +// - ErrIncompatibleValue if the key is otherwise invalid for the particular +// implementation +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) CreateBucketIfNotExists(key []byte) (database.Bucket, error) { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return nil, err + } + + // Ensure the transaction is writable. + if !b.tx.writable { + str := "create bucket requires a writable database transaction" + return nil, makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Return existing bucket if it already exists, otherwise create it. + if bucket := b.Bucket(key); bucket != nil { + return bucket, nil + } + return b.CreateBucket(key) +} + +// DeleteBucket removes a nested bucket with the given key. 
+// +// Returns the following errors as required by the interface contract: +// - ErrBucketNotFound if the specified bucket does not exist +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) DeleteBucket(key []byte) error { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return err + } + + // Ensure the transaction is writable. + if !b.tx.writable { + str := "delete bucket requires a writable database transaction" + return makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Attempt to fetch the ID for the child bucket. The bucket does not + // exist if the bucket index entry does not exist. In the case of the + // special internal block index, keep the fixed ID. + bidxKey := bucketIndexKey(b.id, key) + childID := b.tx.fetchKey(bidxKey) + if childID == nil { + str := fmt.Sprintf("bucket %q does not exist", key) + return makeDbErr(database.ErrBucketNotFound, str, nil) + } + + // Remove all nested buckets and their keys. + childIDs := [][]byte{childID} + for len(childIDs) > 0 { + childID = childIDs[len(childIDs)-1] + childIDs = childIDs[:len(childIDs)-1] + + // Delete all keys in the nested bucket. + keyCursor := newCursor(b, childID, ctKeys) + for ok := keyCursor.First(); ok; ok = keyCursor.Next() { + b.tx.deleteKey(keyCursor.rawKey(), false) + } + cursorFinalizer(keyCursor) + + // Iterate through all nested buckets. + bucketCursor := newCursor(b, childID, ctBuckets) + for ok := bucketCursor.First(); ok; ok = bucketCursor.Next() { + // Push the id of the nested bucket onto the stack for + // the next iteration. + childID := bucketCursor.rawValue() + childIDs = append(childIDs, childID) + + // Remove the nested bucket from the bucket index. + b.tx.deleteKey(bucketCursor.rawKey(), false) + } + cursorFinalizer(bucketCursor) + } + + // Remove the nested bucket from the bucket index. Any buckets nested + // under it were already removed above. + b.tx.deleteKey(bidxKey, true) + return nil +} + +// Cursor returns a new cursor, allowing for iteration over the bucket's +// key/value pairs and nested buckets in forward or backward order. +// +// You must seek to a position using the First, Last, or Seek functions before +// calling the Next, Prev, Key, or Value functions. Failure to do so will +// result in the same return values as an exhausted cursor, which is false for +// the Prev and Next functions and nil for Key and Value functions. +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) Cursor() database.Cursor { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return &cursor{bucket: b} + } + + // Create the cursor and setup a runtime finalizer to ensure the + // iterators are released when the cursor is garbage collected. + c := newCursor(b, b.id[:], ctFull) + runtime.SetFinalizer(c, cursorFinalizer) + return c +} + +// ForEach invokes the passed function with every key/value pair in the bucket. +// This does not include nested buckets or the key/value pairs within those +// nested buckets. +// +// WARNING: It is not safe to mutate data while iterating with this method. +// Doing so may cause the underlying cursor to be invalidated and return +// unexpected keys and/or values. 
+// +// Returns the following errors as required by the interface contract: +// - ErrTxClosed if the transaction has already been closed +// +// NOTE: The values returned by this function are only valid during a +// transaction. Attempting to access them after a transaction has ended will +// likely result in an access violation. +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) ForEach(fn func(k, v []byte) error) error { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return err + } + + // Invoke the callback for each cursor item. Return the error returned + // from the callback when it is non-nil. + c := newCursor(b, b.id[:], ctKeys) + defer cursorFinalizer(c) + for ok := c.First(); ok; ok = c.Next() { + err := fn(c.Key(), c.Value()) + if err != nil { + return err + } + } + + return nil +} + +// ForEachBucket invokes the passed function with the key of every nested bucket +// in the current bucket. This does not include any nested buckets within those +// nested buckets. +// +// WARNING: It is not safe to mutate data while iterating with this method. +// Doing so may cause the underlying cursor to be invalidated and return +// unexpected keys. +// +// Returns the following errors as required by the interface contract: +// - ErrTxClosed if the transaction has already been closed +// +// NOTE: The values returned by this function are only valid during a +// transaction. Attempting to access them after a transaction has ended will +// likely result in an access violation. +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) ForEachBucket(fn func(k []byte) error) error { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return err + } + + // Invoke the callback for each cursor item. Return the error returned + // from the callback when it is non-nil. + c := newCursor(b, b.id[:], ctBuckets) + defer cursorFinalizer(c) + for ok := c.First(); ok; ok = c.Next() { + err := fn(c.Key()) + if err != nil { + return err + } + } + + return nil +} + +// Writable returns whether or not the bucket is writable. +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) Writable() bool { + return b.tx.writable +} + +// Put saves the specified key/value pair to the bucket. Keys that do not +// already exist are added and keys that already exist are overwritten. +// +// Returns the following errors as required by the interface contract: +// - ErrKeyRequired if the key is empty +// - ErrIncompatibleValue if the key is the same as an existing bucket +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) Put(key, value []byte) error { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return err + } + + // Ensure the transaction is writable. + if !b.tx.writable { + str := "setting a key requires a writable database transaction" + return makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Ensure a key was provided. + if len(key) == 0 { + str := "put requires a key" + return makeDbErr(database.ErrKeyRequired, str, nil) + } + + return b.tx.putKey(bucketizedKey(b.id, key), value) +} + +// Get returns the value for the given key. Returns nil if the key does not +// exist in this bucket. 
An empty slice is returned for keys that exist but +// have no value assigned. +// +// NOTE: The value returned by this function is only valid during a transaction. +// Attempting to access it after a transaction has ended results in undefined +// behavior. Additionally, the value must NOT be modified by the caller. +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) Get(key []byte) []byte { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return nil + } + + // Nothing to return if there is no key. + if len(key) == 0 { + return nil + } + + return b.tx.fetchKey(bucketizedKey(b.id, key)) +} + +// Delete removes the specified key from the bucket. Deleting a key that does +// not exist does not return an error. +// +// Returns the following errors as required by the interface contract: +// - ErrKeyRequired if the key is empty +// - ErrIncompatibleValue if the key is the same as an existing bucket +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Bucket interface implementation. +func (b *bucket) Delete(key []byte) error { + // Ensure transaction state is valid. + if err := b.tx.checkClosed(); err != nil { + return err + } + + // Ensure the transaction is writable. + if !b.tx.writable { + str := "deleting a value requires a writable database transaction" + return makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Nothing to do if there is no key. + if len(key) == 0 { + return nil + } + + b.tx.deleteKey(bucketizedKey(b.id, key), true) + return nil +} + +// pendingBlock houses a block that will be written to disk when the database +// transaction is committed. +type pendingBlock struct { + hash *wire.ShaHash + bytes []byte +} + +// transaction represents a database transaction. It can either be read-only or +// read-write and implements the database.Bucket interface. The transaction +// provides a root bucket against which all read and writes occur. +type transaction struct { + managed bool // Is the transaction managed? + closed bool // Is the transaction closed? + writable bool // Is the transaction writable? + db *db // DB instance the tx was created from. + snapshot *leveldb.Snapshot // Underlying snapshot for txns. + metaBucket *bucket // The root metadata bucket. + blockIdxBucket *bucket // The block index bucket. + + // Blocks that need to be stored on commit. The pendingBlocks map is + // kept to allow quick lookups of pending data by block hash. + pendingBlocks map[wire.ShaHash]int + pendingBlockData []pendingBlock + + // Keys that need to be stored or deleted on commit. + pendingKeys *treap.Mutable + pendingRemove map[string]struct{} + + // Active iterators that need to be notified when the pending keys have + // been updated so the cursors can properly handle updates to the + // transaction state. + activeIterLock sync.RWMutex + activeIters []*treap.Iterator +} + +// Enforce transaction implements the database.Tx interface. +var _ database.Tx = (*transaction)(nil) + +// removeActiveIter removes the passed iterator from the list of active +// iterators against the pending keys treap. +func (tx *transaction) removeActiveIter(iter *treap.Iterator) { + // An indexing for loop is intentionally used over a range here as range + // does not reevaluate the slice on each iteration nor does it adjust + // the index for the modified slice. 
+ tx.activeIterLock.Lock() + for i := 0; i < len(tx.activeIters); i++ { + if tx.activeIters[i] == iter { + copy(tx.activeIters[i:], tx.activeIters[i+1:]) + tx.activeIters[len(tx.activeIters)-1] = nil + tx.activeIters = tx.activeIters[:len(tx.activeIters)-1] + } + } + tx.activeIterLock.Unlock() +} + +// addActiveIter adds the passed iterator to the list of active iterators for +// the pending keys treap. +func (tx *transaction) addActiveIter(iter *treap.Iterator) { + tx.activeIterLock.Lock() + tx.activeIters = append(tx.activeIters, iter) + tx.activeIterLock.Unlock() +} + +// notifyActiveIters notifies all of the active iterators for the pending keys +// treap that it has been updated. +func (tx *transaction) notifyActiveIters() { + tx.activeIterLock.RLock() + for _, iter := range tx.activeIters { + iter.ForceReseek() + } + tx.activeIterLock.RUnlock() +} + +// checkClosed returns an error if the the database or transaction is closed. +func (tx *transaction) checkClosed() error { + // The transaction is no longer valid if it has been closed. + if tx.closed { + return makeDbErr(database.ErrTxClosed, errTxClosedStr, nil) + } + + return nil +} + +// hasKey returns whether or not the provided key exists in the database while +// taking into account the current transaction state. +func (tx *transaction) hasKey(key []byte) bool { + // When the transaction is writable, check the pending transaction + // state first. + if tx.writable { + if _, ok := tx.pendingRemove[string(key)]; ok { + return false + } + if tx.pendingKeys.Has(key) { + return true + } + } + + // Consult the database. + hasKey, _ := tx.snapshot.Has(key, nil) + return hasKey +} + +// putKey adds the provided key to the list of keys to be updated in the +// database when the transaction is committed. +// +// NOTE: This function must only be called on a writable transaction. Since it +// is an internal helper function, it does not check. +func (tx *transaction) putKey(key, value []byte) error { + // Prevent the key from being deleted if it was previously scheduled + // to be deleted on transaction commit. + delete(tx.pendingRemove, string(key)) + + // Add the key/value pair to the list to be written on transaction + // commit. + tx.pendingKeys.Put(key, value) + tx.notifyActiveIters() + return nil +} + +// fetchKey attempts to fetch the provided key from the database while taking +// into account the current transaction state. Returns nil if the key does not +// exist. +func (tx *transaction) fetchKey(key []byte) []byte { + // When the transaction is writable, check the pending transaction + // state first. + if tx.writable { + if _, ok := tx.pendingRemove[string(key)]; ok { + return nil + } + if value := tx.pendingKeys.Get(key); value != nil { + return value + } + } + + value, err := tx.snapshot.Get(key, nil) + if err != nil { + return nil + } + return value +} + +// deleteKey adds the provided key to the list of keys to be deleted from the +// database when the transaction is committed. The notify iterators flag is +// useful to delay notifying iterators about the changes during bulk deletes. +// +// NOTE: This function must only be called on a writable transaction. Since it +// is an internal helper function, it does not check. +func (tx *transaction) deleteKey(key []byte, notifyIterators bool) { + // Remove the key from the list of pendings keys to be written on + // transaction commit if needed. + tx.pendingKeys.Delete(key) + + // Add the key to the list to be deleted on transaction commit. 
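+	// Once recorded below, hasKey and fetchKey above report the key as gone
+	// for the remainder of this transaction, which yields read-your-own-writes
+	// semantics.  A minimal sketch from the caller's point of view (error
+	// handling elided):
+	//
+	//   bucket.Put([]byte("k"), []byte("v"))
+	//   bucket.Delete([]byte("k"))
+	//   bucket.Get([]byte("k")) // nil, even before the transaction commits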
+ if tx.pendingRemove == nil { + tx.pendingRemove = make(map[string]struct{}) + } + tx.pendingRemove[string(key)] = struct{}{} + + // Notify the active iterators about the change if the flag is set. + if notifyIterators { + tx.notifyActiveIters() + } +} + +// nextBucketID returns the next bucket ID to use for creating a new bucket. +// +// NOTE: This function must only be called on a writable transaction. Since it +// is an internal helper function, it does not check. +func (tx *transaction) nextBucketID() ([4]byte, error) { + // Load the currently highest used bucket ID. + curIDBytes := tx.fetchKey(curBucketIDKeyName) + curBucketNum := binary.BigEndian.Uint32(curIDBytes) + + // Increment and update the current bucket ID and return it. + var nextBucketID [4]byte + binary.BigEndian.PutUint32(nextBucketID[:], curBucketNum+1) + if err := tx.putKey(curBucketIDKeyName, nextBucketID[:]); err != nil { + return [4]byte{}, err + } + return nextBucketID, nil +} + +// Metadata returns the top-most bucket for all metadata storage. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) Metadata() database.Bucket { + return tx.metaBucket +} + +// hasBlock returns whether or not a block with the given hash exists. +func (tx *transaction) hasBlock(hash *wire.ShaHash) bool { + // Return true if the block is pending to be written on commit since + // it exists from the viewpoint of this transaction. + if _, exists := tx.pendingBlocks[*hash]; exists { + return true + } + + return tx.hasKey(bucketizedKey(blockIdxBucketID, hash[:])) +} + +// StoreBlock stores the provided block into the database. There are no checks +// to ensure the block connects to a previous block, contains double spends, or +// any additional functionality such as transaction indexing. It simply stores +// the block in the database. +// +// Returns the following errors as required by the interface contract: +// - ErrBlockExists when the block hash already exists +// - ErrTxNotWritable if attempted against a read-only transaction +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) StoreBlock(block *btcutil.Block) error { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return err + } + + // Ensure the transaction is writable. + if !tx.writable { + str := "store block requires a writable database transaction" + return makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Reject the block if it already exists. + blockHash := block.Sha() + if tx.hasBlock(blockHash) { + str := fmt.Sprintf("block %s already exists", blockHash) + return makeDbErr(database.ErrBlockExists, str, nil) + } + + blockBytes, err := block.Bytes() + if err != nil { + str := fmt.Sprintf("failed to get serialized bytes for block %s", + blockHash) + return makeDbErr(database.ErrDriverSpecific, str, err) + } + + // Add the block to be stored to the list of pending blocks to store + // when the transaction is committed. Also, add it to pending blocks + // map so it is easy to determine the block is pending based on the + // block hash. 
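+	// For example, after storing two blocks A and B within the same
+	// transaction the pending state conceptually looks like:
+	//
+	//   pendingBlockData = [{A, bytesA}, {B, bytesB}]
+	//   pendingBlocks    = {A: 0, B: 1}
+	//
+	// so hasBlock and the various fetch functions can locate pending block
+	// data by hash without scanning the slice.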
+ if tx.pendingBlocks == nil { + tx.pendingBlocks = make(map[wire.ShaHash]int) + } + tx.pendingBlocks[*blockHash] = len(tx.pendingBlockData) + tx.pendingBlockData = append(tx.pendingBlockData, pendingBlock{ + hash: blockHash, + bytes: blockBytes, + }) + log.Tracef("Added block %s to pending blocks", blockHash) + + return nil +} + +// HasBlock returns whether or not a block with the given hash exists in the +// database. +// +// Returns the following errors as required by the interface contract: +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) HasBlock(hash *wire.ShaHash) (bool, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return false, err + } + + return tx.hasBlock(hash), nil +} + +// HasBlocks returns whether or not the blocks with the provided hashes +// exist in the database. +// +// Returns the following errors as required by the interface contract: +// - ErrTxClosed if the transaction has already been closed +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) HasBlocks(hashes []wire.ShaHash) ([]bool, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + results := make([]bool, len(hashes)) + for i := range hashes { + results[i] = tx.hasBlock(&hashes[i]) + } + + return results, nil +} + +// fetchBlockRow fetches the metadata stored in the block index for the provided +// hash. It will return ErrBlockNotFound if there is no entry. +func (tx *transaction) fetchBlockRow(hash *wire.ShaHash) ([]byte, error) { + blockRow := tx.blockIdxBucket.Get(hash[:]) + if blockRow == nil { + str := fmt.Sprintf("block %s does not exist", hash) + return nil, makeDbErr(database.ErrBlockNotFound, str, nil) + } + + return blockRow, nil +} + +// FetchBlockHeader returns the raw serialized bytes for the block header +// identified by the given hash. The raw bytes are in the format returned by +// Serialize on a wire.BlockHeader. +// +// Returns the following errors as required by the interface contract: +// - ErrBlockNotFound if the requested block hash does not exist +// - ErrTxClosed if the transaction has already been closed +// - ErrCorruption if the database has somehow become corrupted +// +// NOTE: The data returned by this function is only valid during a +// database transaction. Attempting to access it after a transaction +// has ended results in undefined behavior. This constraint prevents +// additional data copies and allows support for memory-mapped database +// implementations. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) FetchBlockHeader(hash *wire.ShaHash) ([]byte, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + // When the block is pending to be written on commit return the bytes + // from there. + if idx, exists := tx.pendingBlocks[*hash]; exists { + blockBytes := tx.pendingBlockData[idx].bytes + return blockBytes[0:blockHdrSize:blockHdrSize], nil + } + + // Fetch the block index row and slice off the header. Notice the use + // of the cap on the subslice to prevent the caller from accidentally + // appending into the db data. 
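+	// Because the full slice expression below sets the capacity to the end
+	// of the header, a hypothetical caller that appends to the result:
+	//
+	//   hdr, _ := tx.FetchBlockHeader(hash)
+	//   hdr = append(hdr, 0x00) // reallocates instead of growing in place
+	//
+	// is forced to copy into a new backing array rather than writing into
+	// the memory that backs the database row.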
+ blockRow, err := tx.fetchBlockRow(hash) + if err != nil { + return nil, err + } + endOffset := blockLocSize + blockHdrSize + return blockRow[blockLocSize:endOffset:endOffset], nil +} + +// FetchBlockHeaders returns the raw serialized bytes for the block headers +// identified by the given hashes. The raw bytes are in the format returned by +// Serialize on a wire.BlockHeader. +// +// Returns the following errors as required by the interface contract: +// - ErrBlockNotFound if the any of the requested block hashes do not exist +// - ErrTxClosed if the transaction has already been closed +// - ErrCorruption if the database has somehow become corrupted +// +// NOTE: The data returned by this function is only valid during a database +// transaction. Attempting to access it after a transaction has ended results +// in undefined behavior. This constraint prevents additional data copies and +// allows support for memory-mapped database implementations. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) FetchBlockHeaders(hashes []wire.ShaHash) ([][]byte, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + // NOTE: This could check for the existence of all blocks before loading + // any of the headers which would be faster in the failure case, however + // callers will not typically be calling this function with invalid + // values, so optimize for the common case. + + // Load the headers. + headers := make([][]byte, len(hashes)) + for i := range hashes { + hash := &hashes[i] + + // When the block is pending to be written on commit return the + // bytes from there. + if idx, exists := tx.pendingBlocks[*hash]; exists { + blkBytes := tx.pendingBlockData[idx].bytes + headers[i] = blkBytes[0:blockHdrSize:blockHdrSize] + continue + } + + // Fetch the block index row and slice off the header. Notice + // the use of the cap on the subslice to prevent the caller + // from accidentally appending into the db data. + blockRow, err := tx.fetchBlockRow(hash) + if err != nil { + return nil, err + } + endOffset := blockLocSize + blockHdrSize + headers[i] = blockRow[blockLocSize:endOffset:endOffset] + } + + return headers, nil +} + +// FetchBlock returns the raw serialized bytes for the block identified by the +// given hash. The raw bytes are in the format returned by Serialize on a +// wire.MsgBlock. +// +// Returns the following errors as required by the interface contract: +// - ErrBlockNotFound if the requested block hash does not exist +// - ErrTxClosed if the transaction has already been closed +// - ErrCorruption if the database has somehow become corrupted +// +// In addition, returns ErrDriverSpecific if any failures occur when reading the +// block files. +// +// NOTE: The data returned by this function is only valid during a database +// transaction. Attempting to access it after a transaction has ended results +// in undefined behavior. This constraint prevents additional data copies and +// allows support for memory-mapped database implementations. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) FetchBlock(hash *wire.ShaHash) ([]byte, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + // When the block is pending to be written on commit return the bytes + // from there. 
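+	// This makes a block stored earlier in the same read-write transaction
+	// immediately visible to the caller.  A minimal sketch (error handling
+	// elided):
+	//
+	//   _ = tx.StoreBlock(block)
+	//   raw, _ := tx.FetchBlock(block.Sha()) // served from the pending data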
+ if idx, exists := tx.pendingBlocks[*hash]; exists { + return tx.pendingBlockData[idx].bytes, nil + } + + // Lookup the location of the block in the files from the block index. + blockRow, err := tx.fetchBlockRow(hash) + if err != nil { + return nil, err + } + location := deserializeBlockLoc(blockRow) + + // Read the block from the appropriate location. The function also + // performs a checksum over the data to detect data corruption. + blockBytes, err := tx.db.store.readBlock(hash, location) + if err != nil { + return nil, err + } + + return blockBytes, nil +} + +// FetchBlocks returns the raw serialized bytes for the blocks identified by the +// given hashes. The raw bytes are in the format returned by Serialize on a +// wire.MsgBlock. +// +// Returns the following errors as required by the interface contract: +// - ErrBlockNotFound if any of the requested block hashed do not exist +// - ErrTxClosed if the transaction has already been closed +// - ErrCorruption if the database has somehow become corrupted +// +// In addition, returns ErrDriverSpecific if any failures occur when reading the +// block files. +// +// NOTE: The data returned by this function is only valid during a database +// transaction. Attempting to access it after a transaction has ended results +// in undefined behavior. This constraint prevents additional data copies and +// allows support for memory-mapped database implementations. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) FetchBlocks(hashes []wire.ShaHash) ([][]byte, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + // NOTE: This could check for the existence of all blocks before loading + // any of them which would be faster in the failure case, however + // callers will not typically be calling this function with invalid + // values, so optimize for the common case. + + // Load the blocks. + blocks := make([][]byte, len(hashes)) + for i := range hashes { + var err error + blocks[i], err = tx.FetchBlock(&hashes[i]) + if err != nil { + return nil, err + } + } + + return blocks, nil +} + +// fetchPendingRegion attempts to fetch the provided region from any block which +// are pending to be written on commit. It will return nil for the byte slice +// when the region references a block which is not pending. When the region +// does reference a pending block, it is bounds checked and returns +// ErrBlockRegionInvalid if invalid. +func (tx *transaction) fetchPendingRegion(region *database.BlockRegion) ([]byte, error) { + // Nothing to do if the block is not pending to be written on commit. + idx, exists := tx.pendingBlocks[*region.Hash] + if !exists { + return nil, nil + } + + // Ensure the region is within the bounds of the block. + blockBytes := tx.pendingBlockData[idx].bytes + blockLen := uint32(len(blockBytes)) + endOffset := region.Offset + region.Len + if endOffset < region.Offset || endOffset > blockLen { + str := fmt.Sprintf("block %s region offset %d, length %d "+ + "exceeds block length of %d", region.Hash, + region.Offset, region.Len, blockLen) + return nil, makeDbErr(database.ErrBlockRegionInvalid, str, nil) + } + + // Return the bytes from the pending block. + return blockBytes[region.Offset:endOffset:endOffset], nil +} + +// FetchBlockRegion returns the raw serialized bytes for the given block region. +// +// For example, it is possible to directly extract Bitcoin transactions and/or +// scripts from a block with this function. 
Depending on the backend +// implementation, this can provide significant savings by avoiding the need to +// load entire blocks. +// +// The raw bytes are in the format returned by Serialize on a wire.MsgBlock and +// the Offset field in the provided BlockRegion is zero-based and relative to +// the start of the block (byte 0). +// +// Returns the following errors as required by the interface contract: +// - ErrBlockNotFound if the requested block hash does not exist +// - ErrBlockRegionInvalid if the region exceeds the bounds of the associated +// block +// - ErrTxClosed if the transaction has already been closed +// - ErrCorruption if the database has somehow become corrupted +// +// In addition, returns ErrDriverSpecific if any failures occur when reading the +// block files. +// +// NOTE: The data returned by this function is only valid during a database +// transaction. Attempting to access it after a transaction has ended results +// in undefined behavior. This constraint prevents additional data copies and +// allows support for memory-mapped database implementations. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) FetchBlockRegion(region *database.BlockRegion) ([]byte, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + // When the block is pending to be written on commit return the bytes + // from there. + if tx.pendingBlocks != nil { + regionBytes, err := tx.fetchPendingRegion(region) + if err != nil { + return nil, err + } + if regionBytes != nil { + return regionBytes, nil + } + } + + // Lookup the location of the block in the files from the block index. + blockRow, err := tx.fetchBlockRow(region.Hash) + if err != nil { + return nil, err + } + location := deserializeBlockLoc(blockRow) + + // Ensure the region is within the bounds of the block. + endOffset := region.Offset + region.Len + if endOffset < region.Offset || endOffset > location.blockLen { + str := fmt.Sprintf("block %s region offset %d, length %d "+ + "exceeds block length of %d", region.Hash, + region.Offset, region.Len, location.blockLen) + return nil, makeDbErr(database.ErrBlockRegionInvalid, str, nil) + + } + + // Read the region from the appropriate disk block file. + regionBytes, err := tx.db.store.readBlockRegion(location, region.Offset, + region.Len) + if err != nil { + return nil, err + } + + return regionBytes, nil +} + +// FetchBlockRegions returns the raw serialized bytes for the given block +// regions. +// +// For example, it is possible to directly extract Bitcoin transactions and/or +// scripts from various blocks with this function. Depending on the backend +// implementation, this can provide significant savings by avoiding the need to +// load entire blocks. +// +// The raw bytes are in the format returned by Serialize on a wire.MsgBlock and +// the Offset fields in the provided BlockRegions are zero-based and relative to +// the start of the block (byte 0). +// +// Returns the following errors as required by the interface contract: +// - ErrBlockNotFound if any of the request block hashes do not exist +// - ErrBlockRegionInvalid if one or more region exceed the bounds of the +// associated block +// - ErrTxClosed if the transaction has already been closed +// - ErrCorruption if the database has somehow become corrupted +// +// In addition, returns ErrDriverSpecific if any failures occur when reading the +// block files. 
+// +// NOTE: The data returned by this function is only valid during a database +// transaction. Attempting to access it after a transaction has ended results +// in undefined behavior. This constraint prevents additional data copies and +// allows support for memory-mapped database implementations. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) FetchBlockRegions(regions []database.BlockRegion) ([][]byte, error) { + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return nil, err + } + + // NOTE: This could check for the existence of all blocks before + // deserializing the locations and building up the fetch list which + // would be faster in the failure case, however callers will not + // typically be calling this function with invalid values, so optimize + // for the common case. + + // NOTE: A potential optimization here would be to combine adjacent + // regions to reduce the number of reads. + + // In order to improve efficiency of loading the bulk data, first grab + // the block location for all of the requested block hashes and sort + // the reads by filenum:offset so that all reads are grouped by file + // and linear within each file. This can result in quite a significant + // performance increase depending on how spread out the requested hashes + // are by reducing the number of file open/closes and random accesses + // needed. The fetchList is intentionally allocated with a cap because + // some of the regions might be fetched from the pending blocks and + // hence there is no need to fetch those from disk. + blockRegions := make([][]byte, len(regions)) + fetchList := make([]bulkFetchData, 0, len(regions)) + for i := range regions { + region := ®ions[i] + + // When the block is pending to be written on commit grab the + // bytes from there. + if tx.pendingBlocks != nil { + regionBytes, err := tx.fetchPendingRegion(region) + if err != nil { + return nil, err + } + if regionBytes != nil { + blockRegions[i] = regionBytes + continue + } + } + + // Lookup the location of the block in the files from the block + // index. + blockRow, err := tx.fetchBlockRow(region.Hash) + if err != nil { + return nil, err + } + location := deserializeBlockLoc(blockRow) + + // Ensure the region is within the bounds of the block. + endOffset := region.Offset + region.Len + if endOffset < region.Offset || endOffset > location.blockLen { + str := fmt.Sprintf("block %s region offset %d, length "+ + "%d exceeds block length of %d", region.Hash, + region.Offset, region.Len, location.blockLen) + return nil, makeDbErr(database.ErrBlockRegionInvalid, str, nil) + } + + fetchList = append(fetchList, bulkFetchData{&location, i}) + } + sort.Sort(bulkFetchDataSorter(fetchList)) + + // Read all of the regions in the fetch list and set the results. + for i := range fetchList { + fetchData := &fetchList[i] + ri := fetchData.replyIndex + region := ®ions[ri] + location := fetchData.blockLocation + regionBytes, err := tx.db.store.readBlockRegion(*location, + region.Offset, region.Len) + if err != nil { + return nil, err + } + blockRegions[ri] = regionBytes + } + + return blockRegions, nil +} + +// close marks the transaction closed then releases any pending data, the +// underlying snapshot, the transaction read lock, and the write lock when the +// transaction is writable. +func (tx *transaction) close() { + tx.closed = true + + // Clear pending blocks that would have been written on commit. 
+ tx.pendingBlocks = nil + tx.pendingBlockData = nil + + // Clear pending keys that would have been written or deleted on commit. + tx.pendingKeys.Reset() + tx.pendingRemove = nil + + // Release the snapshot. + if tx.snapshot != nil { + tx.snapshot.Release() + tx.snapshot = nil + } + + tx.db.closeLock.RUnlock() + + // Release the writer lock for writable transactions to unblock any + // other write transaction which are possibly waiting. + if tx.writable { + tx.db.writeLock.Unlock() + } +} + +// serializeBlockRow serializes a block row into a format suitable for storage +// into the block index. +func serializeBlockRow(blockLoc blockLocation, blockHdr []byte) []byte { + // The serialized block index row format is: + // + // [0:blockLocSize] Block location + // [blockLocSize:blockLocSize+blockHdrSize] Block header + serializedRow := make([]byte, blockLocSize+blockHdrSize) + copy(serializedRow, serializeBlockLoc(blockLoc)) + copy(serializedRow[blockHdrOffset:], blockHdr) + return serializedRow +} + +// writePendingAndCommit writes pending block data to the flat block files, +// updates the metadata with their locations as well as the new current write +// location, and commits the metadata to the underlying database. It also +// properly handles rollback in the case of failures. +// +// This function MUST only be called when there is pending data to be written. +func (tx *transaction) writePendingAndCommit() error { + // Save the current block store write position for potential rollback. + // These variables are only updated here in this function and there can + // only be one write transaction active at a time, so it's safe to store + // them for potential rollback. + wc := tx.db.store.writeCursor + wc.RLock() + oldBlkFileNum := wc.curFileNum + oldBlkOffset := wc.curOffset + wc.RUnlock() + + // rollback is a closure that is used to rollback all writes to the + // block files. + rollback := func() { + // Rollback any modifications made to the block files if needed. + tx.db.store.handleRollback(oldBlkFileNum, oldBlkOffset) + } + + // Loop through all of the pending blocks to store and write them. + for _, blockData := range tx.pendingBlockData { + log.Tracef("Storing block %s", blockData.hash) + location, err := tx.db.store.writeBlock(blockData.bytes) + if err != nil { + rollback() + return err + } + + // Add a record in the block index for the block. The record + // includes the location information needed to locate the block + // on the filesystem as well as the block header since they are + // so commonly needed. + blockHdr := blockData.bytes[0:blockHdrSize] + blockRow := serializeBlockRow(location, blockHdr) + err = tx.blockIdxBucket.Put(blockData.hash[:], blockRow) + if err != nil { + rollback() + return err + } + } + + // Update the metadata for the current write file and offset. + writeRow := serializeWriteRow(wc.curFileNum, wc.curOffset) + if err := tx.metaBucket.Put(writeLocKeyName, writeRow); err != nil { + rollback() + return convertErr("failed to store write cursor", err) + } + + // Perform all leveldb update operations using a batch for atomicity. 
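+	// For example, if the transaction put keys k1 and k2 and deleted k3,
+	// the batch built below is equivalent to:
+	//
+	//   batch.Put(k1, v1)
+	//   batch.Put(k2, v2)
+	//   batch.Delete(k3)
+	//
+	// and is applied with a single Write call, so either all of the pending
+	// metadata changes are persisted or none of them are.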
+ batch := new(leveldb.Batch) + tx.pendingKeys.ForEach(func(k, v []byte) bool { + batch.Put(k, v) + return true + }) + for k := range tx.pendingRemove { + batch.Delete([]byte(k)) + } + if err := tx.db.ldb.Write(batch, nil); err != nil { + rollback() + return convertErr("failed to commit transaction", err) + } + + return nil +} + +// Commit commits all changes that have been made through the root bucket and +// all of its sub-buckets to persistent storage. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) Commit() error { + // Prevent commits on managed transactions. + if tx.managed { + tx.close() + panic("managed transaction commit not allowed") + } + + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return err + } + + // Regardless of whether the commit succeeds, the transaction is closed + // on return. + defer tx.close() + + // Ensure the transaction is writable. + if !tx.writable { + str := "Commit requires a writable database transaction" + return makeDbErr(database.ErrTxNotWritable, str, nil) + } + + // Write pending data. The function will rollback if any errors occur. + return tx.writePendingAndCommit() +} + +// Rollback undoes all changes that have been made to the root bucket and all of +// its sub-buckets. +// +// This function is part of the database.Tx interface implementation. +func (tx *transaction) Rollback() error { + // Prevent rollbacks on managed transactions. + if tx.managed { + tx.close() + panic("managed transaction rollback not allowed") + } + + // Ensure transaction state is valid. + if err := tx.checkClosed(); err != nil { + return err + } + + tx.close() + return nil +} + +// db represents a collection of namespaces which are persisted and implements +// the database.DB interface. All database access is performed through +// transactions which are obtained through the specific Namespace. +type db struct { + writeLock sync.Mutex // Limit to one write transaction at a time. + closeLock sync.RWMutex // Make database close block while txns active. + closed bool // Is the database closed? + ldb *leveldb.DB // The underlying leveldb DB for metadata. + store *blockStore // Handles read/writing blocks to flat files. +} + +// Enforce db implements the database.DB interface. +var _ database.DB = (*db)(nil) + +// Type returns the database driver type the current database instance was +// created with. +// +// This function is part of the database.DB interface implementation. +func (db *db) Type() string { + return dbType +} + +// begin is the implementation function for the Begin database method. See its +// documentation for more details. +// +// This function is only separate because it returns the internal transaction +// which is used by the managed transaction code while the database method +// returns the interface. +func (db *db) begin(writable bool) (*transaction, error) { + // Whenever a new writable transaction is started, grab the write lock + // to ensure only a single write transaction can be active at the same + // time. This lock will not be released until the transaction is + // closed (via Rollback or Commit). + if writable { + db.writeLock.Lock() + } + + // Whenever a new transaction is started, grab a read lock against the + // database to ensure Close will wait for the transaction to finish. + // This lock will not be released until the transaction is closed (via + // Rollback or Commit). 
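+	// For example, given the two locks:
+	//
+	//   tx1, _ := db.Begin(true)  // holds writeLock and a closeLock read lock
+	//   tx2, _ := db.Begin(false) // holds a closeLock read lock only
+	//
+	// a concurrent db.Close blocks until both transactions are closed, and a
+	// second Begin(true) blocks until tx1 is closed.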
+ db.closeLock.RLock() + if db.closed { + db.closeLock.RUnlock() + if writable { + db.writeLock.Unlock() + } + return nil, makeDbErr(database.ErrDbNotOpen, errDbNotOpenStr, + nil) + } + + snapshot, err := db.ldb.GetSnapshot() + if err != nil { + db.closeLock.RUnlock() + if writable { + db.writeLock.Unlock() + } + + str := "failed to open transaction" + return nil, convertErr(str, err) + } + + // The metadata and block index buckets are internal-only buckets, so + // they have defined IDs. + tx := &transaction{ + writable: writable, + db: db, + snapshot: snapshot, + pendingKeys: treap.NewMutable(), + } + tx.metaBucket = &bucket{tx: tx, id: metadataBucketID} + tx.blockIdxBucket = &bucket{tx: tx, id: blockIdxBucketID} + return tx, nil +} + +// Begin starts a transaction which is either read-only or read-write depending +// on the specified flag. Multiple read-only transactions can be started +// simultaneously while only a single read-write transaction can be started at a +// time. The call will block when starting a read-write transaction when one is +// already open. +// +// NOTE: The transaction must be closed by calling Rollback or Commit on it when +// it is no longer needed. Failure to do so will result in unclaimed memory. +// +// This function is part of the database.DB interface implementation. +func (db *db) Begin(writable bool) (database.Tx, error) { + return db.begin(writable) +} + +// rollbackOnPanic rolls the passed transaction back if the code in the calling +// function panics. This is needed since the mutex on a transaction must be +// released and a panic in called code would prevent that from happening. +// +// NOTE: This can only be handled manually for managed transactions since they +// control the life-cycle of the transaction. As the documentation on Begin +// calls out, callers opting to use manual transactions will have to ensure the +// transaction is rolled back on panic if it desires that functionality as well +// or the database will fail to close since the read-lock will never be +// released. +func rollbackOnPanic(tx *transaction) { + if err := recover(); err != nil { + tx.managed = false + _ = tx.Rollback() + panic(err) + } +} + +// View invokes the passed function in the context of a managed read-only +// transaction with the root bucket for the namespace. Any errors returned from +// the user-supplied function are returned from this function. +// +// This function is part of the database.DB interface implementation. +func (db *db) View(fn func(database.Tx) error) error { + // Start a read-only transaction. + tx, err := db.begin(false) + if err != nil { + return err + } + + // Since the user-provided function might panic, ensure the transaction + // releases all mutexes and resources. There is no guarantee the caller + // won't use recover and keep going. Thus, the database must still be + // in a usable state on panics due to caller issues. + defer rollbackOnPanic(tx) + + tx.managed = true + err = fn(tx) + tx.managed = false + if err != nil { + // The error is ignored here because nothing was written yet + // and regardless of a rollback failure, the tx is closed now + // anyways. + _ = tx.Rollback() + return err + } + + return tx.Rollback() +} + +// Update invokes the passed function in the context of a managed read-write +// transaction with the root bucket for the namespace. Any errors returned from +// the user-supplied function will cause the transaction to be rolled back and +// are returned from this function. 
Otherwise, the transaction is committed +// when the user-supplied function returns a nil error. +// +// This function is part of the database.DB interface implementation. +func (db *db) Update(fn func(database.Tx) error) error { + // Start a read-write transaction. + tx, err := db.begin(true) + if err != nil { + return err + } + + // Since the user-provided function might panic, ensure the transaction + // releases all mutexes and resources. There is no guarantee the caller + // won't use recover and keep going. Thus, the database must still be + // in a usable state on panics due to caller issues. + defer rollbackOnPanic(tx) + + tx.managed = true + err = fn(tx) + tx.managed = false + if err != nil { + // The error is ignored here because nothing was written yet + // and regardless of a rollback failure, the tx is closed now + // anyways. + _ = tx.Rollback() + return err + } + + return tx.Commit() +} + +// Close cleanly shuts down the database and syncs all data. Any data in +// database transactions which have not been committed will be lost, so it is +// important to ensure all transactions are finalized prior to calling this +// function if that data is intended to be stored. +// +// This function is part of the database.DB interface implementation. +func (db *db) Close() error { + // Since all transactions have a read lock on this mutex, this will + // cause Close to wait for all readers to complete. + db.closeLock.Lock() + defer db.closeLock.Unlock() + + if db.closed { + return makeDbErr(database.ErrDbNotOpen, errDbNotOpenStr, nil) + } + db.closed = true + + // NOTE: Since the above lock waits for all transactions to finish and + // prevents any new ones from being started, it is safe to clear all + // state without the individual locks. + + // Close any open flat files that house the blocks. + wc := db.store.writeCursor + if wc.curFile.file != nil { + _ = wc.curFile.file.Close() + wc.curFile.file = nil + } + for _, blockFile := range db.store.openBlockFiles { + _ = blockFile.file.Close() + } + db.store.openBlockFiles = nil + db.store.openBlocksLRU.Init() + db.store.fileNumToLRUElem = nil + + if err := db.ldb.Close(); err != nil { + str := "failed to close underlying leveldb database" + return convertErr(str, err) + } + + return nil +} + +// filesExists reports whether the named file or directory exists. +func fileExists(name string) bool { + if _, err := os.Stat(name); err != nil { + if os.IsNotExist(err) { + return false + } + } + return true +} + +// initDB creates the initial buckets and values used by the package. This is +// mainly in a separate function for testing purposes. +func initDB(ldb *leveldb.DB) error { + // The starting block file write cursor location is file num 0, offset + // 0. + batch := new(leveldb.Batch) + batch.Put(bucketizedKey(metadataBucketID, writeLocKeyName), + serializeWriteRow(0, 0)) + + // Create block index bucket and set the current bucket id. + // + // NOTE: Since buckets are virtualized through the use of prefixes, + // there is no need to store the bucket index data for the metadata + // bucket in the database. However, the first bucket ID to use does + // need to account for it to ensure there are no key collisions. + batch.Put(bucketIndexKey(metadataBucketID, blockIdxBucketName), + blockIdxBucketID[:]) + batch.Put(curBucketIDKeyName, blockIdxBucketID[:]) + + // Write everything as a single batch. 
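+	// Conceptually, the metadata keyspace immediately after initialization
+	// therefore contains:
+	//
+	//   <metadatabucketid> || writeLocKeyName -> serializeWriteRow(0, 0)
+	//   bucketIndexPrefix || <metadatabucketid> || blockIdxBucketName -> blockIdxBucketID
+	//   curBucketIDKeyName -> blockIdxBucketID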
+ if err := ldb.Write(batch, nil); err != nil { + str := fmt.Sprintf("failed to initialize metadata database: %v", + err) + return convertErr(str, err) + } + + return nil +} + +// openDB opens the database at the provided path. database.ErrDbDoesNotExist +// is returned if the database doesn't exist and the create flag is not set. +func openDB(dbPath string, network wire.BitcoinNet, create bool) (database.DB, error) { + // Error if the database doesn't exist and the create flag is not set. + metadataDbPath := filepath.Join(dbPath, metadataDbName) + dbExists := fileExists(metadataDbPath) + if !create && !dbExists { + str := fmt.Sprintf("database %q does not exist", metadataDbPath) + return nil, makeDbErr(database.ErrDbDoesNotExist, str, nil) + } + + // Ensure the full path to the database exists. + if !dbExists { + // The error can be ignored here since the call to + // leveldb.OpenFile will fail if the directory couldn't be + // created. + _ = os.MkdirAll(dbPath, 0700) + } + + // Open the metadata database (will create it if needed). + opts := opt.Options{ + ErrorIfExist: create, + Strict: opt.DefaultStrict, + Compression: opt.NoCompression, + Filter: filter.NewBloomFilter(10), + } + ldb, err := leveldb.OpenFile(metadataDbPath, &opts) + if err != nil { + return nil, convertErr(err.Error(), err) + } + + // Create the block store which includes scanning the existing flat + // block files to find what the current write cursor position is + // according to the data that is actually on disk. + store := newBlockStore(dbPath, network) + pdb := &db{ldb: ldb, store: store} + + // Perform any reconciliation needed between the block and metadata as + // well as database initialization, if needed. + return reconcileDB(pdb, create) +} diff --git a/database2/ffldb/doc.go b/database2/ffldb/doc.go new file mode 100644 index 00000000..96a2992c --- /dev/null +++ b/database2/ffldb/doc.go @@ -0,0 +1,29 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package ffldb implements a driver for the database package that uses leveldb +for the backing metadata and flat files for block storage. + +This driver is the recommended driver for use with btcd. It makes use leveldb +for the metadata, flat files for block storage, and checksums in key areas to +ensure data integrity. + +Usage + +This package is a driver to the database package and provides the database type +of "ffldb". The parameters the Open and Create functions take are the +database path as a string and the block network: + + db, err := database.Open("ffldb", "path/to/database", wire.MainNet) + if err != nil { + // Handle error + } + + db, err := database.Create("ffldb", "path/to/database", wire.MainNet) + if err != nil { + // Handle error + } +*/ +package ffldb diff --git a/database2/ffldb/driver.go b/database2/ffldb/driver.go new file mode 100644 index 00000000..9d6d59d3 --- /dev/null +++ b/database2/ffldb/driver.go @@ -0,0 +1,84 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package ffldb + +import ( + "fmt" + + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btclog" +) + +var log = btclog.Disabled + +const ( + dbType = "ffldb" +) + +// parseArgs parses the arguments from the database Open/Create methods. 
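+// For example, a call such as database.Open("ffldb", "/path/to/db",
+// wire.MainNet) reaches this function with funcName set to "Open" and args
+// containing the path string and the wire.MainNet network value, both of
+// which are type-checked and returned below.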
+func parseArgs(funcName string, args ...interface{}) (string, wire.BitcoinNet, error) { + if len(args) != 2 { + return "", 0, fmt.Errorf("invalid arguments to %s.%s -- "+ + "expected database path and block network", dbType, + funcName) + } + + dbPath, ok := args[0].(string) + if !ok { + return "", 0, fmt.Errorf("first argument to %s.%s is invalid -- "+ + "expected database path string", dbType, funcName) + } + + network, ok := args[1].(wire.BitcoinNet) + if !ok { + return "", 0, fmt.Errorf("second argument to %s.%s is invalid -- "+ + "expected block network", dbType, funcName) + } + + return dbPath, network, nil +} + +// openDBDriver is the callback provided during driver registration that opens +// an existing database for use. +func openDBDriver(args ...interface{}) (database.DB, error) { + dbPath, network, err := parseArgs("Open", args...) + if err != nil { + return nil, err + } + + return openDB(dbPath, network, false) +} + +// createDBDriver is the callback provided during driver registration that +// creates, initializes, and opens a database for use. +func createDBDriver(args ...interface{}) (database.DB, error) { + dbPath, network, err := parseArgs("Create", args...) + if err != nil { + return nil, err + } + + return openDB(dbPath, network, true) +} + +// useLogger is the callback provided during driver registration that sets the +// current logger to the provided one. +func useLogger(logger btclog.Logger) { + log = logger +} + +func init() { + // Register the driver. + driver := database.Driver{ + DbType: dbType, + Create: createDBDriver, + Open: openDBDriver, + UseLogger: useLogger, + } + if err := database.RegisterDriver(driver); err != nil { + panic(fmt.Sprintf("Failed to regiser database driver '%s': %v", + dbType, err)) + } +} diff --git a/database2/ffldb/driver_test.go b/database2/ffldb/driver_test.go new file mode 100644 index 00000000..3ed827a5 --- /dev/null +++ b/database2/ffldb/driver_test.go @@ -0,0 +1,288 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package ffldb_test + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/btcsuite/btcd/chaincfg" + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/database2/ffldb" + "github.com/btcsuite/btcutil" +) + +// dbType is the database type name for this driver. +const dbType = "ffldb" + +// TestCreateOpenFail ensures that errors related to creating and opening a +// database are handled properly. +func TestCreateOpenFail(t *testing.T) { + t.Parallel() + + // Ensure that attempting to open a database that doesn't exist returns + // the expected error. + wantErrCode := database.ErrDbDoesNotExist + _, err := database.Open(dbType, "noexist", blockDataNet) + if !checkDbError(t, "Open", err, wantErrCode) { + return + } + + // Ensure that attempting to open a database with the wrong number of + // parameters returns the expected error. + wantErr := fmt.Errorf("invalid arguments to %s.Open -- expected "+ + "database path and block network", dbType) + _, err = database.Open(dbType, 1, 2, 3) + if err.Error() != wantErr.Error() { + t.Errorf("Open: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to open a database with an invalid type for + // the first parameter returns the expected error. 
+ wantErr = fmt.Errorf("first argument to %s.Open is invalid -- "+ + "expected database path string", dbType) + _, err = database.Open(dbType, 1, blockDataNet) + if err.Error() != wantErr.Error() { + t.Errorf("Open: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to open a database with an invalid type for + // the second parameter returns the expected error. + wantErr = fmt.Errorf("second argument to %s.Open is invalid -- "+ + "expected block network", dbType) + _, err = database.Open(dbType, "noexist", "invalid") + if err.Error() != wantErr.Error() { + t.Errorf("Open: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to create a database with the wrong number of + // parameters returns the expected error. + wantErr = fmt.Errorf("invalid arguments to %s.Create -- expected "+ + "database path and block network", dbType) + _, err = database.Create(dbType, 1, 2, 3) + if err.Error() != wantErr.Error() { + t.Errorf("Create: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to create a database with an invalid type for + // the first parameter returns the expected error. + wantErr = fmt.Errorf("first argument to %s.Create is invalid -- "+ + "expected database path string", dbType) + _, err = database.Create(dbType, 1, blockDataNet) + if err.Error() != wantErr.Error() { + t.Errorf("Create: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure that attempting to create a database with an invalid type for + // the second parameter returns the expected error. + wantErr = fmt.Errorf("second argument to %s.Create is invalid -- "+ + "expected block network", dbType) + _, err = database.Create(dbType, "noexist", "invalid") + if err.Error() != wantErr.Error() { + t.Errorf("Create: did not receive expected error - got %v, "+ + "want %v", err, wantErr) + return + } + + // Ensure operations against a closed database return the expected + // error. + dbPath := filepath.Join(os.TempDir(), "ffldb-createfail") + _ = os.RemoveAll(dbPath) + db, err := database.Create(dbType, dbPath, blockDataNet) + if err != nil { + t.Errorf("Create: unexpected error: %v", err) + return + } + defer os.RemoveAll(dbPath) + db.Close() + + wantErrCode = database.ErrDbNotOpen + err = db.View(func(tx database.Tx) error { + return nil + }) + if !checkDbError(t, "View", err, wantErrCode) { + return + } + + wantErrCode = database.ErrDbNotOpen + err = db.Update(func(tx database.Tx) error { + return nil + }) + if !checkDbError(t, "Update", err, wantErrCode) { + return + } + + wantErrCode = database.ErrDbNotOpen + _, err = db.Begin(false) + if !checkDbError(t, "Begin(false)", err, wantErrCode) { + return + } + + wantErrCode = database.ErrDbNotOpen + _, err = db.Begin(true) + if !checkDbError(t, "Begin(true)", err, wantErrCode) { + return + } + + wantErrCode = database.ErrDbNotOpen + err = db.Close() + if !checkDbError(t, "Close", err, wantErrCode) { + return + } +} + +// TestPersistence ensures that values stored are still valid after closing and +// reopening the database. +func TestPersistence(t *testing.T) { + t.Parallel() + + // Create a new database to run tests against. 
+ dbPath := filepath.Join(os.TempDir(), "ffldb-persistencetest") + _ = os.RemoveAll(dbPath) + db, err := database.Create(dbType, dbPath, blockDataNet) + if err != nil { + t.Errorf("Failed to create test database (%s) %v", dbType, err) + return + } + defer os.RemoveAll(dbPath) + defer db.Close() + + // Create a bucket, put some values into it, and store a block so they + // can be tested for existence on re-open. + bucket1Key := []byte("bucket1") + storeValues := map[string]string{ + "b1key1": "foo1", + "b1key2": "foo2", + "b1key3": "foo3", + } + genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + genesisHash := chaincfg.MainNetParams.GenesisHash + err = db.Update(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1, err := metadataBucket.CreateBucket(bucket1Key) + if err != nil { + return fmt.Errorf("CreateBucket: unexpected error: %v", + err) + } + + for k, v := range storeValues { + err := bucket1.Put([]byte(k), []byte(v)) + if err != nil { + return fmt.Errorf("Put: unexpected error: %v", + err) + } + } + + if err := tx.StoreBlock(genesisBlock); err != nil { + return fmt.Errorf("StoreBlock: unexpected error: %v", + err) + } + + return nil + }) + if err != nil { + t.Errorf("Update: unexpected error: %v", err) + return + } + + // Close and reopen the database to ensure the values persist. + db.Close() + db, err = database.Open(dbType, dbPath, blockDataNet) + if err != nil { + t.Errorf("Failed to open test database (%s) %v", dbType, err) + return + } + defer db.Close() + + // Ensure the values previously stored in the 3rd namespace still exist + // and are correct. + err = db.View(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Key) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + for k, v := range storeValues { + gotVal := bucket1.Get([]byte(k)) + if !reflect.DeepEqual(gotVal, []byte(v)) { + return fmt.Errorf("Get: key '%s' does not "+ + "match expected value - got %s, want %s", + k, gotVal, v) + } + } + + genesisBlockBytes, _ := genesisBlock.Bytes() + gotBytes, err := tx.FetchBlock(genesisHash) + if err != nil { + return fmt.Errorf("FetchBlock: unexpected error: %v", + err) + } + if !reflect.DeepEqual(gotBytes, genesisBlockBytes) { + return fmt.Errorf("FetchBlock: stored block mismatch") + } + + return nil + }) + if err != nil { + t.Errorf("View: unexpected error: %v", err) + return + } +} + +// TestInterface performs all interfaces tests for this database driver. +func TestInterface(t *testing.T) { + t.Parallel() + + // Create a new database to run tests against. + dbPath := filepath.Join(os.TempDir(), "ffldb-interfacetest") + _ = os.RemoveAll(dbPath) + db, err := database.Create(dbType, dbPath, blockDataNet) + if err != nil { + t.Errorf("Failed to create test database (%s) %v", dbType, err) + return + } + defer os.RemoveAll(dbPath) + defer db.Close() + + // Ensure the driver type is the expected value. + gotDbType := db.Type() + if gotDbType != dbType { + t.Errorf("Type: unepxected driver type - got %v, want %v", + gotDbType, dbType) + return + } + + // Run all of the interface tests against the database. + runtime.GOMAXPROCS(runtime.NumCPU()) + + // Change the maximum file size to a small value to force multiple flat + // files with the test data set. 
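+	// With the 2048-byte limit used below, the 256-block test data set
+	// spans several flat files, so the block store's handling of multiple
+	// files is exercised as well.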
+ ffldb.TstRunWithMaxBlockFileSize(db, 2048, func() { + testInterface(t, db) + }) +} diff --git a/database2/ffldb/export_test.go b/database2/ffldb/export_test.go new file mode 100644 index 00000000..eb30141e --- /dev/null +++ b/database2/ffldb/export_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +This test file is part of the ffldb package rather than than the ffldb_test +package so it can bridge access to the internals to properly test cases which +are either not possible or can't reliably be tested via the public interface. +The functions are only exported while the tests are being run. +*/ + +package ffldb + +import database "github.com/btcsuite/btcd/database2" + +// TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed +// file size for the database set to the provided value. The value will be set +// back to the original value upon completion. +func TstRunWithMaxBlockFileSize(idb database.DB, size uint32, fn func()) { + ffldb := idb.(*db) + origSize := ffldb.store.maxBlockFileSize + + ffldb.store.maxBlockFileSize = size + fn() + ffldb.store.maxBlockFileSize = origSize +} diff --git a/database2/ffldb/interface_test.go b/database2/ffldb/interface_test.go new file mode 100644 index 00000000..125fd7c7 --- /dev/null +++ b/database2/ffldb/interface_test.go @@ -0,0 +1,2322 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// This file intended to be copied into each backend driver directory. Each +// driver should have their own driver_test.go file which creates a database and +// invokes the testInterface function in this file to ensure the driver properly +// implements the interface. +// +// NOTE: When copying this file into the backend driver folder, the package name +// will need to be changed accordingly. + +package ffldb_test + +import ( + "bytes" + "compress/bzip2" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "sync/atomic" + "testing" + "time" + + "github.com/btcsuite/btcd/chaincfg" + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +var ( + // blockDataNet is the expected network in the test block data. + blockDataNet = wire.MainNet + + // blockDataFile is the path to a file containing the first 256 blocks + // of the block chain. + blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2") + + // errSubTestFail is used to signal that a sub test returned false. + errSubTestFail = fmt.Errorf("sub test failure") +) + +// loadBlocks loads the blocks contained in the testdata directory and returns +// a slice of them. +func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) ([]*btcutil.Block, error) { + // Open the file that contains the blocks for reading. + fi, err := os.Open(dataFile) + if err != nil { + t.Errorf("failed to open file %v, err %v", dataFile, err) + return nil, err + } + defer func() { + if err := fi.Close(); err != nil { + t.Errorf("failed to close file %v %v", dataFile, + err) + } + }() + dr := bzip2.NewReader(fi) + + // Set the first block as the genesis block. + blocks := make([]*btcutil.Block, 0, 256) + genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + blocks = append(blocks, genesis) + + // Load the remaining blocks. 
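+	// Each remaining entry in the file consists of the network (4 bytes,
+	// little endian), the serialized block length (4 bytes, little
+	// endian), and the serialized block itself.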
+	for height := 1; ; height++ {
+		var net uint32
+		err := binary.Read(dr, binary.LittleEndian, &net)
+		if err == io.EOF {
+			// Hit end of file at the expected offset.  No error.
+			break
+		}
+		if err != nil {
+			t.Errorf("Failed to load network type for block %d: %v",
+				height, err)
+			return nil, err
+		}
+		if net != uint32(network) {
+			t.Errorf("Block doesn't match network: %v expects %v",
+				net, network)
+			return nil, fmt.Errorf("block network mismatch: got "+
+				"%v, want %v", net, network)
+		}
+
+		var blockLen uint32
+		err = binary.Read(dr, binary.LittleEndian, &blockLen)
+		if err != nil {
+			t.Errorf("Failed to load block size for block %d: %v",
+				height, err)
+			return nil, err
+		}
+
+		// Read the block.
+		blockBytes := make([]byte, blockLen)
+		_, err = io.ReadFull(dr, blockBytes)
+		if err != nil {
+			t.Errorf("Failed to load block %d: %v", height, err)
+			return nil, err
+		}
+
+		// Deserialize and store the block.
+		block, err := btcutil.NewBlockFromBytes(blockBytes)
+		if err != nil {
+			t.Errorf("Failed to parse block %v: %v", height, err)
+			return nil, err
+		}
+		blocks = append(blocks, block)
+	}
+
+	return blocks, nil
+}
+
+// checkDbError ensures the passed error is a database.Error with an error code
+// that matches the passed error code.
+func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool {
+	dbErr, ok := gotErr.(database.Error)
+	if !ok {
+		t.Errorf("%s: unexpected error type - got %T, want %T",
+			testName, gotErr, database.Error{})
+		return false
+	}
+	if dbErr.ErrorCode != wantErrCode {
+		t.Errorf("%s: unexpected error code - got %s (%s), want %s",
+			testName, dbErr.ErrorCode, dbErr.Description,
+			wantErrCode)
+		return false
+	}
+
+	return true
+}
+
+// testContext is used to store context information about a running test which
+// is passed into helper functions.
+type testContext struct {
+	t           *testing.T
+	db          database.DB
+	bucketDepth int
+	isWritable  bool
+	blocks      []*btcutil.Block
+}
+
+// keyPair houses a key/value pair.  It is used over maps so ordering can be
+// maintained.
+type keyPair struct {
+	key   []byte
+	value []byte
+}
+
+// lookupKey is a convenience method to look up the requested key from the
+// provided keypair slice along with whether or not the key was found.
+func lookupKey(key []byte, values []keyPair) ([]byte, bool) {
+	for _, item := range values {
+		if bytes.Equal(item.key, key) {
+			return item.value, true
+		}
+	}
+
+	return nil, false
+}
+
+// toGetValues returns a copy of the provided keypairs with all of the nil
+// values set to an empty byte slice.  This is used to ensure that keys set to
+// nil values result in empty byte slices when retrieved instead of nil.
+func toGetValues(values []keyPair) []keyPair {
+	ret := make([]keyPair, len(values))
+	copy(ret, values)
+	for i := range ret {
+		if ret[i].value == nil {
+			ret[i].value = make([]byte, 0)
+		}
+	}
+	return ret
+}
+
+// rollbackValues returns a copy of the provided keypairs with all values set to
+// nil.  This is used to test that values are properly rolled back.
+func rollbackValues(values []keyPair) []keyPair {
+	ret := make([]keyPair, len(values))
+	copy(ret, values)
+	for i := range ret {
+		ret[i].value = nil
+	}
+	return ret
+}
+
+// testCursorKeyPair checks that the provided key and value match the expected
+// keypair at the provided index.  It also ensures the index is in range for the
+// provided slice of expected keypairs.
+func testCursorKeyPair(tc *testContext, k, v []byte, index int, values []keyPair) bool { + if index >= len(values) || index < 0 { + tc.t.Errorf("Cursor: exceeded the expected range of values - "+ + "index %d, num values %d", index, len(values)) + return false + } + + pair := &values[index] + if !bytes.Equal(k, pair.key) { + tc.t.Errorf("Mismatched cursor key: index %d does not match "+ + "the expected key - got %q, want %q", index, k, + pair.key) + return false + } + if !bytes.Equal(v, pair.value) { + tc.t.Errorf("Mismatched cursor value: index %d does not match "+ + "the expected value - got %q, want %q", index, v, + pair.value) + return false + } + + return true +} + +// testGetValues checks that all of the provided key/value pairs can be +// retrieved from the database and the retrieved values match the provided +// values. +func testGetValues(tc *testContext, bucket database.Bucket, values []keyPair) bool { + for _, item := range values { + gotValue := bucket.Get(item.key) + if !reflect.DeepEqual(gotValue, item.value) { + tc.t.Errorf("Get: unexpected value for %q - got %q, "+ + "want %q", item.key, gotValue, item.value) + return false + } + } + + return true +} + +// testPutValues stores all of the provided key/value pairs in the provided +// bucket while checking for errors. +func testPutValues(tc *testContext, bucket database.Bucket, values []keyPair) bool { + for _, item := range values { + if err := bucket.Put(item.key, item.value); err != nil { + tc.t.Errorf("Put: unexpected error: %v", err) + return false + } + } + + return true +} + +// testDeleteValues removes all of the provided key/value pairs from the +// provided bucket. +func testDeleteValues(tc *testContext, bucket database.Bucket, values []keyPair) bool { + for _, item := range values { + if err := bucket.Delete(item.key); err != nil { + tc.t.Errorf("Delete: unexpected error: %v", err) + return false + } + } + + return true +} + +// testCursorInterface ensures the cursor itnerface is working properly by +// exercising all of its functions on the passed bucket. +func testCursorInterface(tc *testContext, bucket database.Bucket) bool { + // Ensure a cursor can be obtained for the bucket. + cursor := bucket.Cursor() + if cursor == nil { + tc.t.Error("Bucket.Cursor: unexpected nil cursor returned") + return false + } + + // Ensure the cursor returns the same bucket it was created for. + if cursor.Bucket() != bucket { + tc.t.Error("Cursor.Bucket: does not match the bucket it was " + + "created for") + return false + } + + if tc.isWritable { + unsortedValues := []keyPair{ + {[]byte("cursor"), []byte("val1")}, + {[]byte("abcd"), []byte("val2")}, + {[]byte("bcd"), []byte("val3")}, + {[]byte("defg"), nil}, + } + sortedValues := []keyPair{ + {[]byte("abcd"), []byte("val2")}, + {[]byte("bcd"), []byte("val3")}, + {[]byte("cursor"), []byte("val1")}, + {[]byte("defg"), nil}, + } + + // Store the values to be used in the cursor tests in unsorted + // order and ensure they were actually stored. + if !testPutValues(tc, bucket, unsortedValues) { + return false + } + if !testGetValues(tc, bucket, toGetValues(unsortedValues)) { + return false + } + + // Ensure the cursor returns all items in byte-sorted order when + // iterating forward. 
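+		// Given the unsorted values stored above, the expected
+		// forward order is: abcd, bcd, cursor, defg.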
+ curIdx := 0 + for ok := cursor.First(); ok; ok = cursor.Next() { + k, v := cursor.Key(), cursor.Value() + if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) { + return false + } + curIdx++ + } + if curIdx != len(unsortedValues) { + tc.t.Errorf("Cursor: expected to iterate %d values, "+ + "but only iterated %d", len(unsortedValues), + curIdx) + return false + } + + // Ensure the cursor returns all items in reverse byte-sorted + // order when iterating in reverse. + curIdx = len(sortedValues) - 1 + for ok := cursor.Last(); ok; ok = cursor.Prev() { + k, v := cursor.Key(), cursor.Value() + if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) { + return false + } + curIdx-- + } + if curIdx > -1 { + tc.t.Errorf("Reverse cursor: expected to iterate %d "+ + "values, but only iterated %d", + len(sortedValues), len(sortedValues)-(curIdx+1)) + return false + } + + // Ensure foward iteration works as expected after seeking. + middleIdx := (len(sortedValues) - 1) / 2 + seekKey := sortedValues[middleIdx].key + curIdx = middleIdx + for ok := cursor.Seek(seekKey); ok; ok = cursor.Next() { + k, v := cursor.Key(), cursor.Value() + if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) { + return false + } + curIdx++ + } + if curIdx != len(sortedValues) { + tc.t.Errorf("Cursor after seek: expected to iterate "+ + "%d values, but only iterated %d", + len(sortedValues)-middleIdx, curIdx-middleIdx) + return false + } + + // Ensure reverse iteration works as expected after seeking. + curIdx = middleIdx + for ok := cursor.Seek(seekKey); ok; ok = cursor.Prev() { + k, v := cursor.Key(), cursor.Value() + if !testCursorKeyPair(tc, k, v, curIdx, sortedValues) { + return false + } + curIdx-- + } + if curIdx > -1 { + tc.t.Errorf("Reverse cursor after seek: expected to "+ + "iterate %d values, but only iterated %d", + len(sortedValues)-middleIdx, middleIdx-curIdx) + return false + } + + // Ensure the cursor deletes items properly. + if !cursor.First() { + tc.t.Errorf("Cursor.First: no value") + return false + } + k := cursor.Key() + if err := cursor.Delete(); err != nil { + tc.t.Errorf("Cursor.Delete: unexpected error: %v", err) + return false + } + if val := bucket.Get(k); val != nil { + tc.t.Errorf("Cursor.Delete: value for key %q was not "+ + "deleted", k) + return false + } + } + + return true +} + +// testNestedBucket reruns the testBucketInterface against a nested bucket along +// with a counter to only test a couple of level deep. +func testNestedBucket(tc *testContext, testBucket database.Bucket) bool { + // Don't go more than 2 nested levels deep. + if tc.bucketDepth > 1 { + return true + } + + tc.bucketDepth++ + defer func() { + tc.bucketDepth-- + }() + if !testBucketInterface(tc, testBucket) { + return false + } + + return true +} + +// testBucketInterface ensures the bucket interface is working properly by +// exercising all of its functions. This includes the cursor interface for the +// cursor returned from the bucket. +func testBucketInterface(tc *testContext, bucket database.Bucket) bool { + if bucket.Writable() != tc.isWritable { + tc.t.Errorf("Bucket writable state does not match.") + return false + } + + if tc.isWritable { + // keyValues holds the keys and values to use when putting + // values into the bucket. 
+ keyValues := []keyPair{ + {[]byte("bucketkey1"), []byte("foo1")}, + {[]byte("bucketkey2"), []byte("foo2")}, + {[]byte("bucketkey3"), []byte("foo3")}, + {[]byte("bucketkey4"), nil}, + } + expectedKeyValues := toGetValues(keyValues) + if !testPutValues(tc, bucket, keyValues) { + return false + } + + if !testGetValues(tc, bucket, expectedKeyValues) { + return false + } + + // Ensure errors returned from the user-supplied ForEach + // function are returned. + forEachError := fmt.Errorf("example foreach error") + err := bucket.ForEach(func(k, v []byte) error { + return forEachError + }) + if err != forEachError { + tc.t.Errorf("ForEach: inner function error not "+ + "returned - got %v, want %v", err, forEachError) + return false + } + + // Iterate all of the keys using ForEach while making sure the + // stored values are the expected values. + keysFound := make(map[string]struct{}, len(keyValues)) + err = bucket.ForEach(func(k, v []byte) error { + wantV, found := lookupKey(k, expectedKeyValues) + if !found { + return fmt.Errorf("ForEach: key '%s' should "+ + "exist", k) + } + + if !reflect.DeepEqual(v, wantV) { + return fmt.Errorf("ForEach: value for key '%s' "+ + "does not match - got %s, want %s", k, + v, wantV) + } + + keysFound[string(k)] = struct{}{} + return nil + }) + if err != nil { + tc.t.Errorf("%v", err) + return false + } + + // Ensure all keys were iterated. + for _, item := range keyValues { + if _, ok := keysFound[string(item.key)]; !ok { + tc.t.Errorf("ForEach: key '%s' was not iterated "+ + "when it should have been", item.key) + return false + } + } + + // Delete the keys and ensure they were deleted. + if !testDeleteValues(tc, bucket, keyValues) { + return false + } + if !testGetValues(tc, bucket, rollbackValues(keyValues)) { + return false + } + + // Ensure creating a new bucket works as expected. + testBucketName := []byte("testbucket") + testBucket, err := bucket.CreateBucket(testBucketName) + if err != nil { + tc.t.Errorf("CreateBucket: unexpected error: %v", err) + return false + } + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure errors returned from the user-supplied ForEachBucket + // function are returned. + err = bucket.ForEachBucket(func(k []byte) error { + return forEachError + }) + if err != forEachError { + tc.t.Errorf("ForEachBucket: inner function error not "+ + "returned - got %v, want %v", err, forEachError) + return false + } + + // Ensure creating a bucket that already exists fails with the + // expected error. + wantErrCode := database.ErrBucketExists + _, err = bucket.CreateBucket(testBucketName) + if !checkDbError(tc.t, "CreateBucket", err, wantErrCode) { + return false + } + + // Ensure CreateBucketIfNotExists returns an existing bucket. + testBucket, err = bucket.CreateBucketIfNotExists(testBucketName) + if err != nil { + tc.t.Errorf("CreateBucketIfNotExists: unexpected "+ + "error: %v", err) + return false + } + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure retrieving an existing bucket works as expected. + testBucket = bucket.Bucket(testBucketName) + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure deleting a bucket works as intended. 
+ if err := bucket.DeleteBucket(testBucketName); err != nil { + tc.t.Errorf("DeleteBucket: unexpected error: %v", err) + return false + } + if b := bucket.Bucket(testBucketName); b != nil { + tc.t.Errorf("DeleteBucket: bucket '%s' still exists", + testBucketName) + return false + } + + // Ensure deleting a bucket that doesn't exist returns the + // expected error. + wantErrCode = database.ErrBucketNotFound + err = bucket.DeleteBucket(testBucketName) + if !checkDbError(tc.t, "DeleteBucket", err, wantErrCode) { + return false + } + + // Ensure CreateBucketIfNotExists creates a new bucket when + // it doesn't already exist. + testBucket, err = bucket.CreateBucketIfNotExists(testBucketName) + if err != nil { + tc.t.Errorf("CreateBucketIfNotExists: unexpected "+ + "error: %v", err) + return false + } + if !testNestedBucket(tc, testBucket) { + return false + } + + // Ensure the cursor interface works as expected. + if !testCursorInterface(tc, testBucket) { + return false + } + + // Delete the test bucket to avoid leaving it around for future + // calls. + if err := bucket.DeleteBucket(testBucketName); err != nil { + tc.t.Errorf("DeleteBucket: unexpected error: %v", err) + return false + } + if b := bucket.Bucket(testBucketName); b != nil { + tc.t.Errorf("DeleteBucket: bucket '%s' still exists", + testBucketName) + return false + } + } else { + // Put should fail with bucket that is not writable. + testName := "unwritable tx put" + wantErrCode := database.ErrTxNotWritable + failBytes := []byte("fail") + err := bucket.Put(failBytes, failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Delete should fail with bucket that is not writable. + testName = "unwritable tx delete" + err = bucket.Delete(failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // CreateBucket should fail with bucket that is not writable. + testName = "unwritable tx create bucket" + _, err = bucket.CreateBucket(failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // CreateBucketIfNotExists should fail with bucket that is not + // writable. + testName = "unwritable tx create bucket if not exists" + _, err = bucket.CreateBucketIfNotExists(failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // DeleteBucket should fail with bucket that is not writable. + testName = "unwritable tx delete bucket" + err = bucket.DeleteBucket(failBytes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure the cursor interface works as expected with read-only + // buckets. + if !testCursorInterface(tc, bucket) { + return false + } + } + + return true +} + +// rollbackOnPanic rolls the passed transaction back if the code in the calling +// function panics. This is useful in case the tests unexpectedly panic which +// would leave any manually created transactions with the database mutex locked +// thereby leading to a deadlock and masking the real reason for the panic. It +// also logs a test error and repanics so the original panic can be traced. +func rollbackOnPanic(t *testing.T, tx database.Tx) { + if err := recover(); err != nil { + t.Errorf("Unexpected panic: %v", err) + _ = tx.Rollback() + panic(err) + } +} + +// testMetadataManualTxInterface ensures that the manual transactions metadata +// interface works as expected. +func testMetadataManualTxInterface(tc *testContext) bool { + // populateValues tests that populating values works as expected. 
+	//
+	// When the writable flag is false, a read-only transaction is created,
+	// standard bucket tests for read-only transactions are performed, and
+	// the Commit function is checked to ensure it fails as expected.
+	//
+	// Otherwise, a read-write transaction is created, the values are
+	// written, standard bucket tests for read-write transactions are
+	// performed, and then the transaction is either committed or rolled
+	// back depending on the flag.
+	bucket1Name := []byte("bucket1")
+	populateValues := func(writable, rollback bool, putValues []keyPair) bool {
+		tx, err := tc.db.Begin(writable)
+		if err != nil {
+			tc.t.Errorf("Begin: unexpected error %v", err)
+			return false
+		}
+		defer rollbackOnPanic(tc.t, tx)
+
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			tc.t.Errorf("Metadata: unexpected nil bucket")
+			_ = tx.Rollback()
+			return false
+		}
+
+		bucket1 := metadataBucket.Bucket(bucket1Name)
+		if bucket1 == nil {
+			tc.t.Errorf("Bucket1: unexpected nil bucket")
+			return false
+		}
+
+		tc.isWritable = writable
+		if !testBucketInterface(tc, bucket1) {
+			_ = tx.Rollback()
+			return false
+		}
+
+		if !writable {
+			// The transaction is not writable, so it should fail
+			// the commit.
+			testName := "unwritable tx commit"
+			wantErrCode := database.ErrTxNotWritable
+			err := tx.Commit()
+			if !checkDbError(tc.t, testName, err, wantErrCode) {
+				_ = tx.Rollback()
+				return false
+			}
+		} else {
+			if !testPutValues(tc, bucket1, putValues) {
+				return false
+			}
+
+			if rollback {
+				// Rollback the transaction.
+				if err := tx.Rollback(); err != nil {
+					tc.t.Errorf("Rollback: unexpected "+
+						"error %v", err)
+					return false
+				}
+			} else {
+				// The commit should succeed.
+				if err := tx.Commit(); err != nil {
+					tc.t.Errorf("Commit: unexpected error "+
+						"%v", err)
+					return false
+				}
+			}
+		}
+
+		return true
+	}
+
+	// checkValues starts a read-only transaction and checks that all of
+	// the key/value pairs specified in the expectedValues parameter match
+	// what's in the database.
+	checkValues := func(expectedValues []keyPair) bool {
+		tx, err := tc.db.Begin(false)
+		if err != nil {
+			tc.t.Errorf("Begin: unexpected error %v", err)
+			return false
+		}
+		defer rollbackOnPanic(tc.t, tx)
+
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			tc.t.Errorf("Metadata: unexpected nil bucket")
+			_ = tx.Rollback()
+			return false
+		}
+
+		bucket1 := metadataBucket.Bucket(bucket1Name)
+		if bucket1 == nil {
+			tc.t.Errorf("Bucket1: unexpected nil bucket")
+			return false
+		}
+
+		if !testGetValues(tc, bucket1, expectedValues) {
+			_ = tx.Rollback()
+			return false
+		}
+
+		// Rollback the read-only transaction.
+		if err := tx.Rollback(); err != nil {
+			tc.t.Errorf("Rollback: unexpected error %v", err)
+			return false
+		}
+
+		return true
+	}
+
+	// deleteValues starts a read-write transaction and deletes the keys
+	// in the passed key/value pairs.
+	deleteValues := func(values []keyPair) bool {
+		tx, err := tc.db.Begin(true)
+		if err != nil {
+			tc.t.Errorf("Begin: unexpected error %v", err)
+			return false
+		}
+		defer rollbackOnPanic(tc.t, tx)
+
+		metadataBucket := tx.Metadata()
+		if metadataBucket == nil {
+			tc.t.Errorf("Metadata: unexpected nil bucket")
+			_ = tx.Rollback()
+			return false
+		}
+
+		bucket1 := metadataBucket.Bucket(bucket1Name)
+		if bucket1 == nil {
+			tc.t.Errorf("Bucket1: unexpected nil bucket")
+			return false
+		}
+
+		// Delete the keys and ensure they were deleted.
+ if !testDeleteValues(tc, bucket1, values) { + _ = tx.Rollback() + return false + } + if !testGetValues(tc, bucket1, rollbackValues(values)) { + _ = tx.Rollback() + return false + } + + // Commit the changes and ensure it was successful. + if err := tx.Commit(); err != nil { + tc.t.Errorf("Commit: unexpected error %v", err) + return false + } + + return true + } + + // keyValues holds the keys and values to use when putting values into a + // bucket. + var keyValues = []keyPair{ + {[]byte("umtxkey1"), []byte("foo1")}, + {[]byte("umtxkey2"), []byte("foo2")}, + {[]byte("umtxkey3"), []byte("foo3")}, + {[]byte("umtxkey4"), nil}, + } + + // Ensure that attempting populating the values using a read-only + // transaction fails as expected. + if !populateValues(false, true, keyValues) { + return false + } + if !checkValues(rollbackValues(keyValues)) { + return false + } + + // Ensure that attempting populating the values using a read-write + // transaction and then rolling it back yields the expected values. + if !populateValues(true, true, keyValues) { + return false + } + if !checkValues(rollbackValues(keyValues)) { + return false + } + + // Ensure that attempting populating the values using a read-write + // transaction and then committing it stores the expected values. + if !populateValues(true, false, keyValues) { + return false + } + if !checkValues(toGetValues(keyValues)) { + return false + } + + // Clean up the keys. + if !deleteValues(keyValues) { + return false + } + + return true +} + +// testManagedTxPanics ensures calling Rollback of Commit inside a managed +// transaction panics. +func testManagedTxPanics(tc *testContext) bool { + testPanic := func(fn func()) (paniced bool) { + // Setup a defer to catch the expected panic and update the + // return variable. + defer func() { + if err := recover(); err != nil { + paniced = true + } + }() + + fn() + return false + } + + // Ensure calling Commit on a managed read-only transaction panics. + paniced := testPanic(func() { + tc.db.View(func(tx database.Tx) error { + tx.Commit() + return nil + }) + }) + if !paniced { + tc.t.Error("Commit called inside View did not panic") + return false + } + + // Ensure calling Rollback on a managed read-only transaction panics. + paniced = testPanic(func() { + tc.db.View(func(tx database.Tx) error { + tx.Rollback() + return nil + }) + }) + if !paniced { + tc.t.Error("Rollback called inside View did not panic") + return false + } + + // Ensure calling Commit on a managed read-write transaction panics. + paniced = testPanic(func() { + tc.db.Update(func(tx database.Tx) error { + tx.Commit() + return nil + }) + }) + if !paniced { + tc.t.Error("Commit called inside Update did not panic") + return false + } + + // Ensure calling Rollback on a managed read-write transaction panics. + paniced = testPanic(func() { + tc.db.Update(func(tx database.Tx) error { + tx.Rollback() + return nil + }) + }) + if !paniced { + tc.t.Error("Rollback called inside Update did not panic") + return false + } + + return true +} + +// testMetadataTxInterface tests all facets of the managed read/write and +// manual transaction metadata interfaces as well as the bucket interfaces under +// them. 
+func testMetadataTxInterface(tc *testContext) bool { + if !testManagedTxPanics(tc) { + return false + } + + bucket1Name := []byte("bucket1") + err := tc.db.Update(func(tx database.Tx) error { + _, err := tx.Metadata().CreateBucket(bucket1Name) + return err + }) + if err != nil { + tc.t.Errorf("Update: unexpected error creating bucket: %v", err) + return false + } + + if !testMetadataManualTxInterface(tc) { + return false + } + + // keyValues holds the keys and values to use when putting values + // into a bucket. + keyValues := []keyPair{ + {[]byte("mtxkey1"), []byte("foo1")}, + {[]byte("mtxkey2"), []byte("foo2")}, + {[]byte("mtxkey3"), []byte("foo3")}, + {[]byte("mtxkey4"), nil}, + } + + // Test the bucket interface via a managed read-only transaction. + err = tc.db.View(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + tc.isWritable = false + if !testBucketInterface(tc, bucket1) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Ensure errors returned from the user-supplied View function are + // returned. + viewError := fmt.Errorf("example view error") + err = tc.db.View(func(tx database.Tx) error { + return viewError + }) + if err != viewError { + tc.t.Errorf("View: inner function error not returned - got "+ + "%v, want %v", err, viewError) + return false + } + + // Test the bucket interface via a managed read-write transaction. + // Also, put a series of values and force a rollback so the following + // code can ensure the values were not stored. + forceRollbackError := fmt.Errorf("force rollback") + err = tc.db.Update(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + tc.isWritable = true + if !testBucketInterface(tc, bucket1) { + return errSubTestFail + } + + if !testPutValues(tc, bucket1, keyValues) { + return errSubTestFail + } + + // Return an error to force a rollback. + return forceRollbackError + }) + if err != forceRollbackError { + if err == errSubTestFail { + return false + } + + tc.t.Errorf("Update: inner function error not returned - got "+ + "%v, want %v", err, forceRollbackError) + return false + } + + // Ensure the values that should not have been stored due to the forced + // rollback above were not actually stored. + err = tc.db.View(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + if !testGetValues(tc, metadataBucket, rollbackValues(keyValues)) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Store a series of values via a managed read-write transaction. 
+ err = tc.db.Update(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + if !testPutValues(tc, bucket1, keyValues) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Ensure the values stored above were committed as expected. + err = tc.db.View(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + if !testGetValues(tc, bucket1, toGetValues(keyValues)) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Clean up the values stored above in a managed read-write transaction. + err = tc.db.Update(func(tx database.Tx) error { + metadataBucket := tx.Metadata() + if metadataBucket == nil { + return fmt.Errorf("Metadata: unexpected nil bucket") + } + + bucket1 := metadataBucket.Bucket(bucket1Name) + if bucket1 == nil { + return fmt.Errorf("Bucket1: unexpected nil bucket") + } + + if !testDeleteValues(tc, bucket1, keyValues) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + return true +} + +// testFetchBlockIOMissing ensures that all of the block retrieval API functions +// work as expected when requesting blocks that don't exist. +func testFetchBlockIOMissing(tc *testContext, tx database.Tx) bool { + wantErrCode := database.ErrBlockNotFound + + // --------------------- + // Non-bulk Block IO API + // --------------------- + + // Test the individual block APIs one block at a time to ensure they + // return the expected error. Also, build the data needed to test the + // bulk APIs below while looping. + allBlockHashes := make([]wire.ShaHash, len(tc.blocks)) + allBlockRegions := make([]database.BlockRegion, len(tc.blocks)) + for i, block := range tc.blocks { + blockHash := block.Sha() + allBlockHashes[i] = *blockHash + + txLocs, err := block.TxLoc() + if err != nil { + tc.t.Errorf("block.TxLoc(%d): unexpected error: %v", i, + err) + return false + } + + // Ensure FetchBlock returns expected error. + testName := fmt.Sprintf("FetchBlock #%d on missing block", i) + _, err = tx.FetchBlock(blockHash) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure FetchBlockHeader returns expected error. + testName = fmt.Sprintf("FetchBlockHeader #%d on missing block", + i) + _, err = tx.FetchBlockHeader(blockHash) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure the first transaction fetched as a block region from + // the database returns the expected error. + region := database.BlockRegion{ + Hash: blockHash, + Offset: uint32(txLocs[0].TxStart), + Len: uint32(txLocs[0].TxLen), + } + allBlockRegions[i] = region + _, err = tx.FetchBlockRegion(®ion) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure HasBlock returns false. 
+ hasBlock, err := tx.HasBlock(blockHash) + if err != nil { + tc.t.Errorf("HasBlock #%d: unexpected err: %v", i, err) + return false + } + if hasBlock { + tc.t.Errorf("HasBlock #%d: should not have block", i) + return false + } + } + + // ----------------- + // Bulk Block IO API + // ----------------- + + // Ensure FetchBlocks returns expected error. + testName := "FetchBlocks on missing blocks" + _, err := tx.FetchBlocks(allBlockHashes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure FetchBlockHeaders returns expected error. + testName = "FetchBlockHeaders on missing blocks" + _, err = tx.FetchBlockHeaders(allBlockHashes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure FetchBlockRegions returns expected error. + testName = "FetchBlockRegions on missing blocks" + _, err = tx.FetchBlockRegions(allBlockRegions) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure HasBlocks returns false for all blocks. + hasBlocks, err := tx.HasBlocks(allBlockHashes) + if err != nil { + tc.t.Errorf("HasBlocks: unexpected err: %v", err) + } + for i, hasBlock := range hasBlocks { + if hasBlock { + tc.t.Errorf("HasBlocks #%d: should not have block", i) + return false + } + } + + return true +} + +// testFetchBlockIO ensures all of the block retrieval API functions work as +// expected for the provide set of blocks. The blocks must already be stored in +// the database, or at least stored into the the passed transaction. It also +// tests several error conditions such as ensuring the expected errors are +// returned when fetching blocks, headers, and regions that don't exist. +func testFetchBlockIO(tc *testContext, tx database.Tx) bool { + // --------------------- + // Non-bulk Block IO API + // --------------------- + + // Test the individual block APIs one block at a time. Also, build the + // data needed to test the bulk APIs below while looping. + allBlockHashes := make([]wire.ShaHash, len(tc.blocks)) + allBlockBytes := make([][]byte, len(tc.blocks)) + allBlockTxLocs := make([][]wire.TxLoc, len(tc.blocks)) + allBlockRegions := make([]database.BlockRegion, len(tc.blocks)) + for i, block := range tc.blocks { + blockHash := block.Sha() + allBlockHashes[i] = *blockHash + + blockBytes, err := block.Bytes() + if err != nil { + tc.t.Errorf("block.Bytes(%d): unexpected error: %v", i, + err) + return false + } + allBlockBytes[i] = blockBytes + + txLocs, err := block.TxLoc() + if err != nil { + tc.t.Errorf("block.TxLoc(%d): unexpected error: %v", i, + err) + return false + } + allBlockTxLocs[i] = txLocs + + // Ensure the block data fetched from the database matches the + // expected bytes. + gotBlockBytes, err := tx.FetchBlock(blockHash) + if err != nil { + tc.t.Errorf("FetchBlock(%s): unexpected error: %v", + blockHash, err) + return false + } + if !bytes.Equal(gotBlockBytes, blockBytes) { + tc.t.Errorf("FetchBlock(%s): bytes mismatch: got %x, "+ + "want %x", blockHash, gotBlockBytes, blockBytes) + return false + } + + // Ensure the block header fetched from the database matches the + // expected bytes. 
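+		// A serialized header is simply the first
+		// wire.MaxBlockHeaderPayload bytes of the serialized block.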
+		wantHeaderBytes := blockBytes[0:wire.MaxBlockHeaderPayload]
+		gotHeaderBytes, err := tx.FetchBlockHeader(blockHash)
+		if err != nil {
+			tc.t.Errorf("FetchBlockHeader(%s): unexpected error: %v",
+				blockHash, err)
+			return false
+		}
+		if !bytes.Equal(gotHeaderBytes, wantHeaderBytes) {
+			tc.t.Errorf("FetchBlockHeader(%s): bytes mismatch: "+
+				"got %x, want %x", blockHash, gotHeaderBytes,
+				wantHeaderBytes)
+			return false
+		}
+
+		// Ensure the first transaction fetched as a block region from
+		// the database matches the expected bytes.
+		region := database.BlockRegion{
+			Hash:   blockHash,
+			Offset: uint32(txLocs[0].TxStart),
+			Len:    uint32(txLocs[0].TxLen),
+		}
+		allBlockRegions[i] = region
+		endRegionOffset := region.Offset + region.Len
+		wantRegionBytes := blockBytes[region.Offset:endRegionOffset]
+		gotRegionBytes, err := tx.FetchBlockRegion(&region)
+		if err != nil {
+			tc.t.Errorf("FetchBlockRegion(%s): unexpected error: %v",
+				blockHash, err)
+			return false
+		}
+		if !bytes.Equal(gotRegionBytes, wantRegionBytes) {
+			tc.t.Errorf("FetchBlockRegion(%s): bytes mismatch: "+
+				"got %x, want %x", blockHash, gotRegionBytes,
+				wantRegionBytes)
+			return false
+		}
+
+		// Ensure the database reports that it has the block.
+		hasBlock, err := tx.HasBlock(blockHash)
+		if err != nil {
+			tc.t.Errorf("HasBlock(%s): unexpected error: %v",
+				blockHash, err)
+			return false
+		}
+		if !hasBlock {
+			tc.t.Errorf("HasBlock(%s): database claims it doesn't "+
+				"have the block when it should", blockHash)
+			return false
+		}
+
+		// -----------------------
+		// Invalid blocks/regions.
+		// -----------------------
+
+		// Ensure fetching a block that doesn't exist returns the
+		// expected error.
+		badBlockHash := &wire.ShaHash{}
+		testName := fmt.Sprintf("FetchBlock(%s) invalid block",
+			badBlockHash)
+		wantErrCode := database.ErrBlockNotFound
+		_, err = tx.FetchBlock(badBlockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure fetching a block header that doesn't exist returns
+		// the expected error.
+		testName = fmt.Sprintf("FetchBlockHeader(%s) invalid block",
+			badBlockHash)
+		_, err = tx.FetchBlockHeader(badBlockHash)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure fetching a block region in a block that doesn't exist
+		// returns the expected error.
+		testName = fmt.Sprintf("FetchBlockRegion(%s) invalid hash",
+			badBlockHash)
+		wantErrCode = database.ErrBlockNotFound
+		region.Hash = badBlockHash
+		region.Offset = ^uint32(0)
+		_, err = tx.FetchBlockRegion(&region)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+
+		// Ensure fetching a block region that is out of bounds returns
+		// the expected error.
+		testName = fmt.Sprintf("FetchBlockRegion(%s) invalid region",
+			blockHash)
+		wantErrCode = database.ErrBlockRegionInvalid
+		region.Hash = blockHash
+		region.Offset = ^uint32(0)
+		_, err = tx.FetchBlockRegion(&region)
+		if !checkDbError(tc.t, testName, err, wantErrCode) {
+			return false
+		}
+	}
+
+	// -----------------
+	// Bulk Block IO API
+	// -----------------
+
+	// Ensure the bulk block data fetched from the database matches the
+	// expected bytes.
+ blockData, err := tx.FetchBlocks(allBlockHashes) + if err != nil { + tc.t.Errorf("FetchBlocks: unexpected error: %v", err) + return false + } + if len(blockData) != len(allBlockBytes) { + tc.t.Errorf("FetchBlocks: unexpected number of results - got "+ + "%d, want %d", len(blockData), len(allBlockBytes)) + return false + } + for i := 0; i < len(blockData); i++ { + blockHash := allBlockHashes[i] + wantBlockBytes := allBlockBytes[i] + gotBlockBytes := blockData[i] + if !bytes.Equal(gotBlockBytes, wantBlockBytes) { + tc.t.Errorf("FetchBlocks(%s): bytes mismatch: got %x, "+ + "want %x", blockHash, gotBlockBytes, + wantBlockBytes) + return false + } + } + + // Ensure the bulk block headers fetched from the database match the + // expected bytes. + blockHeaderData, err := tx.FetchBlockHeaders(allBlockHashes) + if err != nil { + tc.t.Errorf("FetchBlockHeaders: unexpected error: %v", err) + return false + } + if len(blockHeaderData) != len(allBlockBytes) { + tc.t.Errorf("FetchBlockHeaders: unexpected number of results "+ + "- got %d, want %d", len(blockHeaderData), + len(allBlockBytes)) + return false + } + for i := 0; i < len(blockHeaderData); i++ { + blockHash := allBlockHashes[i] + wantHeaderBytes := allBlockBytes[i][0:wire.MaxBlockHeaderPayload] + gotHeaderBytes := blockHeaderData[i] + if !bytes.Equal(gotHeaderBytes, wantHeaderBytes) { + tc.t.Errorf("FetchBlockHeaders(%s): bytes mismatch: "+ + "got %x, want %x", blockHash, gotHeaderBytes, + wantHeaderBytes) + return false + } + } + + // Ensure the first transaction of every block fetched in bulk block + // regions from the database matches the expected bytes. + allRegionBytes, err := tx.FetchBlockRegions(allBlockRegions) + if err != nil { + tc.t.Errorf("FetchBlockRegions: unexpected error: %v", err) + return false + + } + if len(allRegionBytes) != len(allBlockRegions) { + tc.t.Errorf("FetchBlockRegions: unexpected number of results "+ + "- got %d, want %d", len(allRegionBytes), + len(allBlockRegions)) + return false + } + for i, gotRegionBytes := range allRegionBytes { + region := &allBlockRegions[i] + endRegionOffset := region.Offset + region.Len + wantRegionBytes := blockData[i][region.Offset:endRegionOffset] + if !bytes.Equal(gotRegionBytes, wantRegionBytes) { + tc.t.Errorf("FetchBlockRegions(%d): bytes mismatch: "+ + "got %x, want %x", i, gotRegionBytes, + wantRegionBytes) + return false + } + } + + // Ensure the bulk determination of whether a set of block hashes are in + // the database returns true for all loaded blocks. + hasBlocks, err := tx.HasBlocks(allBlockHashes) + if err != nil { + tc.t.Errorf("HasBlocks: unexpected error: %v", err) + return false + } + for i, hasBlock := range hasBlocks { + if !hasBlock { + tc.t.Errorf("HasBlocks(%d): should have block", i) + return false + } + } + + // ----------------------- + // Invalid blocks/regions. + // ----------------------- + + // Ensure fetching blocks for which one doesn't exist returns the + // expected error. + testName := "FetchBlocks invalid hash" + badBlockHashes := make([]wire.ShaHash, len(allBlockHashes)+1) + copy(badBlockHashes, allBlockHashes) + badBlockHashes[len(badBlockHashes)-1] = wire.ShaHash{} + wantErrCode := database.ErrBlockNotFound + _, err = tx.FetchBlocks(badBlockHashes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure fetching block headers for which one doesn't exist returns the + // expected error. 
+ testName = "FetchBlockHeaders invalid hash" + _, err = tx.FetchBlockHeaders(badBlockHashes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure fetching block regions for which one of blocks doesn't exist + // returns expected error. + testName = "FetchBlockRegions invalid hash" + badBlockRegions := make([]database.BlockRegion, len(allBlockRegions)+1) + copy(badBlockRegions, allBlockRegions) + badBlockRegions[len(badBlockRegions)-1].Hash = &wire.ShaHash{} + wantErrCode = database.ErrBlockNotFound + _, err = tx.FetchBlockRegions(badBlockRegions) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure fetching block regions that are out of bounds returns the + // expected error. + testName = "FetchBlockRegions invalid regions" + badBlockRegions = badBlockRegions[:len(badBlockRegions)-1] + for i := range badBlockRegions { + badBlockRegions[i].Offset = ^uint32(0) + } + wantErrCode = database.ErrBlockRegionInvalid + _, err = tx.FetchBlockRegions(badBlockRegions) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + return true +} + +// testBlockIOTxInterface ensures that the block IO interface works as expected +// for both managed read/write and manual transactions. This function leaves +// all of the stored blocks in the database. +func testBlockIOTxInterface(tc *testContext) bool { + // Ensure attempting to store a block with a read-only transaction fails + // with the expected error. + err := tc.db.View(func(tx database.Tx) error { + wantErrCode := database.ErrTxNotWritable + for i, block := range tc.blocks { + testName := fmt.Sprintf("StoreBlock(%d) on ro tx", i) + err := tx.StoreBlock(block) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Populate the database with loaded blocks and ensure all of the data + // fetching APIs work properly on them within the transaction before a + // commit or rollback. Then, force a rollback so the code below can + // ensure none of the data actually gets stored. + forceRollbackError := fmt.Errorf("force rollback") + err = tc.db.Update(func(tx database.Tx) error { + // Store all blocks in the same transaction. + for i, block := range tc.blocks { + err := tx.StoreBlock(block) + if err != nil { + tc.t.Errorf("StoreBlock #%d: unexpected error: "+ + "%v", i, err) + return errSubTestFail + } + } + + // Ensure attempting to store the same block again, before the + // transaction has been committed, returns the expected error. + wantErrCode := database.ErrBlockExists + for i, block := range tc.blocks { + testName := fmt.Sprintf("duplicate block entry #%d "+ + "(before commit)", i) + err := tx.StoreBlock(block) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + } + + // Ensure that all data fetches from the stored blocks before + // the transaction has been committed work as expected. 
+ if !testFetchBlockIO(tc, tx) { + return errSubTestFail + } + + return forceRollbackError + }) + if err != forceRollbackError { + if err == errSubTestFail { + return false + } + + tc.t.Errorf("Update: inner function error not returned - got "+ + "%v, want %v", err, forceRollbackError) + return false + } + + // Ensure rollback was successful + err = tc.db.View(func(tx database.Tx) error { + if !testFetchBlockIOMissing(tc, tx) { + return errSubTestFail + } + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Populate the database with loaded blocks and ensure all of the data + // fetching APIs work properly. + err = tc.db.Update(func(tx database.Tx) error { + // Store a bunch of blocks in the same transaction. + for i, block := range tc.blocks { + err := tx.StoreBlock(block) + if err != nil { + tc.t.Errorf("StoreBlock #%d: unexpected error: "+ + "%v", i, err) + return errSubTestFail + } + } + + // Ensure attempting to store the same block again while in the + // same transaction, but before it has been committed, returns + // the expected error. + for i, block := range tc.blocks { + testName := fmt.Sprintf("duplicate block entry #%d "+ + "(before commit)", i) + wantErrCode := database.ErrBlockExists + err := tx.StoreBlock(block) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + } + + // Ensure that all data fetches from the stored blocks before + // the transaction has been committed work as expected. + if !testFetchBlockIO(tc, tx) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Ensure all data fetch tests work as expected using a managed + // read-only transaction after the data was successfully committed + // above. + err = tc.db.View(func(tx database.Tx) error { + if !testFetchBlockIO(tc, tx) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + // Ensure all data fetch tests work as expected using a managed + // read-write transaction after the data was successfully committed + // above. + err = tc.db.Update(func(tx database.Tx) error { + if !testFetchBlockIO(tc, tx) { + return errSubTestFail + } + + // Ensure attempting to store existing blocks again returns the + // expected error. Note that this is different from the + // previous version since this is a new transaction after the + // blocks have been committed. + wantErrCode := database.ErrBlockExists + for i, block := range tc.blocks { + testName := fmt.Sprintf("duplicate block entry #%d "+ + "(before commit)", i) + err := tx.StoreBlock(block) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("%v", err) + } + return false + } + + return true +} + +// testClosedTxInterface ensures that both the metadata and block IO API +// functions behave as expected when attempted against a closed transaction. +func testClosedTxInterface(tc *testContext, tx database.Tx) bool { + wantErrCode := database.ErrTxClosed + bucket := tx.Metadata() + cursor := tx.Metadata().Cursor() + bucketName := []byte("closedtxbucket") + keyName := []byte("closedtxkey") + + // ------------ + // Metadata API + // ------------ + + // Ensure that attempting to get an existing bucket returns nil when the + // transaction is closed. 
+ if b := bucket.Bucket(bucketName); b != nil { + tc.t.Errorf("Bucket: did not return nil on closed tx") + return false + } + + // Ensure CreateBucket returns expected error. + testName := "CreateBucket on closed tx" + _, err := bucket.CreateBucket(bucketName) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure CreateBucketIfNotExists returns expected error. + testName = "CreateBucketIfNotExists on closed tx" + _, err = bucket.CreateBucketIfNotExists(bucketName) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure Delete returns expected error. + testName = "Delete on closed tx" + err = bucket.Delete(keyName) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure DeleteBucket returns expected error. + testName = "DeleteBucket on closed tx" + err = bucket.DeleteBucket(bucketName) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure ForEach returns expected error. + testName = "ForEach on closed tx" + err = bucket.ForEach(nil) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure ForEachBucket returns expected error. + testName = "ForEachBucket on closed tx" + err = bucket.ForEachBucket(nil) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure Get returns expected error. + testName = "Get on closed tx" + if k := bucket.Get(keyName); k != nil { + tc.t.Errorf("Get: did not return nil on closed tx") + return false + } + + // Ensure Put returns expected error. + testName = "Put on closed tx" + err = bucket.Put(keyName, []byte("test")) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // ------------------- + // Metadata Cursor API + // ------------------- + + // Ensure attempting to get a bucket from a cursor on a closed tx gives + // back nil. + if b := cursor.Bucket(); b != nil { + tc.t.Error("Cursor.Bucket: returned non-nil on closed tx") + return false + } + + // Ensure Cursor.Delete returns expected error. + testName = "Cursor.Delete on closed tx" + err = cursor.Delete() + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure Cursor.First on a closed tx returns false and nil key/value. + if cursor.First() { + tc.t.Error("Cursor.First: claims ok on closed tx") + return false + } + if cursor.Key() != nil || cursor.Value() != nil { + tc.t.Error("Cursor.First: key and/or value are not nil on " + + "closed tx") + return false + } + + // Ensure Cursor.Last on a closed tx returns false and nil key/value. + if cursor.Last() { + tc.t.Error("Cursor.Last: claims ok on closed tx") + return false + } + if cursor.Key() != nil || cursor.Value() != nil { + tc.t.Error("Cursor.Last: key and/or value are not nil on " + + "closed tx") + return false + } + + // Ensure Cursor.Next on a closed tx returns false and nil key/value. + if cursor.Next() { + tc.t.Error("Cursor.Next: claims ok on closed tx") + return false + } + if cursor.Key() != nil || cursor.Value() != nil { + tc.t.Error("Cursor.Next: key and/or value are not nil on " + + "closed tx") + return false + } + + // Ensure Cursor.Prev on a closed tx returns false and nil key/value. + if cursor.Prev() { + tc.t.Error("Cursor.Prev: claims ok on closed tx") + return false + } + if cursor.Key() != nil || cursor.Value() != nil { + tc.t.Error("Cursor.Prev: key and/or value are not nil on " + + "closed tx") + return false + } + + // Ensure Cursor.Seek on a closed tx returns false and nil key/value. 
+ if cursor.Seek([]byte{}) { + tc.t.Error("Cursor.Seek: claims ok on closed tx") + return false + } + if cursor.Key() != nil || cursor.Value() != nil { + tc.t.Error("Cursor.Seek: key and/or value are not nil on " + + "closed tx") + return false + } + + // --------------------- + // Non-bulk Block IO API + // --------------------- + + // Test the individual block APIs one block at a time to ensure they + // return the expected error. Also, build the data needed to test the + // bulk APIs below while looping. + allBlockHashes := make([]wire.ShaHash, len(tc.blocks)) + allBlockRegions := make([]database.BlockRegion, len(tc.blocks)) + for i, block := range tc.blocks { + blockHash := block.Sha() + allBlockHashes[i] = *blockHash + + txLocs, err := block.TxLoc() + if err != nil { + tc.t.Errorf("block.TxLoc(%d): unexpected error: %v", i, + err) + return false + } + + // Ensure StoreBlock returns expected error. + testName = "StoreBlock on closed tx" + err = tx.StoreBlock(block) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure FetchBlock returns expected error. + testName = fmt.Sprintf("FetchBlock #%d on closed tx", i) + _, err = tx.FetchBlock(blockHash) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure FetchBlockHeader returns expected error. + testName = fmt.Sprintf("FetchBlockHeader #%d on closed tx", i) + _, err = tx.FetchBlockHeader(blockHash) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure the first transaction fetched as a block region from + // the database returns the expected error. + region := database.BlockRegion{ + Hash: blockHash, + Offset: uint32(txLocs[0].TxStart), + Len: uint32(txLocs[0].TxLen), + } + allBlockRegions[i] = region + _, err = tx.FetchBlockRegion(®ion) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure HasBlock returns expected error. + testName = fmt.Sprintf("HasBlock #%d on closed tx", i) + _, err = tx.HasBlock(blockHash) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + } + + // ----------------- + // Bulk Block IO API + // ----------------- + + // Ensure FetchBlocks returns expected error. + testName = "FetchBlocks on closed tx" + _, err = tx.FetchBlocks(allBlockHashes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure FetchBlockHeaders returns expected error. + testName = "FetchBlockHeaders on closed tx" + _, err = tx.FetchBlockHeaders(allBlockHashes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure FetchBlockRegions returns expected error. + testName = "FetchBlockRegions on closed tx" + _, err = tx.FetchBlockRegions(allBlockRegions) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // Ensure HasBlocks returns expected error. + testName = "HasBlocks on closed tx" + _, err = tx.HasBlocks(allBlockHashes) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return false + } + + // --------------- + // Commit/Rollback + // --------------- + + // Ensure that attempting to rollback or commit a transaction that is + // already closed returns the expected error. 
+ err = tx.Rollback() + if !checkDbError(tc.t, "closed tx rollback", err, wantErrCode) { + return false + } + err = tx.Commit() + if !checkDbError(tc.t, "closed tx commit", err, wantErrCode) { + return false + } + + return true +} + +// testTxClosed ensures that both the metadata and block IO API functions behave +// as expected when attempted against both read-only and read-write +// transactions. +func testTxClosed(tc *testContext) bool { + bucketName := []byte("closedtxbucket") + keyName := []byte("closedtxkey") + + // Start a transaction, create a bucket and key used for testing, and + // immediately perform a commit on it so it is closed. + tx, err := tc.db.Begin(true) + if err != nil { + tc.t.Errorf("Begin(true): unexpected error: %v", err) + return false + } + defer rollbackOnPanic(tc.t, tx) + if _, err := tx.Metadata().CreateBucket(bucketName); err != nil { + tc.t.Errorf("CreateBucket: unexpected error: %v", err) + return false + } + if err := tx.Metadata().Put(keyName, []byte("test")); err != nil { + tc.t.Errorf("Put: unexpected error: %v", err) + return false + } + if err := tx.Commit(); err != nil { + tc.t.Errorf("Commit: unexpected error: %v", err) + return false + } + + // Ensure invoking all of the functions on the closed read-write + // transaction behave as expected. + if !testClosedTxInterface(tc, tx) { + return false + } + + // Repeat the tests with a rolled-back read-only transaction. + tx, err = tc.db.Begin(false) + if err != nil { + tc.t.Errorf("Begin(false): unexpected error: %v", err) + return false + } + defer rollbackOnPanic(tc.t, tx) + if err := tx.Rollback(); err != nil { + tc.t.Errorf("Rollback: unexpected error: %v", err) + return false + } + + // Ensure invoking all of the functions on the closed read-only + // transaction behave as expected. + return testClosedTxInterface(tc, tx) +} + +// testConcurrecy ensure the database properly supports concurrent readers and +// only a single writer. It also ensures views act as snapshots at the time +// they are acquired. +func testConcurrecy(tc *testContext) bool { + // sleepTime is how long each of the concurrent readers should sleep to + // aid in detection of whether or not the data is actually being read + // concurrently. It starts with a sane lower bound. + var sleepTime = time.Millisecond * 250 + + // Determine about how long it takes for a single block read. When it's + // longer than the default minimum sleep time, adjust the sleep time to + // help prevent durations that are too short which would cause erroneous + // test failures on slower systems. + startTime := time.Now() + err := tc.db.View(func(tx database.Tx) error { + _, err := tx.FetchBlock(tc.blocks[0].Sha()) + if err != nil { + return err + } + return nil + }) + if err != nil { + tc.t.Errorf("Unexpected error in view: %v", err) + return false + } + elapsed := time.Now().Sub(startTime) + if sleepTime < elapsed { + sleepTime = elapsed + } + tc.t.Logf("Time to load block 0: %v, using sleep time: %v", elapsed, + sleepTime) + + // reader takes a block number to load and channel to return the result + // of the operation on. It is used below to launch multiple concurrent + // readers. 
+ numReaders := len(tc.blocks) + resultChan := make(chan bool, numReaders) + reader := func(blockNum int) { + err := tc.db.View(func(tx database.Tx) error { + time.Sleep(sleepTime) + _, err := tx.FetchBlock(tc.blocks[blockNum].Sha()) + if err != nil { + return err + } + return nil + }) + if err != nil { + tc.t.Errorf("Unexpected error in concurrent view: %v", + err) + resultChan <- false + } + resultChan <- true + } + + // Start up several concurrent readers for the same block and wait for + // the results. + startTime = time.Now() + for i := 0; i < numReaders; i++ { + go reader(0) + } + for i := 0; i < numReaders; i++ { + if result := <-resultChan; !result { + return false + } + } + elapsed = time.Now().Sub(startTime) + tc.t.Logf("%d concurrent reads of same block elapsed: %v", numReaders, + elapsed) + + // Consider it a failure if it took longer than half the time it would + // take with no concurrency. + if elapsed > sleepTime*time.Duration(numReaders/2) { + tc.t.Errorf("Concurrent views for same block did not appear to "+ + "run simultaneously: elapsed %v", elapsed) + return false + } + + // Start up several concurrent readers for different blocks and wait for + // the results. + startTime = time.Now() + for i := 0; i < numReaders; i++ { + go reader(i) + } + for i := 0; i < numReaders; i++ { + if result := <-resultChan; !result { + return false + } + } + elapsed = time.Now().Sub(startTime) + tc.t.Logf("%d concurrent reads of different blocks elapsed: %v", + numReaders, elapsed) + + // Consider it a failure if it took longer than half the time it would + // take with no concurrency. + if elapsed > sleepTime*time.Duration(numReaders/2) { + tc.t.Errorf("Concurrent views for different blocks did not "+ + "appear to run simultaneously: elapsed %v", elapsed) + return false + } + + // Start up a few readers and wait for them to acquire views. Each + // reader waits for a signal from the writer to be finished to ensure + // that the data written by the writer is not seen by the view since it + // was started before the data was set. + concurrentKey := []byte("notthere") + concurrentVal := []byte("someval") + started := make(chan struct{}) + writeComplete := make(chan struct{}) + reader = func(blockNum int) { + err := tc.db.View(func(tx database.Tx) error { + started <- struct{}{} + + // Wait for the writer to complete. + <-writeComplete + + // Since this reader was created before the write took + // place, the data it added should not be visible. + val := tx.Metadata().Get(concurrentKey) + if val != nil { + return fmt.Errorf("%s should not be visible", + concurrentKey) + } + return nil + }) + if err != nil { + tc.t.Errorf("Unexpected error in concurrent view: %v", + err) + resultChan <- false + } + resultChan <- true + } + for i := 0; i < numReaders; i++ { + go reader(0) + } + for i := 0; i < numReaders; i++ { + <-started + } + + // All readers are started and waiting for completion of the writer. + // Set some data the readers are expecting to not find and signal the + // readers the write is done by closing the writeComplete channel. + err = tc.db.Update(func(tx database.Tx) error { + err := tx.Metadata().Put(concurrentKey, concurrentVal) + if err != nil { + return err + } + return nil + }) + if err != nil { + tc.t.Errorf("Unexpected error in update: %v", err) + return false + } + close(writeComplete) + + // Wait for reader results. 
+ for i := 0; i < numReaders; i++ { + if result := <-resultChan; !result { + return false + } + } + + // Start a few writers and ensure the total time is at least the + // writeSleepTime * numWriters. This ensures only one write transaction + // can be active at a time. + writeSleepTime := time.Millisecond * 250 + writer := func() { + err := tc.db.Update(func(tx database.Tx) error { + time.Sleep(writeSleepTime) + return nil + }) + if err != nil { + tc.t.Errorf("Unexpected error in concurrent view: %v", + err) + resultChan <- false + } + resultChan <- true + } + numWriters := 3 + startTime = time.Now() + for i := 0; i < numWriters; i++ { + go writer() + } + for i := 0; i < numWriters; i++ { + if result := <-resultChan; !result { + return false + } + } + elapsed = time.Now().Sub(startTime) + tc.t.Logf("%d concurrent writers elapsed using sleep time %v: %v", + numWriters, writeSleepTime, elapsed) + + // The total time must have been at least the sum of all sleeps if the + // writes blocked properly. + if elapsed < writeSleepTime*time.Duration(numWriters) { + tc.t.Errorf("Concurrent writes appeared to run simultaneously: "+ + "elapsed %v", elapsed) + return false + } + + return true +} + +// testConcurrentClose ensures that closing the database with open transactions +// blocks until the transactions are finished. +// +// The database will be closed upon returning from this function. +func testConcurrentClose(tc *testContext) bool { + // Start up a few readers and wait for them to acquire views. Each + // reader waits for a signal to complete to ensure the transactions stay + // open until they are explicitly signalled to be closed. + var activeReaders int32 + numReaders := 3 + started := make(chan struct{}) + finishReaders := make(chan struct{}) + resultChan := make(chan bool, numReaders+1) + reader := func() { + err := tc.db.View(func(tx database.Tx) error { + atomic.AddInt32(&activeReaders, 1) + started <- struct{}{} + <-finishReaders + atomic.AddInt32(&activeReaders, -1) + return nil + }) + if err != nil { + tc.t.Errorf("Unexpected error in concurrent view: %v", + err) + resultChan <- false + } + resultChan <- true + } + for i := 0; i < numReaders; i++ { + go reader() + } + for i := 0; i < numReaders; i++ { + <-started + } + + // Close the database in a separate goroutine. This should block until + // the transactions are finished. Once the close has taken place, the + // dbClosed channel is closed to signal the main goroutine below. + dbClosed := make(chan struct{}) + go func() { + started <- struct{}{} + err := tc.db.Close() + if err != nil { + tc.t.Errorf("Unexpected error in concurrent view: %v", + err) + resultChan <- false + } + close(dbClosed) + resultChan <- true + }() + <-started + + // Wait a short period and then signal the reader transactions to + // finish. When the db closed channel is received, ensure there are no + // active readers open. + time.AfterFunc(time.Millisecond*250, func() { close(finishReaders) }) + <-dbClosed + if nr := atomic.LoadInt32(&activeReaders); nr != 0 { + tc.t.Errorf("Close did not appear to block with active "+ + "readers: %d active", nr) + return false + } + + // Wait for all results. + for i := 0; i < numReaders+1; i++ { + if result := <-resultChan; !result { + return false + } + } + + return true +} + +// testInterface tests performs tests for the various interfaces of the database +// package which require state in the database for the given database type. 
+func testInterface(t *testing.T, db database.DB) { + // Create a test context to pass around. + context := testContext{t: t, db: db} + + // Load the test blocks and store in the test context for use throughout + // the tests. + blocks, err := loadBlocks(t, blockDataFile, blockDataNet) + if err != nil { + t.Errorf("loadBlocks: Unexpected error: %v", err) + return + } + context.blocks = blocks + + // Test the transaction metadata interface including managed and manual + // transactions as well as buckets. + if !testMetadataTxInterface(&context) { + return + } + + // Test the transaction block IO interface using managed and manual + // transactions. This function leaves all of the stored blocks in the + // database since they're used later. + if !testBlockIOTxInterface(&context) { + return + } + + // Test all of the transaction interface functions against a closed + // transaction work as expected. + if !testTxClosed(&context) { + return + } + + // Test the database properly supports concurrency. + if !testConcurrecy(&context) { + return + } + + // Test that closing the database with open transactions blocks until + // the transactions are finished. + // + // The database will be closed upon returning from this function, so it + // must be the last thing called. + testConcurrentClose(&context) +} diff --git a/database2/ffldb/ldbtreapiter.go b/database2/ffldb/ldbtreapiter.go new file mode 100644 index 00000000..7415e446 --- /dev/null +++ b/database2/ffldb/ldbtreapiter.go @@ -0,0 +1,58 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package ffldb + +import ( + "github.com/btcsuite/btcd/database2/internal/treap" + "github.com/btcsuite/goleveldb/leveldb/iterator" + "github.com/btcsuite/goleveldb/leveldb/util" +) + +// ldbTreapIter wraps a treap iterator to provide the additional functionality +// needed to satisfy the leveldb iterator.Iterator interface. +type ldbTreapIter struct { + *treap.Iterator + tx *transaction + released bool +} + +// Enforce ldbTreapIter implements the leveldb iterator.Iterator interface. +var _ iterator.Iterator = (*ldbTreapIter)(nil) + +// Error is only provided to satisfy the iterator interface as there are no +// errors for this memory-only structure. +// +// This is part of the leveldb iterator.Iterator interface implementation. +func (iter *ldbTreapIter) Error() error { + return nil +} + +// SetReleaser is only provided to satisfy the iterator interface as there is no +// need to override it. +// +// This is part of the leveldb iterator.Iterator interface implementation. +func (iter *ldbTreapIter) SetReleaser(releaser util.Releaser) { +} + +// Release releases the iterator by removing the underlying treap iterator from +// the list of active iterators against the pending keys treap. +// +// This is part of the leveldb iterator.Iterator interface implementation. +func (iter *ldbTreapIter) Release() { + if !iter.released { + iter.tx.removeActiveIter(iter.Iterator) + iter.released = true + } +} + +// newLdbTreapIter create a new treap iterator for the given slice against the +// pending keys for the passed transaction and returns it wrapped in an +// ldbTreapIter so it can be used as a leveldb iterator. It also adds the new +// iterator to the list of active iterators for the transaction. 
+func newLdbTreapIter(tx *transaction, slice *util.Range) *ldbTreapIter {
+	iter := tx.pendingKeys.Iterator(slice.Start, slice.Limit)
+	tx.addActiveIter(iter)
+	return &ldbTreapIter{Iterator: iter, tx: tx}
+}
diff --git a/database2/ffldb/mockfile_test.go b/database2/ffldb/mockfile_test.go
new file mode 100644
index 00000000..15fbd6e5
--- /dev/null
+++ b/database2/ffldb/mockfile_test.go
@@ -0,0 +1,163 @@
+// Copyright (c) 2015-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+// This file is part of the ffldb package rather than the ffldb_test package as
+// it is part of the whitebox testing.
+
+package ffldb
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// Errors used for the mock file.
+var (
+	// errMockFileClosed is used to indicate a mock file is closed.
+	errMockFileClosed = errors.New("file closed")
+
+	// errInvalidOffset is used to indicate an offset that is out of range
+	// for the file was provided.
+	errInvalidOffset = errors.New("invalid offset")
+
+	// errSyncFail is used to indicate simulated sync failure.
+	errSyncFail = errors.New("simulated sync failure")
+)
+
+// mockFile implements the filer interface and is used to force failures in
+// the database code related to reading from and writing to the flat block
+// files.  A maxSize of -1 is unlimited.
+type mockFile struct {
+	sync.RWMutex
+	maxSize      int64
+	data         []byte
+	forceSyncErr bool
+	closed       bool
+}
+
+// Close closes the mock file without releasing any data associated with it.
+// This allows it to be "reopened" without losing the data.
+//
+// This is part of the filer implementation.
+func (f *mockFile) Close() error {
+	f.Lock()
+	defer f.Unlock()
+
+	if f.closed {
+		return errMockFileClosed
+	}
+	f.closed = true
+	return nil
+}
+
+// ReadAt reads len(b) bytes from the mock file starting at byte offset off.
+// It returns the number of bytes read and the error, if any.  ReadAt always
+// returns a non-nil error when n < len(b).  At end of file, that error is
+// io.EOF.
+//
+// This is part of the filer implementation.
+func (f *mockFile) ReadAt(b []byte, off int64) (int, error) {
+	f.RLock()
+	defer f.RUnlock()
+
+	if f.closed {
+		return 0, errMockFileClosed
+	}
+	maxSize := int64(len(f.data))
+	if f.maxSize > -1 && maxSize > f.maxSize {
+		maxSize = f.maxSize
+	}
+	if off < 0 || off > maxSize {
+		return 0, errInvalidOffset
+	}
+
+	// Limit to the max size field, if set.
+	numToRead := int64(len(b))
+	endOffset := off + numToRead
+	if endOffset > maxSize {
+		numToRead = maxSize - off
+	}
+
+	copy(b, f.data[off:off+numToRead])
+	if numToRead < int64(len(b)) {
+		return int(numToRead), io.EOF
+	}
+	return int(numToRead), nil
+}
+
+// Truncate changes the size of the mock file.
+//
+// This is part of the filer implementation.
+func (f *mockFile) Truncate(size int64) error {
+	f.Lock()
+	defer f.Unlock()
+
+	if f.closed {
+		return errMockFileClosed
+	}
+	maxSize := int64(len(f.data))
+	if f.maxSize > -1 && maxSize > f.maxSize {
+		maxSize = f.maxSize
+	}
+	if size > maxSize {
+		return errInvalidOffset
+	}
+
+	f.data = f.data[:size]
+	return nil
+}
+
+// WriteAt writes len(b) bytes to the mock file starting at byte offset off.
+// It returns the number of bytes written and an error, if any.  WriteAt
+// returns a non-nil error any time n != len(b).
+//
+// This is part of the filer implementation.
+func (f *mockFile) WriteAt(b []byte, off int64) (int, error) {
+	f.Lock()
+	defer f.Unlock()
+
+	if f.closed {
+		return 0, errMockFileClosed
+	}
+	maxSize := f.maxSize
+	if maxSize < 0 {
+		maxSize = 100 * 1024 // 100KiB
+	}
+	if off < 0 || off > maxSize {
+		return 0, errInvalidOffset
+	}
+
+	// Limit to the max size field, if set, and grow the slice if needed.
+	numToWrite := int64(len(b))
+	if off+numToWrite > maxSize {
+		numToWrite = maxSize - off
+	}
+	if off+numToWrite > int64(len(f.data)) {
+		newData := make([]byte, off+numToWrite)
+		copy(newData, f.data)
+		f.data = newData
+	}
+
+	copy(f.data[off:], b[:numToWrite])
+	if numToWrite < int64(len(b)) {
+		return int(numToWrite), io.EOF
+	}
+	return int(numToWrite), nil
+}
+
+// Sync doesn't do anything for mock files.  However, it will return an error
+// if the mock file's forceSyncErr flag is set.
+//
+// This is part of the filer implementation.
+func (f *mockFile) Sync() error {
+	if f.forceSyncErr {
+		return errSyncFail
+	}
+
+	return nil
+}
+
+// Ensure the mockFile type implements the filer interface.
+var _ filer = (*mockFile)(nil)
diff --git a/database2/ffldb/reconcile.go b/database2/ffldb/reconcile.go
new file mode 100644
index 00000000..d0382a27
--- /dev/null
+++ b/database2/ffldb/reconcile.go
@@ -0,0 +1,117 @@
+// Copyright (c) 2015-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package ffldb
+
+import (
+	"fmt"
+	"hash/crc32"
+
+	database "github.com/btcsuite/btcd/database2"
+)
+
+// The serialized write cursor location format is:
+//
+//   [0:4]  Block file (4 bytes)
+//   [4:8]  File offset (4 bytes)
+//   [8:12] Castagnoli CRC-32 checksum (4 bytes)
+
+// serializeWriteRow serializes the current block file and offset where new
+// block data will be written into a format suitable for storage into the
+// metadata.
+func serializeWriteRow(curBlockFileNum, curFileOffset uint32) []byte {
+	var serializedRow [12]byte
+	byteOrder.PutUint32(serializedRow[0:4], curBlockFileNum)
+	byteOrder.PutUint32(serializedRow[4:8], curFileOffset)
+	checksum := crc32.Checksum(serializedRow[:8], castagnoli)
+	byteOrder.PutUint32(serializedRow[8:12], checksum)
+	return serializedRow[:]
+}
+
+// deserializeWriteRow deserializes the write cursor location stored in the
+// metadata.  Returns ErrCorruption if the checksum of the entry doesn't match.
+func deserializeWriteRow(writeRow []byte) (uint32, uint32, error) {
+	// Ensure the checksum matches.  The checksum is at the end.
+	gotChecksum := crc32.Checksum(writeRow[:8], castagnoli)
+	wantChecksumBytes := writeRow[8:12]
+	wantChecksum := byteOrder.Uint32(wantChecksumBytes)
+	if gotChecksum != wantChecksum {
+		str := fmt.Sprintf("metadata for write cursor does not match "+
+			"the expected checksum - got %d, want %d", gotChecksum,
+			wantChecksum)
+		return 0, 0, makeDbErr(database.ErrCorruption, str, nil)
+	}
+
+	fileNum := byteOrder.Uint32(writeRow[0:4])
+	fileOffset := byteOrder.Uint32(writeRow[4:8])
+	return fileNum, fileOffset, nil
+}
+
+// reconcileDB reconciles the metadata with the flat block files on disk.  It
+// will also initialize the underlying database if the create flag is set.
+func reconcileDB(pdb *db, create bool) (database.DB, error) {
+	// Perform initial internal bucket and value creation during database
+	// creation.
+	if create {
+		if err := initDB(pdb.ldb); err != nil {
+			return nil, err
+		}
+	}
+
+	// Load the current write cursor position from the metadata.
+ var curFileNum, curOffset uint32 + err := pdb.View(func(tx database.Tx) error { + writeRow := tx.Metadata().Get(writeLocKeyName) + if writeRow == nil { + str := "write cursor does not exist" + return makeDbErr(database.ErrCorruption, str, nil) + } + + var err error + curFileNum, curOffset, err = deserializeWriteRow(writeRow) + return err + }) + if err != nil { + return nil, err + } + + // When the write cursor position found by scanning the block files on + // disk is AFTER the position the metadata believes to be true, truncate + // the files on disk to match the metadata. This can be a fairly common + // occurrence in unclean shutdown scenarios while the block files are in + // the middle of being written. Since the metadata isn't updated until + // after the block data is written, this is effectively just a rollback + // to the known good point before the unclean shutdown. + wc := pdb.store.writeCursor + if wc.curFileNum > curFileNum || (wc.curFileNum == curFileNum && + wc.curOffset > curOffset) { + + log.Info("Detected unclean shutdown - Repairing...") + log.Debugf("Metadata claims file %d, offset %d. Block data is "+ + "at file %d, offset %d", curFileNum, curOffset, + wc.curFileNum, wc.curOffset) + pdb.store.handleRollback(curFileNum, curOffset) + log.Infof("Database sync complete") + } + + // When the write cursor position found by scanning the block files on + // disk is BEFORE the position the metadata believes to be true, return + // a corruption error. Since sync is called after each block is written + // and before the metadata is updated, this should only happen in the + // case of missing, deleted, or truncated block files, which generally + // is not an easily recoverable scenario. In the future, it might be + // possible to rescan and rebuild the metadata from the block files, + // however, that would need to happen with coordination from a higher + // layer since it could invalidate other metadata. + if wc.curFileNum < curFileNum || (wc.curFileNum == curFileNum && + wc.curOffset < curOffset) { + + str := fmt.Sprintf("metadata claims file %d, offset %d, but "+ + "block data is at file %d, offset %d", curFileNum, + curOffset, wc.curFileNum, wc.curOffset) + _ = log.Warnf("***Database corruption detected***: %v", str) + return nil, makeDbErr(database.ErrCorruption, str, nil) + } + + return pdb, nil +} diff --git a/database2/ffldb/whitebox_test.go b/database2/ffldb/whitebox_test.go new file mode 100644 index 00000000..eeeabd56 --- /dev/null +++ b/database2/ffldb/whitebox_test.go @@ -0,0 +1,721 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// This file is part of the ffldb package rather than the ffldb_test package as +// it provides whitebox testing. + +package ffldb + +import ( + "compress/bzip2" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "testing" + + "github.com/btcsuite/btcd/chaincfg" + database "github.com/btcsuite/btcd/database2" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/goleveldb/leveldb" + ldberrors "github.com/btcsuite/goleveldb/leveldb/errors" +) + +var ( + // blockDataNet is the expected network in the test block data. + blockDataNet = wire.MainNet + + // blockDataFile is the path to a file containing the first 256 blocks + // of the block chain. 
+ blockDataFile = filepath.Join("..", "testdata", "blocks1-256.bz2") + + // errSubTestFail is used to signal that a sub test returned false. + errSubTestFail = fmt.Errorf("sub test failure") +) + +// loadBlocks loads the blocks contained in the testdata directory and returns +// a slice of them. +func loadBlocks(t *testing.T, dataFile string, network wire.BitcoinNet) ([]*btcutil.Block, error) { + // Open the file that contains the blocks for reading. + fi, err := os.Open(dataFile) + if err != nil { + t.Errorf("failed to open file %v, err %v", dataFile, err) + return nil, err + } + defer func() { + if err := fi.Close(); err != nil { + t.Errorf("failed to close file %v %v", dataFile, + err) + } + }() + dr := bzip2.NewReader(fi) + + // Set the first block as the genesis block. + blocks := make([]*btcutil.Block, 0, 256) + genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock) + blocks = append(blocks, genesis) + + // Load the remaining blocks. + for height := 1; ; height++ { + var net uint32 + err := binary.Read(dr, binary.LittleEndian, &net) + if err == io.EOF { + // Hit end of file at the expected offset. No error. + break + } + if err != nil { + t.Errorf("Failed to load network type for block %d: %v", + height, err) + return nil, err + } + if net != uint32(network) { + t.Errorf("Block doesn't match network: %v expects %v", + net, network) + return nil, err + } + + var blockLen uint32 + err = binary.Read(dr, binary.LittleEndian, &blockLen) + if err != nil { + t.Errorf("Failed to load block size for block %d: %v", + height, err) + return nil, err + } + + // Read the block. + blockBytes := make([]byte, blockLen) + _, err = io.ReadFull(dr, blockBytes) + if err != nil { + t.Errorf("Failed to load block %d: %v", height, err) + return nil, err + } + + // Deserialize and store the block. + block, err := btcutil.NewBlockFromBytes(blockBytes) + if err != nil { + t.Errorf("Failed to parse block %v: %v", height, err) + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +// checkDbError ensures the passed error is a database.Error with an error code +// that matches the passed error code. +func checkDbError(t *testing.T, testName string, gotErr error, wantErrCode database.ErrorCode) bool { + dbErr, ok := gotErr.(database.Error) + if !ok { + t.Errorf("%s: unexpected error type - got %T, want %T", + testName, gotErr, database.Error{}) + return false + } + if dbErr.ErrorCode != wantErrCode { + t.Errorf("%s: unexpected error code - got %s (%s), want %s", + testName, dbErr.ErrorCode, dbErr.Description, + wantErrCode) + return false + } + + return true +} + +// testContext is used to store context information about a running test which +// is passed into helper functions. +type testContext struct { + t *testing.T + db database.DB + files map[uint32]*lockableFile + maxFileSizes map[uint32]int64 + blocks []*btcutil.Block +} + +// TestConvertErr ensures the leveldb error to database error conversion works +// as expected. 
+func TestConvertErr(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		err         error
+		wantErrCode database.ErrorCode
+	}{
+		{&ldberrors.ErrCorrupted{}, database.ErrCorruption},
+		{leveldb.ErrClosed, database.ErrDbNotOpen},
+		{leveldb.ErrSnapshotReleased, database.ErrTxClosed},
+		{leveldb.ErrIterReleased, database.ErrTxClosed},
+	}
+
+	for i, test := range tests {
+		gotErr := convertErr("test", test.err)
+		if gotErr.ErrorCode != test.wantErrCode {
+			t.Errorf("convertErr #%d unexpected error - got %v, "+
+				"want %v", i, gotErr.ErrorCode, test.wantErrCode)
+			continue
+		}
+	}
+}
+
+// TestCornerCases ensures several corner cases which can happen when opening
+// a database and/or block files work as expected.
+func TestCornerCases(t *testing.T) {
+	t.Parallel()
+
+	// Create a file at the database path to force the open below to fail.
+	dbPath := filepath.Join(os.TempDir(), "ffldb-errors")
+	_ = os.RemoveAll(dbPath)
+	fi, err := os.Create(dbPath)
+	if err != nil {
+		t.Errorf("os.Create: unexpected error: %v", err)
+		return
+	}
+	fi.Close()
+
+	// Ensure creating a new database fails when a file exists where a
+	// directory is needed.
+	testName := "openDB: fail due to file at target location"
+	wantErrCode := database.ErrDriverSpecific
+	idb, err := openDB(dbPath, blockDataNet, true)
+	if !checkDbError(t, testName, err, wantErrCode) {
+		if err == nil {
+			idb.Close()
+		}
+		_ = os.RemoveAll(dbPath)
+		return
+	}
+
+	// Remove the file and create the database to run tests against.  It
+	// should be successful this time.
+	_ = os.RemoveAll(dbPath)
+	idb, err = openDB(dbPath, blockDataNet, true)
+	if err != nil {
+		t.Errorf("openDB: unexpected error: %v", err)
+		return
+	}
+	defer os.RemoveAll(dbPath)
+	defer idb.Close()
+
+	// Ensure attempting to write to a file that can't be created returns
+	// the expected error.
+	testName = "writeBlock: open file failure"
+	filePath := blockFilePath(dbPath, 0)
+	if err := os.Mkdir(filePath, 0755); err != nil {
+		t.Errorf("os.Mkdir: unexpected error: %v", err)
+		return
+	}
+	store := idb.(*db).store
+	_, err = store.writeBlock([]byte{0x00})
+	if !checkDbError(t, testName, err, database.ErrDriverSpecific) {
+		return
+	}
+	_ = os.RemoveAll(filePath)
+
+	// Start a transaction and close the underlying leveldb database out
+	// from under it.
+	dbTx, err := idb.Begin(true)
+	if err != nil {
+		t.Errorf("Begin: unexpected error: %v", err)
+		return
+	}
+	ldb := idb.(*db).ldb
+	ldb.Close()
+
+	// Ensure initialization errors in the underlying database work as
+	// expected.
+	testName = "initDB: reinitialization"
+	wantErrCode = database.ErrDbNotOpen
+	err = initDB(ldb)
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+
+	// Ensure errors in the underlying database during a transaction commit
+	// are handled properly.
+	testName = "Commit: underlying leveldb error"
+	wantErrCode = database.ErrDbNotOpen
+	err = dbTx.Commit()
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+
+	// Ensure View handles errors in the underlying leveldb database
+	// properly.
+	testName = "View: underlying leveldb error"
+	wantErrCode = database.ErrDbNotOpen
+	err = idb.View(func(tx database.Tx) error {
+		return nil
+	})
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+
+	// Ensure Update handles errors in the underlying leveldb database
+	// properly.
+	testName = "Update: underlying leveldb error"
+	err = idb.Update(func(tx database.Tx) error {
+		return nil
+	})
+	if !checkDbError(t, testName, err, wantErrCode) {
+		return
+	}
+}
+
+// resetDatabase removes everything from the opened database associated with the
+// test context including all metadata and the mock files.
+func resetDatabase(tc *testContext) bool {
+	// Reset the metadata.
+	err := tc.db.Update(func(tx database.Tx) error {
+		// Remove all the keys using a cursor while also generating a
+		// list of buckets.  It's not safe to remove keys during ForEach
+		// iteration nor is it safe to remove buckets during cursor
+		// iteration, so this dual approach is needed.
+		var bucketNames [][]byte
+		cursor := tx.Metadata().Cursor()
+		for ok := cursor.First(); ok; ok = cursor.Next() {
+			if cursor.Value() != nil {
+				if err := cursor.Delete(); err != nil {
+					return err
+				}
+			} else {
+				bucketNames = append(bucketNames, cursor.Key())
+			}
+		}
+
+		// Remove the buckets.
+		for _, k := range bucketNames {
+			if err := tx.Metadata().DeleteBucket(k); err != nil {
+				return err
+			}
+		}
+
+		_, err := tx.Metadata().CreateBucket(blockIdxBucketName)
+		return err
+	})
+	if err != nil {
+		tc.t.Errorf("Update: unexpected error: %v", err)
+		return false
+	}
+
+	// Reset the mock files.
+	store := tc.db.(*db).store
+	wc := store.writeCursor
+	wc.curFile.Lock()
+	if wc.curFile.file != nil {
+		wc.curFile.file.Close()
+		wc.curFile.file = nil
+	}
+	wc.curFile.Unlock()
+	wc.Lock()
+	wc.curFileNum = 0
+	wc.curOffset = 0
+	wc.Unlock()
+	tc.files = make(map[uint32]*lockableFile)
+	tc.maxFileSizes = make(map[uint32]int64)
+	return true
+}
+
+// testWriteFailures tests various failure paths when writing to the block
+// files.
+func testWriteFailures(tc *testContext) bool {
+	if !resetDatabase(tc) {
+		return false
+	}
+
+	// Ensure file sync errors during writeBlock return the expected error.
+	store := tc.db.(*db).store
+	testName := "writeBlock: file sync failure"
+	store.writeCursor.Lock()
+	oldFile := store.writeCursor.curFile
+	store.writeCursor.curFile = &lockableFile{
+		file: &mockFile{forceSyncErr: true, maxSize: -1},
+	}
+	store.writeCursor.Unlock()
+	_, err := store.writeBlock([]byte{0x00})
+	if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) {
+		return false
+	}
+	store.writeCursor.Lock()
+	store.writeCursor.curFile = oldFile
+	store.writeCursor.Unlock()
+
+	// Force errors in the various error paths when writing data by using
+	// mock files with a limited max size.
+	block0Bytes, _ := tc.blocks[0].Bytes()
+	tests := []struct {
+		fileNum uint32
+		maxSize int64
+	}{
+		// Force an error when writing the network bytes.
+		{fileNum: 0, maxSize: 2},
+
+		// Force an error when writing the block size.
+		{fileNum: 0, maxSize: 6},
+
+		// Force an error when writing the block.
+		{fileNum: 0, maxSize: 17},
+
+		// Force an error when writing the checksum.
+		{fileNum: 0, maxSize: int64(len(block0Bytes)) + 10},
+
+		// Force an error after writing enough blocks to force multiple
+		// files.
+		{fileNum: 15, maxSize: 1},
+	}
+
+	for i, test := range tests {
+		if !resetDatabase(tc) {
+			return false
+		}
+
+		// Ensure storing the specified number of blocks using a mock
+		// file that fails the write fails when the transaction is
+		// committed, not when the block is stored.
+ tc.maxFileSizes = map[uint32]int64{test.fileNum: test.maxSize} + err := tc.db.Update(func(tx database.Tx) error { + for i, block := range tc.blocks { + err := tx.StoreBlock(block) + if err != nil { + tc.t.Errorf("StoreBlock (%d): unexpected "+ + "error: %v", i, err) + return errSubTestFail + } + } + + return nil + }) + testName := fmt.Sprintf("Force update commit failure - test "+ + "%d, fileNum %d, maxsize %d", i, test.fileNum, + test.maxSize) + if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) { + tc.t.Errorf("%v", err) + return false + } + + // Ensure the commit rollback removed all extra files and data. + if len(tc.files) != 1 { + tc.t.Errorf("Update rollback: new not removed - want "+ + "1 file, got %d", len(tc.files)) + return false + } + if _, ok := tc.files[0]; !ok { + tc.t.Error("Update rollback: file 0 does not exist") + return false + } + file := tc.files[0].file.(*mockFile) + if len(file.data) != 0 { + tc.t.Errorf("Update rollback: file did not truncate - "+ + "want len 0, got len %d", len(file.data)) + return false + } + } + + return true +} + +// testBlockFileErrors ensures the database returns expected errors with various +// file-related issues such as closed and missing files. +func testBlockFileErrors(tc *testContext) bool { + if !resetDatabase(tc) { + return false + } + + // Ensure errors in blockFile and openFile when requesting invalid file + // numbers. + store := tc.db.(*db).store + testName := "blockFile invalid file open" + _, err := store.blockFile(^uint32(0)) + if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) { + return false + } + testName = "openFile invalid file open" + _, err = store.openFile(^uint32(0)) + if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) { + return false + } + + // Insert the first block into the mock file. + err = tc.db.Update(func(tx database.Tx) error { + err := tx.StoreBlock(tc.blocks[0]) + if err != nil { + tc.t.Errorf("StoreBlock: unexpected error: %v", err) + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("Update: unexpected error: %v", err) + } + return false + } + + // Ensure errors in readBlock and readBlockRegion when requesting a file + // number that doesn't exist. + block0Hash := tc.blocks[0].Sha() + testName = "readBlock invalid file number" + invalidLoc := blockLocation{ + blockFileNum: ^uint32(0), + blockLen: 80, + } + _, err = store.readBlock(block0Hash, invalidLoc) + if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) { + return false + } + testName = "readBlockRegion invalid file number" + _, err = store.readBlockRegion(invalidLoc, 0, 80) + if !checkDbError(tc.t, testName, err, database.ErrDriverSpecific) { + return false + } + + // Close the block file out from under the database. + store.writeCursor.curFile.Lock() + store.writeCursor.curFile.file.Close() + store.writeCursor.curFile.Unlock() + + // Ensure failures in FetchBlock and FetchBlockRegion(s) since the + // underlying file they need to read from has been closed. 
+ err = tc.db.View(func(tx database.Tx) error { + testName = "FetchBlock closed file" + wantErrCode := database.ErrDriverSpecific + _, err := tx.FetchBlock(block0Hash) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + + testName = "FetchBlockRegion closed file" + regions := []database.BlockRegion{ + { + Hash: block0Hash, + Len: 80, + Offset: 0, + }, + } + _, err = tx.FetchBlockRegion(®ions[0]) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + + testName = "FetchBlockRegions closed file" + _, err = tx.FetchBlockRegions(regions) + if !checkDbError(tc.t, testName, err, wantErrCode) { + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("View: unexpected error: %v", err) + } + return false + } + + return true +} + +// testCorruption ensures the database returns expected errors under various +// corruption scenarios. +func testCorruption(tc *testContext) bool { + if !resetDatabase(tc) { + return false + } + + // Insert the first block into the mock file. + err := tc.db.Update(func(tx database.Tx) error { + err := tx.StoreBlock(tc.blocks[0]) + if err != nil { + tc.t.Errorf("StoreBlock: unexpected error: %v", err) + return errSubTestFail + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("Update: unexpected error: %v", err) + } + return false + } + + // Ensure corruption is detected by intentionally modifying the bytes + // stored to the mock file and reading the block. + block0Bytes, _ := tc.blocks[0].Bytes() + block0Hash := tc.blocks[0].Sha() + tests := []struct { + offset uint32 + fixChecksum bool + wantErrCode database.ErrorCode + }{ + // One of the network bytes. The checksum needs to be fixed so + // the invalid network is detected. + {2, true, database.ErrDriverSpecific}, + + // The same network byte, but this time don't fix the checksum + // to ensure the corruption is detected. + {2, false, database.ErrCorruption}, + + // One of the block length bytes. + {6, false, database.ErrCorruption}, + + // Random header byte. + {17, false, database.ErrCorruption}, + + // Random transaction byte. + {90, false, database.ErrCorruption}, + + // Random checksum byte. + {uint32(len(block0Bytes)) + 10, false, database.ErrCorruption}, + } + err = tc.db.View(func(tx database.Tx) error { + data := tc.files[0].file.(*mockFile).data + for i, test := range tests { + // Corrupt the byte at the offset by a single bit. + data[test.offset] ^= 0x10 + + // Fix the checksum if requested to force other errors. + fileLen := len(data) + var oldChecksumBytes [4]byte + copy(oldChecksumBytes[:], data[fileLen-4:]) + if test.fixChecksum { + toSum := data[:fileLen-4] + cksum := crc32.Checksum(toSum, castagnoli) + binary.BigEndian.PutUint32(data[fileLen-4:], cksum) + } + + testName := fmt.Sprintf("FetchBlock (test #%d): "+ + "corruption", i) + _, err := tx.FetchBlock(block0Hash) + if !checkDbError(tc.t, testName, err, test.wantErrCode) { + return errSubTestFail + } + + // Reset the corrupted data back to the original. + data[test.offset] ^= 0x10 + if test.fixChecksum { + copy(data[fileLen-4:], oldChecksumBytes[:]) + } + } + + return nil + }) + if err != nil { + if err != errSubTestFail { + tc.t.Errorf("View: unexpected error: %v", err) + } + return false + } + + return true +} + +// TestFailureScenarios ensures several failure scenarios such as database +// corruption, block file write failures, and rollback failures are handled +// correctly. 
+func TestFailureScenarios(t *testing.T) { + // Create a new database to run tests against. + dbPath := filepath.Join(os.TempDir(), "ffldb-failurescenarios") + _ = os.RemoveAll(dbPath) + idb, err := database.Create(dbType, dbPath, blockDataNet) + if err != nil { + t.Errorf("Failed to create test database (%s) %v", dbType, err) + return + } + defer os.RemoveAll(dbPath) + defer idb.Close() + + // Create a test context to pass around. + tc := &testContext{ + t: t, + db: idb, + files: make(map[uint32]*lockableFile), + maxFileSizes: make(map[uint32]int64), + } + + // Change the maximum file size to a small value to force multiple flat + // files with the test data set and replace the file-related functions + // to make use of mock files in memory. This allows injection of + // various file-related errors. + store := idb.(*db).store + store.maxBlockFileSize = 1024 // 1KiB + store.openWriteFileFunc = func(fileNum uint32) (filer, error) { + if file, ok := tc.files[fileNum]; ok { + // "Reopen" the file. + file.Lock() + mock := file.file.(*mockFile) + mock.Lock() + mock.closed = false + mock.Unlock() + file.Unlock() + return mock, nil + } + + // Limit the max size of the mock file as specified in the test + // context. + maxSize := int64(-1) + if maxFileSize, ok := tc.maxFileSizes[fileNum]; ok { + maxSize = int64(maxFileSize) + } + file := &mockFile{maxSize: int64(maxSize)} + tc.files[fileNum] = &lockableFile{file: file} + return file, nil + } + store.openFileFunc = func(fileNum uint32) (*lockableFile, error) { + // Force error when trying to open max file num. + if fileNum == ^uint32(0) { + return nil, makeDbErr(database.ErrDriverSpecific, + "test", nil) + } + if file, ok := tc.files[fileNum]; ok { + // "Reopen" the file. + file.Lock() + mock := file.file.(*mockFile) + mock.Lock() + mock.closed = false + mock.Unlock() + file.Unlock() + return file, nil + } + file := &lockableFile{file: &mockFile{}} + tc.files[fileNum] = file + return file, nil + } + store.deleteFileFunc = func(fileNum uint32) error { + if file, ok := tc.files[fileNum]; ok { + file.Lock() + file.file.Close() + file.Unlock() + delete(tc.files, fileNum) + return nil + } + + str := fmt.Sprintf("file %d does not exist", fileNum) + return makeDbErr(database.ErrDriverSpecific, str, nil) + } + + // Load the test blocks and save in the test context for use throughout + // the tests. + blocks, err := loadBlocks(t, blockDataFile, blockDataNet) + if err != nil { + t.Errorf("loadBlocks: Unexpected error: %v", err) + return + } + tc.blocks = blocks + + // Test various failures paths when writing to the block files. + if !testWriteFailures(tc) { + return + } + + // Test various file-related issues such as closed and missing files. + if !testBlockFileErrors(tc) { + return + } + + // Test various corruption scenarios. + testCorruption(tc) +} diff --git a/database2/interface.go b/database2/interface.go new file mode 100644 index 00000000..eed1c2fd --- /dev/null +++ b/database2/interface.go @@ -0,0 +1,460 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// Parts of this interface were inspired heavily by the excellent boltdb project +// at https://github.com/boltdb/bolt by Ben B. Johnson. + +package database2 + +import ( + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" +) + +// Cursor represents a cursor over key/value pairs and nested buckets of a +// bucket. 
+// +// Note that open cursors are not tracked on bucket changes and any +// modifications to the bucket, with the exception of Cursor.Delete, invalidates +// the cursor. After invalidation, the cursor must be repositioned, or the keys +// and values returned may be unpredictable. +type Cursor interface { + // Bucket returns the bucket the cursor was created for. + Bucket() Bucket + + // Delete removes the current key/value pair the cursor is at without + // invalidating the cursor. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrIncompatibleValue if attempted when the cursor points to a + // nested bucket + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + Delete() error + + // First positions the cursor at the first key/value pair and returns + // whether or not the pair exists. + First() bool + + // Last positions the cursor at the last key/value pair and returns + // whether or not the pair exists. + Last() bool + + // Next moves the cursor one key/value pair forward and returns whether + // or not the pair exists. + Next() bool + + // Prev moves the cursor one key/value pair backward and returns whether + // or not the pair exists. + Prev() bool + + // Seek positions the cursor at the first key/value pair that is greater + // than or equal to the passed seek key. Returns whether or not the + // pair exists. + Seek(seek []byte) bool + + // Key returns the current key the cursor is pointing to. + Key() []byte + + // Value returns the current value the cursor is pointing to. This will + // be nil for nested buckets. + Value() []byte +} + +// Bucket represents a collection of key/value pairs. +type Bucket interface { + // Bucket retrieves a nested bucket with the given key. Returns nil if + // the bucket does not exist. + Bucket(key []byte) Bucket + + // CreateBucket creates and returns a new nested bucket with the given + // key. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBucketExists if the bucket already exists + // - ErrBucketNameRequired if the key is empty + // - ErrIncompatibleValue if the key is otherwise invalid for the + // particular implementation + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + CreateBucket(key []byte) (Bucket, error) + + // CreateBucketIfNotExists creates and returns a new nested bucket with + // the given key if it does not already exist. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBucketNameRequired if the key is empty + // - ErrIncompatibleValue if the key is otherwise invalid for the + // particular implementation + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + CreateBucketIfNotExists(key []byte) (Bucket, error) + + // DeleteBucket removes a nested bucket with the given key. 
+ // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBucketNotFound if the specified bucket does not exist + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + DeleteBucket(key []byte) error + + // ForEach invokes the passed function with every key/value pair in the + // bucket. This does not include nested buckets or the key/value pairs + // within those nested buckets. + // + // WARNING: It is not safe to mutate data while iterating with this + // method. Doing so may cause the underlying cursor to be invalidated + // and return unexpected keys and/or values. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrTxClosed if the transaction has already been closed + // + // NOTE: The slices returned by this function are only valid during a + // transaction. Attempting to access them after a transaction has ended + // results in undefined behavior. Additionally, the slices must NOT + // be modified by the caller. These constraints prevent additional data + // copies and allows support for memory-mapped database implementations. + ForEach(func(k, v []byte) error) error + + // ForEachBucket invokes the passed function with the key of every + // nested bucket in the current bucket. This does not include any + // nested buckets within those nested buckets. + // + // WARNING: It is not safe to mutate data while iterating with this + // method. Doing so may cause the underlying cursor to be invalidated + // and return unexpected keys and/or values. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrTxClosed if the transaction has already been closed + // + // NOTE: The keys returned by this function are only valid during a + // transaction. Attempting to access them after a transaction has ended + // results in undefined behavior. This constraint prevents additional + // data copies and allows support for memory-mapped database + // implementations. + ForEachBucket(func(k []byte) error) error + + // Cursor returns a new cursor, allowing for iteration over the bucket's + // key/value pairs and nested buckets in forward or backward order. + // + // You must seek to a position using the First, Last, or Seek functions + // before calling the Next, Prev, Key, or Value functions. Failure to + // do so will result in the same return values as an exhausted cursor, + // which is false for the Prev and Next functions and nil for Key and + // Value functions. + Cursor() Cursor + + // Writable returns whether or not the bucket is writable. + Writable() bool + + // Put saves the specified key/value pair to the bucket. Keys that do + // not already exist are added and keys that already exist are + // overwritten. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrKeyRequired if the key is empty + // - ErrIncompatibleValue if the key is the same as an existing bucket + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + // + // NOTE: The slices passed to this function must NOT be modified by the + // caller. 
This constraint prevents the requirement for additional data + // copies and allows support for memory-mapped database implementations. + Put(key, value []byte) error + + // Get returns the value for the given key. Returns nil if the key does + // not exist in this bucket. An empty slice is returned for keys that + // exist but have no value assigned. + // + // NOTE: The value returned by this function is only valid during a + // transaction. Attempting to access it after a transaction has ended + // results in undefined behavior. Additionally, the value must NOT + // be modified by the caller. These constraints prevent additional data + // copies and allows support for memory-mapped database implementations. + Get(key []byte) []byte + + // Delete removes the specified key from the bucket. Deleting a key + // that does not exist does not return an error. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrKeyRequired if the key is empty + // - ErrIncompatibleValue if the key is the same as an existing bucket + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + Delete(key []byte) error +} + +// BlockRegion specifies a particular region of a block identified by the +// specified hash, given an offset and length. +type BlockRegion struct { + Hash *wire.ShaHash + Offset uint32 + Len uint32 +} + +// Tx represents a database transaction. It can either by read-only or +// read-write. The transaction provides a metadata bucket against which all +// read and writes occur. +// +// As would be expected with a transaction, no changes will be saved to the +// database until it has been committed. The transaction will only provide a +// view of the database at the time it was created. Transactions should not be +// long running operations. +type Tx interface { + // Metadata returns the top-most bucket for all metadata storage. + Metadata() Bucket + + // StoreBlock stores the provided block into the database. There are no + // checks to ensure the block connects to a previous block, contains + // double spends, or any additional functionality such as transaction + // indexing. It simply stores the block in the database. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBlockExists when the block hash already exists + // - ErrTxNotWritable if attempted against a read-only transaction + // - ErrTxClosed if the transaction has already been closed + // + // Other errors are possible depending on the implementation. + StoreBlock(block *btcutil.Block) error + + // HasBlock returns whether or not a block with the given hash exists + // in the database. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrTxClosed if the transaction has already been closed + // + // Other errors are possible depending on the implementation. + HasBlock(hash *wire.ShaHash) (bool, error) + + // HasBlocks returns whether or not the blocks with the provided hashes + // exist in the database. 
+ // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrTxClosed if the transaction has already been closed + // + // Other errors are possible depending on the implementation. + HasBlocks(hashes []wire.ShaHash) ([]bool, error) + + // FetchBlockHeader returns the raw serialized bytes for the block + // header identified by the given hash. The raw bytes are in the format + // returned by Serialize on a wire.BlockHeader. + // + // It is highly recommended to use this function (or FetchBlockHeaders) + // to obtain block headers over the FetchBlockRegion(s) functions since + // it provides the backend drivers the freedom to perform very specific + // optimizations which can result in significant speed advantages when + // working with headers. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBlockNotFound if the requested block hash does not exist + // - ErrTxClosed if the transaction has already been closed + // - ErrCorruption if the database has somehow become corrupted + // + // NOTE: The data returned by this function is only valid during a + // database transaction. Attempting to access it after a transaction + // has ended results in undefined behavior. This constraint prevents + // additional data copies and allows support for memory-mapped database + // implementations. + FetchBlockHeader(hash *wire.ShaHash) ([]byte, error) + + // FetchBlockHeaders returns the raw serialized bytes for the block + // headers identified by the given hashes. The raw bytes are in the + // format returned by Serialize on a wire.BlockHeader. + // + // It is highly recommended to use this function (or FetchBlockHeader) + // to obtain block headers over the FetchBlockRegion(s) functions since + // it provides the backend drivers the freedom to perform very specific + // optimizations which can result in significant speed advantages when + // working with headers. + // + // Furthermore, depending on the specific implementation, this function + // can be more efficient for bulk loading multiple block headers than + // loading them one-by-one with FetchBlockHeader. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBlockNotFound if any of the request block hashes do not exist + // - ErrTxClosed if the transaction has already been closed + // - ErrCorruption if the database has somehow become corrupted + // + // NOTE: The data returned by this function is only valid during a + // database transaction. Attempting to access it after a transaction + // has ended results in undefined behavior. This constraint prevents + // additional data copies and allows support for memory-mapped database + // implementations. + FetchBlockHeaders(hashes []wire.ShaHash) ([][]byte, error) + + // FetchBlock returns the raw serialized bytes for the block identified + // by the given hash. The raw bytes are in the format returned by + // Serialize on a wire.MsgBlock. 
+ // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBlockNotFound if the requested block hash does not exist + // - ErrTxClosed if the transaction has already been closed + // - ErrCorruption if the database has somehow become corrupted + // + // NOTE: The data returned by this function is only valid during a + // database transaction. Attempting to access it after a transaction + // has ended results in undefined behavior. This constraint prevents + // additional data copies and allows support for memory-mapped database + // implementations. + FetchBlock(hash *wire.ShaHash) ([]byte, error) + + // FetchBlocks returns the raw serialized bytes for the blocks + // identified by the given hashes. The raw bytes are in the format + // returned by Serialize on a wire.MsgBlock. + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBlockNotFound if the any of the requested block hashes do not + // exist + // - ErrTxClosed if the transaction has already been closed + // - ErrCorruption if the database has somehow become corrupted + // + // NOTE: The data returned by this function is only valid during a + // database transaction. Attempting to access it after a transaction + // has ended results in undefined behavior. This constraint prevents + // additional data copies and allows support for memory-mapped database + // implementations. + FetchBlocks(hashes []wire.ShaHash) ([][]byte, error) + + // FetchBlockRegion returns the raw serialized bytes for the given + // block region. + // + // For example, it is possible to directly extract Bitcoin transactions + // and/or scripts from a block with this function. Depending on the + // backend implementation, this can provide significant savings by + // avoiding the need to load entire blocks. + // + // The raw bytes are in the format returned by Serialize on a + // wire.MsgBlock and the Offset field in the provided BlockRegion is + // zero-based and relative to the start of the block (byte 0). + // + // The interface contract guarantees at least the following errors will + // be returned (other implementation-specific errors are possible): + // - ErrBlockNotFound if the requested block hash does not exist + // - ErrBlockRegionInvalid if the region exceeds the bounds of the + // associated block + // - ErrTxClosed if the transaction has already been closed + // - ErrCorruption if the database has somehow become corrupted + // + // NOTE: The data returned by this function is only valid during a + // database transaction. Attempting to access it after a transaction + // has ended results in undefined behavior. This constraint prevents + // additional data copies and allows support for memory-mapped database + // implementations. + FetchBlockRegion(region *BlockRegion) ([]byte, error) + + // FetchBlockRegions returns the raw serialized bytes for the given + // block regions. + // + // For example, it is possible to directly extract Bitcoin transactions + // and/or scripts from various blocks with this function. Depending on + // the backend implementation, this can provide significant savings by + // avoiding the need to load entire blocks. + // + // The raw bytes are in the format returned by Serialize on a + // wire.MsgBlock and the Offset fields in the provided BlockRegions are + // zero-based and relative to the start of the block (byte 0). 
+	//
+	// The interface contract guarantees at least the following errors will
+	// be returned (other implementation-specific errors are possible):
+	//   - ErrBlockNotFound if any of the requested block hashes do not
+	//     exist
+	//   - ErrBlockRegionInvalid if one or more regions exceed the bounds of
+	//     the associated block
+	//   - ErrTxClosed if the transaction has already been closed
+	//   - ErrCorruption if the database has somehow become corrupted
+	//
+	// NOTE: The data returned by this function is only valid during a
+	// database transaction. Attempting to access it after a transaction
+	// has ended results in undefined behavior. This constraint prevents
+	// additional data copies and allows support for memory-mapped database
+	// implementations.
+	FetchBlockRegions(regions []BlockRegion) ([][]byte, error)
+
+	// ******************************************************************
+	// Methods related to both atomic metadata storage and block storage.
+	// ******************************************************************
+
+	// Commit commits all changes that have been made to the metadata or
+	// block storage to persistent storage. Calling this function on a
+	// managed transaction will result in a panic.
+	Commit() error
+
+	// Rollback undoes all changes that have been made to the metadata or
+	// block storage. Calling this function on a managed transaction will
+	// result in a panic.
+	Rollback() error
+}
+
+// DB provides a generic interface that is used to store bitcoin blocks and
+// related metadata. This interface is intended to be agnostic to the actual
+// mechanism used for backend data storage. The RegisterDriver function can be
+// used to add a new backend data storage method.
+//
+// This interface is divided into two distinct categories of functionality.
+//
+// The first category is atomic metadata storage with bucket support. This is
+// accomplished through the use of database transactions.
+//
+// The second category is generic block storage. This functionality is
+// intentionally separate because the mechanism used for block storage may or
+// may not be the same mechanism used for metadata storage. For example, it is
+// often more efficient to store the block data as flat files while the metadata
+// is kept in a database. However, this interface aims to be generic enough to
+// support blocks in the database too, if needed by a particular backend.
+type DB interface {
+	// Type returns the database driver type the current database instance
+	// was created with.
+	Type() string
+
+	// Begin starts a transaction which is either read-only or read-write
+	// depending on the specified flag. Multiple read-only transactions
+	// can be started simultaneously while only a single read-write
+	// transaction can be started at a time. The call will block when
+	// starting a read-write transaction while one is already open.
+	//
+	// NOTE: The transaction must be closed by calling Rollback or Commit on
+	// it when it is no longer needed. Failure to do so can result in
+	// unclaimed memory and/or inability to close the database due to locks
+	// depending on the specific database implementation.
+	Begin(writable bool) (Tx, error)
+
+	// View invokes the passed function in the context of a managed
+	// read-only transaction. Any errors returned from the user-supplied
+	// function are returned from this function.
+	//
+	// Calling Rollback or Commit on the transaction passed to the
+	// user-supplied function will result in a panic.
+	View(fn func(tx Tx) error) error
+
+	// Update invokes the passed function in the context of a managed
+	// read-write transaction. Any errors returned from the user-supplied
+	// function will cause the transaction to be rolled back and are
+	// returned from this function. Otherwise, the transaction is committed
+	// when the user-supplied function returns a nil error.
+	//
+	// Calling Rollback or Commit on the transaction passed to the
+	// user-supplied function will result in a panic.
+	Update(fn func(tx Tx) error) error
+
+	// Close cleanly shuts down the database and syncs all data. It will
+	// block until all database transactions have been finalized (rolled
+	// back or committed).
+	Close() error
+}
diff --git a/database2/internal/treap/README.md b/database2/internal/treap/README.md
new file mode 100644
index 00000000..57136e3c
--- /dev/null
+++ b/database2/internal/treap/README.md
@@ -0,0 +1,51 @@
+treap
+=====
+
+[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)]
+(https://travis-ci.org/btcsuite/btcd)
+
+Package treap implements a treap data structure that is used to hold ordered
+key/value pairs using a combination of binary search tree and heap semantics.
+It is a self-organizing and randomized data structure that doesn't require
+complex operations to maintain balance. Search, insert, and delete
+operations are all O(log n). Both mutable and immutable variants are provided.
+
+The mutable variant is typically faster since it is able to simply update the
+treap when modifications are made. However, a mutable treap is not safe for
+concurrent access without careful use of locking by the caller and care must be
+taken when iterating since it can change out from under the iterator.
+
+The immutable variant works by creating a new version of the treap for all
+mutations by replacing modified nodes with new nodes that have updated values
+while sharing all unmodified nodes with the previous version. This is extremely
+useful in concurrent applications since the caller only has to atomically
+replace the treap pointer with the newly returned version after performing any
+mutations. All readers can simply use their existing pointer as a snapshot
+since the treap it points to is immutable. This effectively provides O(1)
+snapshot capability with efficient memory usage characteristics since the old
+nodes only remain allocated until there are no longer any references to them.
+
+Package treap is licensed under the copyfree ISC license.
+
+## Usage
+
+This package is only used internally in the database code and as such is not
+available for use outside of it.
+
+## Documentation
+
+[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/database/internal/treap?status.png)]
+(http://godoc.org/github.com/btcsuite/btcd/database/internal/treap)
+
+Full `go doc` style documentation for the project can be viewed online without
+installing this package by using the GoDoc site here:
+http://godoc.org/github.com/btcsuite/btcd/database/internal/treap
+
+You can also view the documentation locally once the package is installed with
+the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
+http://localhost:6060/pkg/github.com/btcsuite/btcd/database/internal/treap
+
+## License
+
+Package treap is licensed under the [copyfree](http://copyfree.org) ISC
+License.
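Before moving on to the treap internals, the read path defined by the Tx and DB interfaces above is easiest to see in a short sketch. The fragment below is illustrative only and is not part of this change: it assumes `db` is an already-open `database.DB` from a registered backend, that a block with the given hash has been stored, and that `BlockRegion` carries the block hash and region length alongside the `Offset` field described above.

```go
package dbexample

import (
	"fmt"

	database "github.com/btcsuite/btcd/database2"
	"github.com/btcsuite/btcd/wire"
)

// dumpHeaderAndRegion reads a block header and a single region of the same
// block (for example, one transaction) inside a managed read-only
// transaction.
func dumpHeaderAndRegion(db database.DB, blockHash *wire.ShaHash, offset, numBytes uint32) error {
	// View runs the closure in a managed read-only transaction, so the
	// transaction is closed automatically when the closure returns and
	// calling Commit or Rollback on it would panic.
	return db.View(func(tx database.Tx) error {
		// Headers are requested via FetchBlockHeader rather than a
		// block region so the backend is free to apply header-specific
		// optimizations.
		header, err := tx.FetchBlockHeader(blockHash)
		if err != nil {
			return err
		}

		// Offset is zero-based and relative to byte 0 of the
		// serialized block.  Hash and Len are assumed field names.
		region := database.BlockRegion{
			Hash:   blockHash,
			Offset: offset,
			Len:    numBytes,
		}
		regionBytes, err := tx.FetchBlockRegion(&region)
		if err != nil {
			return err
		}

		// The returned slices are only valid for the life of the
		// transaction, so copy them before returning if they are
		// needed afterwards.
		fmt.Printf("header: %d bytes, region: %d bytes\n",
			len(header), len(regionBytes))
		return nil
	})
}
```

The same shape of closure could be passed to Update instead when the metadata or block storage needs to be modified; in that case a nil return commits the transaction and a non-nil return rolls it back.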
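The snapshot behaviour the treap README describes, atomically replacing the treap pointer after each mutation so readers keep a stable view, can also be sketched briefly. This is an editorial illustration under stated assumptions rather than code from this change: the treap package is internal to the database code, so outside callers cannot actually import it, and the sketch assumes writers are serialized by a mutex since the load-modify-publish sequence is not atomic on its own.

```go
package treapexample

import (
	"sync"
	"sync/atomic"

	"github.com/btcsuite/btcd/database2/internal/treap"
)

// snapshotStore sketches the copy-on-write pattern: writers build a new
// immutable treap and atomically publish it, while readers keep using
// whichever version they loaded.
type snapshotStore struct {
	writeMu sync.Mutex
	current atomic.Value // holds a *treap.Immutable
}

func newSnapshotStore() *snapshotStore {
	s := &snapshotStore{}
	s.current.Store(treap.NewImmutable())
	return s
}

// put publishes a new version of the treap that contains the key/value pair.
func (s *snapshotStore) put(key, value []byte) {
	s.writeMu.Lock()
	defer s.writeMu.Unlock()

	snap := s.current.Load().(*treap.Immutable)
	s.current.Store(snap.Put(key, value))
}

// get reads from whatever version was current at the time of the call.  The
// loaded snapshot never changes underneath the reader, even if a writer
// publishes a newer version while the read is in progress.
func (s *snapshotStore) get(key []byte) []byte {
	snap := s.current.Load().(*treap.Immutable)
	return snap.Get(key)
}
```

Old versions are reclaimed by the garbage collector once no reader holds a reference to them, which is the memory behaviour the README describes.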
diff --git a/database2/internal/treap/common.go b/database2/internal/treap/common.go new file mode 100644 index 00000000..090a7bd5 --- /dev/null +++ b/database2/internal/treap/common.go @@ -0,0 +1,136 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package treap + +import ( + "math/rand" + "time" +) + +const ( + // staticDepth is the size of the static array to use for keeping track + // of the parent stack during treap iteration. Since a treap has a very + // high probability that the tree height is logarithmic, it is + // exceedingly unlikely that the parent stack will ever exceed this size + // even for extremely large numbers of items. + staticDepth = 128 + + // nodeFieldsSize is the size the fields of each node takes excluding + // the contents of the key and value. It assumes 64-bit pointers so + // technically it is smaller on 32-bit platforms, but overestimating the + // size in that case is acceptable since it avoids the need to import + // unsafe. It consists of 24-bytes for each key and value + 8 bytes for + // each of the priority, left, and right fields (24*2 + 8*3). + nodeFieldsSize = 72 +) + +var ( + // emptySlice is used for keys that have no value associated with them + // so callers can distinguish between a key that does not exist and one + // that has no value associated with it. + emptySlice = make([]byte, 0) +) + +// treapNode represents a node in the treap. +type treapNode struct { + key []byte + value []byte + priority int + left *treapNode + right *treapNode +} + +// nodeSize returns the number of bytes the specified node occupies including +// the struct fields and the contents of the key and value. +func nodeSize(node *treapNode) uint64 { + return nodeFieldsSize + uint64(len(node.key)+len(node.value)) +} + +// newTreapNode returns a new node from the given key, value, and priority. The +// node is not initially linked to any others. +func newTreapNode(key, value []byte, priority int) *treapNode { + return &treapNode{key: key, value: value, priority: priority} +} + +// parentStack represents a stack of parent treap nodes that are used during +// iteration. It consists of a static array for holding the parents and a +// dynamic overflow slice. It is extremely unlikely the overflow will ever be +// hit during normal operation, however, since a treap's height is +// probabilistic, the overflow case needs to be handled properly. This approach +// is used because it is much more efficient for the majority case than +// dynamically allocating heap space every time the treap is iterated. +type parentStack struct { + index int + items [staticDepth]*treapNode + overflow []*treapNode +} + +// Len returns the current number of items in the stack. +func (s *parentStack) Len() int { + return s.index +} + +// At returns the item n number of items from the top of the stack, where 0 is +// the topmost item, without removing it. It returns nil if n exceeds the +// number of items on the stack. +func (s *parentStack) At(n int) *treapNode { + index := s.index - n - 1 + if index < 0 { + return nil + } + + if index < staticDepth { + return s.items[index] + } + + return s.overflow[index-staticDepth] +} + +// Pop removes the top item from the stack. It returns nil if the stack is +// empty. 
+func (s *parentStack) Pop() *treapNode { + if s.index == 0 { + return nil + } + + s.index-- + if s.index < staticDepth { + node := s.items[s.index] + s.items[s.index] = nil + return node + } + + node := s.overflow[s.index-staticDepth] + s.overflow[s.index-staticDepth] = nil + return node +} + +// Push pushes the passed item onto the top of the stack. +func (s *parentStack) Push(node *treapNode) { + if s.index < staticDepth { + s.items[s.index] = node + s.index++ + return + } + + // This approach is used over append because reslicing the slice to pop + // the item causes the compiler to make unneeded allocations. Also, + // since the max number of items is related to the tree depth which + // requires expontentially more items to increase, only increase the cap + // one item at a time. This is more intelligent than the generic append + // expansion algorithm which often doubles the cap. + index := s.index - staticDepth + if index+1 > cap(s.overflow) { + overflow := make([]*treapNode, index+1) + copy(overflow, s.overflow) + s.overflow = overflow + } + s.overflow[index] = node + s.index++ +} + +func init() { + rand.Seed(time.Now().UnixNano()) +} diff --git a/database2/internal/treap/common_test.go b/database2/internal/treap/common_test.go new file mode 100644 index 00000000..c43e678d --- /dev/null +++ b/database2/internal/treap/common_test.go @@ -0,0 +1,121 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package treap + +import ( + "encoding/binary" + "encoding/hex" + "math/rand" + "reflect" + "testing" +) + +// fromHex converts the passed hex string into a byte slice and will panic if +// there is an error. This is only provided for the hard-coded constants so +// errors in the source code can be detected. It will only (and must only) be +// called for initialization purposes. +func fromHex(s string) []byte { + r, err := hex.DecodeString(s) + if err != nil { + panic("invalid hex in source file: " + s) + } + return r +} + +// serializeUint32 returns the big-endian encoding of the passed uint32. +func serializeUint32(ui uint32) []byte { + var ret [4]byte + binary.BigEndian.PutUint32(ret[:], ui) + return ret[:] +} + +// TestParentStack ensures the treapParentStack functionality works as intended. +func TestParentStack(t *testing.T) { + t.Parallel() + + tests := []struct { + numNodes int + }{ + {numNodes: 1}, + {numNodes: staticDepth}, + {numNodes: staticDepth + 1}, // Test dynamic code paths + } + +testLoop: + for i, test := range tests { + nodes := make([]*treapNode, 0, test.numNodes) + for j := 0; j < test.numNodes; j++ { + var key [4]byte + binary.BigEndian.PutUint32(key[:], uint32(j)) + node := newTreapNode(key[:], key[:], 0) + nodes = append(nodes, node) + } + + // Push all of the nodes onto the parent stack while testing + // various stack properties. + stack := &parentStack{} + for j, node := range nodes { + stack.Push(node) + + // Ensure the stack length is the expected value. + if stack.Len() != j+1 { + t.Errorf("Len #%d (%d): unexpected stack "+ + "length - got %d, want %d", i, j, + stack.Len(), j+1) + continue testLoop + } + + // Ensure the node at each index is the expected one. + for k := 0; k <= j; k++ { + atNode := stack.At(j - k) + if !reflect.DeepEqual(atNode, nodes[k]) { + t.Errorf("At #%d (%d): mismatched node "+ + "- got %v, want %v", i, j-k, + atNode, nodes[k]) + continue testLoop + } + } + } + + // Ensure each popped node is the expected one. 
+ for j := 0; j < len(nodes); j++ { + node := stack.Pop() + expected := nodes[len(nodes)-j-1] + if !reflect.DeepEqual(node, expected) { + t.Errorf("At #%d (%d): mismatched node - "+ + "got %v, want %v", i, j, node, expected) + continue testLoop + } + } + + // Ensure the stack is now empty. + if stack.Len() != 0 { + t.Errorf("Len #%d: stack is not empty - got %d", i, + stack.Len()) + continue testLoop + } + + // Ensure attempting to retrieve a node at an index beyond the + // stack's length returns nil. + if node := stack.At(2); node != nil { + t.Errorf("At #%d: did not give back nil - got %v", i, + node) + continue testLoop + } + + // Ensure attempting to pop a node from an empty stack returns + // nil. + if node := stack.Pop(); node != nil { + t.Errorf("Pop #%d: did not give back nil - got %v", i, + node) + continue testLoop + } + } +} + +func init() { + // Force the same pseudo random numbers for each test run. + rand.Seed(0) +} diff --git a/database2/internal/treap/doc.go b/database2/internal/treap/doc.go new file mode 100644 index 00000000..4f46e057 --- /dev/null +++ b/database2/internal/treap/doc.go @@ -0,0 +1,27 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package treap implements a treap data structure that is used to hold ordered +key/value pairs using a combination of binary search tree and heap semantics. +It is a self-organizing and randomized data structure that doesn't require +complex operations to to maintain balance. Search, insert, and delete +operations are all O(log n). Both mutable and immutable variants are provided. + +The mutable variant is typically faster since it is able to simply update the +treap when modifications are made. However, a mutable treap is not safe for +concurrent access without careful use of locking by the caller and care must be +taken when iterating since it can change out from under the iterator. + +The immutable variant works by creating a new version of the treap for all +mutations by replacing modified nodes with new nodes that have updated values +while sharing all unmodified nodes with the previous version. This is extremely +useful in concurrent applications since the caller only has to atomically +replace the treap pointer with the newly returned version after performing any +mutations. All readers can simply use their existing pointer as a snapshot +since the treap it points to is immutable. This effectively provides O(1) +snapshot capability with efficient memory usage characteristics since the old +nodes only remain allocated until there are no longer any references to them. +*/ +package treap diff --git a/database2/internal/treap/immutable.go b/database2/internal/treap/immutable.go new file mode 100644 index 00000000..a6e13ff4 --- /dev/null +++ b/database2/internal/treap/immutable.go @@ -0,0 +1,360 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package treap + +import ( + "bytes" + "math/rand" +) + +// cloneTreapNode returns a shallow copy of the passed node. +func cloneTreapNode(node *treapNode) *treapNode { + return &treapNode{ + key: node.key, + value: node.value, + priority: node.priority, + left: node.left, + right: node.right, + } +} + +// Immutable represents a treap data structure which is used to hold ordered +// key/value pairs using a combination of binary search tree and heap semantics. 
+// It is a self-organizing and randomized data structure that doesn't require +// complex operations to maintain balance. Search, insert, and delete +// operations are all O(log n). In addition, it provides O(1) snapshots for +// multi-version concurrency control (MVCC). +// +// All operations which result in modifying the treap return a new version of +// the treap with only the modified nodes updated. All unmodified nodes are +// shared with the previous version. This is extremely useful in concurrent +// applications since the caller only has to atomically replace the treap +// pointer with the newly returned version after performing any mutations. All +// readers can simply use their existing pointer as a snapshot since the treap +// it points to is immutable. This effectively provides O(1) snapshot +// capability with efficient memory usage characteristics since the old nodes +// only remain allocated until there are no longer any references to them. +type Immutable struct { + root *treapNode + count int + + // totalSize is the best estimate of the total size of of all data in + // the treap including the keys, values, and node sizes. + totalSize uint64 +} + +// newImmutable returns a new immutable treap given the passed parameters. +func newImmutable(root *treapNode, count int, totalSize uint64) *Immutable { + return &Immutable{root: root, count: count, totalSize: totalSize} +} + +// Len returns the number of items stored in the treap. +func (t *Immutable) Len() int { + return t.count +} + +// Size returns a best estimate of the total number of bytes the treap is +// consuming including all of the fields used to represent the nodes as well as +// the size of the keys and values. Shared values are not detected, so the +// returned size assumes each value is pointing to different memory. +func (t *Immutable) Size() uint64 { + return t.totalSize +} + +// get returns the treap node that contains the passed key. It will return nil +// when the key does not exist. +func (t *Immutable) get(key []byte) *treapNode { + for node := t.root; node != nil; { + // Traverse left or right depending on the result of the + // comparison. + compareResult := bytes.Compare(key, node.key) + if compareResult < 0 { + node = node.left + continue + } + if compareResult > 0 { + node = node.right + continue + } + + // The key exists. + return node + } + + // A nil node was reached which means the key does not exist. + return nil +} + +// Has returns whether or not the passed key exists. +func (t *Immutable) Has(key []byte) bool { + if node := t.get(key); node != nil { + return true + } + return false +} + +// Get returns the value for the passed key. The function will return nil when +// the key does not exist. +func (t *Immutable) Get(key []byte) []byte { + if node := t.get(key); node != nil { + return node.value + } + return nil +} + +// Put inserts the passed key/value pair. +func (t *Immutable) Put(key, value []byte) *Immutable { + // Use an empty byte slice for the value when none was provided. This + // ultimately allows key existence to be determined from the value since + // an empty byte slice is distinguishable from nil. + if value == nil { + value = emptySlice + } + + // The node is the root of the tree if there isn't already one. + if t.root == nil { + root := newTreapNode(key, value, rand.Int()) + return newImmutable(root, 1, nodeSize(root)) + } + + // Find the binary tree insertion point and construct a replaced list of + // parents while doing so. 
This is done because this is an immutable + // data structure so regardless of where in the treap the new key/value + // pair ends up, all ancestors up to and including the root need to be + // replaced. + // + // When the key matches an entry already in the treap, replace the node + // with a new one that has the new value set and return. + var parents parentStack + var compareResult int + for node := t.root; node != nil; { + // Clone the node and link its parent to it if needed. + nodeCopy := cloneTreapNode(node) + if oldParent := parents.At(0); oldParent != nil { + if oldParent.left == node { + oldParent.left = nodeCopy + } else { + oldParent.right = nodeCopy + } + } + parents.Push(nodeCopy) + + // Traverse left or right depending on the result of comparing + // the keys. + compareResult = bytes.Compare(key, node.key) + if compareResult < 0 { + node = node.left + continue + } + if compareResult > 0 { + node = node.right + continue + } + + // The key already exists, so update its value. + nodeCopy.value = value + + // Return new immutable treap with the replaced node and + // ancestors up to and including the root of the tree. + newRoot := parents.At(parents.Len() - 1) + newTotalSize := t.totalSize - uint64(len(node.value)) + + uint64(len(value)) + return newImmutable(newRoot, t.count, newTotalSize) + } + + // Link the new node into the binary tree in the correct position. + node := newTreapNode(key, value, rand.Int()) + parent := parents.At(0) + if compareResult < 0 { + parent.left = node + } else { + parent.right = node + } + + // Perform any rotations needed to maintain the min-heap and replace + // the ancestors up to and including the tree root. + newRoot := parents.At(parents.Len() - 1) + for parents.Len() > 0 { + // There is nothing left to do when the node's priority is + // greater than or equal to its parent's priority. + parent = parents.Pop() + if node.priority >= parent.priority { + break + } + + // Perform a right rotation if the node is on the left side or + // a left rotation if the node is on the right side. + if parent.left == node { + node.right, parent.left = parent, node.right + } else { + node.left, parent.right = parent, node.left + } + + // Either set the new root of the tree when there is no + // grandparent or relink the grandparent to the node based on + // which side the old parent the node is replacing was on. + grandparent := parents.At(0) + if grandparent == nil { + newRoot = node + } else if grandparent.left == parent { + grandparent.left = node + } else { + grandparent.right = node + } + } + + return newImmutable(newRoot, t.count+1, t.totalSize+nodeSize(node)) +} + +// Delete removes the passed key from the treap and returns the resulting treap +// if it exists. The original immutable treap is returned if the key does not +// exist. +func (t *Immutable) Delete(key []byte) *Immutable { + // Find the node for the key while constructing a list of parents while + // doing so. + var parents parentStack + var delNode *treapNode + for node := t.root; node != nil; { + parents.Push(node) + + // Traverse left or right depending on the result of the + // comparison. + compareResult := bytes.Compare(key, node.key) + if compareResult < 0 { + node = node.left + continue + } + if compareResult > 0 { + node = node.right + continue + } + + // The key exists. + delNode = node + break + } + + // There is nothing to do if the key does not exist. 
+ if delNode == nil { + return t + } + + // When the only node in the tree is the root node and it is the one + // being deleted, there is nothing else to do besides removing it. + parent := parents.At(1) + if parent == nil && delNode.left == nil && delNode.right == nil { + return newImmutable(nil, 0, 0) + } + + // Construct a replaced list of parents and the node to delete itself. + // This is done because this is an immutable data structure and + // therefore all ancestors of the node that will be deleted, up to and + // including the root, need to be replaced. + var newParents parentStack + for i := parents.Len(); i > 0; i-- { + node := parents.At(i - 1) + nodeCopy := cloneTreapNode(node) + if oldParent := newParents.At(0); oldParent != nil { + if oldParent.left == node { + oldParent.left = nodeCopy + } else { + oldParent.right = nodeCopy + } + } + newParents.Push(nodeCopy) + } + delNode = newParents.Pop() + parent = newParents.At(0) + + // Perform rotations to move the node to delete to a leaf position while + // maintaining the min-heap while replacing the modified children. + var child *treapNode + newRoot := newParents.At(newParents.Len() - 1) + for delNode.left != nil || delNode.right != nil { + // Choose the child with the higher priority. + var isLeft bool + if delNode.left == nil { + child = delNode.right + } else if delNode.right == nil { + child = delNode.left + isLeft = true + } else if delNode.left.priority >= delNode.right.priority { + child = delNode.left + isLeft = true + } else { + child = delNode.right + } + + // Rotate left or right depending on which side the child node + // is on. This has the effect of moving the node to delete + // towards the bottom of the tree while maintaining the + // min-heap. + child = cloneTreapNode(child) + if isLeft { + child.right, delNode.left = delNode, child.right + } else { + child.left, delNode.right = delNode, child.left + } + + // Either set the new root of the tree when there is no + // grandparent or relink the grandparent to the node based on + // which side the old parent the node is replacing was on. + // + // Since the node to be deleted was just moved down a level, the + // new grandparent is now the current parent and the new parent + // is the current child. + if parent == nil { + newRoot = child + } else if parent.left == delNode { + parent.left = child + } else { + parent.right = child + } + + // The parent for the node to delete is now what was previously + // its child. + parent = child + } + + // Delete the node, which is now a leaf node, by disconnecting it from + // its parent. + if parent.right == delNode { + parent.right = nil + } else { + parent.left = nil + } + + return newImmutable(newRoot, t.count-1, t.totalSize-nodeSize(delNode)) +} + +// ForEach invokes the passed function with every key/value pair in the treap +// in ascending order. +func (t *Immutable) ForEach(fn func(k, v []byte) bool) { + // Add the root node and all children to the left of it to the list of + // nodes to traverse and loop until they, and all of their child nodes, + // have been traversed. + var parents parentStack + for node := t.root; node != nil; node = node.left { + parents.Push(node) + } + for parents.Len() > 0 { + node := parents.Pop() + if !fn(node.key, node.value) { + return + } + + // Extend the nodes to traverse by all children to the left of + // the current node's right child. 
+ for node := node.right; node != nil; node = node.left { + parents.Push(node) + } + } +} + +// NewImmutable returns a new empty immutable treap ready for use. See the +// documentation for the Immutable structure for more details. +func NewImmutable() *Immutable { + return &Immutable{} +} diff --git a/database2/internal/treap/immutable_test.go b/database2/internal/treap/immutable_test.go new file mode 100644 index 00000000..176a2c46 --- /dev/null +++ b/database2/internal/treap/immutable_test.go @@ -0,0 +1,500 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package treap + +import ( + "bytes" + "crypto/sha256" + "testing" +) + +// TestImmutableEmpty ensures calling functions on an empty immutable treap +// works as expected. +func TestImmutableEmpty(t *testing.T) { + t.Parallel() + + // Ensure the treap length is the expected value. + testTreap := NewImmutable() + if gotLen := testTreap.Len(); gotLen != 0 { + t.Fatalf("Len: unexpected length - got %d, want %d", gotLen, 0) + } + + // Ensure the reported size is 0. + if gotSize := testTreap.Size(); gotSize != 0 { + t.Fatalf("Size: unexpected byte size - got %d, want 0", + gotSize) + } + + // Ensure there are no errors with requesting keys from an empty treap. + key := serializeUint32(0) + if gotVal := testTreap.Has(key); gotVal != false { + t.Fatalf("Has: unexpected result - got %v, want false", gotVal) + } + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get: unexpected result - got %x, want nil", gotVal) + } + + // Ensure there are no panics when deleting keys from an empty treap. + testTreap.Delete(key) + + // Ensure the number of keys iterated by ForEach on an empty treap is + // zero. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + numIterated++ + return true + }) + if numIterated != 0 { + t.Fatalf("ForEach: unexpected iterate count - got %d, want 0", + numIterated) + } +} + +// TestImmutableSequential ensures that putting keys into an immutable treap in +// sequential order works as expected. +func TestImmutableSequential(t *testing.T) { + t.Parallel() + + // Insert a bunch of sequential keys while checking several of the treap + // functions work as expected. + expectedSize := uint64(0) + numItems := 1000 + testTreap := NewImmutable() + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + testTreap = testTreap.Put(key, key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != i+1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, i+1) + } + + // Ensure the treap has the key. + if !testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is not in treap", i, key) + } + + // Get the key from the treap and ensure it is the expected + // value. + if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, key) { + t.Fatalf("Get #%d: unexpected value - got %x, want %x", + i, gotVal, key) + } + + // Ensure the expected size is reported. + expectedSize += (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } + + // Ensure the all keys are iterated by ForEach in order. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + wantKey := serializeUint32(uint32(numIterated)) + + // Ensure the key is as expected. 
+ if !bytes.Equal(k, wantKey) { + t.Fatalf("ForEach #%d: unexpected key - got %x, want %x", + numIterated, k, wantKey) + } + + // Ensure the value is as expected. + if !bytes.Equal(v, wantKey) { + t.Fatalf("ForEach #%d: unexpected value - got %x, want %x", + numIterated, v, wantKey) + } + + numIterated++ + return true + }) + + // Ensure all items were iterated. + if numIterated != numItems { + t.Fatalf("ForEach: unexpected iterate count - got %d, want %d", + numIterated, numItems) + } + + // Delete the keys one-by-one while checking several of the treap + // functions work as expected. + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + testTreap = testTreap.Delete(key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != numItems-i-1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, numItems-i-1) + } + + // Ensure the treap no longer has the key. + if testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is in treap", i, key) + } + + // Get the key that no longer exists from the treap and ensure + // it is nil. + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get #%d: unexpected value - got %x, want nil", + i, gotVal) + } + + // Ensure the expected size is reported. + expectedSize -= (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } +} + +// TestImmutableReverseSequential ensures that putting keys into an immutable +// treap in reverse sequential order works as expected. +func TestImmutableReverseSequential(t *testing.T) { + t.Parallel() + + // Insert a bunch of sequential keys while checking several of the treap + // functions work as expected. + expectedSize := uint64(0) + numItems := 1000 + testTreap := NewImmutable() + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(numItems - i - 1)) + testTreap = testTreap.Put(key, key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != i+1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, i+1) + } + + // Ensure the treap has the key. + if !testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is not in treap", i, key) + } + + // Get the key from the treap and ensure it is the expected + // value. + if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, key) { + t.Fatalf("Get #%d: unexpected value - got %x, want %x", + i, gotVal, key) + } + + // Ensure the expected size is reported. + expectedSize += (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } + + // Ensure the all keys are iterated by ForEach in order. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + wantKey := serializeUint32(uint32(numIterated)) + + // Ensure the key is as expected. + if !bytes.Equal(k, wantKey) { + t.Fatalf("ForEach #%d: unexpected key - got %x, want %x", + numIterated, k, wantKey) + } + + // Ensure the value is as expected. + if !bytes.Equal(v, wantKey) { + t.Fatalf("ForEach #%d: unexpected value - got %x, want %x", + numIterated, v, wantKey) + } + + numIterated++ + return true + }) + + // Ensure all items were iterated. 
+ if numIterated != numItems { + t.Fatalf("ForEach: unexpected iterate count - got %d, want %d", + numIterated, numItems) + } + + // Delete the keys one-by-one while checking several of the treap + // functions work as expected. + for i := 0; i < numItems; i++ { + // Intentionally use the reverse order they were inserted here. + key := serializeUint32(uint32(i)) + testTreap = testTreap.Delete(key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != numItems-i-1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, numItems-i-1) + } + + // Ensure the treap no longer has the key. + if testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is in treap", i, key) + } + + // Get the key that no longer exists from the treap and ensure + // it is nil. + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get #%d: unexpected value - got %x, want nil", + i, gotVal) + } + + // Ensure the expected size is reported. + expectedSize -= (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } +} + +// TestImmutableUnordered ensures that putting keys into an immutable treap in +// no paritcular order works as expected. +func TestImmutableUnordered(t *testing.T) { + t.Parallel() + + // Insert a bunch of out-of-order keys while checking several of the + // treap functions work as expected. + expectedSize := uint64(0) + numItems := 1000 + testTreap := NewImmutable() + for i := 0; i < numItems; i++ { + // Hash the serialized int to generate out-of-order keys. + hash := sha256.Sum256(serializeUint32(uint32(i))) + key := hash[:] + testTreap = testTreap.Put(key, key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != i+1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, i+1) + } + + // Ensure the treap has the key. + if !testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is not in treap", i, key) + } + + // Get the key from the treap and ensure it is the expected + // value. + if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, key) { + t.Fatalf("Get #%d: unexpected value - got %x, want %x", + i, gotVal, key) + } + + // Ensure the expected size is reported. + expectedSize += nodeFieldsSize + uint64(len(key)+len(key)) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } + + // Delete the keys one-by-one while checking several of the treap + // functions work as expected. + for i := 0; i < numItems; i++ { + // Hash the serialized int to generate out-of-order keys. + hash := sha256.Sum256(serializeUint32(uint32(i))) + key := hash[:] + testTreap = testTreap.Delete(key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != numItems-i-1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, numItems-i-1) + } + + // Ensure the treap no longer has the key. + if testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is in treap", i, key) + } + + // Get the key that no longer exists from the treap and ensure + // it is nil. + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get #%d: unexpected value - got %x, want nil", + i, gotVal) + } + + // Ensure the expected size is reported. 
+ expectedSize -= (nodeFieldsSize + 64) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } +} + +// TestImmutableDuplicatePut ensures that putting a duplicate key into an +// immutable treap works as expected. +func TestImmutableDuplicatePut(t *testing.T) { + t.Parallel() + + expectedVal := []byte("testval") + expectedSize := uint64(0) + numItems := 1000 + testTreap := NewImmutable() + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + testTreap = testTreap.Put(key, key) + expectedSize += nodeFieldsSize + uint64(len(key)+len(key)) + + // Put a duplicate key with the the expected final value. + testTreap = testTreap.Put(key, expectedVal) + + // Ensure the key still exists and is the new value. + if gotVal := testTreap.Has(key); gotVal != true { + t.Fatalf("Has: unexpected result - got %v, want false", + gotVal) + } + if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, expectedVal) { + t.Fatalf("Get: unexpected result - got %x, want %x", + gotVal, expectedVal) + } + + // Ensure the expected size is reported. + expectedSize -= uint64(len(key)) + expectedSize += uint64(len(expectedVal)) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size: unexpected byte size - got %d, want %d", + gotSize, expectedSize) + } + } +} + +// TestImmutableNilValue ensures that putting a nil value into an immutable +// treap results in a key being added with an empty byte slice. +func TestImmutableNilValue(t *testing.T) { + t.Parallel() + + key := serializeUint32(0) + + // Put the key with a nil value. + testTreap := NewImmutable() + testTreap = testTreap.Put(key, nil) + + // Ensure the key exists and is an empty byte slice. + if gotVal := testTreap.Has(key); gotVal != true { + t.Fatalf("Has: unexpected result - got %v, want false", gotVal) + } + if gotVal := testTreap.Get(key); gotVal == nil { + t.Fatalf("Get: unexpected result - got nil, want empty slice") + } + if gotVal := testTreap.Get(key); len(gotVal) != 0 { + t.Fatalf("Get: unexpected result - got %x, want empty slice", + gotVal) + } +} + +// TestImmutableForEachStopIterator ensures that returning false from the ForEach +// callback on an immutable treap stops iteration early. +func TestImmutableForEachStopIterator(t *testing.T) { + t.Parallel() + + // Insert a few keys. + numItems := 10 + testTreap := NewImmutable() + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + testTreap = testTreap.Put(key, key) + } + + // Ensure ForEach exits early on false return by caller. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + numIterated++ + if numIterated == numItems/2 { + return false + } + return true + }) + if numIterated != numItems/2 { + t.Fatalf("ForEach: unexpected iterate count - got %d, want %d", + numIterated, numItems/2) + } +} + +// TestImmutableSnapshot ensures that immutable treaps are actually immutable by +// keeping a reference to the previous treap, performing a mutation, and then +// ensuring the referenced treap does not have the mutation applied. +func TestImmutableSnapshot(t *testing.T) { + t.Parallel() + + // Insert a bunch of sequential keys while checking several of the treap + // functions work as expected. 
+ expectedSize := uint64(0) + numItems := 1000 + testTreap := NewImmutable() + for i := 0; i < numItems; i++ { + treapSnap := testTreap + + key := serializeUint32(uint32(i)) + testTreap = testTreap.Put(key, key) + + // Ensure the length of the treap snapshot is the expected + // value. + if gotLen := treapSnap.Len(); gotLen != i { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, i) + } + + // Ensure the treap snapshot does not have the key. + if treapSnap.Has(key) { + t.Fatalf("Has #%d: key %q is in treap", i, key) + } + + // Get the key that doesn't exist in the treap snapshot and + // ensure it is nil. + if gotVal := treapSnap.Get(key); gotVal != nil { + t.Fatalf("Get #%d: unexpected value - got %x, want nil", + i, gotVal) + } + + // Ensure the expected size is reported. + if gotSize := treapSnap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + expectedSize += (nodeFieldsSize + 8) + } + + // Delete the keys one-by-one while checking several of the treap + // functions work as expected. + for i := 0; i < numItems; i++ { + treapSnap := testTreap + + key := serializeUint32(uint32(i)) + testTreap = testTreap.Delete(key) + + // Ensure the length of the treap snapshot is the expected + // value. + if gotLen := treapSnap.Len(); gotLen != numItems-i { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, numItems-i) + } + + // Ensure the treap snapshot still has the key. + if !treapSnap.Has(key) { + t.Fatalf("Has #%d: key %q is not in treap", i, key) + } + + // Get the key from the treap snapshot and ensure it is still + // the expected value. + if gotVal := treapSnap.Get(key); !bytes.Equal(gotVal, key) { + t.Fatalf("Get #%d: unexpected value - got %x, want %x", + i, gotVal, key) + } + + // Ensure the expected size is reported. + if gotSize := treapSnap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + expectedSize -= (nodeFieldsSize + 8) + } +} diff --git a/database2/internal/treap/mutable.go b/database2/internal/treap/mutable.go new file mode 100644 index 00000000..84ebe671 --- /dev/null +++ b/database2/internal/treap/mutable.go @@ -0,0 +1,278 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package treap + +import ( + "bytes" + "math/rand" +) + +// Mutable represents a treap data structure which is used to hold ordered +// key/value pairs using a combination of binary search tree and heap semantics. +// It is a self-organizing and randomized data structure that doesn't require +// complex operations to maintain balance. Search, insert, and delete +// operations are all O(log n). +type Mutable struct { + root *treapNode + count int + + // totalSize is the best estimate of the total size of of all data in + // the treap including the keys, values, and node sizes. + totalSize uint64 +} + +// Len returns the number of items stored in the treap. +func (t *Mutable) Len() int { + return t.count +} + +// Size returns a best estimate of the total number of bytes the treap is +// consuming including all of the fields used to represent the nodes as well as +// the size of the keys and values. Shared values are not detected, so the +// returned size assumes each value is pointing to different memory. 
+func (t *Mutable) Size() uint64 { + return t.totalSize +} + +// get returns the treap node that contains the passed key and its parent. When +// the found node is the root of the tree, the parent will be nil. When the key +// does not exist, both the node and the parent will be nil. +func (t *Mutable) get(key []byte) (*treapNode, *treapNode) { + var parent *treapNode + for node := t.root; node != nil; { + // Traverse left or right depending on the result of the + // comparison. + compareResult := bytes.Compare(key, node.key) + if compareResult < 0 { + parent = node + node = node.left + continue + } + if compareResult > 0 { + parent = node + node = node.right + continue + } + + // The key exists. + return node, parent + } + + // A nil node was reached which means the key does not exist. + return nil, nil +} + +// Has returns whether or not the passed key exists. +func (t *Mutable) Has(key []byte) bool { + if node, _ := t.get(key); node != nil { + return true + } + return false +} + +// Get returns the value for the passed key. The function will return nil when +// the key does not exist. +func (t *Mutable) Get(key []byte) []byte { + if node, _ := t.get(key); node != nil { + return node.value + } + return nil +} + +// relinkGrandparent relinks the node into the treap after it has been rotated +// by changing the passed grandparent's left or right pointer, depending on +// where the old parent was, to point at the passed node. Otherwise, when there +// is no grandparent, it means the node is now the root of the tree, so update +// it accordingly. +func (t *Mutable) relinkGrandparent(node, parent, grandparent *treapNode) { + // The node is now the root of the tree when there is no grandparent. + if grandparent == nil { + t.root = node + return + } + + // Relink the grandparent's left or right pointer based on which side + // the old parent was. + if grandparent.left == parent { + grandparent.left = node + } else { + grandparent.right = node + } +} + +// Put inserts the passed key/value pair. +func (t *Mutable) Put(key, value []byte) { + // Use an empty byte slice for the value when none was provided. This + // ultimately allows key existence to be determined from the value since + // an empty byte slice is distinguishable from nil. + if value == nil { + value = emptySlice + } + + // The node is the root of the tree if there isn't already one. + if t.root == nil { + node := newTreapNode(key, value, rand.Int()) + t.count = 1 + t.totalSize = nodeSize(node) + t.root = node + return + } + + // Find the binary tree insertion point and construct a list of parents + // while doing so. When the key matches an entry already in the treap, + // just update its value and return. + var parents parentStack + var compareResult int + for node := t.root; node != nil; { + parents.Push(node) + compareResult = bytes.Compare(key, node.key) + if compareResult < 0 { + node = node.left + continue + } + if compareResult > 0 { + node = node.right + continue + } + + // The key already exists, so update its value. + t.totalSize -= uint64(len(node.value)) + t.totalSize += uint64(len(value)) + node.value = value + return + } + + // Link the new node into the binary tree in the correct position. + node := newTreapNode(key, value, rand.Int()) + t.count++ + t.totalSize += nodeSize(node) + parent := parents.At(0) + if compareResult < 0 { + parent.left = node + } else { + parent.right = node + } + + // Perform any rotations needed to maintain the min-heap. 
+ for parents.Len() > 0 { + // There is nothing left to do when the node's priority is + // greater than or equal to its parent's priority. + parent = parents.Pop() + if node.priority >= parent.priority { + break + } + + // Perform a right rotation if the node is on the left side or + // a left rotation if the node is on the right side. + if parent.left == node { + node.right, parent.left = parent, node.right + } else { + node.left, parent.right = parent, node.left + } + t.relinkGrandparent(node, parent, parents.At(0)) + } +} + +// Delete removes the passed key if it exists. +func (t *Mutable) Delete(key []byte) { + // Find the node for the key along with its parent. There is nothing to + // do if the key does not exist. + node, parent := t.get(key) + if node == nil { + return + } + + // When the only node in the tree is the root node and it is the one + // being deleted, there is nothing else to do besides removing it. + if parent == nil && node.left == nil && node.right == nil { + t.root = nil + t.count = 0 + t.totalSize = 0 + return + } + + // Perform rotations to move the node to delete to a leaf position while + // maintaining the min-heap. + var isLeft bool + var child *treapNode + for node.left != nil || node.right != nil { + // Choose the child with the higher priority. + if node.left == nil { + child = node.right + isLeft = false + } else if node.right == nil { + child = node.left + isLeft = true + } else if node.left.priority >= node.right.priority { + child = node.left + isLeft = true + } else { + child = node.right + isLeft = false + } + + // Rotate left or right depending on which side the child node + // is on. This has the effect of moving the node to delete + // towards the bottom of the tree while maintaining the + // min-heap. + if isLeft { + child.right, node.left = node, child.right + } else { + child.left, node.right = node, child.left + } + t.relinkGrandparent(child, node, parent) + + // The parent for the node to delete is now what was previously + // its child. + parent = child + } + + // Delete the node, which is now a leaf node, by disconnecting it from + // its parent. + if parent.right == node { + parent.right = nil + } else { + parent.left = nil + } + t.count-- + t.totalSize -= nodeSize(node) +} + +// ForEach invokes the passed function with every key/value pair in the treap +// in ascending order. +func (t *Mutable) ForEach(fn func(k, v []byte) bool) { + // Add the root node and all children to the left of it to the list of + // nodes to traverse and loop until they, and all of their child nodes, + // have been traversed. + var parents parentStack + for node := t.root; node != nil; node = node.left { + parents.Push(node) + } + for parents.Len() > 0 { + node := parents.Pop() + if !fn(node.key, node.value) { + return + } + + // Extend the nodes to traverse by all children to the left of + // the current node's right child. + for node := node.right; node != nil; node = node.left { + parents.Push(node) + } + } +} + +// Reset efficiently removes all items in the treap. +func (t *Mutable) Reset() { + t.count = 0 + t.totalSize = 0 + t.root = nil +} + +// NewMutable returns a new empty mutable treap ready for use. See the +// documentation for the Mutable structure for more details. 
+func NewMutable() *Mutable { + return &Mutable{} +} diff --git a/database2/internal/treap/mutable_test.go b/database2/internal/treap/mutable_test.go new file mode 100644 index 00000000..c22ced0e --- /dev/null +++ b/database2/internal/treap/mutable_test.go @@ -0,0 +1,468 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package treap + +import ( + "bytes" + "crypto/sha256" + "testing" +) + +// TestMutableEmpty ensures calling functions on an empty mutable treap works as +// expected. +func TestMutableEmpty(t *testing.T) { + t.Parallel() + + // Ensure the treap length is the expected value. + testTreap := NewMutable() + if gotLen := testTreap.Len(); gotLen != 0 { + t.Fatalf("Len: unexpected length - got %d, want %d", gotLen, 0) + } + + // Ensure the reported size is 0. + if gotSize := testTreap.Size(); gotSize != 0 { + t.Fatalf("Size: unexpected byte size - got %d, want 0", + gotSize) + } + + // Ensure there are no errors with requesting keys from an empty treap. + key := serializeUint32(0) + if gotVal := testTreap.Has(key); gotVal != false { + t.Fatalf("Has: unexpected result - got %v, want false", gotVal) + } + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get: unexpected result - got %x, want nil", gotVal) + } + + // Ensure there are no panics when deleting keys from an empty treap. + testTreap.Delete(key) + + // Ensure the number of keys iterated by ForEach on an empty treap is + // zero. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + numIterated++ + return true + }) + if numIterated != 0 { + t.Fatalf("ForEach: unexpected iterate count - got %d, want 0", + numIterated) + } +} + +// TestMutableReset ensures that resetting an existing mutable treap works as +// expected. +func TestMutableReset(t *testing.T) { + t.Parallel() + + // Insert a few keys. + numItems := 10 + testTreap := NewMutable() + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + testTreap.Put(key, key) + } + + // Reset it. + testTreap.Reset() + + // Ensure the treap length is now 0. + if gotLen := testTreap.Len(); gotLen != 0 { + t.Fatalf("Len: unexpected length - got %d, want %d", gotLen, 0) + } + + // Ensure the reported size is now 0. + if gotSize := testTreap.Size(); gotSize != 0 { + t.Fatalf("Size: unexpected byte size - got %d, want 0", + gotSize) + } + + // Ensure the treap no longer has any of the keys. + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + + // Ensure the treap no longer has the key. + if testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is in treap", i, key) + } + + // Get the key that no longer exists from the treap and ensure + // it is nil. + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get #%d: unexpected value - got %x, want nil", + i, gotVal) + } + } + + // Ensure the number of keys iterated by ForEach is zero. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + numIterated++ + return true + }) + if numIterated != 0 { + t.Fatalf("ForEach: unexpected iterate count - got %d, want 0", + numIterated) + } +} + +// TestMutableSequential ensures that putting keys into a mutable treap in +// sequential order works as expected. +func TestMutableSequential(t *testing.T) { + t.Parallel() + + // Insert a bunch of sequential keys while checking several of the treap + // functions work as expected. 
+ expectedSize := uint64(0) + numItems := 1000 + testTreap := NewMutable() + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + testTreap.Put(key, key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != i+1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, i+1) + } + + // Ensure the treap has the key. + if !testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is not in treap", i, key) + } + + // Get the key from the treap and ensure it is the expected + // value. + if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, key) { + t.Fatalf("Get #%d: unexpected value - got %x, want %x", + i, gotVal, key) + } + + // Ensure the expected size is reported. + expectedSize += (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } + + // Ensure the all keys are iterated by ForEach in order. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + wantKey := serializeUint32(uint32(numIterated)) + + // Ensure the key is as expected. + if !bytes.Equal(k, wantKey) { + t.Fatalf("ForEach #%d: unexpected key - got %x, want %x", + numIterated, k, wantKey) + } + + // Ensure the value is as expected. + if !bytes.Equal(v, wantKey) { + t.Fatalf("ForEach #%d: unexpected value - got %x, want %x", + numIterated, v, wantKey) + } + + numIterated++ + return true + }) + + // Ensure all items were iterated. + if numIterated != numItems { + t.Fatalf("ForEach: unexpected iterate count - got %d, want %d", + numIterated, numItems) + } + + // Delete the keys one-by-one while checking several of the treap + // functions work as expected. + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(i)) + testTreap.Delete(key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != numItems-i-1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, numItems-i-1) + } + + // Ensure the treap no longer has the key. + if testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is in treap", i, key) + } + + // Get the key that no longer exists from the treap and ensure + // it is nil. + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get #%d: unexpected value - got %x, want nil", + i, gotVal) + } + + // Ensure the expected size is reported. + expectedSize -= (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } +} + +// TestMutableReverseSequential ensures that putting keys into a mutable treap +// in reverse sequential order works as expected. +func TestMutableReverseSequential(t *testing.T) { + t.Parallel() + + // Insert a bunch of sequential keys while checking several of the treap + // functions work as expected. + expectedSize := uint64(0) + numItems := 1000 + testTreap := NewMutable() + for i := 0; i < numItems; i++ { + key := serializeUint32(uint32(numItems - i - 1)) + testTreap.Put(key, key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != i+1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, i+1) + } + + // Ensure the treap has the key. + if !testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is not in treap", i, key) + } + + // Get the key from the treap and ensure it is the expected + // value. 
+ if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, key) { + t.Fatalf("Get #%d: unexpected value - got %x, want %x", + i, gotVal, key) + } + + // Ensure the expected size is reported. + expectedSize += (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } + + // Ensure the all keys are iterated by ForEach in order. + var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + wantKey := serializeUint32(uint32(numIterated)) + + // Ensure the key is as expected. + if !bytes.Equal(k, wantKey) { + t.Fatalf("ForEach #%d: unexpected key - got %x, want %x", + numIterated, k, wantKey) + } + + // Ensure the value is as expected. + if !bytes.Equal(v, wantKey) { + t.Fatalf("ForEach #%d: unexpected value - got %x, want %x", + numIterated, v, wantKey) + } + + numIterated++ + return true + }) + + // Ensure all items were iterated. + if numIterated != numItems { + t.Fatalf("ForEach: unexpected iterate count - got %d, want %d", + numIterated, numItems) + } + + // Delete the keys one-by-one while checking several of the treap + // functions work as expected. + for i := 0; i < numItems; i++ { + // Intentionally use the reverse order they were inserted here. + key := serializeUint32(uint32(i)) + testTreap.Delete(key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != numItems-i-1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, numItems-i-1) + } + + // Ensure the treap no longer has the key. + if testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is in treap", i, key) + } + + // Get the key that no longer exists from the treap and ensure + // it is nil. + if gotVal := testTreap.Get(key); gotVal != nil { + t.Fatalf("Get #%d: unexpected value - got %x, want nil", + i, gotVal) + } + + // Ensure the expected size is reported. + expectedSize -= (nodeFieldsSize + 8) + if gotSize := testTreap.Size(); gotSize != expectedSize { + t.Fatalf("Size #%d: unexpected byte size - got %d, "+ + "want %d", i, gotSize, expectedSize) + } + } +} + +// TestMutableUnordered ensures that putting keys into a mutable treap in no +// paritcular order works as expected. +func TestMutableUnordered(t *testing.T) { + t.Parallel() + + // Insert a bunch of out-of-order keys while checking several of the + // treap functions work as expected. + expectedSize := uint64(0) + numItems := 1000 + testTreap := NewMutable() + for i := 0; i < numItems; i++ { + // Hash the serialized int to generate out-of-order keys. + hash := sha256.Sum256(serializeUint32(uint32(i))) + key := hash[:] + testTreap.Put(key, key) + + // Ensure the treap length is the expected value. + if gotLen := testTreap.Len(); gotLen != i+1 { + t.Fatalf("Len #%d: unexpected length - got %d, want %d", + i, gotLen, i+1) + } + + // Ensure the treap has the key. + if !testTreap.Has(key) { + t.Fatalf("Has #%d: key %q is not in treap", i, key) + } + + // Get the key from the treap and ensure it is the expected + // value. + if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, key) { + t.Fatalf("Get #%d: unexpected value - got %x, want %x", + i, gotVal, key) + } + + // Ensure the expected size is reported. 
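+		// The keys and values here are 32-byte sha256 sums, so each
+		// insertion is expected to grow the reported size by
+		// nodeFieldsSize plus 64 bytes.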
+		expectedSize += nodeFieldsSize + uint64(len(key)+len(key))
+		if gotSize := testTreap.Size(); gotSize != expectedSize {
+			t.Fatalf("Size #%d: unexpected byte size - got %d, "+
+				"want %d", i, gotSize, expectedSize)
+		}
+	}
+
+	// Delete the keys one-by-one while checking several of the treap
+	// functions work as expected.
+	for i := 0; i < numItems; i++ {
+		// Hash the serialized int to generate out-of-order keys.
+		hash := sha256.Sum256(serializeUint32(uint32(i)))
+		key := hash[:]
+		testTreap.Delete(key)
+
+		// Ensure the treap length is the expected value.
+		if gotLen := testTreap.Len(); gotLen != numItems-i-1 {
+			t.Fatalf("Len #%d: unexpected length - got %d, want %d",
+				i, gotLen, numItems-i-1)
+		}
+
+		// Ensure the treap no longer has the key.
+		if testTreap.Has(key) {
+			t.Fatalf("Has #%d: key %q is in treap", i, key)
+		}
+
+		// Get the key that no longer exists from the treap and ensure
+		// it is nil.
+		if gotVal := testTreap.Get(key); gotVal != nil {
+			t.Fatalf("Get #%d: unexpected value - got %x, want nil",
+				i, gotVal)
+		}
+
+		// Ensure the expected size is reported.
+		expectedSize -= (nodeFieldsSize + 64)
+		if gotSize := testTreap.Size(); gotSize != expectedSize {
+			t.Fatalf("Size #%d: unexpected byte size - got %d, "+
+				"want %d", i, gotSize, expectedSize)
+		}
+	}
+}
+
+// TestMutableDuplicatePut ensures that putting a duplicate key into a mutable
+// treap updates the existing value.
+func TestMutableDuplicatePut(t *testing.T) {
+	t.Parallel()
+
+	key := serializeUint32(0)
+	val := []byte("testval")
+
+	// Put the key twice with the second put being the expected final value.
+	testTreap := NewMutable()
+	testTreap.Put(key, key)
+	testTreap.Put(key, val)
+
+	// Ensure the key still exists and is the new value.
+	if gotVal := testTreap.Has(key); gotVal != true {
+		t.Fatalf("Has: unexpected result - got %v, want true", gotVal)
+	}
+	if gotVal := testTreap.Get(key); !bytes.Equal(gotVal, val) {
+		t.Fatalf("Get: unexpected result - got %x, want %x", gotVal, val)
+	}
+
+	// Ensure the expected size is reported.
+	expectedSize := uint64(nodeFieldsSize + len(key) + len(val))
+	if gotSize := testTreap.Size(); gotSize != expectedSize {
+		t.Fatalf("Size: unexpected byte size - got %d, want %d",
+			gotSize, expectedSize)
+	}
+}
+
+// TestMutableNilValue ensures that putting a nil value into a mutable treap
+// results in a key being added with an empty byte slice.
+func TestMutableNilValue(t *testing.T) {
+	t.Parallel()
+
+	key := serializeUint32(0)
+
+	// Put the key with a nil value.
+	testTreap := NewMutable()
+	testTreap.Put(key, nil)
+
+	// Ensure the key exists and is an empty byte slice.
+	if gotVal := testTreap.Has(key); gotVal != true {
+		t.Fatalf("Has: unexpected result - got %v, want true", gotVal)
+	}
+	if gotVal := testTreap.Get(key); gotVal == nil {
+		t.Fatalf("Get: unexpected result - got nil, want empty slice")
+	}
+	if gotVal := testTreap.Get(key); len(gotVal) != 0 {
+		t.Fatalf("Get: unexpected result - got %x, want empty slice",
+			gotVal)
+	}
+}
+
+// TestMutableForEachStopIterator ensures that returning false from the ForEach
+// callback of a mutable treap stops iteration early.
+func TestMutableForEachStopIterator(t *testing.T) {
+	t.Parallel()
+
+	// Insert a few keys.
+	numItems := 10
+	testTreap := NewMutable()
+	for i := 0; i < numItems; i++ {
+		key := serializeUint32(uint32(i))
+		testTreap.Put(key, key)
+	}
+
+	// Ensure ForEach exits early on false return by caller.
+ var numIterated int + testTreap.ForEach(func(k, v []byte) bool { + numIterated++ + if numIterated == numItems/2 { + return false + } + return true + }) + if numIterated != numItems/2 { + t.Fatalf("ForEach: unexpected iterate count - got %d, want %d", + numIterated, numItems/2) + } +} diff --git a/database2/internal/treap/treapiter.go b/database2/internal/treap/treapiter.go new file mode 100644 index 00000000..d6981aaf --- /dev/null +++ b/database2/internal/treap/treapiter.go @@ -0,0 +1,354 @@ +// Copyright (c) 2015-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package treap + +import "bytes" + +// Iterator represents an iterator for forwards and backwards iteration over +// the contents of a treap (mutable or immutable). +type Iterator struct { + t *Mutable // Mutable treap iterator is associated with or nil + root *treapNode // Root node of treap iterator is associated with + node *treapNode // The node the iterator is positioned at + parents parentStack // The stack of parents needed to iterate + isNew bool // Whether the iterator has been positioned + seekKey []byte // Used to handle dynamic updates for mutable treap + startKey []byte // Used to limit the iterator to a range + limitKey []byte // Used to limit the iterator to a range +} + +// limitIterator clears the current iterator node if it is outside of the range +// specified when the iterator was created. It returns whether the iterator is +// valid. +func (iter *Iterator) limitIterator() bool { + if iter.node == nil { + return false + } + + node := iter.node + if iter.startKey != nil && bytes.Compare(node.key, iter.startKey) < 0 { + iter.node = nil + return false + } + + if iter.limitKey != nil && bytes.Compare(node.key, iter.limitKey) >= 0 { + iter.node = nil + return false + } + + return true +} + +// seek moves the iterator based on the provided key and flags. +// +// When the exact match flag is set, the iterator will either be moved to first +// key in the treap that exactly matches the provided key, or the one +// before/after it depending on the greater flag. +// +// When the exact match flag is NOT set, the iterator will be moved to the first +// key in the treap before/after the provided key depending on the greater flag. +// +// In all cases, the limits specified when the iterator was created are +// respected. +func (iter *Iterator) seek(key []byte, exactMatch bool, greater bool) bool { + iter.node = nil + iter.parents = parentStack{} + var selectedNodeDepth int + for node := iter.root; node != nil; { + iter.parents.Push(node) + + // Traverse left or right depending on the result of the + // comparison. Also, set the iterator to the node depending on + // the flags so the iterator is positioned properly when an + // exact match isn't found. + compareResult := bytes.Compare(key, node.key) + if compareResult < 0 { + if greater { + iter.node = node + selectedNodeDepth = iter.parents.Len() - 1 + } + node = node.left + continue + } + if compareResult > 0 { + if !greater { + iter.node = node + selectedNodeDepth = iter.parents.Len() - 1 + } + node = node.right + continue + } + + // The key is an exact match. Set the iterator and return now + // when the exact match flag is set. + if exactMatch { + iter.node = node + iter.parents.Pop() + return iter.limitIterator() + } + + // The key is an exact match, but the exact match is not set, so + // choose which direction to go based on whether the larger or + // smaller key was requested. 
+ if greater { + node = node.right + } else { + node = node.left + } + } + + // There was either no exact match or there was an exact match but the + // exact match flag was not set. In any case, the parent stack might + // need to be adjusted to only include the parents up to the selected + // node. Also, ensure the selected node's key does not exceed the + // allowed range of the iterator. + for i := iter.parents.Len(); i > selectedNodeDepth; i-- { + iter.parents.Pop() + } + return iter.limitIterator() +} + +// First moves the iterator to the first key/value pair. When there is only a +// single key/value pair both First and Last will point to the same pair. +// Returns false if there are no key/value pairs. +func (iter *Iterator) First() bool { + // Seek the start key if the iterator was created with one. This will + // result in either an exact match, the first greater key, or an + // exhausted iterator if no such key exists. + iter.isNew = false + if iter.startKey != nil { + return iter.seek(iter.startKey, true, true) + } + + // The smallest key is in the left-most node. + iter.parents = parentStack{} + for node := iter.root; node != nil; node = node.left { + if node.left == nil { + iter.node = node + return true + } + iter.parents.Push(node) + } + return false +} + +// Last moves the iterator to the last key/value pair. When there is only a +// single key/value pair both First and Last will point to the same pair. +// Returns false if there are no key/value pairs. +func (iter *Iterator) Last() bool { + // Seek the limit key if the iterator was created with one. This will + // result in the first key smaller than the limit key, or an exhausted + // iterator if no such key exists. + iter.isNew = false + if iter.limitKey != nil { + return iter.seek(iter.limitKey, false, false) + } + + // The highest key is in the right-most node. + iter.parents = parentStack{} + for node := iter.root; node != nil; node = node.right { + if node.right == nil { + iter.node = node + return true + } + iter.parents.Push(node) + } + return false +} + +// Next moves the iterator to the next key/value pair and returns false when the +// iterator is exhausted. When invoked on a newly created iterator it will +// position the iterator at the first item. +func (iter *Iterator) Next() bool { + if iter.isNew { + return iter.First() + } + + if iter.node == nil { + return false + } + + // Reseek the previous key without allowing for an exact match if a + // force seek was requested. This results in the key greater than the + // previous one or an exhausted iterator if there is no such key. + if seekKey := iter.seekKey; seekKey != nil { + iter.seekKey = nil + return iter.seek(seekKey, false, true) + } + + // When there is no right node walk the parents until the parent's right + // node is not equal to the previous child. This will be the next node. + if iter.node.right == nil { + parent := iter.parents.Pop() + for parent != nil && parent.right == iter.node { + iter.node = parent + parent = iter.parents.Pop() + } + iter.node = parent + return iter.limitIterator() + } + + // There is a right node, so the next node is the left-most node down + // the right sub-tree. + iter.parents.Push(iter.node) + iter.node = iter.node.right + for node := iter.node.left; node != nil; node = node.left { + iter.parents.Push(iter.node) + iter.node = node + } + return iter.limitIterator() +} + +// Prev moves the iterator to the previous key/value pair and returns false when +// the iterator is exhausted. 
When invoked on a newly created iterator it will +// position the iterator at the last item. +func (iter *Iterator) Prev() bool { + if iter.isNew { + return iter.Last() + } + + if iter.node == nil { + return false + } + + // Reseek the previous key without allowing for an exact match if a + // force seek was requested. This results in the key smaller than the + // previous one or an exhausted iterator if there is no such key. + if seekKey := iter.seekKey; seekKey != nil { + iter.seekKey = nil + return iter.seek(seekKey, false, false) + } + + // When there is no left node walk the parents until the parent's left + // node is not equal to the previous child. This will be the previous + // node. + for iter.node.left == nil { + parent := iter.parents.Pop() + for parent != nil && parent.left == iter.node { + iter.node = parent + parent = iter.parents.Pop() + } + iter.node = parent + return iter.limitIterator() + } + + // There is a left node, so the previous node is the right-most node + // down the left sub-tree. + iter.parents.Push(iter.node) + iter.node = iter.node.left + for node := iter.node.right; node != nil; node = node.right { + iter.parents.Push(iter.node) + iter.node = node + } + return iter.limitIterator() +} + +// Seek moves the iterator to the first key/value pair with a key that is +// greater than or equal to the given key and returns true if successful. +func (iter *Iterator) Seek(key []byte) bool { + iter.isNew = false + return iter.seek(key, true, true) +} + +// Key returns the key of the current key/value pair or nil when the iterator +// is exhausted. The caller should not modify the contents of the returned +// slice. +func (iter *Iterator) Key() []byte { + if iter.node == nil { + return nil + } + return iter.node.key +} + +// Value returns the value of the current key/value pair or nil when the +// iterator is exhausted. The caller should not modify the contents of the +// returned slice. +func (iter *Iterator) Value() []byte { + if iter.node == nil { + return nil + } + return iter.node.value +} + +// Valid indicates whether the iterator is positioned at a valid key/value pair. +// It will be considered invalid when the iterator is newly created or exhausted. +func (iter *Iterator) Valid() bool { + return iter.node != nil +} + +// ForceReseek notifies the iterator that the underlying mutable treap has been +// updated, so the next call to Prev or Next needs to reseek in order to allow +// the iterator to continue working properly. +// +// NOTE: Calling this function when the iterator is associated with an immutable +// treap has no effect as you would expect. +func (iter *Iterator) ForceReseek() { + // Nothing to do when the iterator is associated with an immutable + // treap. + if iter.t == nil { + return + } + + // Update the iterator root to the mutable treap root in case it + // changed. + iter.root = iter.t.root + + // Set the seek key to the current node. This will force the Next/Prev + // functions to reseek, and thus properly reconstruct the iterator, on + // their next call. + if iter.node == nil { + iter.seekKey = nil + return + } + iter.seekKey = iter.node.key +} + +// Iterator returns a new iterator for the mutable treap. The newly returned +// iterator is not pointing to a valid item until a call to one of the methods +// to position it is made. +// +// The start key and limit key parameters cause the iterator to be limited to +// a range of keys. The start key is inclusive and the limit key is exclusive. 
+// Either or both can be nil if the functionality is not desired.
+//
+// WARNING: The ForceReseek method must be called on the returned iterator if
+// the treap is mutated. Failure to do so will cause the iterator to return
+// unexpected keys and/or values.
+//
+// For example:
+//   iter := t.Iterator(nil, nil)
+//   for iter.Next() {
+//   	if someCondition {
+//   		t.Delete(iter.Key())
+//   		iter.ForceReseek()
+//   	}
+//   }
+func (t *Mutable) Iterator(startKey, limitKey []byte) *Iterator {
+	iter := &Iterator{
+		t:        t,
+		root:     t.root,
+		isNew:    true,
+		startKey: startKey,
+		limitKey: limitKey,
+	}
+	return iter
+}
+
+// Iterator returns a new iterator for the immutable treap. The newly returned
+// iterator is not pointing to a valid item until a call to one of the methods
+// to position it is made.
+//
+// The start key and limit key parameters cause the iterator to be limited to
+// a range of keys. The start key is inclusive and the limit key is exclusive.
+// Either or both can be nil if the functionality is not desired.
+func (t *Immutable) Iterator(startKey, limitKey []byte) *Iterator {
+	iter := &Iterator{
+		root:     t.root,
+		isNew:    true,
+		startKey: startKey,
+		limitKey: limitKey,
+	}
+	return iter
+}
diff --git a/database2/internal/treap/treapiter_test.go b/database2/internal/treap/treapiter_test.go
new file mode 100644
index 00000000..08b4335e
--- /dev/null
+++ b/database2/internal/treap/treapiter_test.go
@@ -0,0 +1,719 @@
+// Copyright (c) 2015-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package treap
+
+import (
+	"bytes"
+	"encoding/binary"
+	"testing"
+)
+
+// TestMutableIterator ensures that the general behavior of mutable treap
+// iterators is as expected including tests for first, last, ordered and reverse
+// ordered iteration, limiting the range, seeking, and initially unpositioned.
+func TestMutableIterator(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		numKeys       int
+		step          int
+		startKey      []byte
+		limitKey      []byte
+		expectedFirst []byte
+		expectedLast  []byte
+		seekKey       []byte
+		expectedSeek  []byte
+	}{
+		// No range limits. Values are the set (0, 1, 2, ..., 49).
+		// Seek existing value.
+		{
+			numKeys:       50,
+			step:          1,
+			expectedFirst: serializeUint32(0),
+			expectedLast:  serializeUint32(49),
+			seekKey:       serializeUint32(12),
+			expectedSeek:  serializeUint32(12),
+		},
+
+		// Limited to range [24, end]. Values are the set
+		// (0, 2, 4, ..., 48). Seek value that doesn't exist and is
+		// greater than largest existing key.
+		{
+			numKeys:       50,
+			step:          2,
+			startKey:      serializeUint32(24),
+			expectedFirst: serializeUint32(24),
+			expectedLast:  serializeUint32(48),
+			seekKey:       serializeUint32(49),
+			expectedSeek:  nil,
+		},
+
+		// Limited to range [start, 25). Values are the set
+		// (0, 3, 6, ..., 48). Seek value that doesn't exist but is
+		// before an existing value within the range.
+		{
+			numKeys:       50,
+			step:          3,
+			limitKey:      serializeUint32(25),
+			expectedFirst: serializeUint32(0),
+			expectedLast:  serializeUint32(24),
+			seekKey:       serializeUint32(17),
+			expectedSeek:  serializeUint32(18),
+		},
+
+		// Limited to range [10, 21). Values are the set
+		// (0, 4, ..., 48). Seek value that exists, but is before the
+		// minimum allowed range.
+ { + numKeys: 50, + step: 4, + startKey: serializeUint32(10), + limitKey: serializeUint32(21), + expectedFirst: serializeUint32(12), + expectedLast: serializeUint32(20), + seekKey: serializeUint32(4), + expectedSeek: nil, + }, + + // Limited by prefix {0,0,0}, range [{0,0,0}, {0,0,1}). + // Since it's a bytewise compare, {0,0,0,...} < {0,0,1}. + // Seek existing value within the allowed range. + { + numKeys: 300, + step: 1, + startKey: []byte{0x00, 0x00, 0x00}, + limitKey: []byte{0x00, 0x00, 0x01}, + expectedFirst: serializeUint32(0), + expectedLast: serializeUint32(255), + seekKey: serializeUint32(100), + expectedSeek: serializeUint32(100), + }, + } + +testLoop: + for i, test := range tests { + // Insert a bunch of keys. + testTreap := NewMutable() + for i := 0; i < test.numKeys; i += test.step { + key := serializeUint32(uint32(i)) + testTreap.Put(key, key) + } + + // Create new iterator limited by the test params. + iter := testTreap.Iterator(test.startKey, test.limitKey) + + // Ensure the first item is accurate. + hasFirst := iter.First() + if !hasFirst && test.expectedFirst != nil { + t.Errorf("First #%d: unexpected exhausted iterator", i) + continue + } + gotKey := iter.Key() + if !bytes.Equal(gotKey, test.expectedFirst) { + t.Errorf("First.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedFirst) + continue + } + gotVal := iter.Value() + if !bytes.Equal(gotVal, test.expectedFirst) { + t.Errorf("First.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedFirst) + continue + } + + // Ensure the iterator gives the expected items in order. + curNum := binary.BigEndian.Uint32(test.expectedFirst) + for iter.Next() { + curNum += uint32(test.step) + + // Ensure key is as expected. + gotKey := iter.Key() + expectedKey := serializeUint32(curNum) + if !bytes.Equal(gotKey, expectedKey) { + t.Errorf("iter.Key #%d (%d): unexpected key - "+ + "got %x, want %x", i, curNum, gotKey, + expectedKey) + continue testLoop + } + + // Ensure value is as expected. + gotVal := iter.Value() + if !bytes.Equal(gotVal, expectedKey) { + t.Errorf("iter.Value #%d (%d): unexpected "+ + "value - got %x, want %x", i, curNum, + gotVal, expectedKey) + continue testLoop + } + } + + // Ensure iterator is exhausted. + if iter.Valid() { + t.Errorf("Valid #%d: iterator should be exhausted", i) + continue + } + + // Ensure the last item is accurate. + hasLast := iter.Last() + if !hasLast && test.expectedLast != nil { + t.Errorf("Last #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedLast) { + t.Errorf("Last.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedLast) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedLast) { + t.Errorf("Last.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedLast) + continue + } + + // Ensure the iterator gives the expected items in reverse + // order. + curNum = binary.BigEndian.Uint32(test.expectedLast) + for iter.Prev() { + curNum -= uint32(test.step) + + // Ensure key is as expected. + gotKey := iter.Key() + expectedKey := serializeUint32(curNum) + if !bytes.Equal(gotKey, expectedKey) { + t.Errorf("iter.Key #%d (%d): unexpected key - "+ + "got %x, want %x", i, curNum, gotKey, + expectedKey) + continue testLoop + } + + // Ensure value is as expected. 
+ gotVal := iter.Value() + if !bytes.Equal(gotVal, expectedKey) { + t.Errorf("iter.Value #%d (%d): unexpected "+ + "value - got %x, want %x", i, curNum, + gotVal, expectedKey) + continue testLoop + } + } + + // Ensure iterator is exhausted. + if iter.Valid() { + t.Errorf("Valid #%d: iterator should be exhausted", i) + continue + } + + // Seek to the provided key. + seekValid := iter.Seek(test.seekKey) + if !seekValid && test.expectedSeek != nil { + t.Errorf("Seek #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedSeek) { + t.Errorf("Seek.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedSeek) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedSeek) { + t.Errorf("Seek.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedSeek) + continue + } + + // Recreate the iterator and ensure calling Next on it before it + // has been positioned gives the first element. + iter = testTreap.Iterator(test.startKey, test.limitKey) + hasNext := iter.Next() + if !hasNext && test.expectedFirst != nil { + t.Errorf("Next #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedFirst) { + t.Errorf("Next.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedFirst) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedFirst) { + t.Errorf("Next.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedFirst) + continue + } + + // Recreate the iterator and ensure calling Prev on it before it + // has been positioned gives the first element. + iter = testTreap.Iterator(test.startKey, test.limitKey) + hasPrev := iter.Prev() + if !hasPrev && test.expectedLast != nil { + t.Errorf("Prev #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedLast) { + t.Errorf("Prev.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedLast) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedLast) { + t.Errorf("Next.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedLast) + continue + } + } +} + +// TestMutableEmptyIterator ensures that the various functions behave as +// expected when a mutable treap is empty. +func TestMutableEmptyIterator(t *testing.T) { + t.Parallel() + + // Create iterator against empty treap. + testTreap := NewMutable() + iter := testTreap.Iterator(nil, nil) + + // Ensure Valid on empty iterator reports it as exhausted. + if iter.Valid() { + t.Fatal("Valid: iterator should be exhausted") + } + + // Ensure First and Last on empty iterator report it as exhausted. + if iter.First() { + t.Fatal("First: iterator should be exhausted") + } + if iter.Last() { + t.Fatal("Last: iterator should be exhausted") + } + + // Ensure Next and Prev on empty iterator report it as exhausted. + if iter.Next() { + t.Fatal("Next: iterator should be exhausted") + } + if iter.Prev() { + t.Fatal("Prev: iterator should be exhausted") + } + + // Ensure Key and Value on empty iterator are nil. + if gotKey := iter.Key(); gotKey != nil { + t.Fatalf("Key: should be nil - got %q", gotKey) + } + if gotVal := iter.Value(); gotVal != nil { + t.Fatalf("Value: should be nil - got %q", gotVal) + } + + // Ensure Next and Prev report exhausted after forcing a reseek on an + // empty iterator. 
+ iter.ForceReseek() + if iter.Next() { + t.Fatal("Next: iterator should be exhausted") + } + iter.ForceReseek() + if iter.Prev() { + t.Fatal("Prev: iterator should be exhausted") + } +} + +// TestIteratorUpdates ensures that issuing a call to ForceReseek on an iterator +// that had the underlying mutable treap updated works as expected. +func TestIteratorUpdates(t *testing.T) { + t.Parallel() + + // Create a new treap with various values inserted in no particular + // order. The resulting keys are the set (2, 4, 7, 11, 18, 25). + testTreap := NewMutable() + testTreap.Put(serializeUint32(7), nil) + testTreap.Put(serializeUint32(2), nil) + testTreap.Put(serializeUint32(18), nil) + testTreap.Put(serializeUint32(11), nil) + testTreap.Put(serializeUint32(25), nil) + testTreap.Put(serializeUint32(4), nil) + + // Create an iterator against the treap with a range that excludes the + // lowest and highest entries. The limited set is then (4, 7, 11, 18) + iter := testTreap.Iterator(serializeUint32(3), serializeUint32(25)) + + // Delete a key from the middle of the range and notify the iterator to + // force a reseek. + testTreap.Delete(serializeUint32(11)) + iter.ForceReseek() + + // Ensure that calling Next on the iterator after the forced reseek + // gives the expected key. The limited set of keys at this point is + // (4, 7, 18) and the iterator has not yet been positioned. + if !iter.Next() { + t.Fatal("ForceReseek.Next: unexpected exhausted iterator") + } + wantKey := serializeUint32(4) + gotKey := iter.Key() + if !bytes.Equal(gotKey, wantKey) { + t.Fatalf("ForceReseek.Key: unexpected key - got %x, want %x", + gotKey, wantKey) + } + + // Delete the key the iterator is currently position at and notify the + // iterator to force a reseek. + testTreap.Delete(serializeUint32(4)) + iter.ForceReseek() + + // Ensure that calling Next on the iterator after the forced reseek + // gives the expected key. The limited set of keys at this point is + // (7, 18) and the iterator is positioned at a deleted entry before 7. + if !iter.Next() { + t.Fatal("ForceReseek.Next: unexpected exhausted iterator") + } + wantKey = serializeUint32(7) + gotKey = iter.Key() + if !bytes.Equal(gotKey, wantKey) { + t.Fatalf("ForceReseek.Key: unexpected key - got %x, want %x", + gotKey, wantKey) + } + + // Add a key before the current key the iterator is position at and + // notify the iterator to force a reseek. + testTreap.Put(serializeUint32(4), nil) + iter.ForceReseek() + + // Ensure that calling Prev on the iterator after the forced reseek + // gives the expected key. The limited set of keys at this point is + // (4, 7, 18) and the iterator is positioned at 7. + if !iter.Prev() { + t.Fatal("ForceReseek.Prev: unexpected exhausted iterator") + } + wantKey = serializeUint32(4) + gotKey = iter.Key() + if !bytes.Equal(gotKey, wantKey) { + t.Fatalf("ForceReseek.Key: unexpected key - got %x, want %x", + gotKey, wantKey) + } + + // Delete the next key the iterator would ordinarily move to then notify + // the iterator to force a reseek. + testTreap.Delete(serializeUint32(7)) + iter.ForceReseek() + + // Ensure that calling Next on the iterator after the forced reseek + // gives the expected key. The limited set of keys at this point is + // (4, 18) and the iterator is positioned at 4. 
+ if !iter.Next() { + t.Fatal("ForceReseek.Next: unexpected exhausted iterator") + } + wantKey = serializeUint32(18) + gotKey = iter.Key() + if !bytes.Equal(gotKey, wantKey) { + t.Fatalf("ForceReseek.Key: unexpected key - got %x, want %x", + gotKey, wantKey) + } +} + +// TestImmutableIterator ensures that the general behavior of immutable treap +// iterators is as expected including tests for first, last, ordered and reverse +// ordered iteration, limiting the range, seeking, and initially unpositioned. +func TestImmutableIterator(t *testing.T) { + t.Parallel() + + tests := []struct { + numKeys int + step int + startKey []byte + limitKey []byte + expectedFirst []byte + expectedLast []byte + seekKey []byte + expectedSeek []byte + }{ + // No range limits. Values are the set (0, 1, 2, ..., 49). + // Seek existing value. + { + numKeys: 50, + step: 1, + expectedFirst: serializeUint32(0), + expectedLast: serializeUint32(49), + seekKey: serializeUint32(12), + expectedSeek: serializeUint32(12), + }, + + // Limited to range [24, end]. Values are the set + // (0, 2, 4, ..., 48). Seek value that doesn't exist and is + // greater than largest existing key. + { + numKeys: 50, + step: 2, + startKey: serializeUint32(24), + expectedFirst: serializeUint32(24), + expectedLast: serializeUint32(48), + seekKey: serializeUint32(49), + expectedSeek: nil, + }, + + // Limited to range [start, 25). Values are the set + // (0, 3, 6, ..., 48). Seek value that doesn't exist but is + // before an existing value within the range. + { + numKeys: 50, + step: 3, + limitKey: serializeUint32(25), + expectedFirst: serializeUint32(0), + expectedLast: serializeUint32(24), + seekKey: serializeUint32(17), + expectedSeek: serializeUint32(18), + }, + + // Limited to range [10, 21). Values are the set + // (0, 4, ..., 48). Seek value that exists, but is before the + // minimum allowed range. + { + numKeys: 50, + step: 4, + startKey: serializeUint32(10), + limitKey: serializeUint32(21), + expectedFirst: serializeUint32(12), + expectedLast: serializeUint32(20), + seekKey: serializeUint32(4), + expectedSeek: nil, + }, + + // Limited by prefix {0,0,0}, range [{0,0,0}, {0,0,1}). + // Since it's a bytewise compare, {0,0,0,...} < {0,0,1}. + // Seek existing value within the allowed range. + { + numKeys: 300, + step: 1, + startKey: []byte{0x00, 0x00, 0x00}, + limitKey: []byte{0x00, 0x00, 0x01}, + expectedFirst: serializeUint32(0), + expectedLast: serializeUint32(255), + seekKey: serializeUint32(100), + expectedSeek: serializeUint32(100), + }, + } + +testLoop: + for i, test := range tests { + // Insert a bunch of keys. + testTreap := NewImmutable() + for i := 0; i < test.numKeys; i += test.step { + key := serializeUint32(uint32(i)) + testTreap = testTreap.Put(key, key) + } + + // Create new iterator limited by the test params. + iter := testTreap.Iterator(test.startKey, test.limitKey) + + // Ensure the first item is accurate. + hasFirst := iter.First() + if !hasFirst && test.expectedFirst != nil { + t.Errorf("First #%d: unexpected exhausted iterator", i) + continue + } + gotKey := iter.Key() + if !bytes.Equal(gotKey, test.expectedFirst) { + t.Errorf("First.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedFirst) + continue + } + gotVal := iter.Value() + if !bytes.Equal(gotVal, test.expectedFirst) { + t.Errorf("First.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedFirst) + continue + } + + // Ensure the iterator gives the expected items in order. 
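+		// The expected key starts at the first key in the limited
+		// range and advances by the test's key step on each iteration.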
+ curNum := binary.BigEndian.Uint32(test.expectedFirst) + for iter.Next() { + curNum += uint32(test.step) + + // Ensure key is as expected. + gotKey := iter.Key() + expectedKey := serializeUint32(curNum) + if !bytes.Equal(gotKey, expectedKey) { + t.Errorf("iter.Key #%d (%d): unexpected key - "+ + "got %x, want %x", i, curNum, gotKey, + expectedKey) + continue testLoop + } + + // Ensure value is as expected. + gotVal := iter.Value() + if !bytes.Equal(gotVal, expectedKey) { + t.Errorf("iter.Value #%d (%d): unexpected "+ + "value - got %x, want %x", i, curNum, + gotVal, expectedKey) + continue testLoop + } + } + + // Ensure iterator is exhausted. + if iter.Valid() { + t.Errorf("Valid #%d: iterator should be exhausted", i) + continue + } + + // Ensure the last item is accurate. + hasLast := iter.Last() + if !hasLast && test.expectedLast != nil { + t.Errorf("Last #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedLast) { + t.Errorf("Last.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedLast) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedLast) { + t.Errorf("Last.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedLast) + continue + } + + // Ensure the iterator gives the expected items in reverse + // order. + curNum = binary.BigEndian.Uint32(test.expectedLast) + for iter.Prev() { + curNum -= uint32(test.step) + + // Ensure key is as expected. + gotKey := iter.Key() + expectedKey := serializeUint32(curNum) + if !bytes.Equal(gotKey, expectedKey) { + t.Errorf("iter.Key #%d (%d): unexpected key - "+ + "got %x, want %x", i, curNum, gotKey, + expectedKey) + continue testLoop + } + + // Ensure value is as expected. + gotVal := iter.Value() + if !bytes.Equal(gotVal, expectedKey) { + t.Errorf("iter.Value #%d (%d): unexpected "+ + "value - got %x, want %x", i, curNum, + gotVal, expectedKey) + continue testLoop + } + } + + // Ensure iterator is exhausted. + if iter.Valid() { + t.Errorf("Valid #%d: iterator should be exhausted", i) + continue + } + + // Seek to the provided key. + seekValid := iter.Seek(test.seekKey) + if !seekValid && test.expectedSeek != nil { + t.Errorf("Seek #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedSeek) { + t.Errorf("Seek.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedSeek) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedSeek) { + t.Errorf("Seek.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedSeek) + continue + } + + // Recreate the iterator and ensure calling Next on it before it + // has been positioned gives the first element. + iter = testTreap.Iterator(test.startKey, test.limitKey) + hasNext := iter.Next() + if !hasNext && test.expectedFirst != nil { + t.Errorf("Next #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedFirst) { + t.Errorf("Next.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedFirst) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedFirst) { + t.Errorf("Next.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedFirst) + continue + } + + // Recreate the iterator and ensure calling Prev on it before it + // has been positioned gives the first element. 
+ iter = testTreap.Iterator(test.startKey, test.limitKey) + hasPrev := iter.Prev() + if !hasPrev && test.expectedLast != nil { + t.Errorf("Prev #%d: unexpected exhausted iterator", i) + continue + } + gotKey = iter.Key() + if !bytes.Equal(gotKey, test.expectedLast) { + t.Errorf("Prev.Key #%d: unexpected key - got %x, "+ + "want %x", i, gotKey, test.expectedLast) + continue + } + gotVal = iter.Value() + if !bytes.Equal(gotVal, test.expectedLast) { + t.Errorf("Next.Value #%d: unexpected value - got %x, "+ + "want %x", i, gotVal, test.expectedLast) + continue + } + } +} + +// TestImmutableEmptyIterator ensures that the various functions behave as +// expected when an immutable treap is empty. +func TestImmutableEmptyIterator(t *testing.T) { + t.Parallel() + + // Create iterator against empty treap. + testTreap := NewImmutable() + iter := testTreap.Iterator(nil, nil) + + // Ensure Valid on empty iterator reports it as exhausted. + if iter.Valid() { + t.Fatal("Valid: iterator should be exhausted") + } + + // Ensure First and Last on empty iterator report it as exhausted. + if iter.First() { + t.Fatal("First: iterator should be exhausted") + } + if iter.Last() { + t.Fatal("Last: iterator should be exhausted") + } + + // Ensure Next and Prev on empty iterator report it as exhausted. + if iter.Next() { + t.Fatal("Next: iterator should be exhausted") + } + if iter.Prev() { + t.Fatal("Prev: iterator should be exhausted") + } + + // Ensure Key and Value on empty iterator are nil. + if gotKey := iter.Key(); gotKey != nil { + t.Fatalf("Key: should be nil - got %q", gotKey) + } + if gotVal := iter.Value(); gotVal != nil { + t.Fatalf("Value: should be nil - got %q", gotVal) + } + + // Ensure calling ForceReseek on an immutable treap iterator does not + // cause any issues since it only applies to mutable treap iterators. + iter.ForceReseek() + if iter.Next() { + t.Fatal("Next: iterator should be exhausted") + } + iter.ForceReseek() + if iter.Prev() { + t.Fatal("Prev: iterator should be exhausted") + } +} diff --git a/database2/log.go b/database2/log.go new file mode 100644 index 00000000..a9736241 --- /dev/null +++ b/database2/log.go @@ -0,0 +1,65 @@ +// Copyright (c) 2013-2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package database2 + +import ( + "errors" + "io" + + "github.com/btcsuite/btclog" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + DisableLog() +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until either UseLogger or SetLogWriter are called. +func DisableLog() { + log = btclog.Disabled +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. +func UseLogger(logger btclog.Logger) { + log = logger + + // Update the logger for the registered drivers. + for _, drv := range drivers { + if drv.UseLogger != nil { + drv.UseLogger(logger) + } + } +} + +// SetLogWriter uses a specified io.Writer to output package logging info. +// This allows a caller to direct package logging output without needing a +// dependency on seelog. If the caller is also using btclog, UseLogger should +// be used instead. 
+func SetLogWriter(w io.Writer, level string) error {
+	if w == nil {
+		return errors.New("nil writer")
+	}
+
+	lvl, ok := btclog.LogLevelFromString(level)
+	if !ok {
+		return errors.New("invalid log level")
+	}
+
+	l, err := btclog.NewLoggerFromWriter(w, lvl)
+	if err != nil {
+		return err
+	}
+
+	UseLogger(l)
+	return nil
+}
diff --git a/database2/log_test.go b/database2/log_test.go
new file mode 100644
index 00000000..d5cb9afd
--- /dev/null
+++ b/database2/log_test.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2015-2016 The btcsuite developers
+// Use of this source code is governed by an ISC
+// license that can be found in the LICENSE file.
+
+package database2_test
+
+import (
+	"errors"
+	"io"
+	"os"
+	"testing"
+
+	database "github.com/btcsuite/btcd/database2"
+)
+
+// TestSetLogWriter ensures the SetLogWriter function works as expected.
+func TestSetLogWriter(t *testing.T) {
+	tests := []struct {
+		name     string
+		w        io.Writer
+		level    string
+		expected error
+	}{
+		{
+			name:     "nil writer",
+			w:        nil,
+			level:    "trace",
+			expected: errors.New("nil writer"),
+		},
+		{
+			name:     "invalid log level",
+			w:        os.Stdout,
+			level:    "wrong",
+			expected: errors.New("invalid log level"),
+		},
+		{
+			name:     "use off level",
+			w:        os.Stdout,
+			level:    "off",
+			expected: errors.New("min level can't be greater than max. Got min: 6, max: 5"),
+		},
+		{
+			name:     "pass",
+			w:        os.Stdout,
+			level:    "debug",
+			expected: nil,
+		},
+	}
+
+	t.Logf("Running %d tests", len(tests))
+	for i, test := range tests {
+		err := database.SetLogWriter(test.w, test.level)
+		if err != nil {
+			if err.Error() != test.expected.Error() {
+				t.Errorf("SetLogWriter #%d (%s) wrong result\n"+
+					"got: %v\nwant: %v", i, test.name, err,
+					test.expected)
+			}
+		} else {
+			if test.expected != nil {
+				t.Errorf("SetLogWriter #%d (%s) wrong result\n"+
+					"got: %v\nwant: %v", i, test.name, err,
+					test.expected)
+			}
+		}
+	}
+}
diff --git a/database2/testdata/blocks1-256.bz2 b/database2/testdata/blocks1-256.bz2
new file mode 100644
index 00000000..6b8bda44
Binary files /dev/null and b/database2/testdata/blocks1-256.bz2 differ
diff --git a/log.go b/log.go
index ce3adc05..54b26e37 100644
--- a/log.go
+++ b/log.go
@@ -12,7 +12,7 @@ import (
 
 	"github.com/btcsuite/btcd/addrmgr"
 	"github.com/btcsuite/btcd/blockchain"
-	"github.com/btcsuite/btcd/database"
+	database "github.com/btcsuite/btcd/database2"
 	"github.com/btcsuite/btcd/peer"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"