database: Replace with new version.
This commit removes the old database package, moves the new package into its place, and updates all imports accordingly.
parent 7c174620f7
commit b580cdb7d3
92 changed files with 319 additions and 5877 deletions
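Most of the hunks below are the same mechanical change: the named import of the interim database2 path is replaced by the final database path. As a reading aid, here is a minimal sketch of what an affected file looks like after the change; the `var _ func(database.Tx) error` line is only an illustrative device (not from the commit) to show that callers keep compiling, since the new import path's package name is still `database`:

```go
package blockchain // one of the many affected packages in this commit

import (
	// Previously: database "github.com/btcsuite/btcd/database2"
	"github.com/btcsuite/btcd/database"
)

// The explicit import alias is no longer needed because the new path's last
// element is already "database", so existing references such as database.Tx
// continue to compile unchanged. This blank declaration only demonstrates that.
var _ func(database.Tx) error
```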
@@ -5,7 +5,7 @@
 package blockchain

 import (
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 )
@@ -13,7 +13,7 @@ import (
 	"time"

 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
@@ -11,7 +11,7 @@ import (
 	"math/big"
 	"sort"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -11,7 +11,7 @@ import (
 	"reflect"
 	"testing"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 )
@@ -8,7 +8,7 @@ import (
 	"fmt"

 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
@@ -15,8 +15,8 @@ import (

 	"github.com/btcsuite/btcd/blockchain"
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
-	_ "github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	_ "github.com/btcsuite/btcd/database/ffldb"
 	"github.com/btcsuite/btcd/wire"
 )
@@ -12,8 +12,8 @@ import (

 	"github.com/btcsuite/btcd/blockchain"
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
-	_ "github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	_ "github.com/btcsuite/btcd/database/ffldb"
 	"github.com/btcsuite/btcutil"
 )
@@ -11,7 +11,7 @@ import (

 	"github.com/btcsuite/btcd/blockchain"
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
@@ -11,7 +11,7 @@ import (
 	"encoding/binary"

 	"github.com/btcsuite/btcd/blockchain"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcutil"
 )
@@ -9,7 +9,7 @@ import (
 	"fmt"

 	"github.com/btcsuite/btcd/blockchain"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -9,7 +9,7 @@ import (
 	"fmt"

 	"github.com/btcsuite/btcd/blockchain"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -7,7 +7,7 @@ package blockchain
 import (
 	"fmt"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -7,7 +7,7 @@ package blockchain
 import (
 	"fmt"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
@@ -15,7 +15,7 @@ import (

 	"github.com/btcsuite/btcd/blockchain"
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -10,7 +10,7 @@ import (
 	"runtime"

 	"github.com/btcsuite/btcd/blockchain"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/limits"
 	"github.com/btcsuite/btclog"
 )
@@ -10,8 +10,8 @@ import (
 	"path/filepath"

 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
-	_ "github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	_ "github.com/btcsuite/btcd/database/ffldb"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 	flags "github.com/btcsuite/go-flags"
@@ -12,7 +12,7 @@ import (
 	"time"

 	"github.com/btcsuite/btcd/blockchain"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -10,8 +10,8 @@ import (
 	"path/filepath"

 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
-	_ "github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	_ "github.com/btcsuite/btcd/database/ffldb"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 	flags "github.com/btcsuite/go-flags"
@@ -11,7 +11,7 @@ import (

 	"github.com/btcsuite/btcd/blockchain"
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 )
@@ -16,8 +16,8 @@ import (
 	"strings"
 	"time"

-	database "github.com/btcsuite/btcd/database2"
-	_ "github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	_ "github.com/btcsuite/btcd/database/ffldb"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 	flags "github.com/btcsuite/go-flags"
@@ -7,8 +7,7 @@ database
 [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
 (http://godoc.org/github.com/btcsuite/btcd/database)

-Package database provides a database interface for the bitcoin block chain and
-transactions.
+Package database provides a block and metadata storage database.

 Please note that this package is intended to enable btcd to support different
 database backends and is not something that a client can directly access as only
@@ -20,6 +19,24 @@ likely want to use the [btcrpcclient](https://github.com/btcsuite/btcrpcclient)
 package which makes use of the [JSON-RPC API]
 (https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md).

+However, this package could be extremely useful for any applications requiring
+Bitcoin block storage capabilities.
+
+The default backend, ffldb, has a strong focus on speed, efficiency, and
+robustness. It makes use of leveldb for the metadata, flat files for block
+storage, and strict checksums in key areas to ensure data integrity.
+
+## Feature Overview
+
+- Key/value metadata store
+- Bitcoin block storage
+- Efficient retrieval of block headers and regions (transactions, scripts, etc)
+- Read-only and read-write transactions with both manual and managed modes
+- Nested buckets
+- Iteration support including cursors with seek capability
+- Supports registration of backend databases
+- Comprehensive test coverage
+
 ## Installation and Updating

 ```bash
@@ -28,37 +45,16 @@ $ go get -u github.com/btcsuite/btcd/database

 ## Examples

-* [CreateDB Example]
-  (http://godoc.org/github.com/btcsuite/btcd/database#example-CreateDB)
-  Demonstrates creating a new database and inserting the genesis block into it.
+* [Basic Usage Example]
+  (http://godoc.org/github.com/btcsuite/btcd/database#example-package--BasicUsage)
+  Demonstrates creating a new database and using a managed read-write
+  transaction to store and retrieve metadata.

-* [NewestSha Example]
-  (http://godoc.org/github.com/btcsuite/btcd/database#example-Db--NewestSha)
-  Demonstrates querying the database for the most recent best block height and
-  hash.
-
-## TODO
-- Increase test coverage to 100%
-
-## GPG Verification Key
-
-All official release tags are signed by Conformal so users can ensure the code
-has not been tampered with and is coming from the btcsuite developers. To
-verify the signature perform the following:
-
-- Download the public key from the Conformal website at
-  https://opensource.conformal.com/GIT-GPG-KEY-conformal.txt
-
-- Import the public key into your GPG keyring:
-  ```bash
-  gpg --import GIT-GPG-KEY-conformal.txt
-  ```
-
-- Verify the release tag with the following command where `TAG_NAME` is a
-  placeholder for the specific tag:
-  ```bash
-  git tag -v TAG_NAME
-  ```
+* [Block Storage and Retrieval Example]
+  (http://godoc.org/github.com/btcsuite/btcd/database#example-package--BlockStorageAndRetrieval)
+  Demonstrates creating a new database, using a managed read-write transaction
+  to store a block, and then using a managed read-only transaction to fetch the
+  block.

 ## License
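For context while reading the README changes above, the following sketch shows how the newly listed features fit together end to end. It is hedged: it uses only API names that appear elsewhere in this commit (Create, Update, View, Metadata, Bucket, CreateBucketIfNotExists, Put, ForEach), but the exact View and ForEach signatures are assumed by analogy with the Update and CreateBucket calls shown in the examples, and the bucket and key names are purely illustrative:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/ffldb"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Create a throwaway database using the default ffldb backend.
	dbPath := filepath.Join(os.TempDir(), "readme-sketch")
	db, err := database.Create("ffldb", dbPath, wire.MainNet)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()

	// Managed read-write transaction: store a couple of keys in a nested
	// bucket under the root metadata bucket ("heights" is illustrative).
	err = db.Update(func(tx database.Tx) error {
		bucket, err := tx.Metadata().CreateBucketIfNotExists([]byte("heights"))
		if err != nil {
			return err
		}
		if err := bucket.Put([]byte("best"), []byte("0")); err != nil {
			return err
		}
		return bucket.Put([]byte("prev"), []byte("-"))
	})
	if err != nil {
		fmt.Println(err)
		return
	}

	// Managed read-only transaction: iterate the same bucket.
	err = db.View(func(tx database.Tx) error {
		bucket := tx.Metadata().Bucket([]byte("heights"))
		return bucket.ForEach(func(k, v []byte) error {
			fmt.Printf("%s = %s\n", k, v)
			return nil
		})
	})
	if err != nil {
		fmt.Println(err)
	}
}
```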
@@ -9,7 +9,7 @@ import (
 	"errors"
 	"time"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 )
@@ -10,7 +10,7 @@ import (
 	"strconv"
 	"time"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 )
@@ -11,8 +11,8 @@ import (
 	"path/filepath"

 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
-	_ "github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	_ "github.com/btcsuite/btcd/database/ffldb"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -12,7 +12,7 @@ import (
 	"sync"
 	"time"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@@ -7,7 +7,7 @@ package main
 import (
 	"time"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 )
@@ -10,7 +10,7 @@ import (
 	"runtime"
 	"strings"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btclog"
 	flags "github.com/btcsuite/go-flags"
 )
@ -1,221 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package database_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/bzip2"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/chaincfg"
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
_ "github.com/btcsuite/btcd/database/ldb"
|
|
||||||
_ "github.com/btcsuite/btcd/database/memdb"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// network is the expected bitcoin network in the test block data.
|
|
||||||
network = wire.MainNet
|
|
||||||
|
|
||||||
// savedBlocks is used to store blocks loaded from the blockDataFile
|
|
||||||
// so multiple invocations to loadBlocks from the various test functions
|
|
||||||
// do not have to reload them from disk.
|
|
||||||
savedBlocks []*btcutil.Block
|
|
||||||
|
|
||||||
// blockDataFile is the path to a file containing the first 256 blocks
|
|
||||||
// of the block chain.
|
|
||||||
blockDataFile = filepath.Join("testdata", "blocks1-256.bz2")
|
|
||||||
)
|
|
||||||
|
|
||||||
var zeroHash = wire.ShaHash{}
|
|
||||||
|
|
||||||
// testDbRoot is the root directory used to create all test databases.
|
|
||||||
const testDbRoot = "testdbs"
|
|
||||||
|
|
||||||
// filesExists returns whether or not the named file or directory exists.
|
|
||||||
func fileExists(name string) bool {
|
|
||||||
if _, err := os.Stat(name); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// openDB is used to open an existing database based on the database type and
|
|
||||||
// name.
|
|
||||||
func openDB(dbType, dbName string) (database.Db, error) {
|
|
||||||
// Handle memdb specially since it has no files on disk.
|
|
||||||
if dbType == "memdb" {
|
|
||||||
db, err := database.OpenDB(dbType)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error opening db: %v", err)
|
|
||||||
}
|
|
||||||
return db, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
dbPath := filepath.Join(testDbRoot, dbName)
|
|
||||||
db, err := database.OpenDB(dbType, dbPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error opening db: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return db, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// createDB creates a new db instance and returns a teardown function the caller
|
|
||||||
// should invoke when done testing to clean up. The close flag indicates
|
|
||||||
// whether or not the teardown function should sync and close the database
|
|
||||||
// during teardown.
|
|
||||||
func createDB(dbType, dbName string, close bool) (database.Db, func(), error) {
|
|
||||||
// Handle memory database specially since it doesn't need the disk
|
|
||||||
// specific handling.
|
|
||||||
if dbType == "memdb" {
|
|
||||||
db, err := database.CreateDB(dbType)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("error creating db: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Setup a teardown function for cleaning up. This function is
|
|
||||||
// returned to the caller to be invoked when it is done testing.
|
|
||||||
teardown := func() {
|
|
||||||
if close {
|
|
||||||
db.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return db, teardown, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the root directory for test databases.
|
|
||||||
if !fileExists(testDbRoot) {
|
|
||||||
if err := os.MkdirAll(testDbRoot, 0700); err != nil {
|
|
||||||
err := fmt.Errorf("unable to create test db "+
|
|
||||||
"root: %v", err)
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a new database to store the accepted blocks into.
|
|
||||||
dbPath := filepath.Join(testDbRoot, dbName)
|
|
||||||
_ = os.RemoveAll(dbPath)
|
|
||||||
db, err := database.CreateDB(dbType, dbPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("error creating db: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Setup a teardown function for cleaning up. This function is
|
|
||||||
// returned to the caller to be invoked when it is done testing.
|
|
||||||
teardown := func() {
|
|
||||||
dbVersionPath := filepath.Join(testDbRoot, dbName+".ver")
|
|
||||||
if close {
|
|
||||||
db.Sync()
|
|
||||||
db.Close()
|
|
||||||
}
|
|
||||||
os.RemoveAll(dbPath)
|
|
||||||
os.Remove(dbVersionPath)
|
|
||||||
os.RemoveAll(testDbRoot)
|
|
||||||
}
|
|
||||||
|
|
||||||
return db, teardown, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setupDB is used to create a new db instance with the genesis block already
|
|
||||||
// inserted. In addition to the new db instance, it returns a teardown function
|
|
||||||
// the caller should invoke when done testing to clean up.
|
|
||||||
func setupDB(dbType, dbName string) (database.Db, func(), error) {
|
|
||||||
db, teardown, err := createDB(dbType, dbName, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert the main network genesis block. This is part of the initial
|
|
||||||
// database setup.
|
|
||||||
genesisBlock := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
|
|
||||||
_, err = db.InsertBlock(genesisBlock)
|
|
||||||
if err != nil {
|
|
||||||
teardown()
|
|
||||||
err := fmt.Errorf("failed to insert genesis block: %v", err)
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return db, teardown, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadBlocks loads the blocks contained in the testdata directory and returns
|
|
||||||
// a slice of them.
|
|
||||||
func loadBlocks(t *testing.T) ([]*btcutil.Block, error) {
|
|
||||||
if len(savedBlocks) != 0 {
|
|
||||||
return savedBlocks, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var dr io.Reader
|
|
||||||
fi, err := os.Open(blockDataFile)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to open file %v, err %v", blockDataFile, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(blockDataFile, ".bz2") {
|
|
||||||
z := bzip2.NewReader(fi)
|
|
||||||
dr = z
|
|
||||||
} else {
|
|
||||||
dr = fi
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err := fi.Close(); err != nil {
|
|
||||||
t.Errorf("failed to close file %v %v", blockDataFile, err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Set the first block as the genesis block.
|
|
||||||
blocks := make([]*btcutil.Block, 0, 256)
|
|
||||||
genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
|
|
||||||
blocks = append(blocks, genesis)
|
|
||||||
|
|
||||||
for height := int64(1); err == nil; height++ {
|
|
||||||
var rintbuf uint32
|
|
||||||
err := binary.Read(dr, binary.LittleEndian, &rintbuf)
|
|
||||||
if err == io.EOF {
|
|
||||||
// hit end of file at expected offset: no warning
|
|
||||||
height--
|
|
||||||
err = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to load network type, err %v", err)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if rintbuf != uint32(network) {
|
|
||||||
t.Errorf("Block doesn't match network: %v expects %v",
|
|
||||||
rintbuf, network)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
|
|
||||||
blocklen := rintbuf
|
|
||||||
|
|
||||||
rbytes := make([]byte, blocklen)
|
|
||||||
|
|
||||||
// read block
|
|
||||||
dr.Read(rbytes)
|
|
||||||
|
|
||||||
block, err := btcutil.NewBlockFromBytes(rbytes)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to parse block %v", height)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
blocks = append(blocks, block)
|
|
||||||
}
|
|
||||||
|
|
||||||
savedBlocks = blocks
|
|
||||||
return blocks, nil
|
|
||||||
}
|
|
|
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-# This script uses go tool cover to generate a test coverage report.
-go test -coverprofile=cov.out && go tool cover -func=cov.out && rm -f cov.out
-echo "============================================================"
-(cd ldb && go test -coverprofile=cov.out && go tool cover -func=cov.out && \
-	rm -f cov.out)
-echo "============================================================"
-(cd memdb && go test -coverprofile=cov.out && go tool cover -func=cov.out && \
-	rm -f cov.out)
database/db.go (225 changed lines)
@ -1,225 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package database
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/btcsuite/golangcrypto/ripemd160"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Errors that the various database functions may return.
|
|
||||||
var (
|
|
||||||
ErrAddrIndexDoesNotExist = errors.New("address index hasn't been built or is an older version")
|
|
||||||
ErrUnsupportedAddressType = errors.New("address type is not supported " +
|
|
||||||
"by the address-index")
|
|
||||||
ErrPrevShaMissing = errors.New("previous sha missing from database")
|
|
||||||
ErrTxShaMissing = errors.New("requested transaction does not exist")
|
|
||||||
ErrBlockShaMissing = errors.New("requested block does not exist")
|
|
||||||
ErrDuplicateSha = errors.New("duplicate insert attempted")
|
|
||||||
ErrDbDoesNotExist = errors.New("non-existent database")
|
|
||||||
ErrDbUnknownType = errors.New("non-existent database type")
|
|
||||||
ErrNotImplemented = errors.New("method has not yet been implemented")
|
|
||||||
)
|
|
||||||
|
|
||||||
// AllShas is a special value that can be used as the final sha when requesting
|
|
||||||
// a range of shas by height to request them all.
|
|
||||||
const AllShas = int32(^uint32(0) >> 1)
|
|
||||||
|
|
||||||
// Db defines a generic interface that is used to request and insert data into
|
|
||||||
// the bitcoin block chain. This interface is intended to be agnostic to actual
|
|
||||||
// mechanism used for backend data storage. The AddDBDriver function can be
|
|
||||||
// used to add a new backend data storage method.
|
|
||||||
type Db interface {
|
|
||||||
// Close cleanly shuts down the database and syncs all data.
|
|
||||||
Close() (err error)
|
|
||||||
|
|
||||||
// DropAfterBlockBySha will remove any blocks from the database after
|
|
||||||
// the given block. It terminates any existing transaction and performs
|
|
||||||
// its operations in an atomic transaction which is committed before
|
|
||||||
// the function returns.
|
|
||||||
DropAfterBlockBySha(*wire.ShaHash) (err error)
|
|
||||||
|
|
||||||
// ExistsSha returns whether or not the given block hash is present in
|
|
||||||
// the database.
|
|
||||||
ExistsSha(sha *wire.ShaHash) (exists bool, err error)
|
|
||||||
|
|
||||||
// FetchBlockBySha returns a btcutil Block. The implementation may
|
|
||||||
// cache the underlying data if desired.
|
|
||||||
FetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error)
|
|
||||||
|
|
||||||
// FetchBlockHeightBySha returns the block height for the given hash.
|
|
||||||
FetchBlockHeightBySha(sha *wire.ShaHash) (height int32, err error)
|
|
||||||
|
|
||||||
// FetchBlockHeaderBySha returns a wire.BlockHeader for the given
|
|
||||||
// sha. The implementation may cache the underlying data if desired.
|
|
||||||
FetchBlockHeaderBySha(sha *wire.ShaHash) (bh *wire.BlockHeader, err error)
|
|
||||||
|
|
||||||
// FetchBlockShaByHeight returns a block hash based on its height in the
|
|
||||||
// block chain.
|
|
||||||
FetchBlockShaByHeight(height int32) (sha *wire.ShaHash, err error)
|
|
||||||
|
|
||||||
// FetchHeightRange looks up a range of blocks by the start and ending
|
|
||||||
// heights. Fetch is inclusive of the start height and exclusive of the
|
|
||||||
// ending height. To fetch all hashes from the start height until no
|
|
||||||
// more are present, use the special id `AllShas'.
|
|
||||||
FetchHeightRange(startHeight, endHeight int32) (rshalist []wire.ShaHash, err error)
|
|
||||||
|
|
||||||
// ExistsTxSha returns whether or not the given tx hash is present in
|
|
||||||
// the database
|
|
||||||
ExistsTxSha(sha *wire.ShaHash) (exists bool, err error)
|
|
||||||
|
|
||||||
// FetchTxBySha returns some data for the given transaction hash. The
|
|
||||||
// implementation may cache the underlying data if desired.
|
|
||||||
FetchTxBySha(txsha *wire.ShaHash) ([]*TxListReply, error)
|
|
||||||
|
|
||||||
// FetchTxByShaList returns a TxListReply given an array of transaction
|
|
||||||
// hashes. The implementation may cache the underlying data if desired.
|
|
||||||
// This differs from FetchUnSpentTxByShaList in that it will return
|
|
||||||
// the most recent known Tx, if it is fully spent or not.
|
|
||||||
//
|
|
||||||
// NOTE: This function does not return an error directly since it MUST
|
|
||||||
// return at least one TxListReply instance for each requested
|
|
||||||
// transaction. Each TxListReply instance then contains an Err field
|
|
||||||
// which can be used to detect errors.
|
|
||||||
FetchTxByShaList(txShaList []*wire.ShaHash) []*TxListReply
|
|
||||||
|
|
||||||
// FetchUnSpentTxByShaList returns a TxListReply given an array of
|
|
||||||
// transaction hashes. The implementation may cache the underlying
|
|
||||||
// data if desired. Fully spent transactions will not normally not
|
|
||||||
// be returned in this operation.
|
|
||||||
//
|
|
||||||
// NOTE: This function does not return an error directly since it MUST
|
|
||||||
// return at least one TxListReply instance for each requested
|
|
||||||
// transaction. Each TxListReply instance then contains an Err field
|
|
||||||
// which can be used to detect errors.
|
|
||||||
FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*TxListReply
|
|
||||||
|
|
||||||
// InsertBlock inserts raw block and transaction data from a block
|
|
||||||
// into the database. The first block inserted into the database
|
|
||||||
// will be treated as the genesis block. Every subsequent block insert
|
|
||||||
// requires the referenced parent block to already exist.
|
|
||||||
InsertBlock(block *btcutil.Block) (height int32, err error)
|
|
||||||
|
|
||||||
// NewestSha returns the hash and block height of the most recent (end)
|
|
||||||
// block of the block chain. It will return the zero hash, -1 for
|
|
||||||
// the block height, and no error (nil) if there are not any blocks in
|
|
||||||
// the database yet.
|
|
||||||
NewestSha() (sha *wire.ShaHash, height int32, err error)
|
|
||||||
|
|
||||||
// FetchAddrIndexTip returns the hash and block height of the most recent
|
|
||||||
// block which has had its address index populated. It will return
|
|
||||||
// ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the
|
|
||||||
// addrindex hasn't yet been built up.
|
|
||||||
FetchAddrIndexTip() (sha *wire.ShaHash, height int32, err error)
|
|
||||||
|
|
||||||
// UpdateAddrIndexForBlock updates the stored addrindex with passed
|
|
||||||
// index information for a particular block height. Additionally, it
|
|
||||||
// will update the stored meta-data related to the curent tip of the
|
|
||||||
// addr index. These two operations are performed in an atomic
|
|
||||||
// transaction which is committed before the function returns.
|
|
||||||
// Addresses are indexed by the raw bytes of their base58 decoded
|
|
||||||
// hash160.
|
|
||||||
UpdateAddrIndexForBlock(blkSha *wire.ShaHash, height int32,
|
|
||||||
addrIndex BlockAddrIndex) error
|
|
||||||
|
|
||||||
// FetchTxsForAddr looks up and returns all transactions which either
|
|
||||||
// spend a previously created output of the passed address, or create
|
|
||||||
// a new output locked to the passed address. The, `limit` parameter
|
|
||||||
// should be the max number of transactions to be returned.
|
|
||||||
// Additionally, if the caller wishes to skip forward in the results
|
|
||||||
// some amount, the 'seek' represents how many results to skip.
|
|
||||||
// The transactions are returned in chronological order by block height
|
|
||||||
// from old to new, or from new to old if `reverse` is set.
|
|
||||||
// NOTE: Values for both `seek` and `limit` MUST be positive.
|
|
||||||
// It will return the array of fetched transactions, along with the amount
|
|
||||||
// of transactions that were actually skipped.
|
|
||||||
FetchTxsForAddr(addr btcutil.Address, skip int, limit int, reverse bool) ([]*TxListReply, int, error)
|
|
||||||
|
|
||||||
// DeleteAddrIndex deletes the entire addrindex stored within the DB.
|
|
||||||
DeleteAddrIndex() error
|
|
||||||
|
|
||||||
// RollbackClose discards the recent database changes to the previously
|
|
||||||
// saved data at last Sync and closes the database.
|
|
||||||
RollbackClose() (err error)
|
|
||||||
|
|
||||||
// Sync verifies that the database is coherent on disk and no
|
|
||||||
// outstanding transactions are in flight.
|
|
||||||
Sync() (err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DriverDB defines a structure for backend drivers to use when they registered
|
|
||||||
// themselves as a backend which implements the Db interface.
|
|
||||||
type DriverDB struct {
|
|
||||||
DbType string
|
|
||||||
CreateDB func(args ...interface{}) (pbdb Db, err error)
|
|
||||||
OpenDB func(args ...interface{}) (pbdb Db, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TxListReply is used to return individual transaction information when
|
|
||||||
// data about multiple transactions is requested in a single call.
|
|
||||||
type TxListReply struct {
|
|
||||||
Sha *wire.ShaHash
|
|
||||||
Tx *wire.MsgTx
|
|
||||||
BlkSha *wire.ShaHash
|
|
||||||
Height int32
|
|
||||||
TxSpent []bool
|
|
||||||
Err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddrIndexKeySize is the number of bytes used by keys into the BlockAddrIndex.
|
|
||||||
const AddrIndexKeySize = ripemd160.Size
|
|
||||||
|
|
||||||
// BlockAddrIndex represents the indexing structure for addresses.
|
|
||||||
// It maps a hash160 to a list of transaction locations within a block that
|
|
||||||
// either pays to or spends from the passed UTXO for the hash160.
|
|
||||||
type BlockAddrIndex map[[AddrIndexKeySize]byte][]*wire.TxLoc
|
|
||||||
|
|
||||||
// driverList holds all of the registered database backends.
|
|
||||||
var driverList []DriverDB
|
|
||||||
|
|
||||||
// AddDBDriver adds a back end database driver to available interfaces.
|
|
||||||
func AddDBDriver(instance DriverDB) {
|
|
||||||
// TODO(drahn) Does this really need to check for duplicate names ?
|
|
||||||
for _, drv := range driverList {
|
|
||||||
// TODO(drahn) should duplicates be an error?
|
|
||||||
if drv.DbType == instance.DbType {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
driverList = append(driverList, instance)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateDB initializes and opens a database.
|
|
||||||
func CreateDB(dbtype string, args ...interface{}) (pbdb Db, err error) {
|
|
||||||
for _, drv := range driverList {
|
|
||||||
if drv.DbType == dbtype {
|
|
||||||
return drv.CreateDB(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, ErrDbUnknownType
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenDB opens an existing database.
|
|
||||||
func OpenDB(dbtype string, args ...interface{}) (pbdb Db, err error) {
|
|
||||||
for _, drv := range driverList {
|
|
||||||
if drv.DbType == dbtype {
|
|
||||||
return drv.OpenDB(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, ErrDbUnknownType
|
|
||||||
}
|
|
||||||
|
|
||||||
// SupportedDBs returns a slice of strings that represent the database drivers
|
|
||||||
// that have been registered and are therefore supported.
|
|
||||||
func SupportedDBs() []string {
|
|
||||||
var supportedDBs []string
|
|
||||||
for _, drv := range driverList {
|
|
||||||
supportedDBs = append(supportedDBs, drv.DbType)
|
|
||||||
}
|
|
||||||
return supportedDBs
|
|
||||||
}
|
|
|
@ -1,188 +0,0 @@
|
||||||
// Copyright (c) 2013-2015 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package database_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ignoreDbTypes are types which should be ignored when running tests
|
|
||||||
// that iterate all supported DB types. This allows some tests to add
|
|
||||||
// bogus drivers for testing purposes while still allowing other tests
|
|
||||||
// to easily iterate all supported drivers.
|
|
||||||
ignoreDbTypes = map[string]bool{"createopenfail": true}
|
|
||||||
)
|
|
||||||
|
|
||||||
// testNewestShaEmpty ensures that NewestSha returns the values expected by
|
|
||||||
// the interface contract.
|
|
||||||
func testNewestShaEmpty(t *testing.T, db database.Db) {
|
|
||||||
sha, height, err := db.NewestSha()
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("NewestSha error %v", err)
|
|
||||||
}
|
|
||||||
if !sha.IsEqual(&zeroHash) {
|
|
||||||
t.Errorf("NewestSha wrong hash got: %s, want %s", sha, &zeroHash)
|
|
||||||
|
|
||||||
}
|
|
||||||
if height != -1 {
|
|
||||||
t.Errorf("NewestSha wrong height got: %d, want %d", height, -1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestEmptyDB tests that empty databases are handled properly.
|
|
||||||
func TestEmptyDB(t *testing.T) {
|
|
||||||
for _, dbType := range database.SupportedDBs() {
|
|
||||||
// Ensure NewestSha returns expected values for a newly created
|
|
||||||
// db.
|
|
||||||
db, teardown, err := createDB(dbType, "emptydb", false)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to create test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
testNewestShaEmpty(t, db)
|
|
||||||
|
|
||||||
// Ensure NewestSha still returns expected values for an empty
|
|
||||||
// database after reopen.
|
|
||||||
db.Close()
|
|
||||||
db, err = openDB(dbType, "emptydb")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
testNewestShaEmpty(t, db)
|
|
||||||
db.Close()
|
|
||||||
|
|
||||||
// Clean up the old db.
|
|
||||||
teardown()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestAddDuplicateDriver ensures that adding a duplicate driver does not
|
|
||||||
// overwrite an existing one.
|
|
||||||
func TestAddDuplicateDriver(t *testing.T) {
|
|
||||||
supportedDBs := database.SupportedDBs()
|
|
||||||
if len(supportedDBs) == 0 {
|
|
||||||
t.Errorf("TestAddDuplicateDriver: No backends to test")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
dbType := supportedDBs[0]
|
|
||||||
|
|
||||||
// bogusCreateDB is a function which acts as a bogus create and open
|
|
||||||
// driver function and intentionally returns a failure that can be
|
|
||||||
// detected if the interface allows a duplicate driver to overwrite an
|
|
||||||
// existing one.
|
|
||||||
bogusCreateDB := func(args ...interface{}) (database.Db, error) {
|
|
||||||
return nil, fmt.Errorf("duplicate driver allowed for database "+
|
|
||||||
"type [%v]", dbType)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a driver that tries to replace an existing one. Set its
|
|
||||||
// create and open functions to a function that causes a test failure if
|
|
||||||
// they are invoked.
|
|
||||||
driver := database.DriverDB{
|
|
||||||
DbType: dbType,
|
|
||||||
CreateDB: bogusCreateDB,
|
|
||||||
OpenDB: bogusCreateDB,
|
|
||||||
}
|
|
||||||
database.AddDBDriver(driver)
|
|
||||||
|
|
||||||
// Ensure creating a database of the type that we tried to replace
|
|
||||||
// doesn't fail (if it does, it indicates the driver was erroneously
|
|
||||||
// replaced).
|
|
||||||
_, teardown, err := createDB(dbType, "dupdrivertest", true)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("TestAddDuplicateDriver: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
teardown()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestCreateOpenFail ensures that errors which occur while opening or closing
|
|
||||||
// a database are handled properly.
|
|
||||||
func TestCreateOpenFail(t *testing.T) {
|
|
||||||
// bogusCreateDB is a function which acts as a bogus create and open
|
|
||||||
// driver function that intentionally returns a failure which can be
|
|
||||||
// detected.
|
|
||||||
dbType := "createopenfail"
|
|
||||||
openError := fmt.Errorf("failed to create or open database for "+
|
|
||||||
"database type [%v]", dbType)
|
|
||||||
bogusCreateDB := func(args ...interface{}) (database.Db, error) {
|
|
||||||
return nil, openError
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create and add driver that intentionally fails when created or opened
|
|
||||||
// to ensure errors on database open and create are handled properly.
|
|
||||||
driver := database.DriverDB{
|
|
||||||
DbType: dbType,
|
|
||||||
CreateDB: bogusCreateDB,
|
|
||||||
OpenDB: bogusCreateDB,
|
|
||||||
}
|
|
||||||
database.AddDBDriver(driver)
|
|
||||||
|
|
||||||
// Ensure creating a database with the new type fails with the expected
|
|
||||||
// error.
|
|
||||||
_, err := database.CreateDB(dbType, "createfailtest")
|
|
||||||
if err != openError {
|
|
||||||
t.Errorf("TestCreateOpenFail: expected error not received - "+
|
|
||||||
"got: %v, want %v", err, openError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure opening a database with the new type fails with the expected
|
|
||||||
// error.
|
|
||||||
_, err = database.OpenDB(dbType, "openfailtest")
|
|
||||||
if err != openError {
|
|
||||||
t.Errorf("TestCreateOpenFail: expected error not received - "+
|
|
||||||
"got: %v, want %v", err, openError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestCreateOpenUnsupported ensures that attempting to create or open an
|
|
||||||
// unsupported database type is handled properly.
|
|
||||||
func TestCreateOpenUnsupported(t *testing.T) {
|
|
||||||
// Ensure creating a database with an unsupported type fails with the
|
|
||||||
// expected error.
|
|
||||||
dbType := "unsupported"
|
|
||||||
_, err := database.CreateDB(dbType, "unsupportedcreatetest")
|
|
||||||
if err != database.ErrDbUnknownType {
|
|
||||||
t.Errorf("TestCreateOpenUnsupported: expected error not "+
|
|
||||||
"received - got: %v, want %v", err, database.ErrDbUnknownType)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure opening a database with the new type fails with the expected
|
|
||||||
// error.
|
|
||||||
_, err = database.OpenDB(dbType, "unsupportedopentest")
|
|
||||||
if err != database.ErrDbUnknownType {
|
|
||||||
t.Errorf("TestCreateOpenUnsupported: expected error not "+
|
|
||||||
"received - got: %v, want %v", err, database.ErrDbUnknownType)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestInterface performs tests for the various interfaces of the database
|
|
||||||
// package which require state in the database for each supported database
|
|
||||||
// type (those loaded in common_test.go that is).
|
|
||||||
func TestInterface(t *testing.T) {
|
|
||||||
for _, dbType := range database.SupportedDBs() {
|
|
||||||
if _, exists := ignoreDbTypes[dbType]; !exists {
|
|
||||||
testInterface(t, dbType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestReorganization performs reorganization tests for each supported DB type
|
|
||||||
func TestReorganization(t *testing.T) {
|
|
||||||
for _, dbType := range database.SupportedDBs() {
|
|
||||||
if _, exists := ignoreDbTypes[dbType]; !exists {
|
|
||||||
testReorganization(t, dbType)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
database/doc.go (101 changed lines)
@@ -1,31 +1,94 @@
-// Copyright (c) 2013-2014 The btcsuite developers
+// Copyright (c) 2015-2016 The btcsuite developers
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

 /*
-Package database provides a database interface for the Bitcoin block chain.
-
-As of July 2014, there are over 309,000 blocks in the Bitcoin block chain and
-over 42 million transactions (which turns out to be over 21GB of data).
-This package provides a database layer to store and retrieve this data in a
-fairly simple and efficient manner. The use of this should not require specific
-knowledge of the database backend.
-
-Basic Design
-
-The basic design of this package is to provide two classes of items in a
-database; blocks and transactions (tx) where the block number increases
-monotonically. Each transaction belongs to a single block although a block can
-have a variable number of transactions. Along with these two items, several
-convenience functions for dealing with the database are provided as well as
-functions to query specific items that may be present in a block or tx.
-
-Usage
-
-At the highest level, the use of this packages just requires that you import it,
-setup a database, insert some data into it, and optionally, query the data back.
-The first block inserted into the database will be treated as the genesis block.
-Every subsequent block insert requires the referenced parent block to already
-exist.
+Package database provides a block and metadata storage database.
+
+Overview
+
+As of Feb 2016, there are over 400,000 blocks in the Bitcoin block chain and
+over 112 million transactions (which turns out to be over 60GB of data).
+This package provides a database layer to store and retrieve this data in a
+simple and efficient manner.
+
+The default backend, ffldb, has a strong focus on speed, efficiency, and
+robustness. It makes use of leveldb for the metadata, flat files for block
+storage, and strict checksums in key areas to ensure data integrity.
+
+A quick overview of the features database provides are as follows:
+
+- Key/value metadata store
+- Bitcoin block storage
+- Efficient retrieval of block headers and regions (transactions, scripts, etc)
+- Read-only and read-write transactions with both manual and managed modes
+- Nested buckets
+- Supports registration of backend databases
+- Comprehensive test coverage
+
+Database
+
+The main entry point is the DB interface. It exposes functionality for
+transactional-based access and storage of metadata and block data. It is
+obtained via the Create and Open functions which take a database type string
+that identifies the specific database driver (backend) to use as well as
+arguments specific to the specified driver.
+
+Namespaces
+
+The Namespace interface is an abstraction that provides facilities for obtaining
+transactions (the Tx interface) that are the basis of all database reads and
+writes. Unlike some database interfaces that support reading and writing
+without transactions, this interface requires transactions even when only
+reading or writing a single key.
+
+The Begin function provides an unmanaged transaction while the View and Update
+functions provide a managed transaction. These are described in more detail
+below.
+
+Transactions
+
+The Tx interface provides facilities for rolling back or committing changes that
+took place while the transaction was active. It also provides the root metadata
+bucket under which all keys, values, and nested buckets are stored. A
+transaction can either be read-only or read-write and managed or unmanaged.
+
+Managed versus Unmanaged Transactions
+
+A managed transaction is one where the caller provides a function to execute
+within the context of the transaction and the commit or rollback is handled
+automatically depending on whether or not the provided function returns an
+error. Attempting to manually call Rollback or Commit on the managed
+transaction will result in a panic.
+
+An unmanaged transaction, on the other hand, requires the caller to manually
+call Commit or Rollback when they are finished with it. Leaving transactions
+open for long periods of time can have several adverse effects, so it is
+recommended that managed transactions are used instead.
+
+Buckets
+
+The Bucket interface provides the ability to manipulate key/value pairs and
+nested buckets as well as iterate through them.
+
+The Get, Put, and Delete functions work with key/value pairs, while the Bucket,
+CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
+buckets. The ForEach function allows the caller to provide a function to be
+called with each key/value pair and nested bucket in the current bucket.
+
+Metadata Bucket
+
+As discussed above, all of the functions which are used to manipulate key/value
+pairs and nested buckets exist on the Bucket interface. The root metadata
+bucket is the upper-most bucket in which data is stored and is created at the
+same time as the database. Use the Metadata function on the Tx interface
+to retrieve it.
+
+Nested Buckets
+
+The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
+provide the ability to create an arbitrary number of nested buckets. It is
+a good idea to avoid a lot of buckets with little data in them as it could lead
+to poor page utilization depending on the specific driver in use.
 */
 package database
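The new doc.go text above explains that a managed transaction is committed or rolled back automatically based on the callback's return value. The following hedged sketch demonstrates that behavior using only calls that appear elsewhere in this commit (Create, Update, View, Metadata, Put, Get); the key name and the deliberately returned error are illustrative only and not part of the package:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/ffldb"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	dbPath := filepath.Join(os.TempDir(), "managed-tx-sketch")
	db, err := database.Create("ffldb", dbPath, wire.MainNet)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.RemoveAll(dbPath)
	defer db.Close()

	// A managed read-write transaction whose callback returns an error is
	// rolled back automatically, so the Put below never becomes visible.
	errAbort := errors.New("abort on purpose")
	err = db.Update(func(tx database.Tx) error {
		if err := tx.Metadata().Put([]byte("doomed"), []byte("value")); err != nil {
			return err
		}
		return errAbort // triggers the automatic rollback
	})
	if err == nil {
		fmt.Println("expected the update to report an error")
		return
	}

	// A managed read-only transaction confirms the write was discarded.
	_ = db.View(func(tx database.Tx) error {
		if tx.Metadata().Get([]byte("doomed")) == nil {
			fmt.Println("write was rolled back as described")
		}
		return nil
	})
}
```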
@@ -2,7 +2,7 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

-package database2
+package database

 import (
 	"fmt"
@@ -2,14 +2,14 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

-package database2_test
+package database_test

 import (
 	"fmt"
 	"testing"

-	database "github.com/btcsuite/btcd/database2"
-	_ "github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	_ "github.com/btcsuite/btcd/database/ffldb"
 )

 var (
|
||||||
// Use of this source code is governed by an ISC
|
// Use of this source code is governed by an ISC
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package database2
|
package database
|
||||||
|
|
||||||
import "fmt"
|
import "fmt"
|
||||||
|
|
|
@@ -2,13 +2,13 @@
 // Use of this source code is governed by an ISC
 // license that can be found in the LICENSE file.

-package database2_test
+package database_test

 import (
 	"errors"
 	"testing"

-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 )

 // TestErrorCodeStringer tests the stringized output for the ErrorCode type.
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
// Copyright (c) 2015-2016 The btcsuite developers
|
||||||
// Use of this source code is governed by an ISC
|
// Use of this source code is governed by an ISC
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package database_test
|
package database_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/chaincfg"
|
"github.com/btcsuite/btcd/chaincfg"
|
||||||
"github.com/btcsuite/btcd/database"
|
"github.com/btcsuite/btcd/database"
|
||||||
_ "github.com/btcsuite/btcd/database/memdb"
|
_ "github.com/btcsuite/btcd/database/ffldb"
|
||||||
|
"github.com/btcsuite/btcd/wire"
|
||||||
"github.com/btcsuite/btcutil"
|
"github.com/btcsuite/btcutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
// This example demonstrates creating a new database and inserting the genesis
|
// This example demonstrates creating a new database.
|
||||||
// block into it.
|
func ExampleCreate() {
|
||||||
func ExampleCreateDB() {
|
// This example assumes the ffldb driver is imported.
|
||||||
// Notice in these example imports that the memdb driver is loaded.
|
//
|
||||||
// Ordinarily this would be whatever driver(s) your application
|
|
||||||
// requires.
|
|
||||||
// import (
|
// import (
|
||||||
// "github.com/btcsuite/btcd/database"
|
// "github.com/btcsuite/btcd/database"
|
||||||
// _ "github.com/btcsuite/btcd/database/memdb"
|
// _ "github.com/btcsuite/btcd/database/ffldb"
|
||||||
// )
|
// )
|
||||||
|
|
||||||
// Create a database and schedule it to be closed on exit. This example
|
// Create a database and schedule it to be closed and removed on exit.
|
||||||
// uses a memory-only database to avoid needing to write anything to
|
// Typically you wouldn't want to remove the database right away like
|
||||||
// the disk. Typically, you would specify a persistent database driver
|
// this, nor put it in the temp directory, but it's done here to ensure
|
||||||
// such as "leveldb" and give it a database name as the second
|
// the example cleans up after itself.
|
||||||
// parameter.
|
dbPath := filepath.Join(os.TempDir(), "examplecreate")
|
||||||
db, err := database.CreateDB("memdb")
|
db, err := database.Create("ffldb", dbPath, wire.MainNet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
defer os.RemoveAll(dbPath)
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
// Insert the main network genesis block.
|
|
||||||
genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
|
|
||||||
newHeight, err := db.InsertBlock(genesis)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println("New height:", newHeight)
|
|
||||||
|
|
||||||
// Output:
|
// Output:
|
||||||
// New height: 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// exampleLoadDB is used in the example to elide the setup code.
|
// This example demonstrates creating a new database and using a managed
|
||||||
func exampleLoadDB() (database.Db, error) {
|
// read-write transaction to store and retrieve metadata.
|
||||||
db, err := database.CreateDB("memdb")
|
func Example_basicUsage() {
|
||||||
if err != nil {
|
// This example assumes the ffldb driver is imported.
|
||||||
return nil, err
|
//
|
||||||
}
|
// import (
|
||||||
|
// "github.com/btcsuite/btcd/database"
|
||||||
|
// _ "github.com/btcsuite/btcd/database/ffldb"
|
||||||
|
// )
|
||||||
|
|
||||||
// Insert the main network genesis block.
|
// Create a database and schedule it to be closed and removed on exit.
|
||||||
genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
|
// Typically you wouldn't want to remove the database right away like
|
||||||
_, err = db.InsertBlock(genesis)
|
// this, nor put it in the temp directory, but it's done here to ensure
|
||||||
if err != nil {
|
// the example cleans up after itself.
|
||||||
return nil, err
|
dbPath := filepath.Join(os.TempDir(), "exampleusage")
|
||||||
}
|
db, err := database.Create("ffldb", dbPath, wire.MainNet)
|
||||||
|
|
||||||
return db, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// This example demonstrates querying the database for the most recent best
|
|
||||||
// block height and hash.
|
|
||||||
func ExampleDb_newestSha() {
|
|
||||||
// Load a database for the purposes of this example and schedule it to
|
|
||||||
// be closed on exit. See the CreateDB example for more details on what
|
|
||||||
// this step is doing.
|
|
||||||
db, err := exampleLoadDB()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
defer os.RemoveAll(dbPath)
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
latestHash, latestHeight, err := db.NewestSha()
|
// Use the Update function of the database to perform a managed
|
||||||
|
// read-write transaction. The transaction will automatically be rolled
|
||||||
|
// back if the supplied inner function returns a non-nil error.
|
||||||
|
err = db.Update(func(tx database.Tx) error {
|
||||||
|
// Store a key/value pair directly in the metadata bucket.
|
||||||
|
// Typically a nested bucket would be used for a given feature,
|
||||||
|
// but this example is using the metadata bucket directly for
|
||||||
|
// simplicity.
|
||||||
|
key := []byte("mykey")
|
||||||
|
value := []byte("myvalue")
|
||||||
|
if err := tx.Metadata().Put(key, value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the key back and ensure it matches.
|
||||||
|
if !bytes.Equal(tx.Metadata().Get(key), value) {
|
||||||
|
return fmt.Errorf("unexpected value for key '%s'", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a new nested bucket under the metadata bucket.
|
||||||
|
nestedBucketKey := []byte("mybucket")
|
||||||
|
nestedBucket, err := tx.Metadata().CreateBucket(nestedBucketKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// The key from above that was set in the metadata bucket does
|
||||||
|
// not exist in this new nested bucket.
|
||||||
|
if nestedBucket.Get(key) != nil {
|
||||||
|
return fmt.Errorf("key '%s' is not expected nil", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
fmt.Println("Latest hash:", latestHash)
|
|
||||||
fmt.Println("Latest height:", latestHeight)
|
|
||||||
|
|
||||||
// Output:
|
// Output:
|
||||||
// Latest hash: 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
|
}
|
||||||
// Latest height: 0
|
|
||||||
|
// This example demonstrates creating a new database, using a managed read-write
|
||||||
|
// transaction to store a block, and using a managed read-only transaction to
|
||||||
|
// fetch the block.
|
||||||
|
func Example_blockStorageAndRetrieval() {
|
||||||
|
// This example assumes the ffldb driver is imported.
|
||||||
|
//
|
||||||
|
// import (
|
||||||
|
// "github.com/btcsuite/btcd/database"
|
||||||
|
// _ "github.com/btcsuite/btcd/database/ffldb"
|
||||||
|
// )
|
||||||
|
|
||||||
|
// Create a database and schedule it to be closed and removed on exit.
|
||||||
|
// Typically you wouldn't want to remove the database right away like
|
||||||
|
// this, nor put it in the temp directory, but it's done here to ensure
|
||||||
|
// the example cleans up after itself.
|
||||||
|
dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
|
||||||
|
db, err := database.Create("ffldb", dbPath, wire.MainNet)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(dbPath)
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Use the Update function of the database to perform a managed
|
||||||
|
// read-write transaction and store a genesis block in the database as
|
||||||
|
// and example.
|
||||||
|
err = db.Update(func(tx database.Tx) error {
|
||||||
|
genesisBlock := chaincfg.MainNetParams.GenesisBlock
|
||||||
|
return tx.StoreBlock(btcutil.NewBlock(genesisBlock))
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use the View function of the database to perform a managed read-only
|
||||||
|
// transaction and fetch the block stored above.
|
||||||
|
var loadedBlockBytes []byte
|
||||||
|
err = db.Update(func(tx database.Tx) error {
|
||||||
|
genesisHash := chaincfg.MainNetParams.GenesisHash
|
||||||
|
blockBytes, err := tx.FetchBlock(genesisHash)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// As documented, all data fetched from the database is only
|
||||||
|
// valid during a database transaction in order to support
|
||||||
|
// zero-copy backends. Thus, make a copy of the data so it
|
||||||
|
// can be used outside of the transaction.
|
||||||
|
loadedBlockBytes = make([]byte, len(blockBytes))
|
||||||
|
copy(loadedBlockBytes, blockBytes)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Typically at this point, the block could be deserialized via the
|
||||||
|
// wire.MsgBlock.Deserialize function or used in its serialized form
|
||||||
|
// depending on need. However, for this example, just display the
|
||||||
|
// number of serialized bytes to show it was loaded as expected.
|
||||||
|
fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// Serialized block size: 285 bytes
|
||||||
}
|
}
|
||||||
|
|
|
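The new examples above only exercise the write path through Update. As a companion, here is a minimal sketch of the corresponding read path, assuming the same ffldb driver registration and the database.Open and View entry points of the new package; the path and key names are the illustrative ones from Example_basicUsage, not part of this commit.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/btcsuite/btcd/database"
	_ "github.com/btcsuite/btcd/database/ffldb"
	"github.com/btcsuite/btcd/wire"
)

func main() {
	// Open a database previously created with database.Create.  The path
	// here is illustrative; use whatever path the application created the
	// database under.
	dbPath := filepath.Join(os.TempDir(), "exampleusage")
	db, err := database.Open("ffldb", dbPath, wire.MainNet)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()

	// Use a managed read-only transaction to fetch the value stored by the
	// read-write example above.  View never mutates the database, so it is
	// the natural entry point for pure queries.
	err = db.View(func(tx database.Tx) error {
		fmt.Printf("mykey = %s\n", tx.Metadata().Get([]byte("mykey")))
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
}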
@ -10,7 +10,7 @@ interface. The functions, constants, and variables are only exported while the
 tests are being run.
 */
 
-package database2
+package database
 
 // TstNumErrorCodes makes the internal numErrorCodes parameter available to the
 // test package.
@ -10,7 +10,7 @@ import (
 	"testing"
 
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcutil"
 )
 
@ -17,7 +17,7 @@ import (
 	"path/filepath"
 	"sync"
 
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 )
 
@ -14,8 +14,8 @@ import (
 	"sort"
 	"sync"
 
-	database "github.com/btcsuite/btcd/database2"
-	"github.com/btcsuite/btcd/database2/internal/treap"
+	"github.com/btcsuite/btcd/database"
+	"github.com/btcsuite/btcd/database/internal/treap"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 	"github.com/btcsuite/goleveldb/leveldb"
@ -10,7 +10,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/btcsuite/btcd/database2/internal/treap"
+	"github.com/btcsuite/btcd/database/internal/treap"
 	"github.com/btcsuite/goleveldb/leveldb"
 	"github.com/btcsuite/goleveldb/leveldb/iterator"
 	"github.com/btcsuite/goleveldb/leveldb/util"
@ -7,7 +7,7 @@ package ffldb
 import (
 	"fmt"
 
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btclog"
 )
@ -13,8 +13,8 @@ import (
 	"testing"
 
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
-	"github.com/btcsuite/btcd/database2/ffldb"
+	"github.com/btcsuite/btcd/database"
+	"github.com/btcsuite/btcd/database/ffldb"
 	"github.com/btcsuite/btcutil"
 )
 
@ -11,7 +11,7 @@ The functions are only exported while the tests are being run.
 
 package ffldb
 
-import database "github.com/btcsuite/btcd/database2"
+import "github.com/btcsuite/btcd/database"
 
 // TstRunWithMaxBlockFileSize runs the passed function with the maximum allowed
 // file size for the database set to the provided value.  The value will be set
@ -26,7 +26,7 @@ import (
 	"time"
 
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 )
@ -5,7 +5,7 @@
 package ffldb
 
 import (
-	"github.com/btcsuite/btcd/database2/internal/treap"
+	"github.com/btcsuite/btcd/database/internal/treap"
 	"github.com/btcsuite/goleveldb/leveldb/iterator"
 	"github.com/btcsuite/goleveldb/leveldb/util"
 )
@ -8,7 +8,7 @@ import (
 	"fmt"
 	"hash/crc32"
 
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 )
 
 // The serialized write cursor location format is:
@ -18,7 +18,7 @@ import (
 	"testing"
 
 	"github.com/btcsuite/btcd/chaincfg"
-	database "github.com/btcsuite/btcd/database2"
+	"github.com/btcsuite/btcd/database"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 	"github.com/btcsuite/goleveldb/leveldb"
@ -5,7 +5,7 @@
 // Parts of this interface were inspired heavily by the excellent boltdb project
 // at https://github.com/boltdb/bolt by Ben B. Johnson.
 
-package database2
+package database
 
 import (
 	"github.com/btcsuite/btcd/wire"
@ -94,7 +94,9 @@ type Bucket interface {
 	//   - ErrTxClosed if the transaction has already been closed
 	CreateBucketIfNotExists(key []byte) (Bucket, error)
 
-	// DeleteBucket removes a nested bucket with the given key.
+	// DeleteBucket removes a nested bucket with the given key.  This also
+	// includes removing all nested buckets and keys under the bucket being
+	// deleted.
 	//
 	// The interface contract guarantees at least the following errors will
 	// be returned (other implementation-specific errors are possible):
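The hunk above broadens the documented DeleteBucket contract to recursive removal. Below is a small sketch of how a feature might use the two Bucket methods named there, CreateBucketIfNotExists and DeleteBucket, inside a managed read-write transaction; the package, function, bucket, and key names are illustrative only and assume a database.DB obtained as in the examples further up.

package bucketsketch

import "github.com/btcsuite/btcd/database"

// resetFeatureBucket lazily creates a nested bucket under the metadata
// bucket, writes a single key into it, and then drops the bucket again.
// Per the documented contract, DeleteBucket also removes all nested
// buckets and keys stored under the bucket being deleted.
func resetFeatureBucket(db database.DB) error {
	bucketKey := []byte("myfeature")
	return db.Update(func(tx database.Tx) error {
		// CreateBucketIfNotExists is a no-op when the bucket already
		// exists, so this is safe to run on every start.
		bucket, err := tx.Metadata().CreateBucketIfNotExists(bucketKey)
		if err != nil {
			return err
		}
		if err := bucket.Put([]byte("somekey"), []byte("somevalue")); err != nil {
			return err
		}

		// Remove the bucket and everything under it.
		return tx.Metadata().DeleteBucket(bucketKey)
	})
}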
@ -1,627 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package database_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
|
||||||
)
|
|
||||||
|
|
||||||
// testContext is used to store context information about a running test which
|
|
||||||
// is passed into helper functions. The useSpends field indicates whether or
|
|
||||||
// not the spend data should be empty or figure it out based on the specific
|
|
||||||
// test blocks provided.  This is needed because during the first loop, when the
// blocks are being inserted, the tests run against the latest block and therefore
|
|
||||||
// none of the outputs can be spent yet. However, on subsequent runs, all
|
|
||||||
// blocks have been inserted and therefore some of the transaction outputs are
|
|
||||||
// spent.
|
|
||||||
type testContext struct {
|
|
||||||
t *testing.T
|
|
||||||
dbType string
|
|
||||||
db database.Db
|
|
||||||
blockHeight int32
|
|
||||||
blockHash *wire.ShaHash
|
|
||||||
block *btcutil.Block
|
|
||||||
useSpends bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// testInsertBlock ensures InsertBlock conforms to the interface contract.
|
|
||||||
func testInsertBlock(tc *testContext) bool {
|
|
||||||
// The block must insert without any errors.
|
|
||||||
newHeight, err := tc.db.InsertBlock(tc.block)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("InsertBlock (%s): failed to insert block #%d (%s) "+
|
|
||||||
"err %v", tc.dbType, tc.blockHeight, tc.blockHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The returned height must be the expected value.
|
|
||||||
if newHeight != tc.blockHeight {
|
|
||||||
tc.t.Errorf("InsertBlock (%s): height mismatch got: %v, "+
|
|
||||||
"want: %v", tc.dbType, newHeight, tc.blockHeight)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testNewestSha ensures the NewestSha returns the values expected by the
|
|
||||||
// interface contract.
|
|
||||||
func testNewestSha(tc *testContext) bool {
|
|
||||||
// The newest block hash and height must be returned without any errors.
|
|
||||||
sha, height, err := tc.db.NewestSha()
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("NewestSha (%s): block #%d (%s) error %v",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The returned hash must be the expected value.
|
|
||||||
if !sha.IsEqual(tc.blockHash) {
|
|
||||||
tc.t.Errorf("NewestSha (%s): block #%d (%s) wrong hash got: %s",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash, sha)
|
|
||||||
return false
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// The returned height must be the expected value.
|
|
||||||
if height != tc.blockHeight {
|
|
||||||
tc.t.Errorf("NewestSha (%s): block #%d (%s) wrong height "+
|
|
||||||
"got: %d", tc.dbType, tc.blockHeight, tc.blockHash,
|
|
||||||
height)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testExistsSha ensures ExistsSha conforms to the interface contract.
|
|
||||||
func testExistsSha(tc *testContext) bool {
|
|
||||||
// The block must exist in the database.
|
|
||||||
exists, err := tc.db.ExistsSha(tc.blockHash)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("ExistsSha (%s): block #%d (%s) unexpected error: "+
|
|
||||||
"%v", tc.dbType, tc.blockHeight, tc.blockHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !exists {
|
|
||||||
tc.t.Errorf("ExistsSha (%s): block #%d (%s) does not exist",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testFetchBlockBySha ensures FetchBlockBySha conforms to the interface
|
|
||||||
// contract.
|
|
||||||
func testFetchBlockBySha(tc *testContext) bool {
|
|
||||||
// The block must be fetchable by its hash without any errors.
|
|
||||||
blockFromDb, err := tc.db.FetchBlockBySha(tc.blockHash)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("FetchBlockBySha (%s): block #%d (%s) err: %v",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The block fetched from the database must give back the same MsgBlock
|
|
||||||
// and raw bytes that were stored.
|
|
||||||
if !reflect.DeepEqual(tc.block.MsgBlock(), blockFromDb.MsgBlock()) {
|
|
||||||
tc.t.Errorf("FetchBlockBySha (%s): block #%d (%s) does not "+
|
|
||||||
"match stored block\ngot: %v\nwant: %v", tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash,
|
|
||||||
spew.Sdump(blockFromDb.MsgBlock()),
|
|
||||||
spew.Sdump(tc.block.MsgBlock()))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
blockBytes, err := tc.block.Bytes()
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("block.Bytes: %v", err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
blockFromDbBytes, err := blockFromDb.Bytes()
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("blockFromDb.Bytes: %v", err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !reflect.DeepEqual(blockBytes, blockFromDbBytes) {
|
|
||||||
tc.t.Errorf("FetchBlockBySha (%s): block #%d (%s) bytes do "+
|
|
||||||
"not match stored bytes\ngot: %v\nwant: %v", tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash,
|
|
||||||
spew.Sdump(blockFromDbBytes), spew.Sdump(blockBytes))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testFetchBlockHeightBySha ensures FetchBlockHeightBySha conforms to the
|
|
||||||
// interface contract.
|
|
||||||
func testFetchBlockHeightBySha(tc *testContext) bool {
|
|
||||||
// The block height must be fetchable by its hash without any errors.
|
|
||||||
blockHeight, err := tc.db.FetchBlockHeightBySha(tc.blockHash)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("FetchBlockHeightBySha (%s): block #%d (%s) err: %v",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The block height fetched from the database must match the expected
|
|
||||||
// height.
|
|
||||||
if blockHeight != tc.blockHeight {
|
|
||||||
tc.t.Errorf("FetchBlockHeightBySha (%s): block #%d (%s) height "+
|
|
||||||
"does not match expected value - got: %v", tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash, blockHeight)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testFetchBlockHeaderBySha ensures FetchBlockHeaderBySha conforms to the
|
|
||||||
// interface contract.
|
|
||||||
func testFetchBlockHeaderBySha(tc *testContext) bool {
|
|
||||||
// The block header must be fetchable by its hash without any errors.
|
|
||||||
blockHeader, err := tc.db.FetchBlockHeaderBySha(tc.blockHash)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("FetchBlockHeaderBySha (%s): block #%d (%s) err: %v",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The block header fetched from the database must give back the same
|
|
||||||
// BlockHeader that was stored.
|
|
||||||
if !reflect.DeepEqual(&tc.block.MsgBlock().Header, blockHeader) {
|
|
||||||
tc.t.Errorf("FetchBlockHeaderBySha (%s): block header #%d (%s) "+
|
|
||||||
" does not match stored block\ngot: %v\nwant: %v",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash,
|
|
||||||
spew.Sdump(blockHeader),
|
|
||||||
spew.Sdump(&tc.block.MsgBlock().Header))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testFetchBlockShaByHeight ensures FetchBlockShaByHeight conforms to the
|
|
||||||
// interface contract.
|
|
||||||
func testFetchBlockShaByHeight(tc *testContext) bool {
|
|
||||||
// The hash returned for the block by its height must be the expected
|
|
||||||
// value.
|
|
||||||
hashFromDb, err := tc.db.FetchBlockShaByHeight(tc.blockHeight)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("FetchBlockShaByHeight (%s): block #%d (%s) err: %v",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !hashFromDb.IsEqual(tc.blockHash) {
|
|
||||||
tc.t.Errorf("FetchBlockShaByHeight (%s): block #%d (%s) hash "+
|
|
||||||
"does not match expected value - got: %v", tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash, hashFromDb)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func testFetchBlockShaByHeightErrors(tc *testContext) bool {
|
|
||||||
// Invalid heights must error and return a nil hash.
|
|
||||||
tests := []int32{-1, tc.blockHeight + 1, tc.blockHeight + 2}
|
|
||||||
for i, wantHeight := range tests {
|
|
||||||
hashFromDb, err := tc.db.FetchBlockShaByHeight(wantHeight)
|
|
||||||
if err == nil {
|
|
||||||
tc.t.Errorf("FetchBlockShaByHeight #%d (%s): did not "+
|
|
||||||
"return error on invalid index: %d - got: %v, "+
|
|
||||||
"want: non-nil", i, tc.dbType, wantHeight, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if hashFromDb != nil {
|
|
||||||
tc.t.Errorf("FetchBlockShaByHeight #%d (%s): returned "+
|
|
||||||
"hash is not nil on invalid index: %d - got: "+
|
|
||||||
"%v, want: nil", i, tc.dbType, wantHeight, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testExistsTxSha ensures ExistsTxSha conforms to the interface contract.
|
|
||||||
func testExistsTxSha(tc *testContext) bool {
|
|
||||||
for i, tx := range tc.block.Transactions() {
|
|
||||||
// The transaction must exist in the database.
|
|
||||||
txHash := tx.Sha()
|
|
||||||
exists, err := tc.db.ExistsTxSha(txHash)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) tx #%d "+
|
|
||||||
"(%s) unexpected error: %v", tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash, i, txHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !exists {
|
|
||||||
_, err := tc.db.FetchTxBySha(txHash)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("ExistsTxSha (%s): block #%d (%s) "+
|
|
||||||
"tx #%d (%s) does not exist", tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash, i, txHash)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testFetchTxBySha ensures FetchTxBySha conforms to the interface contract.
|
|
||||||
func testFetchTxBySha(tc *testContext) bool {
|
|
||||||
for i, tx := range tc.block.Transactions() {
|
|
||||||
txHash := tx.Sha()
|
|
||||||
txReplyList, err := tc.db.FetchTxBySha(txHash)
|
|
||||||
if err != nil {
|
|
||||||
tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+
|
|
||||||
"tx #%d (%s) err: %v", tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(txReplyList) == 0 {
|
|
||||||
tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+
|
|
||||||
"tx #%d (%s) did not return reply data",
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash, i,
|
|
||||||
txHash)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
txFromDb := txReplyList[len(txReplyList)-1].Tx
|
|
||||||
if !reflect.DeepEqual(tx.MsgTx(), txFromDb) {
|
|
||||||
tc.t.Errorf("FetchTxBySha (%s): block #%d (%s) "+
|
|
||||||
"tx #%d (%s) does not match stored tx\n"+
|
|
||||||
"got: %v\nwant: %v", tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, spew.Sdump(txFromDb),
|
|
||||||
spew.Sdump(tx.MsgTx()))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// expectedSpentBuf returns the expected transaction spend information depending
|
|
||||||
// on the block height and transaction number.  NOTE: These figures are
|
|
||||||
// only valid for the specific set of test data provided at the time these tests
|
|
||||||
// were written. In particular, this means the first 256 blocks of the mainnet
|
|
||||||
// block chain.
|
|
||||||
//
|
|
||||||
// On the first run through, while the blocks are still being inserted, the tests
// run against the latest block and therefore none of the outputs can
|
|
||||||
// be spent yet. However, on subsequent runs, all blocks have been inserted and
|
|
||||||
// therefore some of the transaction outputs are spent.
|
|
||||||
func expectedSpentBuf(tc *testContext, txNum int) []bool {
|
|
||||||
numTxOut := len(tc.block.MsgBlock().Transactions[txNum].TxOut)
|
|
||||||
spentBuf := make([]bool, numTxOut)
|
|
||||||
if tc.useSpends {
|
|
||||||
if tc.blockHeight == 9 && txNum == 0 {
|
|
||||||
// Spent by block 170, tx 1, input 0.
|
|
||||||
// tx f4184fc596403b9d638783cf57adfe4c75c605f6356fbc91338530e9831e9e16
|
|
||||||
spentBuf[0] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if tc.blockHeight == 170 && txNum == 1 {
|
|
||||||
// Spent by block 181, tx 1, input 0.
|
|
||||||
// tx a16f3ce4dd5deb92d98ef5cf8afeaf0775ebca408f708b2146c4fb42b41e14be
|
|
||||||
spentBuf[1] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if tc.blockHeight == 181 && txNum == 1 {
|
|
||||||
// Spent by block 182, tx 1, input 0.
|
|
||||||
// tx 591e91f809d716912ca1d4a9295e70c3e78bab077683f79350f101da64588073
|
|
||||||
spentBuf[1] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if tc.blockHeight == 182 && txNum == 1 {
|
|
||||||
// Spent by block 221, tx 1, input 0.
|
|
||||||
// tx 298ca2045d174f8a158961806ffc4ef96fad02d71a6b84d9fa0491813a776160
|
|
||||||
spentBuf[0] = true
|
|
||||||
|
|
||||||
// Spent by block 183, tx 1, input 0.
|
|
||||||
// tx 12b5633bad1f9c167d523ad1aa1947b2732a865bf5414eab2f9e5ae5d5c191ba
|
|
||||||
spentBuf[1] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if tc.blockHeight == 183 && txNum == 1 {
|
|
||||||
// Spent by block 187, tx 1, input 0.
|
|
||||||
// tx 4385fcf8b14497d0659adccfe06ae7e38e0b5dc95ff8a13d7c62035994a0cd79
|
|
||||||
spentBuf[0] = true
|
|
||||||
|
|
||||||
// Spent by block 248, tx 1, input 0.
|
|
||||||
// tx 828ef3b079f9c23829c56fe86e85b4a69d9e06e5b54ea597eef5fb3ffef509fe
|
|
||||||
spentBuf[1] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return spentBuf
|
|
||||||
}
|
|
||||||
|
|
||||||
func testFetchTxByShaListCommon(tc *testContext, includeSpent bool) bool {
|
|
||||||
fetchFunc := tc.db.FetchUnSpentTxByShaList
|
|
||||||
funcName := "FetchUnSpentTxByShaList"
|
|
||||||
if includeSpent {
|
|
||||||
fetchFunc = tc.db.FetchTxByShaList
|
|
||||||
funcName = "FetchTxByShaList"
|
|
||||||
}
|
|
||||||
|
|
||||||
transactions := tc.block.Transactions()
|
|
||||||
txHashes := make([]*wire.ShaHash, len(transactions))
|
|
||||||
for i, tx := range transactions {
|
|
||||||
txHashes[i] = tx.Sha()
|
|
||||||
}
|
|
||||||
|
|
||||||
txReplyList := fetchFunc(txHashes)
|
|
||||||
if len(txReplyList) != len(txHashes) {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx reply list does not "+
|
|
||||||
" match expected length - got: %v, want: %v", funcName,
|
|
||||||
tc.dbType, tc.blockHeight, tc.blockHash,
|
|
||||||
len(txReplyList), len(txHashes))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i, tx := range transactions {
|
|
||||||
txHash := tx.Sha()
|
|
||||||
txD := txReplyList[i]
|
|
||||||
|
|
||||||
// The transaction hash in the reply must be the expected value.
|
|
||||||
if !txD.Sha.IsEqual(txHash) {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
|
|
||||||
"hash does not match expected value - got %v",
|
|
||||||
funcName, tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, txD.Sha)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The reply must not indicate any errors.
|
|
||||||
if txD.Err != nil {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
|
|
||||||
"returned unexpected error - got %v, want nil",
|
|
||||||
funcName, tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, txD.Err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The transaction in the reply fetched from the database must
|
|
||||||
// be the same MsgTx that was stored.
|
|
||||||
if !reflect.DeepEqual(tx.MsgTx(), txD.Tx) {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) does "+
|
|
||||||
"not match stored tx\ngot: %v\nwant: %v",
|
|
||||||
funcName, tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, spew.Sdump(txD.Tx),
|
|
||||||
spew.Sdump(tx.MsgTx()))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The block hash in the reply from the database must be the
|
|
||||||
// expected value.
|
|
||||||
if txD.BlkSha == nil {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
|
|
||||||
"returned nil block hash", funcName, tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash, i, txHash)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !txD.BlkSha.IsEqual(tc.blockHash) {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s)"+
|
|
||||||
"returned unexpected block hash - got %v",
|
|
||||||
funcName, tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, txD.BlkSha)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The block height in the reply from the database must be the
|
|
||||||
// expected value.
|
|
||||||
if txD.Height != tc.blockHeight {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
|
|
||||||
"returned unexpected block height - got %v",
|
|
||||||
funcName, tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, txD.Height)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The spend data in the reply from the database must not
|
|
||||||
// indicate any of the transactions that were just inserted are
|
|
||||||
// spent.
|
|
||||||
if txD.TxSpent == nil {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
|
|
||||||
"returned nil spend data", funcName, tc.dbType,
|
|
||||||
tc.blockHeight, tc.blockHash, i, txHash)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
spentBuf := expectedSpentBuf(tc, i)
|
|
||||||
if !reflect.DeepEqual(txD.TxSpent, spentBuf) {
|
|
||||||
tc.t.Errorf("%s (%s): block #%d (%s) tx #%d (%s) "+
|
|
||||||
"returned unexpected spend data - got %v, "+
|
|
||||||
"want %v", funcName, tc.dbType, tc.blockHeight,
|
|
||||||
tc.blockHash, i, txHash, txD.TxSpent, spentBuf)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testFetchTxByShaList ensures FetchTxByShaList conforms to the interface
|
|
||||||
// contract.
|
|
||||||
func testFetchTxByShaList(tc *testContext) bool {
|
|
||||||
return testFetchTxByShaListCommon(tc, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// testFetchUnSpentTxByShaList ensures FetchUnSpentTxByShaList conforms to the
|
|
||||||
// interface contract.
|
|
||||||
func testFetchUnSpentTxByShaList(tc *testContext) bool {
|
|
||||||
return testFetchTxByShaListCommon(tc, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// testIntegrity performs a series of tests against the interface functions
|
|
||||||
// which fetch and check for data existence.
|
|
||||||
func testIntegrity(tc *testContext) bool {
|
|
||||||
// The block must now exist in the database.
|
|
||||||
if !testExistsSha(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loading the block back from the database must give back
|
|
||||||
// the same MsgBlock and raw bytes that were stored.
|
|
||||||
if !testFetchBlockBySha(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The height returned for the block given its hash must be the
|
|
||||||
// expected value
|
|
||||||
if !testFetchBlockHeightBySha(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loading the header back from the database must give back
|
|
||||||
// the same BlockHeader that was stored.
|
|
||||||
if !testFetchBlockHeaderBySha(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// The hash returned for the block by its height must be the
|
|
||||||
// expected value.
|
|
||||||
if !testFetchBlockShaByHeight(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// All of the transactions in the block must now exist in the
|
|
||||||
// database.
|
|
||||||
if !testExistsTxSha(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loading all of the transactions in the block back from the
|
|
||||||
// database must give back the same MsgTx that was stored.
|
|
||||||
if !testFetchTxBySha(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// All of the transactions in the block must be fetchable via
|
|
||||||
// FetchTxByShaList and all of the list replies must have the
|
|
||||||
// expected values.
|
|
||||||
if !testFetchTxByShaList(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// All of the transactions in the block must be fetchable via
|
|
||||||
// FetchUnSpentTxByShaList and all of the list replies must have
|
|
||||||
// the expected values.
|
|
||||||
if !testFetchUnSpentTxByShaList(tc) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// testInterface performs tests for the various interfaces of the database
|
|
||||||
// package which require state in the database for the given database type.
|
|
||||||
func testInterface(t *testing.T, dbType string) {
|
|
||||||
db, teardown, err := setupDB(dbType, "interface")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to create test database (%s) %v", dbType, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer teardown()
|
|
||||||
|
|
||||||
// Load up a bunch of test blocks.
|
|
||||||
blocks, err := loadBlocks(t)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Unable to load blocks from test data %v: %v",
|
|
||||||
blockDataFile, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a test context to pass around.
|
|
||||||
context := testContext{t: t, dbType: dbType, db: db}
|
|
||||||
|
|
||||||
t.Logf("Loaded %d blocks for testing %s", len(blocks), dbType)
|
|
||||||
for height := int32(1); height < int32(len(blocks)); height++ {
|
|
||||||
// Get the appropriate block and hash and update the test
|
|
||||||
// context accordingly.
|
|
||||||
block := blocks[height]
|
|
||||||
context.blockHeight = height
|
|
||||||
context.blockHash = block.Sha()
|
|
||||||
context.block = block
|
|
||||||
|
|
||||||
// The block must insert without any errors and return the
|
|
||||||
// expected height.
|
|
||||||
if !testInsertBlock(&context) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// The NewestSha function must return the correct information
|
|
||||||
// about the block that was just inserted.
|
|
||||||
if !testNewestSha(&context) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// The block must pass all data integrity tests which involve
|
|
||||||
// invoking all and testing the result of all interface
|
|
||||||
// functions which deal with fetch and checking for data
|
|
||||||
// existence.
|
|
||||||
if !testIntegrity(&context) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if !testFetchBlockShaByHeightErrors(&context) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run the data integrity tests again after all blocks have been
|
|
||||||
// inserted to ensure the spend tracking is working properly.
|
|
||||||
context.useSpends = true
|
|
||||||
for height := int32(0); height < int32(len(blocks)); height++ {
|
|
||||||
// Get the appropriate block and hash and update the
|
|
||||||
// test context accordingly.
|
|
||||||
block := blocks[height]
|
|
||||||
context.blockHeight = height
|
|
||||||
context.blockHash = block.Sha()
|
|
||||||
context.block = block
|
|
||||||
|
|
||||||
testIntegrity(&context)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(davec): Need to figure out how to handle the special checks
|
|
||||||
// required for the duplicate transactions allowed by blocks 91842 and
|
|
||||||
// 91880 on the main network due to the old miner + Satoshi client bug.
|
|
||||||
|
|
||||||
// TODO(davec): Add tests for error conditions:
|
|
||||||
/*
|
|
||||||
- Don't allow duplicate blocks
|
|
||||||
- Don't allow insertion of block that contains a transaction that
|
|
||||||
already exists unless the previous one is fully spent
|
|
||||||
- Don't allow block that has a duplicate transaction in itself
|
|
||||||
- Don't allow block which contains a tx that references a missing tx
|
|
||||||
- Don't allow block which contains a tx that references another tx
|
|
||||||
that comes after it in the same block
|
|
||||||
*/
|
|
||||||
|
|
||||||
// TODO(davec): Add tests for the following functions:
|
|
||||||
/*
|
|
||||||
- Close()
|
|
||||||
- DropAfterBlockBySha(*wire.ShaHash) (err error)
|
|
||||||
x ExistsSha(sha *wire.ShaHash) (exists bool)
|
|
||||||
x FetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error)
|
|
||||||
x FetchBlockShaByHeight(height int32) (sha *wire.ShaHash, err error)
|
|
||||||
- FetchHeightRange(startHeight, endHeight int32) (rshalist []wire.ShaHash, err error)
|
|
||||||
x ExistsTxSha(sha *wire.ShaHash) (exists bool)
|
|
||||||
x FetchTxBySha(txsha *wire.ShaHash) ([]*TxListReply, error)
|
|
||||||
x FetchTxByShaList(txShaList []*wire.ShaHash) []*TxListReply
|
|
||||||
x FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*TxListReply
|
|
||||||
x InsertBlock(block *btcutil.Block) (height int32, err error)
|
|
||||||
x NewestSha() (sha *wire.ShaHash, height int32, err error)
|
|
||||||
- RollbackClose()
|
|
||||||
- Sync()
|
|
||||||
*/
|
|
||||||
}
|
|
|
@ -1,346 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FetchBlockBySha - return a btcutil Block
|
|
||||||
func (db *LevelDb) FetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
return db.fetchBlockBySha(sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchBlockBySha - return a btcutil Block
|
|
||||||
// Must be called with db lock held.
|
|
||||||
func (db *LevelDb) fetchBlockBySha(sha *wire.ShaHash) (blk *btcutil.Block, err error) {
|
|
||||||
|
|
||||||
buf, height, err := db.fetchSha(sha)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
blk, err = btcutil.NewBlockFromBytes(buf)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
blk.SetHeight(height)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchBlockHeightBySha returns the block height for the given hash. This is
|
|
||||||
// part of the database.Db interface implementation.
|
|
||||||
func (db *LevelDb) FetchBlockHeightBySha(sha *wire.ShaHash) (int32, error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
return db.getBlkLoc(sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchBlockHeaderBySha - return a wire.BlockHeader
|
|
||||||
func (db *LevelDb) FetchBlockHeaderBySha(sha *wire.ShaHash) (bh *wire.BlockHeader, err error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
// Read the raw block from the database.
|
|
||||||
buf, _, err := db.fetchSha(sha)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Only deserialize the header portion and ensure the transaction count
|
|
||||||
// is zero since this is a standalone header.
|
|
||||||
var blockHeader wire.BlockHeader
|
|
||||||
err = blockHeader.Deserialize(bytes.NewReader(buf))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bh = &blockHeader
|
|
||||||
|
|
||||||
return bh, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) getBlkLoc(sha *wire.ShaHash) (int32, error) {
|
|
||||||
key := shaBlkToKey(sha)
|
|
||||||
|
|
||||||
data, err := db.lDb.Get(key, db.ro)
|
|
||||||
if err != nil {
|
|
||||||
if err == leveldb.ErrNotFound {
|
|
||||||
err = database.ErrBlockShaMissing
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// deserialize
|
|
||||||
blkHeight := binary.LittleEndian.Uint64(data)
|
|
||||||
|
|
||||||
return int32(blkHeight), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) getBlkByHeight(blkHeight int32) (rsha *wire.ShaHash, rbuf []byte, err error) {
|
|
||||||
var blkVal []byte
|
|
||||||
|
|
||||||
key := int64ToKey(int64(blkHeight))
|
|
||||||
|
|
||||||
blkVal, err = db.lDb.Get(key, db.ro)
|
|
||||||
if err != nil {
|
|
||||||
log.Tracef("failed to find height %v", blkHeight)
|
|
||||||
return // exists ???
|
|
||||||
}
|
|
||||||
|
|
||||||
var sha wire.ShaHash
|
|
||||||
|
|
||||||
sha.SetBytes(blkVal[0:32])
|
|
||||||
|
|
||||||
blockdata := make([]byte, len(blkVal[32:]))
|
|
||||||
copy(blockdata[:], blkVal[32:])
|
|
||||||
|
|
||||||
return &sha, blockdata, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) getBlk(sha *wire.ShaHash) (rblkHeight int32, rbuf []byte, err error) {
|
|
||||||
var blkHeight int32
|
|
||||||
|
|
||||||
blkHeight, err = db.getBlkLoc(sha)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf []byte
|
|
||||||
|
|
||||||
_, buf, err = db.getBlkByHeight(blkHeight)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return blkHeight, buf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) setBlk(sha *wire.ShaHash, blkHeight int32, buf []byte) {
|
|
||||||
// serialize
|
|
||||||
var lw [8]byte
|
|
||||||
binary.LittleEndian.PutUint64(lw[0:8], uint64(blkHeight))
|
|
||||||
|
|
||||||
shaKey := shaBlkToKey(sha)
|
|
||||||
blkKey := int64ToKey(int64(blkHeight))
|
|
||||||
|
|
||||||
blkVal := make([]byte, len(sha)+len(buf))
|
|
||||||
copy(blkVal[0:], sha[:])
|
|
||||||
copy(blkVal[len(sha):], buf)
|
|
||||||
|
|
||||||
db.lBatch().Put(shaKey, lw[:])
|
|
||||||
db.lBatch().Put(blkKey, blkVal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// insertSha stores a block hash and its associated data block with a
|
|
||||||
// previous sha of `prevSha'.
|
|
||||||
// insertSha shall be called with db lock held
|
|
||||||
func (db *LevelDb) insertBlockData(sha *wire.ShaHash, prevSha *wire.ShaHash, buf []byte) (int32, error) {
|
|
||||||
oBlkHeight, err := db.getBlkLoc(prevSha)
|
|
||||||
if err != nil {
|
|
||||||
// check current block count
|
|
||||||
// if count != 0 {
|
|
||||||
// err = database.PrevShaMissing
|
|
||||||
// return
|
|
||||||
// }
|
|
||||||
oBlkHeight = -1
|
|
||||||
if db.nextBlock != 0 {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(drahn) check curfile filesize, increment curfile if this puts it over
|
|
||||||
blkHeight := oBlkHeight + 1
|
|
||||||
|
|
||||||
db.setBlk(sha, blkHeight, buf)
|
|
||||||
|
|
||||||
// update the last block cache
|
|
||||||
db.lastBlkShaCached = true
|
|
||||||
db.lastBlkSha = *sha
|
|
||||||
db.lastBlkIdx = blkHeight
|
|
||||||
db.nextBlock = blkHeight + 1
|
|
||||||
|
|
||||||
return blkHeight, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchSha returns the datablock for the given ShaHash.
|
|
||||||
func (db *LevelDb) fetchSha(sha *wire.ShaHash) (rbuf []byte,
|
|
||||||
rblkHeight int32, err error) {
|
|
||||||
var blkHeight int32
|
|
||||||
var buf []byte
|
|
||||||
|
|
||||||
blkHeight, buf, err = db.getBlk(sha)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf, blkHeight, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExistsSha looks up the given block hash
|
|
||||||
// returns true if it is present in the database.
|
|
||||||
func (db *LevelDb) ExistsSha(sha *wire.ShaHash) (bool, error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
// not in cache, try database
|
|
||||||
return db.blkExistsSha(sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// blkExistsSha looks up the given block hash
|
|
||||||
// returns true if it is present in the database.
|
|
||||||
// CALLED WITH LOCK HELD
|
|
||||||
func (db *LevelDb) blkExistsSha(sha *wire.ShaHash) (bool, error) {
|
|
||||||
key := shaBlkToKey(sha)
|
|
||||||
|
|
||||||
return db.lDb.Has(key, db.ro)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchBlockShaByHeight returns a block hash based on its height in the
|
|
||||||
// block chain.
|
|
||||||
func (db *LevelDb) FetchBlockShaByHeight(height int32) (sha *wire.ShaHash, err error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
return db.fetchBlockShaByHeight(height)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchBlockShaByHeight returns a block hash based on its height in the
|
|
||||||
// block chain.
|
|
||||||
func (db *LevelDb) fetchBlockShaByHeight(height int32) (rsha *wire.ShaHash, err error) {
|
|
||||||
key := int64ToKey(int64(height))
|
|
||||||
|
|
||||||
blkVal, err := db.lDb.Get(key, db.ro)
|
|
||||||
if err != nil {
|
|
||||||
log.Tracef("failed to find height %v", height)
|
|
||||||
return // exists ???
|
|
||||||
}
|
|
||||||
|
|
||||||
var sha wire.ShaHash
|
|
||||||
sha.SetBytes(blkVal[0:32])
|
|
||||||
|
|
||||||
return &sha, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchHeightRange looks up a range of blocks by the start and ending
|
|
||||||
// heights. Fetch is inclusive of the start height and exclusive of the
|
|
||||||
// ending height. To fetch all hashes from the start height until no
|
|
||||||
// more are present, use the special id `AllShas'.
|
|
||||||
func (db *LevelDb) FetchHeightRange(startHeight, endHeight int32) (rshalist []wire.ShaHash, err error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
var endidx int32
|
|
||||||
if endHeight == database.AllShas {
|
|
||||||
endidx = startHeight + 500
|
|
||||||
} else {
|
|
||||||
endidx = endHeight
|
|
||||||
}
|
|
||||||
|
|
||||||
shalist := make([]wire.ShaHash, 0, endidx-startHeight)
|
|
||||||
for height := startHeight; height < endidx; height++ {
|
|
||||||
// TODO(drahn) fix blkFile from height
|
|
||||||
|
|
||||||
key := int64ToKey(int64(height))
|
|
||||||
blkVal, lerr := db.lDb.Get(key, db.ro)
|
|
||||||
if lerr != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
var sha wire.ShaHash
|
|
||||||
sha.SetBytes(blkVal[0:32])
|
|
||||||
shalist = append(shalist, sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
//log.Tracef("FetchIdxRange idx %v %v returned %v shas err %v", startHeight, endHeight, len(shalist), err)
|
|
||||||
|
|
||||||
return shalist, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewestSha returns the hash and block height of the most recent (end) block of
|
|
||||||
// the block chain. It will return the zero hash, -1 for the block height, and
|
|
||||||
// no error (nil) if there are not any blocks in the database yet.
|
|
||||||
func (db *LevelDb) NewestSha() (rsha *wire.ShaHash, rblkid int32, err error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
if db.lastBlkIdx == -1 {
|
|
||||||
return &wire.ShaHash{}, -1, nil
|
|
||||||
}
|
|
||||||
sha := db.lastBlkSha
|
|
||||||
|
|
||||||
return &sha, db.lastBlkIdx, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkAddrIndexVersion returns an error if the address index version stored
|
|
||||||
// in the database is less than the current version, or if it doesn't exist.
|
|
||||||
// This function is used on startup to signal OpenDB to drop the address index
|
|
||||||
// if it's in an old, incompatible format.
|
|
||||||
func (db *LevelDb) checkAddrIndexVersion() error {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
data, err := db.lDb.Get(addrIndexVersionKey, db.ro)
|
|
||||||
if err != nil {
|
|
||||||
return database.ErrAddrIndexDoesNotExist
|
|
||||||
}
|
|
||||||
|
|
||||||
indexVersion := binary.LittleEndian.Uint16(data)
|
|
||||||
|
|
||||||
if indexVersion != uint16(addrIndexCurrentVersion) {
|
|
||||||
return database.ErrAddrIndexDoesNotExist
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchAddrIndexTip returns the last block height and block sha to be indexed.
|
|
||||||
// Meta-data about the address tip is currently cached in memory, and will be
|
|
||||||
// updated accordingly by functions that modify the state. This function is
|
|
||||||
// used on start up to load the info into memory. Callers will use the public
|
|
||||||
// version of this function below, which returns our cached copy.
|
|
||||||
func (db *LevelDb) fetchAddrIndexTip() (*wire.ShaHash, int32, error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
data, err := db.lDb.Get(addrIndexMetaDataKey, db.ro)
|
|
||||||
if err != nil {
|
|
||||||
return &wire.ShaHash{}, -1, database.ErrAddrIndexDoesNotExist
|
|
||||||
}
|
|
||||||
|
|
||||||
var blkSha wire.ShaHash
|
|
||||||
blkSha.SetBytes(data[0:32])
|
|
||||||
|
|
||||||
blkHeight := binary.LittleEndian.Uint64(data[32:])
|
|
||||||
|
|
||||||
return &blkSha, int32(blkHeight), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchAddrIndexTip returns the hash and block height of the most recent
|
|
||||||
// block whose transactions have been indexed by address. It will return
|
|
||||||
// ErrAddrIndexDoesNotExist along with a zero hash, and -1 if the
|
|
||||||
// addrindex hasn't yet been built up.
|
|
||||||
func (db *LevelDb) FetchAddrIndexTip() (*wire.ShaHash, int32, error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
if db.lastAddrIndexBlkIdx == -1 {
|
|
||||||
return &wire.ShaHash{}, -1, database.ErrAddrIndexDoesNotExist
|
|
||||||
}
|
|
||||||
sha := db.lastAddrIndexBlkSha
|
|
||||||
|
|
||||||
return &sha, db.lastAddrIndexBlkIdx, nil
|
|
||||||
}
|
|
|
@ -1,63 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
)
|
|
||||||
|
|
||||||
// we need to test for an empty database and make certain it returns the proper
|
|
||||||
// values
|
|
||||||
|
|
||||||
func TestEmptyDB(t *testing.T) {
|
|
||||||
|
|
||||||
dbname := "tstdbempty"
|
|
||||||
dbnamever := dbname + ".ver"
|
|
||||||
_ = os.RemoveAll(dbname)
|
|
||||||
_ = os.RemoveAll(dbnamever)
|
|
||||||
db, err := database.CreateDB("leveldb", dbname)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dbname)
|
|
||||||
defer os.RemoveAll(dbnamever)
|
|
||||||
|
|
||||||
sha, height, err := db.NewestSha()
|
|
||||||
if !sha.IsEqual(&wire.ShaHash{}) {
|
|
||||||
t.Errorf("sha not zero hash")
|
|
||||||
}
|
|
||||||
if height != -1 {
|
|
||||||
t.Errorf("height not -1 %v", height)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a reopen test
|
|
||||||
if err := db.Close(); err != nil {
|
|
||||||
t.Errorf("Close: unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
db, err = database.OpenDB("leveldb", dbname)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := db.Close(); err != nil {
|
|
||||||
t.Errorf("Close: unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
sha, height, err = db.NewestSha()
|
|
||||||
if !sha.IsEqual(&wire.ShaHash{}) {
|
|
||||||
t.Errorf("sha not zero hash")
|
|
||||||
}
|
|
||||||
if height != -1 {
|
|
||||||
t.Errorf("height not -1 %v", height)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,58 +0,0 @@
|
||||||
//
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb"
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb/opt"
|
|
||||||
)
|
|
||||||
|
|
||||||
type tst struct {
|
|
||||||
key int
|
|
||||||
value string
|
|
||||||
}
|
|
||||||
|
|
||||||
var dataset = []tst{
|
|
||||||
//var dataset = []struct { key int, value string } {
|
|
||||||
{1, "one"},
|
|
||||||
{2, "two"},
|
|
||||||
{3, "three"},
|
|
||||||
{4, "four"},
|
|
||||||
{5, "five"},
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
|
|
||||||
ro := &opt.ReadOptions{}
|
|
||||||
wo := &opt.WriteOptions{}
|
|
||||||
opts := &opt.Options{}
|
|
||||||
|
|
||||||
ldb, err := leveldb.OpenFile("dbfile", opts)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("db open failed %v\n", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
batch := new(leveldb.Batch)
|
|
||||||
for _, datum := range dataset {
|
|
||||||
key := fmt.Sprintf("%v", datum.key)
|
|
||||||
batch.Put([]byte(key), []byte(datum.value))
|
|
||||||
}
|
|
||||||
err = ldb.Write(batch, wo)
|
|
||||||
|
|
||||||
for _, datum := range dataset {
|
|
||||||
key := fmt.Sprintf("%v", datum.key)
|
|
||||||
data, err := ldb.Get([]byte(key), ro)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
fmt.Printf("db read failed %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if string(data) != datum.value {
|
|
||||||
fmt.Printf("mismatched data from db key %v val %v db %v", key, datum.value, data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fmt.Printf("completed\n")
|
|
||||||
ldb.Close()
|
|
||||||
}
|
|
|
@ -1,14 +0,0 @@
-// Copyright (c) 2013-2014 The btcsuite developers
-// Use of this source code is governed by an ISC
-// license that can be found in the LICENSE file.
-
-/*
-Package ldb implements an instance of the database package backed by leveldb.
-
-Database version number is stored in a flat file <dbname>.ver
-Currently a single (little-endian) integer in the file.  If there is
-additional data to save in the future, the presence of additional
-data can be indicated by changing the version number, then parsing the
-file differently.
-*/
-package ldb
@ -1,185 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Test_dupTx(t *testing.T) {
|
|
||||||
|
|
||||||
// Ignore db remove errors since it means we didn't have an old one.
|
|
||||||
dbname := fmt.Sprintf("tstdbdup0")
|
|
||||||
dbnamever := dbname + ".ver"
|
|
||||||
_ = os.RemoveAll(dbname)
|
|
||||||
_ = os.RemoveAll(dbnamever)
|
|
||||||
db, err := database.CreateDB("leveldb", dbname)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dbname)
|
|
||||||
defer os.RemoveAll(dbnamever)
|
|
||||||
defer func() {
|
|
||||||
if err := db.Close(); err != nil {
|
|
||||||
t.Errorf("Close: unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
testdatafile := filepath.Join("testdata", "blocks1-256.bz2")
|
|
||||||
blocks, err := loadBlocks(t, testdatafile)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Unable to load blocks from test data for: %v",
|
|
||||||
err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var lastSha *wire.ShaHash
|
|
||||||
|
|
||||||
// Populate with the first 256 blocks, so we have blocks to 'mess with'
|
|
||||||
err = nil
|
|
||||||
out:
|
|
||||||
for height := int32(0); height < int32(len(blocks)); height++ {
|
|
||||||
block := blocks[height]
|
|
||||||
|
|
||||||
// Except for NoVerify, which does not allow lookups, check the inputs.
|
|
||||||
mblock := block.MsgBlock()
|
|
||||||
var txneededList []*wire.ShaHash
|
|
||||||
for _, tx := range mblock.Transactions {
|
|
||||||
for _, txin := range tx.TxIn {
|
|
||||||
if txin.PreviousOutPoint.Index == uint32(4294967295) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
origintxsha := &txin.PreviousOutPoint.Hash
|
|
||||||
txneededList = append(txneededList, origintxsha)
|
|
||||||
|
|
||||||
exists, err := db.ExistsTxSha(origintxsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("ExistsTxSha: unexpected error %v ", err)
|
|
||||||
}
|
|
||||||
if !exists {
|
|
||||||
t.Errorf("referenced tx not found %v ", origintxsha)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.FetchTxBySha(origintxsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
txlist := db.FetchUnSpentTxByShaList(txneededList)
|
|
||||||
for _, txe := range txlist {
|
|
||||||
if txe.Err != nil {
|
|
||||||
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
newheight, err := db.InsertBlock(block)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to insert block %v err %v", height, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
if newheight != height {
|
|
||||||
t.Errorf("height mismatch expect %v returned %v", height, newheight)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
|
|
||||||
newSha, blkid, err := db.NewestSha()
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to obtain latest sha %v %v", height, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if blkid != height {
|
|
||||||
t.Errorf("height doe not match latest block height %v %v %v", blkid, height, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
blkSha := block.Sha()
|
|
||||||
if *newSha != *blkSha {
|
|
||||||
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
|
|
||||||
}
|
|
||||||
lastSha = blkSha
|
|
||||||
}
|
|
||||||
|
|
||||||
// generate a new block based on the last sha
|
|
||||||
// these block are not verified, so there are a bunch of garbage fields
|
|
||||||
// in the 'generated' block.
|
|
||||||
|
|
||||||
var bh wire.BlockHeader
|
|
||||||
|
|
||||||
bh.Version = 2
|
|
||||||
bh.PrevBlock = *lastSha
|
|
||||||
// Bits, Nonce are not filled in
|
|
||||||
|
|
||||||
mblk := wire.NewMsgBlock(&bh)
|
|
||||||
|
|
||||||
hash, _ := wire.NewShaHashFromStr("df2b060fa2e5e9c8ed5eaf6a45c13753ec8c63282b2688322eba40cd98ea067a")
|
|
||||||
|
|
||||||
po := wire.NewOutPoint(hash, 0)
|
|
||||||
txI := wire.NewTxIn(po, []byte("garbage"))
|
|
||||||
txO := wire.NewTxOut(50000000, []byte("garbageout"))
|
|
||||||
|
|
||||||
var tx wire.MsgTx
|
|
||||||
tx.AddTxIn(txI)
|
|
||||||
tx.AddTxOut(txO)
|
|
||||||
|
|
||||||
mblk.AddTransaction(&tx)
|
|
||||||
|
|
||||||
blk := btcutil.NewBlock(mblk)
|
|
||||||
|
|
||||||
fetchList := []*wire.ShaHash{hash}
|
|
||||||
listReply := db.FetchUnSpentTxByShaList(fetchList)
|
|
||||||
for _, lr := range listReply {
|
|
||||||
if lr.Err != nil {
|
|
||||||
t.Errorf("sha %v spent %v err %v\n", lr.Sha,
|
|
||||||
lr.TxSpent, lr.Err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = db.InsertBlock(blk)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to insert phony block %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ok, did it 'spend' the tx ?
|
|
||||||
|
|
||||||
listReply = db.FetchUnSpentTxByShaList(fetchList)
|
|
||||||
for _, lr := range listReply {
|
|
||||||
if lr.Err != database.ErrTxShaMissing {
|
|
||||||
t.Errorf("sha %v spent %v err %v\n", lr.Sha,
|
|
||||||
lr.TxSpent, lr.Err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
txlist := blk.Transactions()
|
|
||||||
for _, tx := range txlist {
|
|
||||||
txsha := tx.Sha()
|
|
||||||
txReply, err := db.FetchTxBySha(txsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("fully spent lookup %v err %v\n", hash, err)
|
|
||||||
} else {
|
|
||||||
for _, lr := range txReply {
|
|
||||||
if lr.Err != nil {
|
|
||||||
t.Errorf("stx %v spent %v err %v\n", lr.Sha,
|
|
||||||
lr.TxSpent, lr.Err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Logf("Dropping block")
|
|
||||||
|
|
||||||
err = db.DropAfterBlockBySha(lastSha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to drop spending block %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -1,198 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
_ "github.com/btcsuite/btcd/database/ldb"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
var tstBlocks []*btcutil.Block
|
|
||||||
|
|
||||||
func loadblocks(t *testing.T) []*btcutil.Block {
|
|
||||||
if len(tstBlocks) != 0 {
|
|
||||||
return tstBlocks
|
|
||||||
}
|
|
||||||
|
|
||||||
testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
|
|
||||||
blocks, err := loadBlocks(t, testdatafile)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Unable to load blocks from test data: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
tstBlocks = blocks
|
|
||||||
return blocks
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUnspentInsert(t *testing.T) {
|
|
||||||
testUnspentInsert(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
// insert every block in the test chain
|
|
||||||
// after each insert, fetch all the tx affected by the latest
|
|
||||||
// block and verify that the tx is spent/unspent.
|
|
||||||
// new tx should be fully unspent, referenced tx should have
|
|
||||||
// the associated txout set to spent.
|
|
||||||
func testUnspentInsert(t *testing.T) {
|
|
||||||
// Ignore db remove errors since it means we didn't have an old one.
|
|
||||||
dbname := fmt.Sprintf("tstdbuspnt1")
|
|
||||||
dbnamever := dbname + ".ver"
|
|
||||||
_ = os.RemoveAll(dbname)
|
|
||||||
_ = os.RemoveAll(dbnamever)
|
|
||||||
db, err := database.CreateDB("leveldb", dbname)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(dbname)
|
|
||||||
defer os.RemoveAll(dbnamever)
|
|
||||||
defer func() {
|
|
||||||
if err := db.Close(); err != nil {
|
|
||||||
t.Errorf("Close: unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
blocks := loadblocks(t)
|
|
||||||
endtest:
|
|
||||||
for height := int32(0); height < int32(len(blocks)); height++ {
|
|
||||||
|
|
||||||
block := blocks[height]
|
|
||||||
// look up inputs to this tx
|
|
||||||
mblock := block.MsgBlock()
|
|
||||||
var txneededList []*wire.ShaHash
|
|
||||||
var txlookupList []*wire.ShaHash
|
|
||||||
var txOutList []*wire.ShaHash
|
|
||||||
var txInList []*wire.OutPoint
|
|
||||||
for _, tx := range mblock.Transactions {
|
|
||||||
for _, txin := range tx.TxIn {
|
|
||||||
if txin.PreviousOutPoint.Index == uint32(4294967295) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
origintxsha := &txin.PreviousOutPoint.Hash
|
|
||||||
|
|
||||||
txInList = append(txInList, &txin.PreviousOutPoint)
|
|
||||||
txneededList = append(txneededList, origintxsha)
|
|
||||||
txlookupList = append(txlookupList, origintxsha)
|
|
||||||
|
|
||||||
exists, err := db.ExistsTxSha(origintxsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("ExistsTxSha: unexpected error %v ", err)
|
|
||||||
}
|
|
||||||
if !exists {
|
|
||||||
t.Errorf("referenced tx not found %v ", origintxsha)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
txshaname := tx.TxSha()
|
|
||||||
txlookupList = append(txlookupList, &txshaname)
|
|
||||||
txOutList = append(txOutList, &txshaname)
|
|
||||||
}
|
|
||||||
|
|
||||||
txneededmap := map[wire.ShaHash]*database.TxListReply{}
|
|
||||||
txlist := db.FetchUnSpentTxByShaList(txneededList)
|
|
||||||
for _, txe := range txlist {
|
|
||||||
if txe.Err != nil {
|
|
||||||
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
txneededmap[*txe.Sha] = txe
|
|
||||||
}
|
|
||||||
for _, spend := range txInList {
|
|
||||||
itxe := txneededmap[spend.Hash]
|
|
||||||
if itxe.TxSpent[spend.Index] == true {
|
|
||||||
t.Errorf("txin %v:%v is already spent", spend.Hash, spend.Index)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
newheight, err := db.InsertBlock(block)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to insert block %v err %v", height, err)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
if newheight != height {
|
|
||||||
t.Errorf("height mismatch expect %v returned %v", height, newheight)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
|
|
||||||
txlookupmap := map[wire.ShaHash]*database.TxListReply{}
|
|
||||||
txlist = db.FetchTxByShaList(txlookupList)
|
|
||||||
for _, txe := range txlist {
|
|
||||||
if txe.Err != nil {
|
|
||||||
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
txlookupmap[*txe.Sha] = txe
|
|
||||||
}
|
|
||||||
for _, spend := range txInList {
|
|
||||||
itxe := txlookupmap[spend.Hash]
|
|
||||||
if itxe.TxSpent[spend.Index] == false {
|
|
||||||
t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, txo := range txOutList {
|
|
||||||
itxe := txlookupmap[*txo]
|
|
||||||
for i, spent := range itxe.TxSpent {
|
|
||||||
if spent == true {
|
|
||||||
t.Errorf("freshly inserted tx %v already spent %v", txo, i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
if len(txInList) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dropblock := blocks[height-1]
|
|
||||||
|
|
||||||
err = db.DropAfterBlockBySha(dropblock.Sha())
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to drop block %v err %v", height, err)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
|
|
||||||
txlookupmap = map[wire.ShaHash]*database.TxListReply{}
|
|
||||||
txlist = db.FetchUnSpentTxByShaList(txlookupList)
|
|
||||||
for _, txe := range txlist {
|
|
||||||
if txe.Err != nil {
|
|
||||||
if _, ok := txneededmap[*txe.Sha]; ok {
|
|
||||||
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
}
|
|
||||||
txlookupmap[*txe.Sha] = txe
|
|
||||||
}
|
|
||||||
for _, spend := range txInList {
|
|
||||||
itxe := txlookupmap[spend.Hash]
|
|
||||||
if itxe.TxSpent[spend.Index] == true {
|
|
||||||
t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
newheight, err = db.InsertBlock(block)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to insert block %v err %v", height, err)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
txlookupmap = map[wire.ShaHash]*database.TxListReply{}
|
|
||||||
txlist = db.FetchTxByShaList(txlookupList)
|
|
||||||
for _, txe := range txlist {
|
|
||||||
if txe.Err != nil {
|
|
||||||
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
|
|
||||||
break endtest
|
|
||||||
}
|
|
||||||
txlookupmap[*txe.Sha] = txe
|
|
||||||
}
|
|
||||||
for _, spend := range txInList {
|
|
||||||
itxe := txlookupmap[spend.Hash]
|
|
||||||
if itxe.TxSpent[spend.Index] == false {
|
|
||||||
t.Errorf("txin %v:%v is unspent %v", spend.Hash, spend.Index, itxe.TxSpent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -1,66 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/btcsuite/golangcrypto/ripemd160"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAddrIndexKeySerialization(t *testing.T) {
|
|
||||||
var hash160Bytes [ripemd160.Size]byte
|
|
||||||
var packedIndex [12]byte
|
|
||||||
|
|
||||||
fakeHash160 := btcutil.Hash160([]byte("testing"))
|
|
||||||
copy(hash160Bytes[:], fakeHash160)
|
|
||||||
|
|
||||||
fakeIndex := txAddrIndex{
|
|
||||||
hash160: hash160Bytes,
|
|
||||||
blkHeight: 1,
|
|
||||||
txoffset: 5,
|
|
||||||
txlen: 360,
|
|
||||||
}
|
|
||||||
|
|
||||||
serializedKey := addrIndexToKey(&fakeIndex)
|
|
||||||
copy(packedIndex[:], serializedKey[23:35])
|
|
||||||
unpackedIndex := unpackTxIndex(packedIndex)
|
|
||||||
|
|
||||||
if unpackedIndex.blkHeight != fakeIndex.blkHeight {
|
|
||||||
t.Errorf("Incorrect block height. Unpack addr index key"+
|
|
||||||
"serialization failed. Expected %d, received %d",
|
|
||||||
1, unpackedIndex.blkHeight)
|
|
||||||
}
|
|
||||||
|
|
||||||
if unpackedIndex.txoffset != fakeIndex.txoffset {
|
|
||||||
t.Errorf("Incorrect tx offset. Unpack addr index key"+
|
|
||||||
"serialization failed. Expected %d, received %d",
|
|
||||||
5, unpackedIndex.txoffset)
|
|
||||||
}
|
|
||||||
|
|
||||||
if unpackedIndex.txlen != fakeIndex.txlen {
|
|
||||||
t.Errorf("Incorrect tx len. Unpack addr index key"+
|
|
||||||
"serialization failed. Expected %d, received %d",
|
|
||||||
360, unpackedIndex.txlen)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBytesPrefix(t *testing.T) {
|
|
||||||
testKey := []byte("a")
|
|
||||||
|
|
||||||
prefixRange := bytesPrefix(testKey)
|
|
||||||
if !bytes.Equal(prefixRange.Start, []byte("a")) {
|
|
||||||
t.Errorf("Wrong prefix start, got %d, expected %d", prefixRange.Start,
|
|
||||||
[]byte("a"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(prefixRange.Limit, []byte("b")) {
|
|
||||||
t.Errorf("Wrong prefix end, got %d, expected %d", prefixRange.Limit,
|
|
||||||
[]byte("b"))
|
|
||||||
}
|
|
||||||
}
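
For context, here is a standalone sketch of the kind of helper this test exercises, under the assumption (implied by the expectations above) that bytesPrefix returns the half-open key range [prefix, prefix with its last byte incremented), which covers every key that begins with the prefix. The type and function names below are illustrative, not the package's actual ones.

package main

import "fmt"

type keyRange struct {
	Start, Limit []byte
}

// prefixRange returns a range covering every key that begins with prefix,
// by incrementing the last byte that is below 0xFF (the all-0xFF edge
// case is ignored in this sketch).
func prefixRange(prefix []byte) keyRange {
	limit := append([]byte{}, prefix...)
	for i := len(limit) - 1; i >= 0; i-- {
		if limit[i] < 0xFF {
			limit[i]++
			limit = limit[:i+1]
			break
		}
	}
	return keyRange{Start: prefix, Limit: limit}
}

func main() {
	r := prefixRange([]byte("a"))
	fmt.Printf("%q %q\n", r.Start, r.Limit) // "a" "b"
}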
|
|
|
@@ -1,722 +0,0 @@
|
||||||
// Copyright (c) 2013-2015 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btclog"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb"
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb/opt"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
dbVersion int = 2
|
|
||||||
dbMaxTransCnt = 20000
|
|
||||||
dbMaxTransMem = 64 * 1024 * 1024 // 64 MB
|
|
||||||
)
|
|
||||||
|
|
||||||
var log = btclog.Disabled
|
|
||||||
|
|
||||||
type tTxInsertData struct {
|
|
||||||
txsha *wire.ShaHash
|
|
||||||
blockid int32
|
|
||||||
txoff int
|
|
||||||
txlen int
|
|
||||||
usedbuf []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// LevelDb holds internal state for the database.
|
|
||||||
type LevelDb struct {
|
|
||||||
// lock preventing multiple entry
|
|
||||||
dbLock sync.Mutex
|
|
||||||
|
|
||||||
// leveldb pieces
|
|
||||||
lDb *leveldb.DB
|
|
||||||
ro *opt.ReadOptions
|
|
||||||
wo *opt.WriteOptions
|
|
||||||
|
|
||||||
lbatch *leveldb.Batch
|
|
||||||
|
|
||||||
nextBlock int32
|
|
||||||
|
|
||||||
lastBlkShaCached bool
|
|
||||||
lastBlkSha wire.ShaHash
|
|
||||||
lastBlkIdx int32
|
|
||||||
|
|
||||||
lastAddrIndexBlkSha wire.ShaHash
|
|
||||||
lastAddrIndexBlkIdx int32
|
|
||||||
|
|
||||||
txUpdateMap map[wire.ShaHash]*txUpdateObj
|
|
||||||
txSpentUpdateMap map[wire.ShaHash]*spentTxUpdate
|
|
||||||
}
|
|
||||||
|
|
||||||
var self = database.DriverDB{DbType: "leveldb", CreateDB: CreateDB, OpenDB: OpenDB}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
database.AddDBDriver(self)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseArgs parses the arguments from the database package Open/Create methods.
|
|
||||||
func parseArgs(funcName string, args ...interface{}) (string, error) {
|
|
||||||
if len(args) != 1 {
|
|
||||||
return "", fmt.Errorf("Invalid arguments to ldb.%s -- "+
|
|
||||||
"expected database path string", funcName)
|
|
||||||
}
|
|
||||||
dbPath, ok := args[0].(string)
|
|
||||||
if !ok {
|
|
||||||
return "", fmt.Errorf("First argument to ldb.%s is invalid -- "+
|
|
||||||
"expected database path string", funcName)
|
|
||||||
}
|
|
||||||
return dbPath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CurrentDBVersion is the database version.
|
|
||||||
var CurrentDBVersion int32 = 1
|
|
||||||
|
|
||||||
// OpenDB opens an existing database for use.
|
|
||||||
func OpenDB(args ...interface{}) (database.Db, error) {
|
|
||||||
dbpath, err := parseArgs("OpenDB", args...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log = database.GetLog()
|
|
||||||
|
|
||||||
db, err := openDB(dbpath, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Need to find last block and tx
|
|
||||||
var lastknownblock, nextunknownblock, testblock int32
|
|
||||||
|
|
||||||
increment := int32(100000)
|
|
||||||
ldb := db.(*LevelDb)
|
|
||||||
|
|
||||||
var lastSha *wire.ShaHash
|
|
||||||
// forward scan
|
|
||||||
blockforward:
|
|
||||||
for {
|
|
||||||
|
|
||||||
sha, err := ldb.fetchBlockShaByHeight(testblock)
|
|
||||||
if err == nil {
|
|
||||||
// block is found
|
|
||||||
lastSha = sha
|
|
||||||
lastknownblock = testblock
|
|
||||||
testblock += increment
|
|
||||||
} else {
|
|
||||||
if testblock == 0 {
|
|
||||||
// No blocks in db; odd, but ok.
|
|
||||||
lastknownblock = -1
|
|
||||||
nextunknownblock = 0
|
|
||||||
var emptysha wire.ShaHash
|
|
||||||
lastSha = &emptysha
|
|
||||||
} else {
|
|
||||||
nextunknownblock = testblock
|
|
||||||
}
|
|
||||||
break blockforward
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// narrow search
|
|
||||||
blocknarrow:
|
|
||||||
for lastknownblock != -1 {
|
|
||||||
testblock = (lastknownblock + nextunknownblock) / 2
|
|
||||||
sha, err := ldb.fetchBlockShaByHeight(testblock)
|
|
||||||
if err == nil {
|
|
||||||
lastknownblock = testblock
|
|
||||||
lastSha = sha
|
|
||||||
} else {
|
|
||||||
nextunknownblock = testblock
|
|
||||||
}
|
|
||||||
if lastknownblock+1 == nextunknownblock {
|
|
||||||
break blocknarrow
|
|
||||||
}
|
|
||||||
}
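
The two loops above implement a simple pattern: probe forward in large increments until a missing height is hit, then binary-search the resulting gap for the highest stored block. A standalone sketch of the same idea against a generic predicate (the names here are illustrative, not from the package):

package main

import "fmt"

// highestTrue returns the largest height for which exists reports true,
// using the same "probe forward, then narrow" search as OpenDB above.
// It returns -1 when even height 0 is missing.
func highestTrue(exists func(int32) bool, increment int32) int32 {
	// Forward scan: jump ahead by increment until a missing height is hit.
	last, next := int32(-1), int32(0)
	for h := int32(0); exists(h); h += increment {
		last = h
		next = h + increment
	}
	if last == -1 {
		return -1 // nothing stored at all
	}
	// Narrow: binary search between the last hit and the first miss.
	for last+1 < next {
		mid := (last + next) / 2
		if exists(mid) {
			last = mid
		} else {
			next = mid
		}
	}
	return last
}

func main() {
	// Pretend heights 0..123456 exist.
	exists := func(h int32) bool { return h <= 123456 }
	fmt.Println(highestTrue(exists, 100000)) // 123456
}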
|
|
||||||
|
|
||||||
log.Infof("Checking address index")
|
|
||||||
|
|
||||||
// Load the last block whose transactions have been indexed by address.
|
|
||||||
if sha, idx, err := ldb.fetchAddrIndexTip(); err == nil {
|
|
||||||
if err = ldb.checkAddrIndexVersion(); err == nil {
|
|
||||||
ldb.lastAddrIndexBlkSha = *sha
|
|
||||||
ldb.lastAddrIndexBlkIdx = idx
|
|
||||||
log.Infof("Address index good, continuing")
|
|
||||||
} else {
|
|
||||||
log.Infof("Address index in old, incompatible format, dropping...")
|
|
||||||
ldb.deleteOldAddrIndex()
|
|
||||||
ldb.DeleteAddrIndex()
|
|
||||||
log.Infof("Old, incompatible address index dropped and can now be rebuilt")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ldb.lastAddrIndexBlkIdx = -1
|
|
||||||
}
|
|
||||||
|
|
||||||
ldb.lastBlkSha = *lastSha
|
|
||||||
ldb.lastBlkIdx = lastknownblock
|
|
||||||
ldb.nextBlock = lastknownblock + 1
|
|
||||||
|
|
||||||
return db, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func openDB(dbpath string, create bool) (pbdb database.Db, err error) {
|
|
||||||
var db LevelDb
|
|
||||||
var tlDb *leveldb.DB
|
|
||||||
var dbversion int32
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err == nil {
|
|
||||||
db.lDb = tlDb
|
|
||||||
|
|
||||||
db.txUpdateMap = map[wire.ShaHash]*txUpdateObj{}
|
|
||||||
db.txSpentUpdateMap = make(map[wire.ShaHash]*spentTxUpdate)
|
|
||||||
|
|
||||||
pbdb = &db
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
if create == true {
|
|
||||||
err = os.Mkdir(dbpath, 0750)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("mkdir failed %v %v", dbpath, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_, err = os.Stat(dbpath)
|
|
||||||
if err != nil {
|
|
||||||
err = database.ErrDbDoesNotExist
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
needVersionFile := false
|
|
||||||
verfile := dbpath + ".ver"
|
|
||||||
fi, ferr := os.Open(verfile)
|
|
||||||
if ferr == nil {
|
|
||||||
defer fi.Close()
|
|
||||||
|
|
||||||
ferr = binary.Read(fi, binary.LittleEndian, &dbversion)
|
|
||||||
if ferr != nil {
|
|
||||||
dbversion = ^0
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if create == true {
|
|
||||||
needVersionFile = true
|
|
||||||
dbversion = CurrentDBVersion
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := &opt.Options{
|
|
||||||
BlockCacher: opt.DefaultBlockCacher,
|
|
||||||
Compression: opt.NoCompression,
|
|
||||||
OpenFilesCacher: opt.DefaultOpenFilesCacher,
|
|
||||||
}
|
|
||||||
|
|
||||||
switch dbversion {
|
|
||||||
case 0:
|
|
||||||
opts = &opt.Options{}
|
|
||||||
case 1:
|
|
||||||
// uses defaults from above
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("unsupported db version %v", dbversion)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
tlDb, err = leveldb.OpenFile(dbpath, opts)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we opened the database successfully on 'create'
|
|
||||||
// update the version file.
|
|
||||||
if needVersionFile {
|
|
||||||
fo, ferr := os.Create(verfile)
|
|
||||||
if ferr != nil {
|
|
||||||
// TODO(design) close and delete database?
|
|
||||||
err = ferr
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer fo.Close()
|
|
||||||
err = binary.Write(fo, binary.LittleEndian, dbversion)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateDB creates, initializes and opens a database for use.
|
|
||||||
func CreateDB(args ...interface{}) (database.Db, error) {
|
|
||||||
dbpath, err := parseArgs("Create", args...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log = database.GetLog()
|
|
||||||
|
|
||||||
// No special setup needed, just openDB.
|
|
||||||
db, err := openDB(dbpath, true)
|
|
||||||
if err == nil {
|
|
||||||
ldb := db.(*LevelDb)
|
|
||||||
ldb.lastBlkIdx = -1
|
|
||||||
ldb.lastAddrIndexBlkIdx = -1
|
|
||||||
ldb.nextBlock = 0
|
|
||||||
}
|
|
||||||
return db, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) close() error {
|
|
||||||
return db.lDb.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync verifies that the database is coherent on disk,
|
|
||||||
// and no outstanding transactions are in flight.
|
|
||||||
func (db *LevelDb) Sync() error {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
// While specified by the API, this does nothing.
|
|
||||||
// It does, however, grab the lock so it does not return until other operations are complete.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close cleanly shuts down database, syncing all data.
|
|
||||||
func (db *LevelDb) Close() error {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
return db.close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// DropAfterBlockBySha will remove any blocks from the database after
|
|
||||||
// the given block.
|
|
||||||
func (db *LevelDb) DropAfterBlockBySha(sha *wire.ShaHash) (rerr error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
defer func() {
|
|
||||||
if rerr == nil {
|
|
||||||
rerr = db.processBatches()
|
|
||||||
} else {
|
|
||||||
db.lBatch().Reset()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
startheight := db.nextBlock - 1
|
|
||||||
|
|
||||||
keepidx, err := db.getBlkLoc(sha)
|
|
||||||
if err != nil {
|
|
||||||
// should the error here be normalized ?
|
|
||||||
log.Tracef("block loc failed %v ", sha)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for height := startheight; height > keepidx; height = height - 1 {
|
|
||||||
var blk *btcutil.Block
|
|
||||||
blksha, buf, err := db.getBlkByHeight(height)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
blk, err = btcutil.NewBlockFromBytes(buf)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tx := range blk.MsgBlock().Transactions {
|
|
||||||
err = db.unSpend(tx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// rather than iterate the list of tx backward, do it twice.
|
|
||||||
for _, tx := range blk.Transactions() {
|
|
||||||
var txUo txUpdateObj
|
|
||||||
txUo.delete = true
|
|
||||||
db.txUpdateMap[*tx.Sha()] = &txUo
|
|
||||||
}
|
|
||||||
db.lBatch().Delete(shaBlkToKey(blksha))
|
|
||||||
db.lBatch().Delete(int64ToKey(int64(height)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// update the last block cache
|
|
||||||
db.lastBlkShaCached = true
|
|
||||||
db.lastBlkSha = *sha
|
|
||||||
db.lastBlkIdx = keepidx
|
|
||||||
db.nextBlock = keepidx + 1
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertBlock inserts raw block and transaction data from a block into the
|
|
||||||
// database. The first block inserted into the database will be treated as the
|
|
||||||
// genesis block. Every subsequent block insert requires the referenced parent
|
|
||||||
// block to already exist.
|
|
||||||
func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int32, rerr error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
defer func() {
|
|
||||||
if rerr == nil {
|
|
||||||
rerr = db.processBatches()
|
|
||||||
} else {
|
|
||||||
db.lBatch().Reset()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
blocksha := block.Sha()
|
|
||||||
mblock := block.MsgBlock()
|
|
||||||
rawMsg, err := block.Bytes()
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("Failed to obtain raw block sha %v", blocksha)
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
txloc, err := block.TxLoc()
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("Failed to obtain raw block sha %v", blocksha)
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert block into database
|
|
||||||
newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
|
|
||||||
rawMsg)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("Failed to insert block %v %v %v", blocksha,
|
|
||||||
&mblock.Header.PrevBlock, err)
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// At least two blocks in the long past were generated by faulty
|
|
||||||
// miners: the sha of the transaction exists in a previous block.
|
|
||||||
// Detect this condition and 'accept' the block.
|
|
||||||
for txidx, tx := range mblock.Transactions {
|
|
||||||
txsha, err := block.TxSha(txidx)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err)
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
spentbuflen := (len(tx.TxOut) + 7) / 8
|
|
||||||
spentbuf := make([]byte, spentbuflen, spentbuflen)
|
|
||||||
if len(tx.TxOut)%8 != 0 {
|
|
||||||
for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
|
|
||||||
spentbuf[spentbuflen-1] |= (byte(1) << i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = db.insertTx(txsha, newheight, txloc[txidx].TxStart, txloc[txidx].TxLen, spentbuf)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Some old blocks contain duplicate transactions
|
|
||||||
// Attempt to cleanly bypass this problem by marking the
|
|
||||||
// first as fully spent.
|
|
||||||
// http://blockexplorer.com/b/91812 dup in 91842
|
|
||||||
// http://blockexplorer.com/b/91722 dup in 91880
|
|
||||||
if newheight == 91812 {
|
|
||||||
dupsha, err := wire.NewShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
|
|
||||||
if err != nil {
|
|
||||||
panic("invalid sha string in source")
|
|
||||||
}
|
|
||||||
if txsha.IsEqual(dupsha) {
|
|
||||||
// marking TxOut[0] as spent
|
|
||||||
po := wire.NewOutPoint(dupsha, 0)
|
|
||||||
txI := wire.NewTxIn(po, []byte("garbage"))
|
|
||||||
|
|
||||||
var spendtx wire.MsgTx
|
|
||||||
spendtx.AddTxIn(txI)
|
|
||||||
err = db.doSpend(&spendtx)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if newheight == 91722 {
|
|
||||||
dupsha, err := wire.NewShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
|
|
||||||
if err != nil {
|
|
||||||
panic("invalid sha string in source")
|
|
||||||
}
|
|
||||||
if txsha.IsEqual(dupsha) {
|
|
||||||
// marking TxOut[0] as spent
|
|
||||||
po := wire.NewOutPoint(dupsha, 0)
|
|
||||||
txI := wire.NewTxIn(po, []byte("garbage"))
|
|
||||||
|
|
||||||
var spendtx wire.MsgTx
|
|
||||||
spendtx.AddTxIn(txI)
|
|
||||||
err = db.doSpend(&spendtx)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = db.doSpend(tx)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err)
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return newheight, nil
|
|
||||||
}
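
A standalone illustration (not part of the original file) of the spent-output bitmap that InsertBlock builds above and that setclearSpentData manipulates below: one bit per TxOut, with the unused high bits of the last byte pre-set, so a fully spent transaction is simply a buffer of 0xFF bytes.

package main

import "fmt"

// newSpentBuf builds a bitmap with one bit per output; unused high bits
// of the final byte are pre-set, mirroring the padding logic above.
func newSpentBuf(numTxOut int) []byte {
	buf := make([]byte, (numTxOut+7)/8)
	if numTxOut%8 != 0 {
		for i := uint(numTxOut % 8); i < 8; i++ {
			buf[len(buf)-1] |= byte(1) << i
		}
	}
	return buf
}

// setSpent marks output idx as spent, as setSpentData does via
// setclearSpentData.
func setSpent(buf []byte, idx uint32) {
	buf[idx/8] |= byte(1) << (idx % 8)
}

// fullySpent reports whether every output bit is set.
func fullySpent(buf []byte) bool {
	for _, b := range buf {
		if b != 0xFF {
			return false
		}
	}
	return true
}

func main() {
	buf := newSpentBuf(3) // 3 outputs -> 1 byte, high 5 bits pre-set
	fmt.Printf("%08b %v\n", buf[0], fullySpent(buf)) // 11111000 false
	setSpent(buf, 0)
	setSpent(buf, 1)
	setSpent(buf, 2)
	fmt.Printf("%08b %v\n", buf[0], fullySpent(buf)) // 11111111 true
}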
|
|
||||||
|
|
||||||
// doSpend iterates all TxIn in a bitcoin transaction marking each associated
|
|
||||||
// TxOut as spent.
|
|
||||||
func (db *LevelDb) doSpend(tx *wire.MsgTx) error {
|
|
||||||
for txinidx := range tx.TxIn {
|
|
||||||
txin := tx.TxIn[txinidx]
|
|
||||||
|
|
||||||
inTxSha := txin.PreviousOutPoint.Hash
|
|
||||||
inTxidx := txin.PreviousOutPoint.Index
|
|
||||||
|
|
||||||
if inTxidx == ^uint32(0) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
//log.Infof("spending %v %v", &inTxSha, inTxidx)
|
|
||||||
|
|
||||||
err := db.setSpentData(&inTxSha, inTxidx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// unSpend iterates all TxIn in a bitcoin transaction marking each associated
|
|
||||||
// TxOut as unspent.
|
|
||||||
func (db *LevelDb) unSpend(tx *wire.MsgTx) error {
|
|
||||||
for txinidx := range tx.TxIn {
|
|
||||||
txin := tx.TxIn[txinidx]
|
|
||||||
|
|
||||||
inTxSha := txin.PreviousOutPoint.Hash
|
|
||||||
inTxidx := txin.PreviousOutPoint.Index
|
|
||||||
|
|
||||||
if inTxidx == ^uint32(0) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
err := db.clearSpentData(&inTxSha, inTxidx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) setSpentData(sha *wire.ShaHash, idx uint32) error {
|
|
||||||
return db.setclearSpentData(sha, idx, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) clearSpentData(sha *wire.ShaHash, idx uint32) error {
|
|
||||||
return db.setclearSpentData(sha, idx, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) setclearSpentData(txsha *wire.ShaHash, idx uint32, set bool) error {
|
|
||||||
var txUo *txUpdateObj
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
if txUo, ok = db.txUpdateMap[*txsha]; !ok {
|
|
||||||
// not cached, load from db
|
|
||||||
var txU txUpdateObj
|
|
||||||
blkHeight, txOff, txLen, spentData, err := db.getTxData(txsha)
|
|
||||||
if err != nil {
|
|
||||||
// setting a fully spent tx is an error.
|
|
||||||
if set == true {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// if we are clearing a tx and it wasn't found
|
|
||||||
// in the tx table, it could be in the fully spent
|
|
||||||
// (duplicates) table.
|
|
||||||
spentTxList, err := db.getTxFullySpent(txsha)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// need to reslice the list to exclude the most recent.
|
|
||||||
sTx := spentTxList[len(spentTxList)-1]
|
|
||||||
spentTxList[len(spentTxList)-1] = nil
|
|
||||||
if len(spentTxList) == 1 {
|
|
||||||
// write entry to delete tx from spent pool
|
|
||||||
db.txSpentUpdateMap[*txsha] = &spentTxUpdate{delete: true}
|
|
||||||
} else {
|
|
||||||
// This code should never be hit - aakselrod
|
|
||||||
return fmt.Errorf("fully-spent tx %v does not have 1 record: "+
|
|
||||||
"%v", txsha, len(spentTxList))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create 'new' Tx update data.
|
|
||||||
blkHeight = sTx.blkHeight
|
|
||||||
txOff = sTx.txoff
|
|
||||||
txLen = sTx.txlen
|
|
||||||
spentbuflen := (sTx.numTxO + 7) / 8
|
|
||||||
spentData = make([]byte, spentbuflen, spentbuflen)
|
|
||||||
for i := range spentData {
|
|
||||||
spentData[i] = ^byte(0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
txU.txSha = txsha
|
|
||||||
txU.blkHeight = blkHeight
|
|
||||||
txU.txoff = txOff
|
|
||||||
txU.txlen = txLen
|
|
||||||
txU.spentData = spentData
|
|
||||||
|
|
||||||
txUo = &txU
|
|
||||||
}
|
|
||||||
|
|
||||||
byteidx := idx / 8
|
|
||||||
byteoff := idx % 8
|
|
||||||
|
|
||||||
if set {
|
|
||||||
txUo.spentData[byteidx] |= (byte(1) << byteoff)
|
|
||||||
} else {
|
|
||||||
txUo.spentData[byteidx] &= ^(byte(1) << byteoff)
|
|
||||||
}
|
|
||||||
|
|
||||||
// check for fully spent Tx
|
|
||||||
fullySpent := true
|
|
||||||
for _, val := range txUo.spentData {
|
|
||||||
if val != ^byte(0) {
|
|
||||||
fullySpent = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if fullySpent {
|
|
||||||
var txSu *spentTxUpdate
|
|
||||||
// Look up Tx in fully spent table
|
|
||||||
if txSuOld, ok := db.txSpentUpdateMap[*txsha]; ok {
|
|
||||||
txSu = txSuOld
|
|
||||||
} else {
|
|
||||||
var txSuStore spentTxUpdate
|
|
||||||
txSu = &txSuStore
|
|
||||||
|
|
||||||
txSuOld, err := db.getTxFullySpent(txsha)
|
|
||||||
if err == nil {
|
|
||||||
txSu.txl = txSuOld
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill in spentTx
|
|
||||||
var sTx spentTx
|
|
||||||
sTx.blkHeight = txUo.blkHeight
|
|
||||||
sTx.txoff = txUo.txoff
|
|
||||||
sTx.txlen = txUo.txlen
|
|
||||||
// XXX -- there is no way to compute the real TxOut count
|
|
||||||
// from the spent array.
|
|
||||||
sTx.numTxO = 8 * len(txUo.spentData)
|
|
||||||
|
|
||||||
// append this txdata to fully spent txlist
|
|
||||||
txSu.txl = append(txSu.txl, &sTx)
|
|
||||||
|
|
||||||
// mark txsha as deleted in the txUpdateMap
|
|
||||||
log.Tracef("***tx %v is fully spent\n", txsha)
|
|
||||||
|
|
||||||
db.txSpentUpdateMap[*txsha] = txSu
|
|
||||||
|
|
||||||
txUo.delete = true
|
|
||||||
db.txUpdateMap[*txsha] = txUo
|
|
||||||
} else {
|
|
||||||
db.txUpdateMap[*txsha] = txUo
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func int64ToKey(keyint int64) []byte {
|
|
||||||
key := strconv.FormatInt(keyint, 10)
|
|
||||||
return []byte(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func shaBlkToKey(sha *wire.ShaHash) []byte {
|
|
||||||
return sha[:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// These are used here and in tx.go's deleteOldAddrIndex() to prevent deletion
|
|
||||||
// of indexes other than the addrindex now.
|
|
||||||
var recordSuffixTx = []byte{'t', 'x'}
|
|
||||||
var recordSuffixSpentTx = []byte{'s', 'x'}
|
|
||||||
|
|
||||||
func shaTxToKey(sha *wire.ShaHash) []byte {
|
|
||||||
key := make([]byte, len(sha)+len(recordSuffixTx))
|
|
||||||
copy(key, sha[:])
|
|
||||||
copy(key[len(sha):], recordSuffixTx)
|
|
||||||
return key
|
|
||||||
}
|
|
||||||
|
|
||||||
func shaSpentTxToKey(sha *wire.ShaHash) []byte {
|
|
||||||
key := make([]byte, len(sha)+len(recordSuffixSpentTx))
|
|
||||||
copy(key, sha[:])
|
|
||||||
copy(key[len(sha):], recordSuffixSpentTx)
|
|
||||||
return key
|
|
||||||
}
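
A tiny sketch (illustrative only, not the package's code) of the flat key layout these helpers produce within the single leveldb keyspace: block records are keyed by the raw sha (or the decimal height string from int64ToKey), transaction records by sha||"tx", and fully spent records by sha||"sx".

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	sha := make([]byte, 32) // stand-in for a wire.ShaHash
	sha[0] = 0xab

	blkKey := sha                                       // like shaBlkToKey
	txKey := append(append([]byte{}, sha...), 't', 'x') // like shaTxToKey
	sxKey := append(append([]byte{}, sha...), 's', 'x') // like shaSpentTxToKey

	fmt.Println(len(blkKey), len(txKey), len(sxKey))      // 32 34 34
	fmt.Println(hex.EncodeToString(txKey[len(txKey)-2:])) // 7478 ("tx")
	fmt.Println(hex.EncodeToString(sxKey[len(sxKey)-2:])) // 7378 ("sx")
}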
|
|
||||||
|
|
||||||
func (db *LevelDb) lBatch() *leveldb.Batch {
|
|
||||||
if db.lbatch == nil {
|
|
||||||
db.lbatch = new(leveldb.Batch)
|
|
||||||
}
|
|
||||||
return db.lbatch
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) processBatches() error {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
if len(db.txUpdateMap) != 0 || len(db.txSpentUpdateMap) != 0 || db.lbatch != nil {
|
|
||||||
if db.lbatch == nil {
|
|
||||||
db.lbatch = new(leveldb.Batch)
|
|
||||||
}
|
|
||||||
|
|
||||||
defer db.lbatch.Reset()
|
|
||||||
|
|
||||||
for txSha, txU := range db.txUpdateMap {
|
|
||||||
key := shaTxToKey(&txSha)
|
|
||||||
if txU.delete {
|
|
||||||
//log.Tracef("deleting tx %v", txSha)
|
|
||||||
db.lbatch.Delete(key)
|
|
||||||
} else {
|
|
||||||
//log.Tracef("inserting tx %v", txSha)
|
|
||||||
txdat := db.formatTx(txU)
|
|
||||||
db.lbatch.Put(key, txdat)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for txSha, txSu := range db.txSpentUpdateMap {
|
|
||||||
key := shaSpentTxToKey(&txSha)
|
|
||||||
if txSu.delete {
|
|
||||||
//log.Tracef("deleting tx %v", txSha)
|
|
||||||
db.lbatch.Delete(key)
|
|
||||||
} else {
|
|
||||||
//log.Tracef("inserting tx %v", txSha)
|
|
||||||
txdat := db.formatTxFullySpent(txSu.txl)
|
|
||||||
db.lbatch.Put(key, txdat)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = db.lDb.Write(db.lbatch, db.wo)
|
|
||||||
if err != nil {
|
|
||||||
log.Tracef("batch failed %v\n", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
db.txUpdateMap = map[wire.ShaHash]*txUpdateObj{}
|
|
||||||
db.txSpentUpdateMap = make(map[wire.ShaHash]*spentTxUpdate)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RollbackClose this is part of the database.Db interface and should discard
|
|
||||||
// recent changes to the db and then close the db. This currently just does
|
|
||||||
// a clean shutdown.
|
|
||||||
func (db *LevelDb) RollbackClose() error {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
return db.close()
|
|
||||||
}
|
|
|
@@ -1,598 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/bzip2"
|
|
||||||
"encoding/binary"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/chaincfg"
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/txscript"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/btcsuite/golangcrypto/ripemd160"
|
|
||||||
)
|
|
||||||
|
|
||||||
var network = wire.MainNet
|
|
||||||
|
|
||||||
// testDb is used to store db related context for a running test.
|
|
||||||
// the `cleanUpFunc` *must* be called after each test to maintain db
|
|
||||||
// consistency across tests.
|
|
||||||
type testDb struct {
|
|
||||||
db database.Db
|
|
||||||
blocks []*btcutil.Block
|
|
||||||
dbName string
|
|
||||||
dbNameVer string
|
|
||||||
cleanUpFunc func()
|
|
||||||
}
|
|
||||||
|
|
||||||
func setUpTestDb(t *testing.T, dbname string) (*testDb, error) {
|
|
||||||
// Ignore db remove errors since it means we didn't have an old one.
|
|
||||||
dbnamever := dbname + ".ver"
|
|
||||||
_ = os.RemoveAll(dbname)
|
|
||||||
_ = os.RemoveAll(dbnamever)
|
|
||||||
db, err := database.CreateDB("leveldb", dbname)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
|
|
||||||
blocks, err := loadBlocks(t, testdatafile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
cleanUp := func() {
|
|
||||||
db.Close()
|
|
||||||
os.RemoveAll(dbname)
|
|
||||||
os.RemoveAll(dbnamever)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &testDb{
|
|
||||||
db: db,
|
|
||||||
blocks: blocks,
|
|
||||||
dbName: dbname,
|
|
||||||
dbNameVer: dbnamever,
|
|
||||||
cleanUpFunc: cleanUp,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOperational(t *testing.T) {
|
|
||||||
testOperationalMode(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
// testAddrIndexOperations ensures that all normal operations concerning
|
|
||||||
// the optional address index function correctly.
|
|
||||||
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *btcutil.Block, newestSha *wire.ShaHash, newestBlockIdx int32) {
|
|
||||||
// Metadata about the current addr index state should be unset.
|
|
||||||
sha, height, err := db.FetchAddrIndexTip()
|
|
||||||
if err != database.ErrAddrIndexDoesNotExist {
|
|
||||||
t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
|
|
||||||
}
|
|
||||||
|
|
||||||
var zeroHash wire.ShaHash
|
|
||||||
if !sha.IsEqual(&zeroHash) {
|
|
||||||
t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
if height != -1 {
|
|
||||||
t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test enforcement of constraints for "limit" and "skip"
|
|
||||||
var fakeAddr btcutil.Address
|
|
||||||
_, _, err = db.FetchTxsForAddr(fakeAddr, -1, 0, false)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("Negative value for skip passed, should return an error")
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, err = db.FetchTxsForAddr(fakeAddr, 0, -1, false)
|
|
||||||
if err == nil {
|
|
||||||
t.Fatalf("Negative value for limit passed, should return an error")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Simple test to index outputs(s) of the first tx.
|
|
||||||
testIndex := make(database.BlockAddrIndex)
|
|
||||||
testTx, err := newestBlock.Tx(0)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Block has no transactions, unable to test addr "+
|
|
||||||
"indexing, err %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract the dest addr from the tx.
|
|
||||||
_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to decode tx output, err %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract the hash160 from the output script.
|
|
||||||
var hash160Bytes [ripemd160.Size]byte
|
|
||||||
testHash160 := testAddrs[0].(*btcutil.AddressPubKey).AddressPubKeyHash().ScriptAddress()
|
|
||||||
copy(hash160Bytes[:], testHash160[:])
|
|
||||||
|
|
||||||
// Create a fake index.
|
|
||||||
blktxLoc, _ := newestBlock.TxLoc()
|
|
||||||
testIndex[hash160Bytes] = []*wire.TxLoc{&blktxLoc[0]}
|
|
||||||
|
|
||||||
// Insert our test addr index into the DB.
|
|
||||||
err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
|
|
||||||
" addrs for block #%d (%s) "+
|
|
||||||
"err %v", newestBlockIdx, newestSha, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Chain Tip of address should've been updated.
|
|
||||||
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
|
|
||||||
|
|
||||||
// Check index retrieval.
|
|
||||||
txReplies, _, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
|
|
||||||
"address, err %v", err)
|
|
||||||
}
|
|
||||||
// Should have one reply.
|
|
||||||
if len(txReplies) != 1 {
|
|
||||||
t.Fatalf("Failed to properly index tx by address.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Our test tx and indexed tx should have the same sha.
|
|
||||||
indexedTx := txReplies[0]
|
|
||||||
if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
|
|
||||||
t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
|
|
||||||
"fetched %v", testTx.Sha(), indexedTx.Sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shut down DB.
|
|
||||||
db.Sync()
|
|
||||||
db.Close()
|
|
||||||
|
|
||||||
// Re-Open, tip still should be updated to current height and sha.
|
|
||||||
db, err = database.OpenDB("leveldb", "tstdbopmode")
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to re-open created db, err %v", err)
|
|
||||||
}
|
|
||||||
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
|
|
||||||
|
|
||||||
// Delete the entire index.
|
|
||||||
err = db.DeleteAddrIndex()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Couldn't delete address index, err %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Former index should no longer exist.
|
|
||||||
txReplies, _, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to fetch transactions for address: %v", err)
|
|
||||||
}
|
|
||||||
if len(txReplies) != 0 {
|
|
||||||
t.Fatalf("Address index was not successfully deleted. "+
|
|
||||||
"Should have 0 tx's indexed, %v were returned.",
|
|
||||||
len(txReplies))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tip should be blanked out.
|
|
||||||
if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
|
|
||||||
t.Fatalf("Address index was not fully deleted.")
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *wire.ShaHash, newestBlockIdx int32) {
|
|
||||||
// Safe to ignore error, since height will be < 0 in "error" case.
|
|
||||||
sha, height, _ := db.FetchAddrIndexTip()
|
|
||||||
if newestBlockIdx != height {
|
|
||||||
t.Fatalf("Height of address index tip failed to update, "+
|
|
||||||
"expected %v, got %v", newestBlockIdx, height)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(newestSha.Bytes(), sha.Bytes()) {
|
|
||||||
t.Fatalf("Sha of address index tip failed to update, "+
|
|
||||||
"expected %v, got %v", newestSha, sha)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testOperationalMode(t *testing.T) {
|
|
||||||
// simplified basic operation is:
|
|
||||||
// 1) fetch block from remote server
|
|
||||||
// 2) look up all txin (except coinbase) in the db
|
|
||||||
// 3) insert block
|
|
||||||
// 4) exercise the optional addrindex
|
|
||||||
testDb, err := setUpTestDb(t, "tstdbopmode")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer testDb.cleanUpFunc()
|
|
||||||
err = nil
|
|
||||||
out:
|
|
||||||
for height := int32(0); height < int32(len(testDb.blocks)); height++ {
|
|
||||||
block := testDb.blocks[height]
|
|
||||||
mblock := block.MsgBlock()
|
|
||||||
var txneededList []*wire.ShaHash
|
|
||||||
for _, tx := range mblock.Transactions {
|
|
||||||
for _, txin := range tx.TxIn {
|
|
||||||
if txin.PreviousOutPoint.Index == uint32(4294967295) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
origintxsha := &txin.PreviousOutPoint.Hash
|
|
||||||
txneededList = append(txneededList, origintxsha)
|
|
||||||
|
|
||||||
exists, err := testDb.db.ExistsTxSha(origintxsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("ExistsTxSha: unexpected error %v ", err)
|
|
||||||
}
|
|
||||||
if !exists {
|
|
||||||
t.Errorf("referenced tx not found %v ", origintxsha)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = testDb.db.FetchTxBySha(origintxsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
txlist := testDb.db.FetchUnSpentTxByShaList(txneededList)
|
|
||||||
for _, txe := range txlist {
|
|
||||||
if txe.Err != nil {
|
|
||||||
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
newheight, err := testDb.db.InsertBlock(block)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to insert block %v err %v", height, err)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
if newheight != height {
|
|
||||||
t.Errorf("height mismatch expect %v returned %v", height, newheight)
|
|
||||||
break out
|
|
||||||
}
|
|
||||||
|
|
||||||
newSha, blkid, err := testDb.db.NewestSha()
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to obtain latest sha %v %v", height, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if blkid != height {
|
|
||||||
t.Errorf("height does not match latest block height %v %v %v", blkid, height, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
blkSha := block.Sha()
|
|
||||||
if *newSha != *blkSha {
|
|
||||||
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// now that the db is populated, do some additional tests
|
|
||||||
testFetchHeightRange(t, testDb.db, testDb.blocks)
|
|
||||||
|
|
||||||
// Ensure all operations dealing with the optional address index behave
|
|
||||||
// correctly.
|
|
||||||
newSha, blkid, err := testDb.db.NewestSha()
|
|
||||||
testAddrIndexOperations(t, testDb.db, testDb.blocks[len(testDb.blocks)-1], newSha, blkid)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBackout(t *testing.T) {
|
|
||||||
testBackout(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testBackout(t *testing.T) {
|
|
||||||
// simplified basic operation is:
|
|
||||||
// 1) fetch block from remote server
|
|
||||||
// 2) look up all txin (except coinbase) in the db
|
|
||||||
// 3) insert block
|
|
||||||
|
|
||||||
testDb, err := setUpTestDb(t, "tstdbbackout")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer testDb.cleanUpFunc()
|
|
||||||
|
|
||||||
if len(testDb.blocks) < 120 {
|
|
||||||
t.Errorf("test data too small")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = nil
|
|
||||||
for height := int32(0); height < int32(len(testDb.blocks)); height++ {
|
|
||||||
if height == 100 {
|
|
||||||
t.Logf("Syncing at block height 100")
|
|
||||||
testDb.db.Sync()
|
|
||||||
}
|
|
||||||
if height == 120 {
|
|
||||||
t.Logf("Simulating unexpected application quit")
|
|
||||||
// Simulate unexpected application quit
|
|
||||||
testDb.db.RollbackClose()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
block := testDb.blocks[height]
|
|
||||||
|
|
||||||
newheight, err := testDb.db.InsertBlock(block)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to insert block %v err %v", height, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if newheight != height {
|
|
||||||
t.Errorf("height mismatch expect %v returned %v", height, newheight)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// db was closed at height 120, so no cleanup is possible.
|
|
||||||
|
|
||||||
// reopen db
|
|
||||||
testDb.db, err = database.OpenDB("leveldb", testDb.dbName)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err := testDb.db.Close(); err != nil {
|
|
||||||
t.Errorf("Close: unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
sha := testDb.blocks[99].Sha()
|
|
||||||
if _, err := testDb.db.ExistsSha(sha); err != nil {
|
|
||||||
t.Errorf("ExistsSha: unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
_, err = testDb.db.FetchBlockBySha(sha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to load block 99 from db %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
sha = testDb.blocks[119].Sha()
|
|
||||||
if _, err := testDb.db.ExistsSha(sha); err != nil {
|
|
||||||
t.Errorf("ExistsSha: unexpected error: %v", err)
|
|
||||||
}
|
|
||||||
_, err = testDb.db.FetchBlockBySha(sha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("loaded block 119 from db")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
block := testDb.blocks[119]
|
|
||||||
mblock := block.MsgBlock()
|
|
||||||
txsha := mblock.Transactions[0].TxSha()
|
|
||||||
exists, err := testDb.db.ExistsTxSha(&txsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("ExistsTxSha: unexpected error %v ", err)
|
|
||||||
}
|
|
||||||
if !exists {
|
|
||||||
t.Errorf("tx %v not located db\n", txsha)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = testDb.db.FetchTxBySha(&txsha)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("tx %v not located db\n", txsha)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var savedblocks []*btcutil.Block
|
|
||||||
|
|
||||||
func loadBlocks(t *testing.T, file string) (blocks []*btcutil.Block, err error) {
|
|
||||||
if len(savedblocks) != 0 {
|
|
||||||
blocks = savedblocks
|
|
||||||
return
|
|
||||||
}
|
|
||||||
testdatafile := filepath.Join("..", "testdata", "blocks1-256.bz2")
|
|
||||||
var dr io.Reader
|
|
||||||
var fi io.ReadCloser
|
|
||||||
fi, err = os.Open(testdatafile)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to open file %v, err %v", testdatafile, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(testdatafile, ".bz2") {
|
|
||||||
z := bzip2.NewReader(fi)
|
|
||||||
dr = z
|
|
||||||
} else {
|
|
||||||
dr = fi
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err := fi.Close(); err != nil {
|
|
||||||
t.Errorf("failed to close file %v %v", testdatafile, err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Set the first block as the genesis block.
|
|
||||||
genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
|
|
||||||
blocks = append(blocks, genesis)
|
|
||||||
|
|
||||||
var block *btcutil.Block
|
|
||||||
err = nil
|
|
||||||
for height := int32(1); err == nil; height++ {
|
|
||||||
var rintbuf uint32
|
|
||||||
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
|
|
||||||
if err == io.EOF {
|
|
||||||
// hit end of file at expected offset: no warning
|
|
||||||
height--
|
|
||||||
err = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to load network type, err %v", err)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if rintbuf != uint32(network) {
|
|
||||||
t.Errorf("Block doesn't match network: %v expects %v",
|
|
||||||
rintbuf, network)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err = binary.Read(dr, binary.LittleEndian, &rintbuf)
|
|
||||||
blocklen := rintbuf
|
|
||||||
|
|
||||||
rbytes := make([]byte, blocklen)
|
|
||||||
|
|
||||||
// read block
|
|
||||||
_, err = io.ReadFull(dr, rbytes)
|
|
||||||
|
|
||||||
block, err = btcutil.NewBlockFromBytes(rbytes)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("failed to parse block %v", height)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
blocks = append(blocks, block)
|
|
||||||
}
|
|
||||||
savedblocks = blocks
|
|
||||||
return
|
|
||||||
}
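
From the reader above, the blocks1-256.bz2 test data layout is a repeated record of: network magic (uint32, little-endian), block length (uint32, little-endian), then the serialized block bytes. A small sketch (not part of the original tests) of writing one such record:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// appendBlockRecord writes one record in the layout loadBlocks above
// expects: network magic, block length, then the raw block.
func appendBlockRecord(w *bytes.Buffer, network uint32, rawBlock []byte) error {
	if err := binary.Write(w, binary.LittleEndian, network); err != nil {
		return err
	}
	if err := binary.Write(w, binary.LittleEndian, uint32(len(rawBlock))); err != nil {
		return err
	}
	_, err := w.Write(rawBlock)
	return err
}

func main() {
	var buf bytes.Buffer
	// 0xD9B4BEF9 is the mainnet wire magic the reader above compares against.
	if err := appendBlockRecord(&buf, 0xD9B4BEF9, []byte{0x01, 0x02, 0x03}); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Printf("% x\n", buf.Bytes()) // f9 be b4 d9 03 00 00 00 01 02 03
}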
|
|
||||||
|
|
||||||
func testFetchHeightRange(t *testing.T, db database.Db, blocks []*btcutil.Block) {
|
|
||||||
|
|
||||||
var testincrement int32 = 50
|
|
||||||
var testcnt int32 = 100
|
|
||||||
|
|
||||||
shanames := make([]*wire.ShaHash, len(blocks))
|
|
||||||
|
|
||||||
nBlocks := int32(len(blocks))
|
|
||||||
|
|
||||||
for i := range blocks {
|
|
||||||
shanames[i] = blocks[i].Sha()
|
|
||||||
}
|
|
||||||
|
|
||||||
for startheight := int32(0); startheight < nBlocks; startheight += testincrement {
|
|
||||||
endheight := startheight + testcnt
|
|
||||||
|
|
||||||
if endheight > nBlocks {
|
|
||||||
endheight = database.AllShas
|
|
||||||
}
|
|
||||||
|
|
||||||
shalist, err := db.FetchHeightRange(startheight, endheight)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("FetchHeightRange: unexpected failure looking up shas %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if endheight == database.AllShas {
|
|
||||||
if int32(len(shalist)) != nBlocks-startheight {
|
|
||||||
t.Errorf("FetchHeightRange: expected A %v shas, got %v", nBlocks-startheight, len(shalist))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if int32(len(shalist)) != testcnt {
|
|
||||||
t.Errorf("FetchHeightRange: expected %v shas, got %v", testcnt, len(shalist))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := range shalist {
|
|
||||||
sha0 := *shanames[int32(i)+startheight]
|
|
||||||
sha1 := shalist[i]
|
|
||||||
if sha0 != sha1 {
|
|
||||||
t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int32(i)+startheight, startheight, endheight, sha0, sha1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) {
|
|
||||||
testDb, err := setUpTestDb(t, "tstdbtxaddr")
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Failed to open test database %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer testDb.cleanUpFunc()
|
|
||||||
|
|
||||||
// Insert a block with some fake test transactions. The block will have
|
|
||||||
// 10 copies of a fake transaction involving same address.
|
|
||||||
addrString := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
|
|
||||||
targetAddr, err := btcutil.DecodeAddress(addrString, &chaincfg.MainNetParams)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to decode test address: %v", err)
|
|
||||||
}
|
|
||||||
outputScript, err := txscript.PayToAddrScript(targetAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable make test pkScript %v", err)
|
|
||||||
}
|
|
||||||
fakeTxOut := wire.NewTxOut(10, outputScript)
|
|
||||||
var emptyHash wire.ShaHash
|
|
||||||
fakeHeader := wire.NewBlockHeader(&emptyHash, &emptyHash, 1, 1)
|
|
||||||
msgBlock := wire.NewMsgBlock(fakeHeader)
|
|
||||||
for i := 0; i < 10; i++ {
|
|
||||||
mtx := wire.NewMsgTx()
|
|
||||||
mtx.AddTxOut(fakeTxOut)
|
|
||||||
msgBlock.AddTransaction(mtx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert the test block into the DB.
|
|
||||||
testBlock := btcutil.NewBlock(msgBlock)
|
|
||||||
newheight, err := testDb.db.InsertBlock(testBlock)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to insert block into db: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create and insert an address index for our test addr.
|
|
||||||
txLoc, _ := testBlock.TxLoc()
|
|
||||||
index := make(database.BlockAddrIndex)
|
|
||||||
for i := range testBlock.Transactions() {
|
|
||||||
var hash160 [ripemd160.Size]byte
|
|
||||||
scriptAddr := targetAddr.ScriptAddress()
|
|
||||||
copy(hash160[:], scriptAddr[:])
|
|
||||||
index[hash160] = append(index[hash160], &txLoc[i])
|
|
||||||
}
|
|
||||||
blkSha := testBlock.Sha()
|
|
||||||
err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
|
|
||||||
" addrs for block #%d (%s) "+
|
|
||||||
"err %v", newheight, blkSha, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try skipping the first 4 results, should get 6 in return.
|
|
||||||
txReply, txSkipped, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to fetch transactions for address: %v", err)
|
|
||||||
}
|
|
||||||
if txSkipped != 4 {
|
|
||||||
t.Fatalf("Did not correctly return skipped amount"+
|
|
||||||
" got %v txs, expected %v", txSkipped, 4)
|
|
||||||
}
|
|
||||||
if len(txReply) != 6 {
|
|
||||||
t.Fatalf("Did not correctly skip forward in txs for address reply"+
|
|
||||||
" got %v txs, expected %v", len(txReply), 6)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Limit the number of results to 3.
|
|
||||||
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to fetch transactions for address: %v", err)
|
|
||||||
}
|
|
||||||
if txSkipped != 0 {
|
|
||||||
t.Fatalf("Did not correctly return skipped amount"+
|
|
||||||
" got %v txs, expected %v", txSkipped, 0)
|
|
||||||
}
|
|
||||||
if len(txReply) != 3 {
|
|
||||||
t.Fatalf("Did not correctly limit in txs for address reply"+
|
|
||||||
" got %v txs, expected %v", len(txReply), 3)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip 1, limit 5.
|
|
||||||
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5, false)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("Unable to fetch transactions for address: %v", err)
|
|
||||||
}
|
|
||||||
if txSkipped != 1 {
|
|
||||||
t.Fatalf("Did not correctly return skipped amount"+
|
|
||||||
" got %v txs, expected %v", txSkipped, 1)
|
|
||||||
}
|
|
||||||
if len(txReply) != 5 {
|
|
||||||
t.Fatalf("Did not correctly limit in txs for address reply"+
|
|
||||||
" got %v txs, expected %v", len(txReply), 5)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,681 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ldb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/btcsuite/golangcrypto/ripemd160"
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb"
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb/iterator"
|
|
||||||
"github.com/btcsuite/goleveldb/leveldb/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Each address index is 35 bytes:
|
|
||||||
// --------------------------------------------------------
|
|
||||||
// | Prefix | Hash160 | BlkHeight | Tx Offset | Tx Size |
|
|
||||||
// --------------------------------------------------------
|
|
||||||
// | 3 bytes | 20 bytes | 4 bytes | 4 bytes | 4 bytes |
|
|
||||||
// --------------------------------------------------------
|
|
||||||
addrIndexKeyLength = 3 + ripemd160.Size + 4 + 4 + 4
|
|
||||||
|
|
||||||
batchDeleteThreshold = 10000
|
|
||||||
|
|
||||||
addrIndexCurrentVersion = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
var addrIndexMetaDataKey = []byte("addrindex")
|
|
||||||
|
|
||||||
// All address index entries share this prefix to facilitate the use of
|
|
||||||
// iterators.
|
|
||||||
var addrIndexKeyPrefix = []byte("a+-")
|
|
||||||
|
|
||||||
// Address index version is required to drop/rebuild address index if version
|
|
||||||
// is older than current as the format of the index may have changed. This is
|
|
||||||
// true when going from no version to version 1 as the address index is stored
|
|
||||||
// as big endian in version 1 and little endian in the original code. Version
|
|
||||||
// is stored as two bytes, little endian (to match all the code but the index).
|
|
||||||
var addrIndexVersionKey = []byte("addrindexversion")
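A minimal sketch of how that version record could be read back and compared against addrIndexCurrentVersion; the helper name is hypothetical, and it assumes only the little-endian uint16 layout written by UpdateAddrIndexForBlock below.
// checkAddrIndexVersion reports whether the stored addrindex version matches
// addrIndexCurrentVersion. Hypothetical helper for illustration only.
func (db *LevelDb) checkAddrIndexVersion() (bool, error) {
	buf, err := db.lDb.Get(addrIndexVersionKey, db.ro)
	if err == leveldb.ErrNotFound {
		// No version stored: this is the legacy (pre-version) index.
		return false, nil
	} else if err != nil {
		return false, err
	}
	if len(buf) < 2 {
		return false, errors.New("malformed addrindex version record")
	}
	return binary.LittleEndian.Uint16(buf[0:2]) == addrIndexCurrentVersion, nil
}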
|
|
||||||
|
|
||||||
type txUpdateObj struct {
|
|
||||||
txSha *wire.ShaHash
|
|
||||||
blkHeight int32
|
|
||||||
txoff int
|
|
||||||
txlen int
|
|
||||||
ntxout int
|
|
||||||
spentData []byte
|
|
||||||
delete bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type spentTx struct {
|
|
||||||
blkHeight int32
|
|
||||||
txoff int
|
|
||||||
txlen int
|
|
||||||
numTxO int
|
|
||||||
delete bool
|
|
||||||
}
|
|
||||||
type spentTxUpdate struct {
|
|
||||||
txl []*spentTx
|
|
||||||
delete bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type txAddrIndex struct {
|
|
||||||
hash160 [ripemd160.Size]byte
|
|
||||||
blkHeight int32
|
|
||||||
txoffset int
|
|
||||||
txlen int
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertTx inserts a tx hash and its associated data into the database.
|
|
||||||
func (db *LevelDb) InsertTx(txsha *wire.ShaHash, height int32, txoff int, txlen int, spentbuf []byte) (err error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
return db.insertTx(txsha, height, txoff, txlen, spentbuf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// insertTx inserts a tx hash and its associated data into the database.
|
|
||||||
// Must be called with db lock held.
|
|
||||||
func (db *LevelDb) insertTx(txSha *wire.ShaHash, height int32, txoff int, txlen int, spentbuf []byte) (err error) {
|
|
||||||
var txU txUpdateObj
|
|
||||||
|
|
||||||
txU.txSha = txSha
|
|
||||||
txU.blkHeight = height
|
|
||||||
txU.txoff = txoff
|
|
||||||
txU.txlen = txlen
|
|
||||||
txU.spentData = spentbuf
|
|
||||||
|
|
||||||
db.txUpdateMap[*txSha] = &txU
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatTx generates the value buffer for the Tx db.
|
|
||||||
func (db *LevelDb) formatTx(txu *txUpdateObj) []byte {
|
|
||||||
blkHeight := uint64(txu.blkHeight)
|
|
||||||
txOff := uint32(txu.txoff)
|
|
||||||
txLen := uint32(txu.txlen)
|
|
||||||
spentbuf := txu.spentData
|
|
||||||
|
|
||||||
txW := make([]byte, 16+len(spentbuf))
|
|
||||||
binary.LittleEndian.PutUint64(txW[0:8], blkHeight)
|
|
||||||
binary.LittleEndian.PutUint32(txW[8:12], txOff)
|
|
||||||
binary.LittleEndian.PutUint32(txW[12:16], txLen)
|
|
||||||
copy(txW[16:], spentbuf)
|
|
||||||
|
|
||||||
return txW[:]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) getTxData(txsha *wire.ShaHash) (int32, int, int, []byte, error) {
|
|
||||||
key := shaTxToKey(txsha)
|
|
||||||
buf, err := db.lDb.Get(key, db.ro)
|
|
||||||
if err != nil {
|
|
||||||
return 0, 0, 0, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
blkHeight := binary.LittleEndian.Uint64(buf[0:8])
|
|
||||||
txOff := binary.LittleEndian.Uint32(buf[8:12])
|
|
||||||
txLen := binary.LittleEndian.Uint32(buf[12:16])
|
|
||||||
|
|
||||||
spentBuf := make([]byte, len(buf)-16)
|
|
||||||
copy(spentBuf, buf[16:])
|
|
||||||
|
|
||||||
return int32(blkHeight), int(txOff), int(txLen), spentBuf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) getTxFullySpent(txsha *wire.ShaHash) ([]*spentTx, error) {
|
|
||||||
|
|
||||||
var badTxList, spentTxList []*spentTx
|
|
||||||
|
|
||||||
key := shaSpentTxToKey(txsha)
|
|
||||||
buf, err := db.lDb.Get(key, db.ro)
|
|
||||||
if err == leveldb.ErrNotFound {
|
|
||||||
return badTxList, database.ErrTxShaMissing
|
|
||||||
} else if err != nil {
|
|
||||||
return badTxList, err
|
|
||||||
}
|
|
||||||
txListLen := len(buf) / 20
|
|
||||||
|
|
||||||
spentTxList = make([]*spentTx, txListLen, txListLen)
|
|
||||||
for i := range spentTxList {
|
|
||||||
offset := i * 20
|
|
||||||
|
|
||||||
blkHeight := binary.LittleEndian.Uint64(buf[offset : offset+8])
|
|
||||||
txOff := binary.LittleEndian.Uint32(buf[offset+8 : offset+12])
|
|
||||||
txLen := binary.LittleEndian.Uint32(buf[offset+12 : offset+16])
|
|
||||||
numTxO := binary.LittleEndian.Uint32(buf[offset+16 : offset+20])
|
|
||||||
|
|
||||||
sTx := spentTx{
|
|
||||||
blkHeight: int32(blkHeight),
|
|
||||||
txoff: int(txOff),
|
|
||||||
txlen: int(txLen),
|
|
||||||
numTxO: int(numTxO),
|
|
||||||
}
|
|
||||||
|
|
||||||
spentTxList[i] = &sTx
|
|
||||||
}
|
|
||||||
|
|
||||||
return spentTxList, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (db *LevelDb) formatTxFullySpent(sTxList []*spentTx) []byte {
|
|
||||||
txW := make([]byte, 20*len(sTxList))
|
|
||||||
|
|
||||||
for i, sTx := range sTxList {
|
|
||||||
blkHeight := uint64(sTx.blkHeight)
|
|
||||||
txOff := uint32(sTx.txoff)
|
|
||||||
txLen := uint32(sTx.txlen)
|
|
||||||
numTxO := uint32(sTx.numTxO)
|
|
||||||
offset := i * 20
|
|
||||||
|
|
||||||
binary.LittleEndian.PutUint64(txW[offset:offset+8], blkHeight)
|
|
||||||
binary.LittleEndian.PutUint32(txW[offset+8:offset+12], txOff)
|
|
||||||
binary.LittleEndian.PutUint32(txW[offset+12:offset+16], txLen)
|
|
||||||
binary.LittleEndian.PutUint32(txW[offset+16:offset+20], numTxO)
|
|
||||||
}
|
|
||||||
|
|
||||||
return txW
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExistsTxSha returns if the given tx sha exists in the database
|
|
||||||
func (db *LevelDb) ExistsTxSha(txsha *wire.ShaHash) (bool, error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
return db.existsTxSha(txsha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// existsTxSha returns if the given tx sha exists in the database.
|
|
||||||
// Must be called with the db lock held.
|
|
||||||
func (db *LevelDb) existsTxSha(txSha *wire.ShaHash) (bool, error) {
|
|
||||||
key := shaTxToKey(txSha)
|
|
||||||
|
|
||||||
return db.lDb.Has(key, db.ro)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchTxByShaList returns the most recent tx for each given sha, whether fully spent or not.
|
|
||||||
func (db *LevelDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
// until the fully spent separation of tx is complete this is identical
|
|
||||||
// to FetchUnSpentTxByShaList
|
|
||||||
replies := make([]*database.TxListReply, len(txShaList))
|
|
||||||
for i, txsha := range txShaList {
|
|
||||||
tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha)
|
|
||||||
btxspent := []bool{}
|
|
||||||
if err == nil {
|
|
||||||
btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut))
|
|
||||||
for idx := range tx.TxOut {
|
|
||||||
byteidx := idx / 8
|
|
||||||
byteoff := uint(idx % 8)
|
|
||||||
btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == database.ErrTxShaMissing {
|
|
||||||
// if the unspent pool did not have the tx,
|
|
||||||
// look in the fully spent pool (only last instance)
|
|
||||||
|
|
||||||
sTxList, fSerr := db.getTxFullySpent(txsha)
|
|
||||||
if fSerr == nil && len(sTxList) != 0 {
|
|
||||||
idx := len(sTxList) - 1
|
|
||||||
stx := sTxList[idx]
|
|
||||||
|
|
||||||
tx, blockSha, _, _, err = db.fetchTxDataByLoc(
|
|
||||||
stx.blkHeight, stx.txoff, stx.txlen, []byte{})
|
|
||||||
if err == nil {
|
|
||||||
btxspent = make([]bool, len(tx.TxOut))
|
|
||||||
for i := range btxspent {
|
|
||||||
btxspent[i] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err}
|
|
||||||
replies[i] = &txlre
|
|
||||||
}
|
|
||||||
return replies
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchUnSpentTxByShaList given a array of ShaHash, look up the transactions
|
|
||||||
// and return them in a TxListReply array.
|
|
||||||
func (db *LevelDb) FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
replies := make([]*database.TxListReply, len(txShaList))
|
|
||||||
for i, txsha := range txShaList {
|
|
||||||
tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha)
|
|
||||||
btxspent := []bool{}
|
|
||||||
if err == nil {
|
|
||||||
btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut))
|
|
||||||
for idx := range tx.TxOut {
|
|
||||||
byteidx := idx / 8
|
|
||||||
byteoff := uint(idx % 8)
|
|
||||||
btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err}
|
|
||||||
replies[i] = &txlre
|
|
||||||
}
|
|
||||||
return replies
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchTxDataBySha returns several pieces of data regarding the given sha.
|
|
||||||
func (db *LevelDb) fetchTxDataBySha(txsha *wire.ShaHash) (rtx *wire.MsgTx, rblksha *wire.ShaHash, rheight int32, rtxspent []byte, err error) {
|
|
||||||
var blkHeight int32
|
|
||||||
var txspent []byte
|
|
||||||
var txOff, txLen int
|
|
||||||
|
|
||||||
blkHeight, txOff, txLen, txspent, err = db.getTxData(txsha)
|
|
||||||
if err != nil {
|
|
||||||
if err == leveldb.ErrNotFound {
|
|
||||||
err = database.ErrTxShaMissing
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return db.fetchTxDataByLoc(blkHeight, txOff, txLen, txspent)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchTxDataByLoc returns several pieces of data regarding the given tx
|
|
||||||
// located by the block/offset/size location
|
|
||||||
func (db *LevelDb) fetchTxDataByLoc(blkHeight int32, txOff int, txLen int, txspent []byte) (rtx *wire.MsgTx, rblksha *wire.ShaHash, rheight int32, rtxspent []byte, err error) {
|
|
||||||
var blksha *wire.ShaHash
|
|
||||||
var blkbuf []byte
|
|
||||||
|
|
||||||
blksha, blkbuf, err = db.getBlkByHeight(blkHeight)
|
|
||||||
if err != nil {
|
|
||||||
if err == leveldb.ErrNotFound {
|
|
||||||
err = database.ErrTxShaMissing
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
//log.Trace("transaction %v is at block %v %v txoff %v, txlen %v\n",
|
|
||||||
// txsha, blksha, blkHeight, txOff, txLen)
|
|
||||||
|
|
||||||
if len(blkbuf) < txOff+txLen {
|
|
||||||
err = database.ErrTxShaMissing
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rbuf := bytes.NewReader(blkbuf[txOff : txOff+txLen])
|
|
||||||
|
|
||||||
var tx wire.MsgTx
|
|
||||||
err = tx.Deserialize(rbuf)
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("unable to decode tx block %v %v txoff %v txlen %v",
|
|
||||||
blkHeight, blksha, txOff, txLen)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return &tx, blksha, blkHeight, txspent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchTxBySha returns some data for the given Tx Sha.
|
|
||||||
func (db *LevelDb) FetchTxBySha(txsha *wire.ShaHash) ([]*database.TxListReply, error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
replylen := 0
|
|
||||||
replycnt := 0
|
|
||||||
|
|
||||||
tx, blksha, height, txspent, txerr := db.fetchTxDataBySha(txsha)
|
|
||||||
if txerr == nil {
|
|
||||||
replylen++
|
|
||||||
} else {
|
|
||||||
if txerr != database.ErrTxShaMissing {
|
|
||||||
return []*database.TxListReply{}, txerr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sTxList, fSerr := db.getTxFullySpent(txsha)
|
|
||||||
|
|
||||||
if fSerr != nil {
|
|
||||||
if fSerr != database.ErrTxShaMissing {
|
|
||||||
return []*database.TxListReply{}, fSerr
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
replylen += len(sTxList)
|
|
||||||
}
|
|
||||||
|
|
||||||
replies := make([]*database.TxListReply, replylen)
|
|
||||||
|
|
||||||
if fSerr == nil {
|
|
||||||
for _, stx := range sTxList {
|
|
||||||
tx, blksha, _, _, err := db.fetchTxDataByLoc(
|
|
||||||
stx.blkHeight, stx.txoff, stx.txlen, []byte{})
|
|
||||||
if err != nil {
|
|
||||||
if err != leveldb.ErrNotFound {
|
|
||||||
return []*database.TxListReply{}, err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
btxspent := make([]bool, len(tx.TxOut), len(tx.TxOut))
|
|
||||||
for i := range btxspent {
|
|
||||||
btxspent[i] = true
|
|
||||||
}
|
|
||||||
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: stx.blkHeight, TxSpent: btxspent, Err: nil}
|
|
||||||
replies[replycnt] = &txlre
|
|
||||||
replycnt++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if txerr == nil {
|
|
||||||
btxspent := make([]bool, len(tx.TxOut), len(tx.TxOut))
|
|
||||||
for idx := range tx.TxOut {
|
|
||||||
byteidx := idx / 8
|
|
||||||
byteoff := uint(idx % 8)
|
|
||||||
btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
|
|
||||||
}
|
|
||||||
txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: height, TxSpent: btxspent, Err: nil}
|
|
||||||
replies[replycnt] = &txlre
|
|
||||||
replycnt++
|
|
||||||
}
|
|
||||||
return replies, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// addrIndexToKey serializes the passed txAddrIndex for storage within the DB.
|
|
||||||
// We want to use BigEndian to store at least block height and TX offset
|
|
||||||
// in order to ensure that the transactions are sorted in the index.
|
|
||||||
// This gives us the ability to use the index in more client-side
|
|
||||||
// applications that are order-dependent (specifically by dependency).
|
|
||||||
func addrIndexToKey(index *txAddrIndex) []byte {
|
|
||||||
record := make([]byte, addrIndexKeyLength, addrIndexKeyLength)
|
|
||||||
copy(record[0:3], addrIndexKeyPrefix)
|
|
||||||
copy(record[3:23], index.hash160[:])
|
|
||||||
|
|
||||||
// The index itself.
|
|
||||||
binary.BigEndian.PutUint32(record[23:27], uint32(index.blkHeight))
|
|
||||||
binary.BigEndian.PutUint32(record[27:31], uint32(index.txoffset))
|
|
||||||
binary.BigEndian.PutUint32(record[31:35], uint32(index.txlen))
|
|
||||||
|
|
||||||
return record
|
|
||||||
}
|
|
||||||
|
|
||||||
// unpackTxIndex deserializes the raw bytes of an address tx index.
|
|
||||||
func unpackTxIndex(rawIndex [12]byte) *txAddrIndex {
|
|
||||||
return &txAddrIndex{
|
|
||||||
blkHeight: int32(binary.BigEndian.Uint32(rawIndex[0:4])),
|
|
||||||
txoffset: int(binary.BigEndian.Uint32(rawIndex[4:8])),
|
|
||||||
txlen: int(binary.BigEndian.Uint32(rawIndex[8:12])),
|
|
||||||
}
|
|
||||||
}
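A round-trip sketch (illustrative only) tying the two helpers together: addrIndexToKey packs the location fields big-endian into the final 12 bytes of the key, and unpackTxIndex recovers them from that slice. The sample height, offset, and length values are arbitrary.
// exampleAddrIndexRoundTrip packs an index into key form and recovers it.
func exampleAddrIndexRoundTrip(hash160 [ripemd160.Size]byte) *txAddrIndex {
	in := &txAddrIndex{
		hash160:   hash160,
		blkHeight: 91842,
		txoffset:  81,
		txlen:     285,
	}
	key := addrIndexToKey(in)

	// The prefix occupies key[0:3], the hash160 key[3:23], and the
	// big-endian location fields key[23:35].
	var rawIndex [12]byte
	copy(rawIndex[:], key[23:35])
	return unpackTxIndex(rawIndex) // equal to in's location fields
}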
|
|
||||||
|
|
||||||
// bytesPrefix returns key range that satisfy the given prefix.
|
|
||||||
// This is only applicable to the standard 'bytes comparer'.
|
|
||||||
func bytesPrefix(prefix []byte) *util.Range {
|
|
||||||
var limit []byte
|
|
||||||
for i := len(prefix) - 1; i >= 0; i-- {
|
|
||||||
c := prefix[i]
|
|
||||||
if c < 0xff {
|
|
||||||
limit = make([]byte, i+1)
|
|
||||||
copy(limit, prefix)
|
|
||||||
limit[i] = c + 1
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &util.Range{Start: prefix, Limit: limit}
|
|
||||||
}
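A concrete example of the range the helper above produces, assuming the three-byte address index prefix defined earlier:
// exampleBytesPrefix shows the half-open key range for the "a+-" prefix.
func exampleBytesPrefix() {
	r := bytesPrefix([]byte("a+-")) // bytes 0x61 0x2b 0x2d
	// r.Start is "a+-" and r.Limit is "a+." (the final byte incremented
	// from 0x2d to 0x2e), so iteration covers exactly the keys that
	// begin with the prefix.
	_ = r
}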
|
|
||||||
|
|
||||||
func advanceIterator(iter iterator.IteratorSeeker, reverse bool) bool {
|
|
||||||
if reverse {
|
|
||||||
return iter.Prev()
|
|
||||||
}
|
|
||||||
return iter.Next()
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchTxsForAddr looks up and returns all transactions which either
|
|
||||||
// spend from a previously created output of the passed address, or
|
|
||||||
// create a new output locked to the passed address. The `limit` parameter
|
|
||||||
// should be the max number of transactions to be returned. Additionally, if the
|
|
||||||
// caller wishes to seek forward in the results some amount, the 'skip' value
|
|
||||||
// represents how many results to skip.
|
|
||||||
func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int,
|
|
||||||
limit int, reverse bool) ([]*database.TxListReply, int, error) {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
// Enforce constraints for skip and limit.
|
|
||||||
if skip < 0 {
|
|
||||||
return nil, 0, errors.New("offset for skip must be positive")
|
|
||||||
}
|
|
||||||
if limit < 0 {
|
|
||||||
return nil, 0, errors.New("value for limit must be positive")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse address type, bailing on an unknown type.
|
|
||||||
var addrKey []byte
|
|
||||||
switch addr := addr.(type) {
|
|
||||||
case *btcutil.AddressPubKeyHash:
|
|
||||||
hash160 := addr.Hash160()
|
|
||||||
addrKey = hash160[:]
|
|
||||||
case *btcutil.AddressScriptHash:
|
|
||||||
hash160 := addr.Hash160()
|
|
||||||
addrKey = hash160[:]
|
|
||||||
case *btcutil.AddressPubKey:
|
|
||||||
hash160 := addr.AddressPubKeyHash().Hash160()
|
|
||||||
addrKey = hash160[:]
|
|
||||||
default:
|
|
||||||
return nil, 0, database.ErrUnsupportedAddressType
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the prefix for our search.
|
|
||||||
addrPrefix := make([]byte, 23, 23)
|
|
||||||
copy(addrPrefix[0:3], addrIndexKeyPrefix)
|
|
||||||
copy(addrPrefix[3:23], addrKey)
|
|
||||||
|
|
||||||
iter := db.lDb.NewIterator(bytesPrefix(addrPrefix), nil)
|
|
||||||
skipped := 0
|
|
||||||
|
|
||||||
if reverse {
|
|
||||||
// Go to the last element if reverse iterating.
|
|
||||||
iter.Last()
|
|
||||||
// Skip "one past" the last element so the loops below don't
|
|
||||||
// miss the last element due to Prev() being called first.
|
|
||||||
// We can safely ignore iterator exhaustion since the loops
|
|
||||||
// below will see there's no keys anyway.
|
|
||||||
iter.Next()
|
|
||||||
}
|
|
||||||
|
|
||||||
for skip != 0 && advanceIterator(iter, reverse) {
|
|
||||||
skip--
|
|
||||||
skipped++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterate through all address indexes that match the targeted prefix.
|
|
||||||
var replies []*database.TxListReply
|
|
||||||
var rawIndex [12]byte
|
|
||||||
for advanceIterator(iter, reverse) && limit != 0 {
|
|
||||||
copy(rawIndex[:], iter.Key()[23:35])
|
|
||||||
addrIndex := unpackTxIndex(rawIndex)
|
|
||||||
|
|
||||||
tx, blkSha, blkHeight, _, err := db.fetchTxDataByLoc(addrIndex.blkHeight,
|
|
||||||
addrIndex.txoffset, addrIndex.txlen, []byte{})
|
|
||||||
if err != nil {
|
|
||||||
// Eat a possible error due to a potential re-org.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
txSha := tx.TxSha()
|
|
||||||
txReply := &database.TxListReply{Sha: &txSha, Tx: tx,
|
|
||||||
BlkSha: blkSha, Height: blkHeight, TxSpent: []bool{}, Err: err}
|
|
||||||
|
|
||||||
replies = append(replies, txReply)
|
|
||||||
limit--
|
|
||||||
}
|
|
||||||
iter.Release()
|
|
||||||
if err := iter.Error(); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return replies, skipped, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateAddrIndexForBlock updates the stored addrindex with the passed
|
|
||||||
// index information for a particular block height. Additionally, it
|
|
||||||
// will update the stored meta-data related to the current tip of the
|
|
||||||
// addr index. These two operations are performed in an atomic
|
|
||||||
// transaction which is committed before the function returns.
|
|
||||||
// Transactions indexed by address are stored with the following format:
|
|
||||||
// * prefix || hash160 || blockHeight || txoffset || txlen
|
|
||||||
// Indexes are stored purely in the key, with blank data for the actual value
|
|
||||||
// in order to facilitate ease of iteration by their shared prefix and
|
|
||||||
// also to allow limiting the number of returned transactions (RPC).
|
|
||||||
// Alternatively, indexes for each address could be stored as an
|
|
||||||
// append-only list for the stored value. However, this adds unnecessary
|
|
||||||
// overhead when storing and retrieving since the entire list must
|
|
||||||
// be fetched each time.
|
|
||||||
func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *wire.ShaHash, blkHeight int32, addrIndex database.BlockAddrIndex) error {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
var blankData []byte
|
|
||||||
batch := db.lBatch()
|
|
||||||
defer db.lbatch.Reset()
|
|
||||||
|
|
||||||
// Write all data for the new address indexes in a single batch
|
|
||||||
// transaction.
|
|
||||||
for addrKey, indexes := range addrIndex {
|
|
||||||
for _, txLoc := range indexes {
|
|
||||||
index := &txAddrIndex{
|
|
||||||
hash160: addrKey,
|
|
||||||
blkHeight: blkHeight,
|
|
||||||
txoffset: txLoc.TxStart,
|
|
||||||
txlen: txLoc.TxLen,
|
|
||||||
}
|
|
||||||
// The index is stored purely in the key.
|
|
||||||
packedIndex := addrIndexToKey(index)
|
|
||||||
batch.Put(packedIndex, blankData)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update tip of addrindex.
|
|
||||||
newIndexTip := make([]byte, 40, 40)
|
|
||||||
copy(newIndexTip[0:32], blkSha[:])
|
|
||||||
binary.LittleEndian.PutUint64(newIndexTip[32:40], uint64(blkHeight))
|
|
||||||
batch.Put(addrIndexMetaDataKey, newIndexTip)
|
|
||||||
|
|
||||||
// Ensure we're writing an address index version
|
|
||||||
newIndexVersion := make([]byte, 2, 2)
|
|
||||||
binary.LittleEndian.PutUint16(newIndexVersion[0:2],
|
|
||||||
uint16(addrIndexCurrentVersion))
|
|
||||||
batch.Put(addrIndexVersionKey, newIndexVersion)
|
|
||||||
|
|
||||||
if err := db.lDb.Write(batch, db.wo); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
db.lastAddrIndexBlkIdx = blkHeight
|
|
||||||
db.lastAddrIndexBlkSha = *blkSha
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
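For illustration, a sketch of decoding the 40-byte tip record written above (the helper name is hypothetical): the block sha occupies bytes [0:32] and the little-endian height occupies bytes [32:40].
// fetchAddrIndexTipExample reads back the addrindex tip metadata.
func (db *LevelDb) fetchAddrIndexTipExample() (*wire.ShaHash, int32, error) {
	buf, err := db.lDb.Get(addrIndexMetaDataKey, db.ro)
	if err != nil {
		return nil, 0, err
	}
	if len(buf) < 40 {
		return nil, 0, errors.New("malformed addrindex tip record")
	}
	var sha wire.ShaHash
	copy(sha[:], buf[0:32])
	height := int32(binary.LittleEndian.Uint64(buf[32:40]))
	return &sha, height, nil
}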
|
|
||||||
|
|
||||||
// DeleteAddrIndex deletes the entire addrindex stored within the DB.
|
|
||||||
// It also resets the cached in-memory metadata about the addr index.
|
|
||||||
func (db *LevelDb) DeleteAddrIndex() error {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
batch := db.lBatch()
|
|
||||||
defer batch.Reset()
|
|
||||||
|
|
||||||
// Delete the entire index along with any metadata about it.
|
|
||||||
iter := db.lDb.NewIterator(bytesPrefix(addrIndexKeyPrefix), db.ro)
|
|
||||||
numInBatch := 0
|
|
||||||
for iter.Next() {
|
|
||||||
key := iter.Key()
|
|
||||||
// With a 24-bit index key prefix, 1 in every 2^24 keys is a collision.
|
|
||||||
// We check the length to make sure we only delete address index keys.
|
|
||||||
if len(key) == addrIndexKeyLength {
|
|
||||||
batch.Delete(key)
|
|
||||||
numInBatch++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete in chunks to potentially avoid very large batches.
|
|
||||||
if numInBatch >= batchDeleteThreshold {
|
|
||||||
if err := db.lDb.Write(batch, db.wo); err != nil {
|
|
||||||
iter.Release()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
batch.Reset()
|
|
||||||
numInBatch = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
iter.Release()
|
|
||||||
if err := iter.Error(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
batch.Delete(addrIndexMetaDataKey)
|
|
||||||
batch.Delete(addrIndexVersionKey)
|
|
||||||
|
|
||||||
if err := db.lDb.Write(batch, db.wo); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
db.lastAddrIndexBlkIdx = -1
|
|
||||||
db.lastAddrIndexBlkSha = wire.ShaHash{}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteOldAddrIndex deletes the entire addrindex stored within the DB for a
|
|
||||||
// 2-byte addrIndexKeyPrefix. It also resets the cached in-memory metadata about
|
|
||||||
// the addr index.
|
|
||||||
func (db *LevelDb) deleteOldAddrIndex() error {
|
|
||||||
db.dbLock.Lock()
|
|
||||||
defer db.dbLock.Unlock()
|
|
||||||
|
|
||||||
batch := db.lBatch()
|
|
||||||
defer batch.Reset()
|
|
||||||
|
|
||||||
// Delete the entire index along with any metadata about it.
|
|
||||||
iter := db.lDb.NewIterator(bytesPrefix([]byte("a-")), db.ro)
|
|
||||||
numInBatch := 0
|
|
||||||
for iter.Next() {
|
|
||||||
key := iter.Key()
|
|
||||||
// With a 24-bit index key prefix, 1 in every 2^24 keys is a collision.
|
|
||||||
// We check the length to make sure we only delete address index keys.
|
|
||||||
// We also check the last two bytes to make sure the suffix doesn't
|
|
||||||
// match other types of index that are 34 bytes long.
|
|
||||||
if len(key) == 34 && !bytes.HasSuffix(key, recordSuffixTx) &&
|
|
||||||
!bytes.HasSuffix(key, recordSuffixSpentTx) {
|
|
||||||
batch.Delete(key)
|
|
||||||
numInBatch++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete in chunks to potentially avoid very large batches.
|
|
||||||
if numInBatch >= batchDeleteThreshold {
|
|
||||||
if err := db.lDb.Write(batch, db.wo); err != nil {
|
|
||||||
iter.Release()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
batch.Reset()
|
|
||||||
numInBatch = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
iter.Release()
|
|
||||||
if err := iter.Error(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
batch.Delete(addrIndexMetaDataKey)
|
|
||||||
batch.Delete(addrIndexVersionKey)
|
|
||||||
|
|
||||||
if err := db.lDb.Write(batch, db.wo); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
db.lastAddrIndexBlkIdx = -1
|
|
||||||
db.lastAddrIndexBlkSha = wire.ShaHash{}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
// Copyright (c) 2013-2016 The btcsuite developers
|
||||||
// Use of this source code is governed by an ISC
|
// Use of this source code is governed by an ISC
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
@ -32,6 +32,13 @@ func DisableLog() {
|
||||||
// using btclog.
|
// using btclog.
|
||||||
func UseLogger(logger btclog.Logger) {
|
func UseLogger(logger btclog.Logger) {
|
||||||
log = logger
|
log = logger
|
||||||
|
|
||||||
|
// Update the logger for the registered drivers.
|
||||||
|
for _, drv := range drivers {
|
||||||
|
if drv.UseLogger != nil {
|
||||||
|
drv.UseLogger(logger)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetLogWriter uses a specified io.Writer to output package logging info.
|
// SetLogWriter uses a specified io.Writer to output package logging info.
|
||||||
|
@ -56,8 +63,3 @@ func SetLogWriter(w io.Writer, level string) error {
|
||||||
UseLogger(l)
|
UseLogger(l)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLog returns the currently active logger.
|
|
||||||
func GetLog() btclog.Logger {
|
|
||||||
return log
|
|
||||||
}
|
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
// Use of this source code is governed by an ISC
|
// Use of this source code is governed by an ISC
|
||||||
// license that can be found in the LICENSE file.
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
package database2_test
|
package database_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
@ -10,7 +10,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
database "github.com/btcsuite/btcd/database2"
|
"github.com/btcsuite/btcd/database"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestSetLogWriter ensures the
|
// TestSetLogWriter ensures the
|
|
@ -1,12 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package memdb implements an instance of the database package that uses memory
|
|
||||||
for the block storage.
|
|
||||||
|
|
||||||
This is primarily used for testing purposes as normal operations require a
|
|
||||||
persistent block storage mechanism which this is not.
|
|
||||||
*/
|
|
||||||
package memdb
|
|
|
@ -1,49 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package memdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btclog"
|
|
||||||
)
|
|
||||||
|
|
||||||
var log = btclog.Disabled
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
driver := database.DriverDB{DbType: "memdb", CreateDB: CreateDB, OpenDB: OpenDB}
|
|
||||||
database.AddDBDriver(driver)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseArgs parses the arguments from the database package Open/Create methods.
|
|
||||||
func parseArgs(funcName string, args ...interface{}) error {
|
|
||||||
if len(args) != 0 {
|
|
||||||
return fmt.Errorf("memdb.%s does not accept any arguments",
|
|
||||||
funcName)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenDB opens an existing database for use.
|
|
||||||
func OpenDB(args ...interface{}) (database.Db, error) {
|
|
||||||
if err := parseArgs("OpenDB", args...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// A memory database is not persistent, so let CreateDB handle it.
|
|
||||||
return CreateDB()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateDB creates, initializes, and opens a database for use.
|
|
||||||
func CreateDB(args ...interface{}) (database.Db, error) {
|
|
||||||
if err := parseArgs("CreateDB", args...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log = database.GetLog()
|
|
||||||
return newMemDb(), nil
|
|
||||||
}
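A minimal usage sketch for the driver defined above, assuming btcutil and chaincfg are imported alongside this package: it creates the in-memory database, inserts the mainnet genesis block, and closes it.
// exampleMemDbUsage is illustrative only.
func exampleMemDbUsage() error {
	db, err := CreateDB()
	if err != nil {
		return err
	}
	defer db.Close()

	// The first inserted block is treated as the genesis block.
	genesis := btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock)
	if _, err := db.InsertBlock(genesis); err != nil {
		return err
	}
	return nil
}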
|
|
|
@ -1,744 +0,0 @@
|
||||||
// Copyright (c) 2013-2014 The btcsuite developers
|
|
||||||
// Use of this source code is governed by an ISC
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package memdb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/database"
|
|
||||||
"github.com/btcsuite/btcd/wire"
|
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Errors that the various database functions may return.
|
|
||||||
var (
|
|
||||||
ErrDbClosed = errors.New("database is closed")
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
zeroHash = wire.ShaHash{}
|
|
||||||
|
|
||||||
// The following two hashes are ones that must be specially handled.
|
|
||||||
// See the comments where they're used for more details.
|
|
||||||
dupTxHash91842 = newShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
|
|
||||||
dupTxHash91880 = newShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
|
|
||||||
)
|
|
||||||
|
|
||||||
// tTxInsertData holds information about the location and spent status of
|
|
||||||
// a transaction.
|
|
||||||
type tTxInsertData struct {
|
|
||||||
blockHeight int32
|
|
||||||
offset int
|
|
||||||
spentBuf []bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// newShaHashFromStr converts the passed big-endian hex string into a
|
|
||||||
// wire.ShaHash. It only differs from the one available in wire in that it
|
|
||||||
// ignores the error since it will only (and must only) be called with
|
|
||||||
// hard-coded, and therefore known good, hashes.
|
|
||||||
func newShaHashFromStr(hexStr string) *wire.ShaHash {
|
|
||||||
sha, _ := wire.NewShaHashFromStr(hexStr)
|
|
||||||
return sha
|
|
||||||
}
|
|
||||||
|
|
||||||
// isCoinbaseInput returns whether or not the passed transaction input is a
|
|
||||||
// coinbase input. A coinbase is a special transaction created by miners that
|
|
||||||
// has no inputs. This is represented in the block chain by a transaction with
|
|
||||||
// a single input that has a previous output transaction index set to the
|
|
||||||
// maximum value along with a zero hash.
|
|
||||||
func isCoinbaseInput(txIn *wire.TxIn) bool {
|
|
||||||
prevOut := &txIn.PreviousOutPoint
|
|
||||||
if prevOut.Index == math.MaxUint32 && prevOut.Hash.IsEqual(&zeroHash) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// isFullySpent returns whether or not a transaction represented by the passed
|
|
||||||
// transaction insert data is fully spent. A fully spent transaction is one
|
|
||||||
// where all outputs are spent.
|
|
||||||
func isFullySpent(txD *tTxInsertData) bool {
|
|
||||||
for _, spent := range txD.spentBuf {
|
|
||||||
if !spent {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemDb is a concrete implementation of the database.Db interface which provides
|
|
||||||
// a memory-only database. Since it is memory-only, it is obviously not
|
|
||||||
// persistent and is mostly only useful for testing purposes.
|
|
||||||
type MemDb struct {
|
|
||||||
// Embed a mutex for safe concurrent access.
|
|
||||||
sync.Mutex
|
|
||||||
|
|
||||||
// blocks holds all of the bitcoin blocks that will be in the memory
|
|
||||||
// database.
|
|
||||||
blocks []*wire.MsgBlock
|
|
||||||
|
|
||||||
// blocksBySha keeps track of block heights by hash. The height can
|
|
||||||
// be used as an index into the blocks slice.
|
|
||||||
blocksBySha map[wire.ShaHash]int32
|
|
||||||
|
|
||||||
// txns holds information about transactions such as their
|
|
||||||
// block height and spent status of all their outputs.
|
|
||||||
txns map[wire.ShaHash][]*tTxInsertData
|
|
||||||
|
|
||||||
// closed indicates whether or not the database has been closed and is
|
|
||||||
// therefore invalidated.
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeTx removes the passed transaction including unspending it.
|
|
||||||
func (db *MemDb) removeTx(msgTx *wire.MsgTx, txHash *wire.ShaHash) {
|
|
||||||
// Undo all of the spends for the transaction.
|
|
||||||
for _, txIn := range msgTx.TxIn {
|
|
||||||
if isCoinbaseInput(txIn) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
prevOut := &txIn.PreviousOutPoint
|
|
||||||
originTxns, exists := db.txns[prevOut.Hash]
|
|
||||||
if !exists {
|
|
||||||
log.Warnf("Unable to find input transaction %s to "+
|
|
||||||
"unspend %s index %d", prevOut.Hash, txHash,
|
|
||||||
prevOut.Index)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
originTxD := originTxns[len(originTxns)-1]
|
|
||||||
originTxD.spentBuf[prevOut.Index] = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove the info for the most recent version of the transaction.
|
|
||||||
txns := db.txns[*txHash]
|
|
||||||
lastIndex := len(txns) - 1
|
|
||||||
txns[lastIndex] = nil
|
|
||||||
txns = txns[:lastIndex]
|
|
||||||
db.txns[*txHash] = txns
|
|
||||||
|
|
||||||
// Remove the info entry from the map altogether if there are not any older
|
|
||||||
// versions of the transaction.
|
|
||||||
if len(txns) == 0 {
|
|
||||||
delete(db.txns, *txHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close cleanly shuts down database. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
//
|
|
||||||
// All data is purged upon close with this implementation since it is a
|
|
||||||
// memory-only database.
|
|
||||||
func (db *MemDb) Close() error {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
db.blocks = nil
|
|
||||||
db.blocksBySha = nil
|
|
||||||
db.txns = nil
|
|
||||||
db.closed = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DropAfterBlockBySha removes any blocks from the database after the given
|
|
||||||
// block. This is different than a simple truncate since the spend information
|
|
||||||
// for each block must also be unwound. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
func (db *MemDb) DropAfterBlockBySha(sha *wire.ShaHash) error {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
// Begin by attempting to find the height associated with the passed
|
|
||||||
// hash.
|
|
||||||
height, exists := db.blocksBySha[*sha]
|
|
||||||
if !exists {
|
|
||||||
return fmt.Errorf("block %v does not exist in the database",
|
|
||||||
sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The spend information has to be undone in reverse order, so loop
|
|
||||||
// backwards from the last block through the block just after the passed
|
|
||||||
// block. While doing this unspend all transactions in each block and
|
|
||||||
// remove the block.
|
|
||||||
endHeight := int32(len(db.blocks) - 1)
|
|
||||||
for i := endHeight; i > height; i-- {
|
|
||||||
// Unspend and remove each transaction in reverse order because
|
|
||||||
// later transactions in a block can reference earlier ones.
|
|
||||||
transactions := db.blocks[i].Transactions
|
|
||||||
for j := len(transactions) - 1; j >= 0; j-- {
|
|
||||||
tx := transactions[j]
|
|
||||||
txHash := tx.TxSha()
|
|
||||||
db.removeTx(tx, &txHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
db.blocks[i] = nil
|
|
||||||
db.blocks = db.blocks[:i]
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExistsSha returns whether or not the given block hash is present in the
|
|
||||||
// database. This is part of the database.Db interface implementation.
|
|
||||||
func (db *MemDb) ExistsSha(sha *wire.ShaHash) (bool, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return false, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, exists := db.blocksBySha[*sha]; exists {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchBlockBySha returns a btcutil.Block. The implementation may cache the
|
|
||||||
// underlying data if desired. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
//
|
|
||||||
// This implementation does not use any additional cache since the entire
|
|
||||||
// database is already in memory.
|
|
||||||
func (db *MemDb) FetchBlockBySha(sha *wire.ShaHash) (*btcutil.Block, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return nil, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
if blockHeight, exists := db.blocksBySha[*sha]; exists {
|
|
||||||
block := btcutil.NewBlock(db.blocks[int(blockHeight)])
|
|
||||||
block.SetHeight(blockHeight)
|
|
||||||
return block, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("block %v is not in database", sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchBlockHeightBySha returns the block height for the given hash. This is
|
|
||||||
// part of the database.Db interface implementation.
|
|
||||||
func (db *MemDb) FetchBlockHeightBySha(sha *wire.ShaHash) (int32, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return 0, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
if blockHeight, exists := db.blocksBySha[*sha]; exists {
|
|
||||||
return blockHeight, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, fmt.Errorf("block %v is not in database", sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchBlockHeaderBySha returns a wire.BlockHeader for the given sha. The
|
|
||||||
// implementation may cache the underlying data if desired. This is part of the
|
|
||||||
// database.Db interface implementation.
|
|
||||||
//
|
|
||||||
// This implementation does not use any additional cache since the entire
|
|
||||||
// database is already in memory.
|
|
||||||
func (db *MemDb) FetchBlockHeaderBySha(sha *wire.ShaHash) (*wire.BlockHeader, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return nil, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
if blockHeight, exists := db.blocksBySha[*sha]; exists {
|
|
||||||
return &db.blocks[int(blockHeight)].Header, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("block header %v is not in database", sha)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchBlockShaByHeight returns a block hash based on its height in the block
|
|
||||||
// chain. This is part of the database.Db interface implementation.
|
|
||||||
func (db *MemDb) FetchBlockShaByHeight(height int32) (*wire.ShaHash, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return nil, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
numBlocks := int32(len(db.blocks))
|
|
||||||
if height < 0 || height > numBlocks-1 {
|
|
||||||
return nil, fmt.Errorf("unable to fetch block height %d since "+
|
|
||||||
"it is not within the valid range (%d-%d)", height, 0,
|
|
||||||
numBlocks-1)
|
|
||||||
}
|
|
||||||
|
|
||||||
msgBlock := db.blocks[height]
|
|
||||||
blockHash := msgBlock.BlockSha()
|
|
||||||
return &blockHash, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchHeightRange looks up a range of blocks by the start and ending heights.
|
|
||||||
// Fetch is inclusive of the start height and exclusive of the ending height.
|
|
||||||
// To fetch all hashes from the start height until no more are present, use the
|
|
||||||
// special id `AllShas'. This is part of the database.Db interface implementation.
|
|
||||||
func (db *MemDb) FetchHeightRange(startHeight, endHeight int32) ([]wire.ShaHash, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return nil, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
// When the user passes the special AllShas id, adjust the end height
|
|
||||||
// accordingly.
|
|
||||||
if endHeight == database.AllShas {
|
|
||||||
endHeight = int32(len(db.blocks))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure requested heights are sane.
|
|
||||||
if startHeight < 0 {
|
|
||||||
return nil, fmt.Errorf("start height of fetch range must not "+
|
|
||||||
"be less than zero - got %d", startHeight)
|
|
||||||
}
|
|
||||||
if endHeight < startHeight {
|
|
||||||
return nil, fmt.Errorf("end height of fetch range must not "+
|
|
||||||
"be less than the start height - got start %d, end %d",
|
|
||||||
startHeight, endHeight)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch as many as are available within the specified range.
|
|
||||||
lastBlockIndex := int32(len(db.blocks) - 1)
|
|
||||||
hashList := make([]wire.ShaHash, 0, endHeight-startHeight)
|
|
||||||
for i := startHeight; i < endHeight; i++ {
|
|
||||||
if i > lastBlockIndex {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
msgBlock := db.blocks[i]
|
|
||||||
blockHash := msgBlock.BlockSha()
|
|
||||||
hashList = append(hashList, blockHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
return hashList, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExistsTxSha returns whether or not the given transaction hash is present in
|
|
||||||
// the database and is not fully spent. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
func (db *MemDb) ExistsTxSha(sha *wire.ShaHash) (bool, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return false, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
if txns, exists := db.txns[*sha]; exists {
|
|
||||||
return !isFullySpent(txns[len(txns)-1]), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchTxBySha returns some data for the given transaction hash. The
|
|
||||||
// implementation may cache the underlying data if desired. This is part of the
|
|
||||||
// database.Db interface implementation.
|
|
||||||
//
|
|
||||||
// This implementation does not use any additional cache since the entire
|
|
||||||
// database is already in memory.
|
|
||||||
func (db *MemDb) FetchTxBySha(txHash *wire.ShaHash) ([]*database.TxListReply, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return nil, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
txns, exists := db.txns[*txHash]
|
|
||||||
if !exists {
|
|
||||||
log.Warnf("FetchTxBySha: requested hash of %s does not exist",
|
|
||||||
txHash)
|
|
||||||
return nil, database.ErrTxShaMissing
|
|
||||||
}
|
|
||||||
|
|
||||||
txHashCopy := *txHash
|
|
||||||
replyList := make([]*database.TxListReply, len(txns))
|
|
||||||
for i, txD := range txns {
|
|
||||||
msgBlock := db.blocks[txD.blockHeight]
|
|
||||||
blockSha := msgBlock.BlockSha()
|
|
||||||
|
|
||||||
spentBuf := make([]bool, len(txD.spentBuf))
|
|
||||||
copy(spentBuf, txD.spentBuf)
|
|
||||||
reply := database.TxListReply{
|
|
||||||
Sha: &txHashCopy,
|
|
||||||
Tx: msgBlock.Transactions[txD.offset],
|
|
||||||
BlkSha: &blockSha,
|
|
||||||
Height: txD.blockHeight,
|
|
||||||
TxSpent: spentBuf,
|
|
||||||
Err: nil,
|
|
||||||
}
|
|
||||||
replyList[i] = &reply
|
|
||||||
}
|
|
||||||
|
|
||||||
return replyList, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchTxByShaList fetches transactions and information about them given an
|
|
||||||
// array of transaction hashes. The result is a slice of TxListReply objects
|
|
||||||
// which contain the transaction and information about it such as what block and
|
|
||||||
// block height it's contained in and which outputs are spent.
|
|
||||||
//
|
|
||||||
// The includeSpent flag indicates whether or not information about transactions
|
|
||||||
// which are fully spent should be returned. When the flag is not set, the
|
|
||||||
// corresponding entry in the TxListReply slice for fully spent transactions
|
|
||||||
// will indicate the transaction does not exist.
|
|
||||||
//
|
|
||||||
// This function must be called with the db lock held.
|
|
||||||
func (db *MemDb) fetchTxByShaList(txShaList []*wire.ShaHash, includeSpent bool) []*database.TxListReply {
|
|
||||||
replyList := make([]*database.TxListReply, 0, len(txShaList))
|
|
||||||
for i, hash := range txShaList {
|
|
||||||
// Every requested entry needs a response, so start with nothing
|
|
||||||
// more than a response with the requested hash marked missing.
|
|
||||||
// The reply will be updated below with the appropriate
|
|
||||||
// information if the transaction exists.
|
|
||||||
reply := database.TxListReply{
|
|
||||||
Sha: txShaList[i],
|
|
||||||
Err: database.ErrTxShaMissing,
|
|
||||||
}
|
|
||||||
replyList = append(replyList, &reply)
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
reply.Err = ErrDbClosed
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if txns, exists := db.txns[*hash]; exists {
|
|
||||||
// A given transaction may have duplicates so long as the
|
|
||||||
// previous one is fully spent. We are only interested
|
|
||||||
// in the most recent version of the transaction for
|
|
||||||
// this function. The FetchTxBySha function can be
|
|
||||||
// used to get all versions of a transaction.
|
|
||||||
txD := txns[len(txns)-1]
|
|
||||||
if !includeSpent && isFullySpent(txD) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Look up the referenced block and get its hash. Set
|
|
||||||
// the reply error appropriately and go to the next
|
|
||||||
// requested transaction if anything goes wrong.
|
|
||||||
msgBlock := db.blocks[txD.blockHeight]
|
|
||||||
blockSha := msgBlock.BlockSha()
|
|
||||||
|
|
||||||
// Make a copy of the spent buf to return so the caller
|
|
||||||
// can't accidentally modify it.
|
|
||||||
spentBuf := make([]bool, len(txD.spentBuf))
|
|
||||||
copy(spentBuf, txD.spentBuf)
|
|
||||||
|
|
||||||
// Populate the reply.
|
|
||||||
reply.Tx = msgBlock.Transactions[txD.offset]
|
|
||||||
reply.BlkSha = &blockSha
|
|
||||||
reply.Height = txD.blockHeight
|
|
||||||
reply.TxSpent = spentBuf
|
|
||||||
reply.Err = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return replyList
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchTxByShaList returns a TxListReply given an array of transaction hashes.
|
|
||||||
// The implementation may cache the underlying data if desired. This is part of
|
|
||||||
// the database.Db interface implementation.
|
|
||||||
//
|
|
||||||
// This implementation does not use any additional cache since the entire
|
|
||||||
// database is already in memory.
|
|
||||||
|
|
||||||
// FetchTxByShaList returns a TxListReply given an array of transaction
|
|
||||||
// hashes. This function differs from FetchUnSpentTxByShaList in that it
|
|
||||||
// returns the most recent version of fully spent transactions. Due to the
|
|
||||||
// increased number of transaction fetches, this function is typically more
|
|
||||||
// expensive than the unspent counterpart, however the specific performance
|
|
||||||
// details depend on the concrete implementation. The implementation may cache
|
|
||||||
// the underlying data if desired. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
//
|
|
||||||
// To fetch all versions of a specific transaction, call FetchTxBySha.
|
|
||||||
//
|
|
||||||
// This implementation does not use any additional cache since the entire
|
|
||||||
// database is already in memory.
|
|
||||||
func (db *MemDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
return db.fetchTxByShaList(txShaList, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchUnSpentTxByShaList returns a TxListReply given an array of transaction
|
|
||||||
// hashes. Any transactions which are fully spent will indicate they do not
|
|
||||||
// exist by setting the Err field to TxShaMissing. The implementation may cache
|
|
||||||
// the underlying data if desired. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
//
|
|
||||||
// To obtain results which do contain the most recent version of a fully spent
|
|
||||||
// transactions, call FetchTxByShaList. To fetch all versions of a specific
|
|
||||||
// transaction, call FetchTxBySha.
|
|
||||||
//
|
|
||||||
// This implementation does not use any additional cache since the entire
|
|
||||||
// database is already in memory.
|
|
||||||
func (db *MemDb) FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
return db.fetchTxByShaList(txShaList, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertBlock inserts raw block and transaction data from a block into the
|
|
||||||
// database. The first block inserted into the database will be treated as the
|
|
||||||
// genesis block. Every subsequent block insert requires the referenced parent
|
|
||||||
// block to already exist. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
func (db *MemDb) InsertBlock(block *btcutil.Block) (int32, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return 0, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject the insert if the previously referenced block does not exist
|
|
||||||
// except in the case there are no blocks inserted yet where the first
|
|
||||||
// inserted block is assumed to be a genesis block.
|
|
||||||
msgBlock := block.MsgBlock()
|
|
||||||
if _, exists := db.blocksBySha[msgBlock.Header.PrevBlock]; !exists {
|
|
||||||
if len(db.blocks) > 0 {
|
|
||||||
return 0, database.ErrPrevShaMissing
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build a map of in-flight transactions because some of the inputs in
|
|
||||||
// this block could be referencing other transactions earlier in this
|
|
||||||
// block which are not yet in the chain.
|
|
||||||
txInFlight := map[wire.ShaHash]int{}
|
|
||||||
transactions := block.Transactions()
|
|
||||||
for i, tx := range transactions {
|
|
||||||
txInFlight[*tx.Sha()] = i
|
|
||||||
}
|
|
||||||
|
|
||||||
// Loop through all transactions and inputs to ensure there are no error
|
|
||||||
// conditions that would prevent them from being inserted into the db.
|
|
||||||
// Although these checks could be done in the loop below, checking
|
|
||||||
// for error conditions up front means the code below doesn't have to
|
|
||||||
// deal with rollback on errors.
|
|
||||||
newHeight := int32(len(db.blocks))
|
|
||||||
for i, tx := range transactions {
|
|
||||||
// Two old blocks contain duplicate transactions due to being
|
|
||||||
// mined by faulty miners and accepted by the origin Satoshi
|
|
||||||
// client. Rules have since been added to the ensure this
|
|
||||||
// problem can no longer happen, but the two duplicate
|
|
||||||
// transactions which were originally accepted are forever in
|
|
||||||
// the block chain history and must be dealth with specially.
|
|
||||||
// http://blockexplorer.com/b/91842
|
|
||||||
// http://blockexplorer.com/b/91880
|
|
||||||
if newHeight == 91842 && tx.Sha().IsEqual(dupTxHash91842) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if newHeight == 91880 && tx.Sha().IsEqual(dupTxHash91880) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, txIn := range tx.MsgTx().TxIn {
|
|
||||||
if isCoinbaseInput(txIn) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// It is acceptable for a transaction input to reference
|
|
||||||
// the output of another transaction in this block only
|
|
||||||
// if the referenced transaction comes before the
|
|
||||||
// current one in this block.
|
|
||||||
prevOut := &txIn.PreviousOutPoint
|
|
||||||
if inFlightIndex, ok := txInFlight[prevOut.Hash]; ok {
|
|
||||||
if i <= inFlightIndex {
|
|
||||||
log.Warnf("InsertBlock: requested hash "+
|
|
||||||
" of %s does not exist in-flight",
|
|
||||||
tx.Sha())
|
|
||||||
return 0, database.ErrTxShaMissing
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
originTxns, exists := db.txns[prevOut.Hash]
|
|
||||||
if !exists {
|
|
||||||
log.Warnf("InsertBlock: requested hash "+
|
|
||||||
"of %s by %s does not exist",
|
|
||||||
prevOut.Hash, tx.Sha())
|
|
||||||
return 0, database.ErrTxShaMissing
|
|
||||||
}
|
|
||||||
originTxD := originTxns[len(originTxns)-1]
|
|
||||||
if prevOut.Index > uint32(len(originTxD.spentBuf)) {
|
|
||||||
log.Warnf("InsertBlock: requested hash "+
|
|
||||||
"of %s with index %d does not "+
|
|
||||||
"exist", tx.Sha(), prevOut.Index)
|
|
||||||
return 0, database.ErrTxShaMissing
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prevent duplicate transactions in the same block.
|
|
||||||
if inFlightIndex, exists := txInFlight[*tx.Sha()]; exists &&
|
|
||||||
inFlightIndex < i {
|
|
||||||
log.Warnf("Block contains duplicate transaction %s",
|
|
||||||
tx.Sha())
|
|
||||||
return 0, database.ErrDuplicateSha
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prevent duplicate transactions unless the old one is fully
|
|
||||||
// spent.
|
|
||||||
if txns, exists := db.txns[*tx.Sha()]; exists {
|
|
||||||
txD := txns[len(txns)-1]
|
|
||||||
if !isFullySpent(txD) {
|
|
||||||
log.Warnf("Attempt to insert duplicate "+
|
|
||||||
"transaction %s", tx.Sha())
|
|
||||||
return 0, database.ErrDuplicateSha
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.blocks = append(db.blocks, msgBlock)
|
|
||||||
db.blocksBySha[*block.Sha()] = newHeight
|
|
||||||
|
|
||||||
// Insert information about eacj transaction and spend all of the
|
|
||||||
// outputs referenced by the inputs to the transactions.
|
|
||||||
for i, tx := range block.Transactions() {
|
|
||||||
// Insert the transaction data.
|
|
||||||
txD := tTxInsertData{
|
|
||||||
blockHeight: newHeight,
|
|
||||||
offset: i,
|
|
||||||
spentBuf: make([]bool, len(tx.MsgTx().TxOut)),
|
|
||||||
}
|
|
||||||
db.txns[*tx.Sha()] = append(db.txns[*tx.Sha()], &txD)
|
|
||||||
|
|
||||||
// Spend all of the inputs.
|
|
||||||
for _, txIn := range tx.MsgTx().TxIn {
|
|
||||||
// Coinbase transaction has no inputs.
|
|
||||||
if isCoinbaseInput(txIn) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Already checked for existing and valid ranges above.
|
|
||||||
prevOut := &txIn.PreviousOutPoint
|
|
||||||
originTxns := db.txns[prevOut.Hash]
|
|
||||||
originTxD := originTxns[len(originTxns)-1]
|
|
||||||
originTxD.spentBuf[prevOut.Index] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return newHeight, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewestSha returns the hash and block height of the most recent (end) block of
|
|
||||||
// the block chain. It will return the zero hash, -1 for the block height, and
|
|
||||||
// no error (nil) if there are not any blocks in the database yet. This is part
|
|
||||||
// of the database.Db interface implementation.
|
|
||||||
func (db *MemDb) NewestSha() (*wire.ShaHash, int32, error) {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return nil, 0, ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
// When the database has not had a genesis block inserted yet, return
|
|
||||||
// values specified by interface contract.
|
|
||||||
numBlocks := len(db.blocks)
|
|
||||||
if numBlocks == 0 {
|
|
||||||
return &zeroHash, -1, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
blockSha := db.blocks[numBlocks-1].BlockSha()
|
|
||||||
return &blockSha, int32(numBlocks - 1), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchAddrIndexTip isn't currently implemented. This is a part of the
|
|
||||||
// database.Db interface implementation.
|
|
||||||
func (db *MemDb) FetchAddrIndexTip() (*wire.ShaHash, int32, error) {
|
|
||||||
return nil, 0, database.ErrNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateAddrIndexForBlock isn't currently implemented. This is a part of the
|
|
||||||
// database.Db interface implementation.
|
|
||||||
func (db *MemDb) UpdateAddrIndexForBlock(*wire.ShaHash, int32,
|
|
||||||
database.BlockAddrIndex) error {
|
|
||||||
return database.ErrNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchTxsForAddr isn't currently implemented. This is a part of the database.Db
|
|
||||||
// interface implementation.
|
|
||||||
func (db *MemDb) FetchTxsForAddr(btcutil.Address, int, int, bool) ([]*database.TxListReply, int, error) {
|
|
||||||
return nil, 0, database.ErrNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteAddrIndex isn't currently implemented. This is a part of the database.Db
|
|
||||||
// interface implementation.
|
|
||||||
func (db *MemDb) DeleteAddrIndex() error {
|
|
||||||
return database.ErrNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
// RollbackClose discards the recent database changes to the previously saved
|
|
||||||
// data at last Sync and closes the database. This is part of the database.Db
|
|
||||||
// interface implementation.
|
|
||||||
//
|
|
||||||
// The database is completely purged on close with this implementation since the
|
|
||||||
// entire database is only in memory. As a result, this function behaves no
|
|
||||||
// differently than Close.
|
|
||||||
func (db *MemDb) RollbackClose() error {
|
|
||||||
// Rollback doesn't apply to a memory database, so just call Close.
|
|
||||||
// Close handles the mutex locks.
|
|
||||||
return db.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync verifies that the database is coherent on disk and no outstanding
|
|
||||||
// transactions are in flight. This is part of the database.Db interface
|
|
||||||
// implementation.
|
|
||||||
//
|
|
||||||
// This implementation does not write any data to disk, so this function only
|
|
||||||
// grabs a lock to ensure it doesn't return until other operations are complete.
|
|
||||||
func (db *MemDb) Sync() error {
|
|
||||||
db.Lock()
|
|
||||||
defer db.Unlock()
|
|
||||||
|
|
||||||
if db.closed {
|
|
||||||
return ErrDbClosed
|
|
||||||
}
|
|
||||||
|
|
||||||
// There is nothing extra to do to sync the memory database. However,
|
|
||||||
// the lock is still grabbed to ensure the function does not return
|
|
||||||
// until other operations are complete.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// newMemDb returns a new memory-only database ready for block inserts.
|
|
||||||
func newMemDb() *MemDb {
|
|
||||||
db := MemDb{
|
|
||||||
blocks: make([]*wire.MsgBlock, 0, 200000),
|
|
||||||
blocksBySha: make(map[wire.ShaHash]int32),
|
|
||||||
txns: make(map[wire.ShaHash][]*tTxInsertData),
|
|
||||||
}
|
|
||||||
return &db
|
|
||||||
}
|
|
|
@@ -1,115 +0,0 @@
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package memdb_test

import (
    "reflect"
    "testing"

    "github.com/btcsuite/btcd/chaincfg"
    "github.com/btcsuite/btcd/database"
    "github.com/btcsuite/btcd/database/memdb"
    "github.com/btcsuite/btcd/wire"
    "github.com/btcsuite/btcutil"
)

// TestClosed ensures calling the interface functions on a closed database
// returns appropriate errors for the interface functions that return errors
// and does not panic or otherwise misbehave for functions which do not return
// errors.
func TestClosed(t *testing.T) {
    db, err := database.CreateDB("memdb")
    if err != nil {
        t.Errorf("Failed to open test database %v", err)
        return
    }
    _, err = db.InsertBlock(btcutil.NewBlock(chaincfg.MainNetParams.GenesisBlock))
    if err != nil {
        t.Errorf("InsertBlock: %v", err)
    }
    if err := db.Close(); err != nil {
        t.Errorf("Close: unexpected error %v", err)
    }

    genesisHash := chaincfg.MainNetParams.GenesisHash
    if err := db.DropAfterBlockBySha(genesisHash); err != memdb.ErrDbClosed {
        t.Errorf("DropAfterBlockBySha: unexpected error %v", err)
    }

    if _, err := db.ExistsSha(genesisHash); err != memdb.ErrDbClosed {
        t.Errorf("ExistsSha: Unexpected error: %v", err)
    }

    if _, err := db.FetchBlockBySha(genesisHash); err != memdb.ErrDbClosed {
        t.Errorf("FetchBlockBySha: unexpected error %v", err)
    }

    if _, err := db.FetchBlockShaByHeight(0); err != memdb.ErrDbClosed {
        t.Errorf("FetchBlockShaByHeight: unexpected error %v", err)
    }

    if _, err := db.FetchHeightRange(0, 1); err != memdb.ErrDbClosed {
        t.Errorf("FetchHeightRange: unexpected error %v", err)
    }

    genesisCoinbaseTx := chaincfg.MainNetParams.GenesisBlock.Transactions[0]
    coinbaseHash := genesisCoinbaseTx.TxSha()
    if _, err := db.ExistsTxSha(&coinbaseHash); err != memdb.ErrDbClosed {
        t.Errorf("ExistsTxSha: unexpected error %v", err)
    }

    if _, err := db.FetchTxBySha(genesisHash); err != memdb.ErrDbClosed {
        t.Errorf("FetchTxBySha: unexpected error %v", err)
    }

    requestHashes := []*wire.ShaHash{genesisHash}
    reply := db.FetchTxByShaList(requestHashes)
    if len(reply) != len(requestHashes) {
        t.Errorf("FetchTxByShaList unexpected number of replies "+
            "got: %d, want: %d", len(reply), len(requestHashes))
    }
    for i, txLR := range reply {
        wantReply := &database.TxListReply{
            Sha: requestHashes[i],
            Err: memdb.ErrDbClosed,
        }
        if !reflect.DeepEqual(wantReply, txLR) {
            t.Errorf("FetchTxByShaList unexpected reply\ngot: %v\n"+
                "want: %v", txLR, wantReply)
        }
    }

    reply = db.FetchUnSpentTxByShaList(requestHashes)
    if len(reply) != len(requestHashes) {
        t.Errorf("FetchUnSpentTxByShaList unexpected number of replies "+
            "got: %d, want: %d", len(reply), len(requestHashes))
    }
    for i, txLR := range reply {
        wantReply := &database.TxListReply{
            Sha: requestHashes[i],
            Err: memdb.ErrDbClosed,
        }
        if !reflect.DeepEqual(wantReply, txLR) {
            t.Errorf("FetchUnSpentTxByShaList unexpected reply\n"+
                "got: %v\nwant: %v", txLR, wantReply)
        }
    }

    if _, _, err := db.NewestSha(); err != memdb.ErrDbClosed {
        t.Errorf("NewestSha: unexpected error %v", err)
    }

    if err := db.Sync(); err != memdb.ErrDbClosed {
        t.Errorf("Sync: unexpected error %v", err)
    }

    if err := db.RollbackClose(); err != memdb.ErrDbClosed {
        t.Errorf("RollbackClose: unexpected error %v", err)
    }

    if err := db.Close(); err != memdb.ErrDbClosed {
        t.Errorf("Close: unexpected error %v", err)
    }
}
@@ -1,169 +0,0 @@
// Copyright (c) 2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package database_test

import (
    "compress/bzip2"
    "encoding/binary"
    "io"
    "os"
    "path/filepath"
    "strings"
    "testing"

    "github.com/btcsuite/btcd/wire"
    "github.com/btcsuite/btcutil"
)

// testReorganization performs reorganization tests for the passed DB type.
// Much of the setup is copied from the blockchain package, but the test looks
// to see if each TX in each block in the best chain can be fetched using
// FetchTxBySha. If not, then there's a bug.
func testReorganization(t *testing.T, dbType string) {
    db, teardown, err := createDB(dbType, "reorganization", true)
    if err != nil {
        t.Fatalf("Failed to create test database (%s) %v", dbType, err)
    }
    defer teardown()

    blocks, err := loadReorgBlocks("reorgblocks.bz2")
    if err != nil {
        t.Fatalf("Error loading file: %v", err)
    }

    for i := int32(0); i <= 2; i++ {
        _, err = db.InsertBlock(blocks[i])
        if err != nil {
            t.Fatalf("Error inserting block %d (%v): %v", i,
                blocks[i].Sha(), err)
        }
        var txIDs []string
        for _, tx := range blocks[i].Transactions() {
            txIDs = append(txIDs, tx.Sha().String())
        }
    }

    for i := int32(1); i >= 0; i-- {
        blkHash := blocks[i].Sha()
        err = db.DropAfterBlockBySha(blkHash)
        if err != nil {
            t.Fatalf("Error removing block %d for reorganization: %v", i, err)
        }
        // Exercise NewestSha() to make sure DropAfterBlockBySha() updates the
        // info correctly
        maxHash, blkHeight, err := db.NewestSha()
        if err != nil {
            t.Fatalf("Error getting newest block info")
        }
        if !maxHash.IsEqual(blkHash) || blkHeight != i {
            t.Fatalf("NewestSha returned %v (%v), expected %v (%v)", blkHeight,
                maxHash, i, blkHash)
        }
    }

    for i := int32(3); i < int32(len(blocks)); i++ {
        blkHash := blocks[i].Sha()
        if err != nil {
            t.Fatalf("Error getting SHA for block %dA: %v", i-2, err)
        }
        _, err = db.InsertBlock(blocks[i])
        if err != nil {
            t.Fatalf("Error inserting block %dA (%v): %v", i-2, blkHash, err)
        }
    }

    _, maxHeight, err := db.NewestSha()
    if err != nil {
        t.Fatalf("Error getting newest block info")
    }

    for i := int32(0); i <= maxHeight; i++ {
        blkHash, err := db.FetchBlockShaByHeight(i)
        if err != nil {
            t.Fatalf("Error fetching SHA for block %d: %v", i, err)
        }
        block, err := db.FetchBlockBySha(blkHash)
        if err != nil {
            t.Fatalf("Error fetching block %d (%v): %v", i, blkHash, err)
        }
        for _, tx := range block.Transactions() {
            _, err := db.FetchTxBySha(tx.Sha())
            if err != nil {
                t.Fatalf("Error fetching transaction %v: %v", tx.Sha(), err)
            }
        }
    }
}

// loadReorgBlocks reads files containing bitcoin block data (bzipped but
// otherwise in the format bitcoind writes) from disk and returns them as an
// array of btcutil.Block. This is copied from the blockchain package, which
// itself largely borrowed it from the test code in this package.
func loadReorgBlocks(filename string) ([]*btcutil.Block, error) {
    filename = filepath.Join("testdata/", filename)

    var blocks []*btcutil.Block
    var err error

    var network = wire.SimNet
    var dr io.Reader
    var fi io.ReadCloser

    fi, err = os.Open(filename)
    if err != nil {
        return blocks, err
    }

    if strings.HasSuffix(filename, ".bz2") {
        dr = bzip2.NewReader(fi)
    } else {
        dr = fi
    }
    defer fi.Close()

    var block *btcutil.Block

    err = nil
    for height := int32(1); err == nil; height++ {
        var rintbuf uint32
        err = binary.Read(dr, binary.LittleEndian, &rintbuf)
        if err == io.EOF {
            // hit end of file at expected offset: no warning
            height--
            err = nil
            break
        }
        if err != nil {
            break
        }
        if rintbuf != uint32(network) {
            break
        }
        err = binary.Read(dr, binary.LittleEndian, &rintbuf)
        if err != nil {
            return blocks, err
        }
        blocklen := rintbuf

        rbytes := make([]byte, blocklen)

        // read block
        numbytes, err := dr.Read(rbytes)
        if err != nil {
            return blocks, err
        }
        if uint32(numbytes) != blocklen {
            return blocks, io.ErrUnexpectedEOF
        }

        block, err = btcutil.NewBlockFromBytes(rbytes)
        if err != nil {
            return blocks, err
        }
        blocks = append(blocks, block)
    }

    return blocks, nil
}
BIN database/testdata/reorgblocks.bz2 (vendored): Binary file not shown.
@@ -1,77 +0,0 @@
database
========

[![Build Status](https://travis-ci.org/btcsuite/btcd.png?branch=master)]
(https://travis-ci.org/btcsuite/btcd)

Package database provides a block and metadata storage database.

Please note that this package is intended to enable btcd to support different
database backends and is not something that a client can directly access as only
one entity can have the database open at a time (for most database backends),
and that entity will be btcd.

When a client wants programmatic access to the data provided by btcd, they'll
likely want to use the [btcrpcclient](https://github.com/btcsuite/btcrpcclient)
package which makes use of the [JSON-RPC API]
(https://github.com/btcsuite/btcd/tree/master/docs/json_rpc_api.md).

However, this package could be extremely useful for any applications requiring
Bitcoin block storage capabilities.

As of July 2015, there are over 365,000 blocks in the Bitcoin block chain and
over 76 million transactions (which turns out to be over 35GB of data).
This package provides a database layer to store and retrieve this data in a
simple and efficient manner.

The default backend, ffldb, has a strong focus on speed, efficiency, and
robustness. It makes use of leveldb for the metadata, flat files for block
storage, and strict checksums in key areas to ensure data integrity.

## Feature Overview

- Key/value metadata store
- Bitcoin block storage
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
- Read-only and read-write transactions with both manual and managed modes
- Nested buckets
- Iteration support including cursors with seek capability
- Supports registration of backend databases
- Comprehensive test coverage

## Documentation

[![GoDoc](https://godoc.org/github.com/btcsuite/btcd/database?status.png)]
(http://godoc.org/github.com/btcsuite/btcd/database)

Full `go doc` style documentation for the project can be viewed online without
installing this package by using the GoDoc site here:
http://godoc.org/github.com/btcsuite/btcd/database

You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/btcsuite/btcd/database

## Installation

```bash
$ go get github.com/btcsuite/btcd/database
```

## Examples

* [Basic Usage Example]
  (http://godoc.org/github.com/btcsuite/btcd/database#example-package--BasicUsage)
  Demonstrates creating a new database and using a managed read-write
  transaction to store and retrieve metadata.

* [Block Storage and Retrieval Example]
  (http://godoc.org/github.com/btcsuite/btcd/database#example-package--BlockStorageAndRetrieval)
  Demonstrates creating a new database, using a managed read-write transaction
  to store a block, and then using a managed read-only transaction to fetch the
  block.

## License

Package database is licensed under the [copyfree](http://copyfree.org) ISC
License.
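The "iteration support including cursors with seek capability" bullet in the feature list above can be made a little more concrete. The following is only an illustrative sketch, assuming the View, Metadata, ForEach, and Cursor methods described in the package documentation; the helper name, bucket contents, and the "my" key prefix are made up for the example.

```go
package example

import (
    "fmt"

    "github.com/btcsuite/btcd/database"
)

// dumpMetadata walks the root metadata bucket, first with ForEach and then
// with a cursor seeked to an example key prefix.
func dumpMetadata(db database.DB) error {
    return db.View(func(tx database.Tx) error {
        meta := tx.Metadata()

        // Visit every key/value pair in the bucket.
        if err := meta.ForEach(func(k, v []byte) error {
            fmt.Printf("%s = %s\n", k, v)
            return nil
        }); err != nil {
            return err
        }

        // Cursors provide ordered traversal with seek support.
        c := meta.Cursor()
        for ok := c.Seek([]byte("my")); ok; ok = c.Next() {
            fmt.Printf("cursor: %s = %s\n", c.Key(), c.Value())
        }
        return nil
    })
}
```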
@@ -1,94 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

/*
Package database2 provides a block and metadata storage database.

Overview

As of July 2015, there are over 365,000 blocks in the Bitcoin block chain and
over 76 million transactions (which turns out to be over 35GB of data).
This package provides a database layer to store and retrieve this data in a
simple and efficient manner.

The default backend, ffldb, has a strong focus on speed, efficiency, and
robustness. It makes use of leveldb for the metadata, flat files for block
storage, and strict checksums in key areas to ensure data integrity.

A quick overview of the features database provides is as follows:

- Key/value metadata store
- Bitcoin block storage
- Efficient retrieval of block headers and regions (transactions, scripts, etc)
- Read-only and read-write transactions with both manual and managed modes
- Nested buckets
- Supports registration of backend databases
- Comprehensive test coverage

Database

The main entry point is the DB interface. It exposes functionality for
transactional-based access and storage of metadata and block data. It is
obtained via the Create and Open functions which take a database type string
that identifies the specific database driver (backend) to use as well as
arguments specific to the specified driver.

Namespaces

The Namespace interface is an abstraction that provides facilities for obtaining
transactions (the Tx interface) that are the basis of all database reads and
writes. Unlike some database interfaces that support reading and writing
without transactions, this interface requires transactions even when only
reading or writing a single key.

The Begin function provides an unmanaged transaction while the View and Update
functions provide a managed transaction. These are described in more detail
below.

Transactions

The Tx interface provides facilities for rolling back or committing changes that
took place while the transaction was active. It also provides the root metadata
bucket under which all keys, values, and nested buckets are stored. A
transaction can either be read-only or read-write and managed or unmanaged.

Managed versus Unmanaged Transactions

A managed transaction is one where the caller provides a function to execute
within the context of the transaction and the commit or rollback is handled
automatically depending on whether or not the provided function returns an
error. Attempting to manually call Rollback or Commit on the managed
transaction will result in a panic.

An unmanaged transaction, on the other hand, requires the caller to manually
call Commit or Rollback when they are finished with it. Leaving transactions
open for long periods of time can have several adverse effects, so it is
recommended that managed transactions are used instead.

Buckets

The Bucket interface provides the ability to manipulate key/value pairs and
nested buckets as well as iterate through them.

The Get, Put, and Delete functions work with key/value pairs, while the Bucket,
CreateBucket, CreateBucketIfNotExists, and DeleteBucket functions work with
buckets. The ForEach function allows the caller to provide a function to be
called with each key/value pair and nested bucket in the current bucket.

Metadata Bucket

As discussed above, all of the functions which are used to manipulate key/value
pairs and nested buckets exist on the Bucket interface. The root metadata
bucket is the upper-most bucket in which data is stored and is created at the
same time as the database. Use the Metadata function on the Tx interface
to retrieve it.

Nested Buckets

The CreateBucket and CreateBucketIfNotExists functions on the Bucket interface
provide the ability to create an arbitrary number of nested buckets. It is
a good idea to avoid a lot of buckets with little data in them as it could lead
to poor page utilization depending on the specific driver in use.
*/
package database2
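The managed versus unmanaged distinction described in the documentation above is easiest to see side by side. The sketch below is illustrative only: it assumes the ffldb driver is registered and relies on the Create, Update, Begin, Metadata, Put, Get, and Rollback functions named in the documentation; the database path and keys are made up for the example.

```go
package main

import (
    "fmt"
    "os"
    "path/filepath"

    database "github.com/btcsuite/btcd/database2"
    _ "github.com/btcsuite/btcd/database2/ffldb"
    "github.com/btcsuite/btcd/wire"
)

func main() {
    // Create a throwaway database in the temp directory, mirroring the
    // package examples, and clean it up on exit.
    dbPath := filepath.Join(os.TempDir(), "exampletxstyles")
    db, err := database.Create("ffldb", dbPath, wire.MainNet)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer os.RemoveAll(dbPath)
    defer db.Close()

    // Managed: Update commits when the closure returns nil and rolls the
    // transaction back automatically when it returns an error.
    err = db.Update(func(tx database.Tx) error {
        return tx.Metadata().Put([]byte("examplekey"), []byte("examplevalue"))
    })
    if err != nil {
        fmt.Println(err)
        return
    }

    // Unmanaged: Begin hands ownership to the caller, who must finish the
    // transaction with Commit or Rollback explicitly.
    tx, err := db.Begin(false) // false requests a read-only transaction
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("examplekey = %s\n", tx.Metadata().Get([]byte("examplekey")))

    // Read-only transactions are released with Rollback.
    if err := tx.Rollback(); err != nil {
        fmt.Println(err)
    }
}
```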
@@ -1,177 +0,0 @@
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package database2_test

import (
    "bytes"
    "fmt"
    "os"
    "path/filepath"

    "github.com/btcsuite/btcd/chaincfg"
    database "github.com/btcsuite/btcd/database2"
    _ "github.com/btcsuite/btcd/database2/ffldb"
    "github.com/btcsuite/btcd/wire"
    "github.com/btcsuite/btcutil"
)

// This example demonstrates creating a new database.
func ExampleCreate() {
    // This example assumes the ffldb driver is imported.
    //
    // import (
    //     "github.com/btcsuite/btcd/database"
    //     _ "github.com/btcsuite/btcd/database/ffldb"
    // )

    // Create a database and schedule it to be closed and removed on exit.
    // Typically you wouldn't want to remove the database right away like
    // this, nor put it in the temp directory, but it's done here to ensure
    // the example cleans up after itself.
    dbPath := filepath.Join(os.TempDir(), "examplecreate")
    db, err := database.Create("ffldb", dbPath, wire.MainNet)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer os.RemoveAll(dbPath)
    defer db.Close()

    // Output:
}

// This example demonstrates creating a new database and using a managed
// read-write transaction to store and retrieve metadata.
func Example_basicUsage() {
    // This example assumes the ffldb driver is imported.
    //
    // import (
    //     "github.com/btcsuite/btcd/database"
    //     _ "github.com/btcsuite/btcd/database/ffldb"
    // )

    // Create a database and schedule it to be closed and removed on exit.
    // Typically you wouldn't want to remove the database right away like
    // this, nor put it in the temp directory, but it's done here to ensure
    // the example cleans up after itself.
    dbPath := filepath.Join(os.TempDir(), "exampleusage")
    db, err := database.Create("ffldb", dbPath, wire.MainNet)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer os.RemoveAll(dbPath)
    defer db.Close()

    // Use the Update function of the database to perform a managed
    // read-write transaction. The transaction will automatically be rolled
    // back if the supplied inner function returns a non-nil error.
    err = db.Update(func(tx database.Tx) error {
        // Store a key/value pair directly in the metadata bucket.
        // Typically a nested bucket would be used for a given feature,
        // but this example is using the metadata bucket directly for
        // simplicity.
        key := []byte("mykey")
        value := []byte("myvalue")
        if err := tx.Metadata().Put(key, value); err != nil {
            return err
        }

        // Read the key back and ensure it matches.
        if !bytes.Equal(tx.Metadata().Get(key), value) {
            return fmt.Errorf("unexpected value for key '%s'", key)
        }

        // Create a new nested bucket under the metadata bucket.
        nestedBucketKey := []byte("mybucket")
        nestedBucket, err := tx.Metadata().CreateBucket(nestedBucketKey)
        if err != nil {
            return err
        }

        // The key from above that was set in the metadata bucket does
        // not exist in this new nested bucket.
        if nestedBucket.Get(key) != nil {
            return fmt.Errorf("key '%s' is not expected nil", key)
        }

        return nil
    })
    if err != nil {
        fmt.Println(err)
        return
    }

    // Output:
}

// This example demonstrates creating a new database, using a managed read-write
// transaction to store a block, and using a managed read-only transaction to
// fetch the block.
func Example_blockStorageAndRetrieval() {
    // This example assumes the ffldb driver is imported.
    //
    // import (
    //     "github.com/btcsuite/btcd/database"
    //     _ "github.com/btcsuite/btcd/database/ffldb"
    // )

    // Create a database and schedule it to be closed and removed on exit.
    // Typically you wouldn't want to remove the database right away like
    // this, nor put it in the temp directory, but it's done here to ensure
    // the example cleans up after itself.
    dbPath := filepath.Join(os.TempDir(), "exampleblkstorage")
    db, err := database.Create("ffldb", dbPath, wire.MainNet)
    if err != nil {
        fmt.Println(err)
        return
    }
    defer os.RemoveAll(dbPath)
    defer db.Close()

    // Use the Update function of the database to perform a managed
    // read-write transaction and store a genesis block in the database as
    // an example.
    err = db.Update(func(tx database.Tx) error {
        genesisBlock := chaincfg.MainNetParams.GenesisBlock
        return tx.StoreBlock(btcutil.NewBlock(genesisBlock))
    })
    if err != nil {
        fmt.Println(err)
        return
    }

    // Use the View function of the database to perform a managed read-only
    // transaction and fetch the block stored above.
    var loadedBlockBytes []byte
    err = db.View(func(tx database.Tx) error {
        genesisHash := chaincfg.MainNetParams.GenesisHash
        blockBytes, err := tx.FetchBlock(genesisHash)
        if err != nil {
            return err
        }

        // As documented, all data fetched from the database is only
        // valid during a database transaction in order to support
        // zero-copy backends. Thus, make a copy of the data so it
        // can be used outside of the transaction.
        loadedBlockBytes = make([]byte, len(blockBytes))
        copy(loadedBlockBytes, blockBytes)
        return nil
    })
    if err != nil {
        fmt.Println(err)
        return
    }

    // Typically at this point, the block could be deserialized via the
    // wire.MsgBlock.Deserialize function or used in its serialized form
    // depending on need. However, for this example, just display the
    // number of serialized bytes to show it was loaded as expected.
    fmt.Printf("Serialized block size: %d bytes\n", len(loadedBlockBytes))

    // Output:
    // Serialized block size: 285 bytes
}
@@ -1,65 +0,0 @@
// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package database2

import (
    "errors"
    "io"

    "github.com/btcsuite/btclog"
)

// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

// The default amount of logging is none.
func init() {
    DisableLog()
}

// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
    log = btclog.Disabled
}

// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
    log = logger

    // Update the logger for the registered drivers.
    for _, drv := range drivers {
        if drv.UseLogger != nil {
            drv.UseLogger(logger)
        }
    }
}

// SetLogWriter uses a specified io.Writer to output package logging info.
// This allows a caller to direct package logging output without needing a
// dependency on seelog. If the caller is also using btclog, UseLogger should
// be used instead.
func SetLogWriter(w io.Writer, level string) error {
    if w == nil {
        return errors.New("nil writer")
    }

    lvl, ok := btclog.LogLevelFromString(level)
    if !ok {
        return errors.New("invalid log level")
    }

    l, err := btclog.NewLoggerFromWriter(w, lvl)
    if err != nil {
        return err
    }

    UseLogger(l)
    return nil
}
BIN database2/testdata/blocks1-256.bz2 (vendored): Binary file not shown.

log.go (2 changed lines)
@@ -13,7 +13,7 @@ import (
     "github.com/btcsuite/btcd/addrmgr"
     "github.com/btcsuite/btcd/blockchain"
     "github.com/btcsuite/btcd/blockchain/indexers"
-    database "github.com/btcsuite/btcd/database2"
+    "github.com/btcsuite/btcd/database"
     "github.com/btcsuite/btcd/peer"
     "github.com/btcsuite/btcd/txscript"
     "github.com/btcsuite/btcd/wire"

@@ -31,7 +31,7 @@ import (
     "github.com/btcsuite/btcd/btcec"
     "github.com/btcsuite/btcd/btcjson"
     "github.com/btcsuite/btcd/chaincfg"
-    database "github.com/btcsuite/btcd/database2"
+    "github.com/btcsuite/btcd/database"
     "github.com/btcsuite/btcd/mining"
     "github.com/btcsuite/btcd/txscript"
     "github.com/btcsuite/btcd/wire"

@@ -20,7 +20,7 @@ import (

     "github.com/btcsuite/btcd/blockchain"
     "github.com/btcsuite/btcd/btcjson"
-    database "github.com/btcsuite/btcd/database2"
+    "github.com/btcsuite/btcd/database"
     "github.com/btcsuite/btcd/txscript"
     "github.com/btcsuite/btcd/wire"
     "github.com/btcsuite/btcutil"

@@ -24,7 +24,7 @@ import (
     "github.com/btcsuite/btcd/blockchain"
     "github.com/btcsuite/btcd/blockchain/indexers"
     "github.com/btcsuite/btcd/chaincfg"
-    database "github.com/btcsuite/btcd/database2"
+    "github.com/btcsuite/btcd/database"
     "github.com/btcsuite/btcd/mining"
     "github.com/btcsuite/btcd/peer"
     "github.com/btcsuite/btcd/txscript"