// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package ldb

import (
	"bytes"
	"encoding/binary"
	"errors"

	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/golangcrypto/ripemd160"
	"github.com/btcsuite/goleveldb/leveldb"
	"github.com/btcsuite/goleveldb/leveldb/util"
)

const (
	// Each address index is 35 bytes:
	// --------------------------------------------------------
	// | Prefix  | Hash160  | BlkHeight | Tx Offset | Tx Size |
	// --------------------------------------------------------
	// | 3 bytes | 20 bytes | 4 bytes   | 4 bytes   | 4 bytes |
	// --------------------------------------------------------
	addrIndexKeyLength = 3 + ripemd160.Size + 4 + 4 + 4
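
	// batchDeleteThreshold is the maximum number of index keys queued in
	// a single leveldb batch while an index is being deleted; once it is
	// reached the batch is written out and reset.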
	batchDeleteThreshold = 10000

	addrIndexCurrentVersion = 1
)
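
// addrIndexMetaDataKey is the key used to store the current tip of the
// address index (the sha and height of the most recently indexed block).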
var addrIndexMetaDataKey = []byte("addrindex")

// All address index entries share this prefix to facilitate the use of
// iterators.
var addrIndexKeyPrefix = []byte("a+-")

// Address index version is required to drop/rebuild address index if version
// is older than current as the format of the index may have changed. This is
// true when going from no version to version 1 as the address index is stored
// as big endian in version 1 and little endian in the original code. Version
// is stored as two bytes, little endian (to match all the code but the index).
var addrIndexVersionKey = []byte("addrindexversion")
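
// txUpdateObj holds a pending update to a single transaction record: the
// transaction's location within its block along with its per-output spent
// bitmap, or a deletion marker.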
type txUpdateObj struct {
	txSha     *wire.ShaHash
	blkHeight int32
	txoff     int
	txlen     int
	ntxout    int
	spentData []byte
	delete    bool
}
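
// spentTx describes one fully spent transaction: its location within a block
// and its total number of outputs.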
type spentTx struct {
	blkHeight int32
	txoff     int
	txlen     int
	numTxO    int
	delete    bool
}
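
// spentTxUpdate is a pending update to the list of fully spent records kept
// for a single transaction hash.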
type spentTxUpdate struct {
	txl    []*spentTx
	delete bool
}
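
// txAddrIndex is the in-memory form of an address index entry: the hash160
// of the indexed address plus the location of the transaction within its
// block.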
type txAddrIndex struct {
	hash160   [ripemd160.Size]byte
	blkHeight int32
	txoffset  int
	txlen     int
}

// InsertTx inserts a tx hash and its associated data into the database.
func (db *LevelDb) InsertTx(txsha *wire.ShaHash, height int32, txoff int, txlen int, spentbuf []byte) (err error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	return db.insertTx(txsha, height, txoff, txlen, spentbuf)
}

// insertTx inserts a tx hash and its associated data into the database.
// Must be called with db lock held.
func (db *LevelDb) insertTx(txSha *wire.ShaHash, height int32, txoff int, txlen int, spentbuf []byte) (err error) {
	var txU txUpdateObj

	txU.txSha = txSha
	txU.blkHeight = height
	txU.txoff = txoff
	txU.txlen = txlen
	txU.spentData = spentbuf

	db.txUpdateMap[*txSha] = &txU

	return nil
}

// formatTx generates the value buffer for the Tx db.
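// The value is the 8-byte little-endian block height followed by the 4-byte
// tx offset, the 4-byte tx length, and the variable-length per-output spent
// bitmap.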
func (db *LevelDb) formatTx(txu *txUpdateObj) []byte {
	blkHeight := uint64(txu.blkHeight)
	txOff := uint32(txu.txoff)
	txLen := uint32(txu.txlen)
	spentbuf := txu.spentData

	txW := make([]byte, 16+len(spentbuf))
	binary.LittleEndian.PutUint64(txW[0:8], blkHeight)
	binary.LittleEndian.PutUint32(txW[8:12], txOff)
	binary.LittleEndian.PutUint32(txW[12:16], txLen)
	copy(txW[16:], spentbuf)

	return txW[:]
}
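
// getTxData fetches the stored record for the given transaction hash and
// returns its block height, its offset and length within the block, and its
// raw per-output spent bitmap.  Must be called with the db lock held.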
func (db *LevelDb) getTxData(txsha *wire.ShaHash) (int32, int, int, []byte, error) {
	key := shaTxToKey(txsha)
	buf, err := db.lDb.Get(key, db.ro)
	if err != nil {
		return 0, 0, 0, nil, err
	}

	blkHeight := binary.LittleEndian.Uint64(buf[0:8])
	txOff := binary.LittleEndian.Uint32(buf[8:12])
	txLen := binary.LittleEndian.Uint32(buf[12:16])

	spentBuf := make([]byte, len(buf)-16)
	copy(spentBuf, buf[16:])

	return int32(blkHeight), int(txOff), int(txLen), spentBuf, nil
}
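
// getTxFullySpent fetches the list of fully spent records stored for the
// given transaction hash, returning database.ErrTxShaMissing when no such
// record exists.  Must be called with the db lock held.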
func (db *LevelDb) getTxFullySpent(txsha *wire.ShaHash) ([]*spentTx, error) {

	var badTxList, spentTxList []*spentTx

	key := shaSpentTxToKey(txsha)
	buf, err := db.lDb.Get(key, db.ro)
	if err == leveldb.ErrNotFound {
		return badTxList, database.ErrTxShaMissing
	} else if err != nil {
		return badTxList, err
	}
	txListLen := len(buf) / 20

	spentTxList = make([]*spentTx, txListLen, txListLen)
	for i := range spentTxList {
		offset := i * 20

		blkHeight := binary.LittleEndian.Uint64(buf[offset : offset+8])
		txOff := binary.LittleEndian.Uint32(buf[offset+8 : offset+12])
		txLen := binary.LittleEndian.Uint32(buf[offset+12 : offset+16])
		numTxO := binary.LittleEndian.Uint32(buf[offset+16 : offset+20])

		sTx := spentTx{
			blkHeight: int32(blkHeight),
			txoff:     int(txOff),
			txlen:     int(txLen),
			numTxO:    int(numTxO),
		}

		spentTxList[i] = &sTx
	}

	return spentTxList, nil
}
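
// formatTxFullySpent generates the value buffer for the fully spent Tx db:
// one 20-byte record per spent instance holding the block height, tx offset,
// tx length, and number of outputs.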
func (db *LevelDb) formatTxFullySpent(sTxList []*spentTx) []byte {
	txW := make([]byte, 20*len(sTxList))

	for i, sTx := range sTxList {
		blkHeight := uint64(sTx.blkHeight)
		txOff := uint32(sTx.txoff)
		txLen := uint32(sTx.txlen)
		numTxO := uint32(sTx.numTxO)
		offset := i * 20

		binary.LittleEndian.PutUint64(txW[offset:offset+8], blkHeight)
		binary.LittleEndian.PutUint32(txW[offset+8:offset+12], txOff)
		binary.LittleEndian.PutUint32(txW[offset+12:offset+16], txLen)
		binary.LittleEndian.PutUint32(txW[offset+16:offset+20], numTxO)
	}

	return txW
}

// ExistsTxSha returns whether the given tx sha exists in the database.
func (db *LevelDb) ExistsTxSha(txsha *wire.ShaHash) (bool, error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	return db.existsTxSha(txsha)
}

// existsTxSha returns whether the given tx sha exists in the database.
// Must be called with the db lock held.
func (db *LevelDb) existsTxSha(txSha *wire.ShaHash) (bool, error) {
	key := shaTxToKey(txSha)

	return db.lDb.Has(key, db.ro)
}

// FetchTxByShaList returns the most recent tx for each of the given tx
// hashes, whether it is fully spent or not.
func (db *LevelDb) FetchTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	// Until the fully spent separation of tx is complete this is identical
	// to FetchUnSpentTxByShaList.
	replies := make([]*database.TxListReply, len(txShaList))
	for i, txsha := range txShaList {
		tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha)
		btxspent := []bool{}
		if err == nil {
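			// Each output's spent status is tracked as a single
			// bit in the spent buffer; unpack it into one bool
			// per output.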
			btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut))
			for idx := range tx.TxOut {
				byteidx := idx / 8
				byteoff := uint(idx % 8)
				btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
			}
		}
		if err == database.ErrTxShaMissing {
			// if the unspent pool did not have the tx,
			// look in the fully spent pool (only last instance)

			sTxList, fSerr := db.getTxFullySpent(txsha)
			if fSerr == nil && len(sTxList) != 0 {
				idx := len(sTxList) - 1
				stx := sTxList[idx]

				tx, blockSha, _, _, err = db.fetchTxDataByLoc(
					stx.blkHeight, stx.txoff, stx.txlen, []byte{})
				if err == nil {
					btxspent = make([]bool, len(tx.TxOut))
					for i := range btxspent {
						btxspent[i] = true
					}
				}
			}
		}
		txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err}
		replies[i] = &txlre
	}
	return replies
}

// FetchUnSpentTxByShaList, given an array of ShaHash, looks up the
// transactions and returns them in a TxListReply array.
func (db *LevelDb) FetchUnSpentTxByShaList(txShaList []*wire.ShaHash) []*database.TxListReply {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	replies := make([]*database.TxListReply, len(txShaList))
	for i, txsha := range txShaList {
		tx, blockSha, height, txspent, err := db.fetchTxDataBySha(txsha)
		btxspent := []bool{}
		if err == nil {
			btxspent = make([]bool, len(tx.TxOut), len(tx.TxOut))
			for idx := range tx.TxOut {
				byteidx := idx / 8
				byteoff := uint(idx % 8)
				btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
			}
		}
		txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blockSha, Height: height, TxSpent: btxspent, Err: err}
		replies[i] = &txlre
	}
	return replies
}

// fetchTxDataBySha returns several pieces of data regarding the given sha.
func (db *LevelDb) fetchTxDataBySha(txsha *wire.ShaHash) (rtx *wire.MsgTx, rblksha *wire.ShaHash, rheight int32, rtxspent []byte, err error) {
	var blkHeight int32
	var txspent []byte
	var txOff, txLen int

	blkHeight, txOff, txLen, txspent, err = db.getTxData(txsha)
	if err != nil {
		if err == leveldb.ErrNotFound {
			err = database.ErrTxShaMissing
		}
		return
	}
	return db.fetchTxDataByLoc(blkHeight, txOff, txLen, txspent)
}

// fetchTxDataByLoc returns several pieces of data regarding the given tx
// located by the block/offset/size location.
func (db *LevelDb) fetchTxDataByLoc(blkHeight int32, txOff int, txLen int, txspent []byte) (rtx *wire.MsgTx, rblksha *wire.ShaHash, rheight int32, rtxspent []byte, err error) {
	var blksha *wire.ShaHash
	var blkbuf []byte

	blksha, blkbuf, err = db.getBlkByHeight(blkHeight)
	if err != nil {
		if err == leveldb.ErrNotFound {
			err = database.ErrTxShaMissing
		}
		return
	}

	//log.Trace("transaction %v is at block %v %v txoff %v, txlen %v\n",
	//	txsha, blksha, blkHeight, txOff, txLen)

	if len(blkbuf) < txOff+txLen {
		err = database.ErrTxShaMissing
		return
	}
	rbuf := bytes.NewReader(blkbuf[txOff : txOff+txLen])

	var tx wire.MsgTx
	err = tx.Deserialize(rbuf)
	if err != nil {
		log.Warnf("unable to decode tx block %v %v txoff %v txlen %v",
			blkHeight, blksha, txOff, txLen)
		return
	}

	return &tx, blksha, blkHeight, txspent, nil
}

// FetchTxBySha returns a TxListReply for each stored copy of the transaction
// with the given Tx Sha: any fully spent copies first, followed by the
// current copy if it is still present in the unspent tx pool.
func (db *LevelDb) FetchTxBySha(txsha *wire.ShaHash) ([]*database.TxListReply, error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	replylen := 0
	replycnt := 0

	tx, blksha, height, txspent, txerr := db.fetchTxDataBySha(txsha)
	if txerr == nil {
		replylen++
	} else {
		if txerr != database.ErrTxShaMissing {
			return []*database.TxListReply{}, txerr
		}
	}

	sTxList, fSerr := db.getTxFullySpent(txsha)

	if fSerr != nil {
		if fSerr != database.ErrTxShaMissing {
			return []*database.TxListReply{}, fSerr
		}
	} else {
		replylen += len(sTxList)
	}

	replies := make([]*database.TxListReply, replylen)

	if fSerr == nil {
		for _, stx := range sTxList {
			tx, blksha, _, _, err := db.fetchTxDataByLoc(
				stx.blkHeight, stx.txoff, stx.txlen, []byte{})
			if err != nil {
				if err != leveldb.ErrNotFound {
					return []*database.TxListReply{}, err
				}
				continue
			}
			btxspent := make([]bool, len(tx.TxOut), len(tx.TxOut))
			for i := range btxspent {
				btxspent[i] = true
			}
			txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: stx.blkHeight, TxSpent: btxspent, Err: nil}
			replies[replycnt] = &txlre
			replycnt++
		}
	}
	if txerr == nil {
		btxspent := make([]bool, len(tx.TxOut), len(tx.TxOut))
		for idx := range tx.TxOut {
			byteidx := idx / 8
			byteoff := uint(idx % 8)
			btxspent[idx] = (txspent[byteidx] & (byte(1) << byteoff)) != 0
		}
		txlre := database.TxListReply{Sha: txsha, Tx: tx, BlkSha: blksha, Height: height, TxSpent: btxspent, Err: nil}
		replies[replycnt] = &txlre
		replycnt++
	}
	return replies, nil
}

// addrIndexToKey serializes the passed txAddrIndex for storage within the DB.
// We want to use BigEndian to store at least block height and TX offset
// in order to ensure that the transactions are sorted in the index.
// This gives us the ability to use the index in more client-side
// applications that are order-dependent (specifically by dependency).
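//
// For example, an entry for hash160 h at block height 1000, tx offset 500,
// and tx length 250 is keyed as "a+-" || h || 0x000003e8 || 0x000001f4 ||
// 0x000000fa, with all three integers big endian.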
func addrIndexToKey(index *txAddrIndex) []byte {
	record := make([]byte, addrIndexKeyLength, addrIndexKeyLength)
	copy(record[0:3], addrIndexKeyPrefix)
	copy(record[3:23], index.hash160[:])

	// The index itself.
	binary.BigEndian.PutUint32(record[23:27], uint32(index.blkHeight))
	binary.BigEndian.PutUint32(record[27:31], uint32(index.txoffset))
	binary.BigEndian.PutUint32(record[31:35], uint32(index.txlen))

	return record
}

// unpackTxIndex deserializes the raw bytes of an address tx index.
func unpackTxIndex(rawIndex [12]byte) *txAddrIndex {
	return &txAddrIndex{
		blkHeight: int32(binary.BigEndian.Uint32(rawIndex[0:4])),
		txoffset:  int(binary.BigEndian.Uint32(rawIndex[4:8])),
		txlen:     int(binary.BigEndian.Uint32(rawIndex[8:12])),
	}
}

// bytesPrefix returns a key range that satisfies the given prefix.
// This is only applicable for the standard 'bytes comparer'.
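// For example, the prefix "a+-" yields the range ["a+-", "a+.").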
func bytesPrefix(prefix []byte) *util.Range {
	var limit []byte
	for i := len(prefix) - 1; i >= 0; i-- {
		c := prefix[i]
		if c < 0xff {
			limit = make([]byte, i+1)
			copy(limit, prefix)
			limit[i] = c + 1
			break
		}
	}
	return &util.Range{Start: prefix, Limit: limit}
}

// FetchTxsForAddr looks up and returns all transactions which either
// spend from a previously created output of the passed address, or
// create a new output locked to the passed address. The `limit` parameter
// should be the max number of transactions to be returned. Additionally, if
// the caller wishes to seek forward in the results some amount, the `skip`
// parameter represents how many results to skip.
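//
// For example, a caller paging through an address's history ten results at a
// time might make the (hypothetical) call
//
//	replies, skipped, err := db.FetchTxsForAddr(addr, 20, 10)
//
// which skips the first 20 matching index entries and returns up to the next
// 10 transactions along with the number of entries actually skipped.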
func (db *LevelDb) FetchTxsForAddr(addr btcutil.Address, skip int,
	limit int) ([]*database.TxListReply, int, error) {

	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	// Enforce constraints for skip and limit.
	if skip < 0 {
		return nil, 0, errors.New("offset for skip must be positive")
	}
	if limit < 0 {
		return nil, 0, errors.New("value for limit must be positive")
	}

	// Parse address type, bailing on an unknown type.
	var addrKey []byte
	switch addr := addr.(type) {
	case *btcutil.AddressPubKeyHash:
		hash160 := addr.Hash160()
		addrKey = hash160[:]
	case *btcutil.AddressScriptHash:
		hash160 := addr.Hash160()
		addrKey = hash160[:]
	case *btcutil.AddressPubKey:
		hash160 := addr.AddressPubKeyHash().Hash160()
		addrKey = hash160[:]
	default:
		return nil, 0, database.ErrUnsupportedAddressType
	}

	// Create the prefix for our search.
	addrPrefix := make([]byte, 23, 23)
	copy(addrPrefix[0:3], addrIndexKeyPrefix)
	copy(addrPrefix[3:23], addrKey)

	iter := db.lDb.NewIterator(bytesPrefix(addrPrefix), nil)
	skipped := 0
	for skip != 0 && iter.Next() {
		skip--
		skipped++
	}

	// Iterate through all address indexes that match the targeted prefix.
	var replies []*database.TxListReply
	var rawIndex [12]byte
	for iter.Next() && limit != 0 {
		copy(rawIndex[:], iter.Key()[23:35])
		addrIndex := unpackTxIndex(rawIndex)

		tx, blkSha, blkHeight, _, err := db.fetchTxDataByLoc(addrIndex.blkHeight,
			addrIndex.txoffset, addrIndex.txlen, []byte{})
		if err != nil {
			// Eat a possible error due to a potential re-org.
			continue
		}

		txSha := tx.TxSha()
		txReply := &database.TxListReply{Sha: &txSha, Tx: tx,
			BlkSha: blkSha, Height: blkHeight, TxSpent: []bool{}, Err: err}

		replies = append(replies, txReply)
		limit--
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		return nil, 0, err
	}

	return replies, skipped, nil
}

// UpdateAddrIndexForBlock updates the stored addrindex with passed
// index information for a particular block height. Additionally, it
// will update the stored meta-data related to the current tip of the
// addr index. These two operations are performed in an atomic
// transaction which is committed before the function returns.
// Transactions indexed by address are stored with the following format:
//   * prefix || hash160 || blockHeight || txoffset || txlen
// Indexes are stored purely in the key, with blank data for the actual value
// in order to facilitate ease of iteration by their shared prefix and
// also to allow limiting the number of returned transactions (RPC).
// Alternatively, indexes for each address could be stored as an
// append-only list for the stored value. However, this adds unnecessary
// overhead when storing and retrieving since the entire list must
// be fetched each time.
func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *wire.ShaHash, blkHeight int32, addrIndex database.BlockAddrIndex) error {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	var blankData []byte
	batch := db.lBatch()
	defer db.lbatch.Reset()

	// Write all data for the new address indexes in a single batch
	// transaction.
	for addrKey, indexes := range addrIndex {
		for _, txLoc := range indexes {
			index := &txAddrIndex{
				hash160:   addrKey,
				blkHeight: blkHeight,
				txoffset:  txLoc.TxStart,
				txlen:     txLoc.TxLen,
			}
			// The index is stored purely in the key.
			packedIndex := addrIndexToKey(index)
			batch.Put(packedIndex, blankData)
		}
	}

	// Update tip of addrindex.
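	// The tip record is the 32-byte block sha followed by the 8-byte
	// little-endian block height.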
	newIndexTip := make([]byte, 40, 40)
	copy(newIndexTip[0:32], blkSha[:])
	binary.LittleEndian.PutUint64(newIndexTip[32:40], uint64(blkHeight))
	batch.Put(addrIndexMetaDataKey, newIndexTip)

	// Ensure we're writing an address index version
	newIndexVersion := make([]byte, 2, 2)
	binary.LittleEndian.PutUint16(newIndexVersion[0:2],
		uint16(addrIndexCurrentVersion))
	batch.Put(addrIndexVersionKey, newIndexVersion)

	if err := db.lDb.Write(batch, db.wo); err != nil {
		return err
	}

	db.lastAddrIndexBlkIdx = blkHeight
	db.lastAddrIndexBlkSha = *blkSha

	return nil
}

// DeleteAddrIndex deletes the entire addrindex stored within the DB.
// It also resets the cached in-memory metadata about the addr index.
func (db *LevelDb) DeleteAddrIndex() error {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	batch := db.lBatch()
	defer batch.Reset()

	// Delete the entire index along with any metadata about it.
	iter := db.lDb.NewIterator(bytesPrefix(addrIndexKeyPrefix), db.ro)
	numInBatch := 0
	for iter.Next() {
		key := iter.Key()
		// With a 24-bit index key prefix, 1 in every 2^24 keys is a collision.
		// We check the length to make sure we only delete address index keys.
		if len(key) == addrIndexKeyLength {
			batch.Delete(key)
			numInBatch++
		}

		// Delete in chunks to potentially avoid very large batches.
		if numInBatch >= batchDeleteThreshold {
			if err := db.lDb.Write(batch, db.wo); err != nil {
				iter.Release()
				return err
			}
			batch.Reset()
			numInBatch = 0
		}
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		return err
	}

	batch.Delete(addrIndexMetaDataKey)
	batch.Delete(addrIndexVersionKey)

	if err := db.lDb.Write(batch, db.wo); err != nil {
		return err
	}

	db.lastAddrIndexBlkIdx = -1
	db.lastAddrIndexBlkSha = wire.ShaHash{}

	return nil
}

// deleteOldAddrIndex deletes the entire addrindex stored within the DB for a
// 2-byte addrIndexKeyPrefix. It also resets the cached in-memory metadata about
// the addr index.
func (db *LevelDb) deleteOldAddrIndex() error {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	batch := db.lBatch()
	defer batch.Reset()

	// Delete the entire index along with any metadata about it.
	iter := db.lDb.NewIterator(bytesPrefix([]byte("a-")), db.ro)
	numInBatch := 0
	for iter.Next() {
		key := iter.Key()
		// With a 16-bit index key prefix, 1 in every 2^16 keys is a collision.
		// We check the length to make sure we only delete address index keys.
		// We also check the last two bytes to make sure the suffix doesn't
		// match other types of index that are 34 bytes long.
		if len(key) == 34 && !bytes.HasSuffix(key, recordSuffixTx) &&
			!bytes.HasSuffix(key, recordSuffixSpentTx) {
			batch.Delete(key)
			numInBatch++
		}

		// Delete in chunks to potentially avoid very large batches.
		if numInBatch >= batchDeleteThreshold {
			if err := db.lDb.Write(batch, db.wo); err != nil {
				iter.Release()
				return err
			}
			batch.Reset()
			numInBatch = 0
		}
	}
	iter.Release()
	if err := iter.Error(); err != nil {
		return err
	}

	batch.Delete(addrIndexMetaDataKey)
	batch.Delete(addrIndexVersionKey)

	if err := db.lDb.Write(batch, db.wo); err != nil {
		return err
	}

	db.lastAddrIndexBlkIdx = -1
	db.lastAddrIndexBlkSha = wire.ShaHash{}

	return nil
}