Add support for an optional address-based transaction index.

* Address index is built up concurrently with the `--addrindex` flag.
* Entire index can be deleted with `--dropaddrindex`.
* New RPC call: `searchrawtransactions`
  * Returns all transactions related to a particular address
  * Includes mempool transactions
  * Requires `--addrindex` to be activated and fully caught up.
* New `blockProgressLogger` struct has been added to factor out common logging
  code
* Wiki and docs updated with new features.
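For example, once a node has been started with `--addrindex` and the index has caught up to the best height, the new call can be exercised from btcctl as `btcctl searchrawtransactions <address> [verbose=1] [skip=0] [count=100]` (defaults as registered in the btcctl handler table in this commit).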
Olaoluwa Osuntokun 2015-01-03 19:42:01 -06:00
parent 86cbf27f58
commit ecdffda748
13 changed files with 982 additions and 89 deletions

blocklogger.go (new file)

@@ -0,0 +1,76 @@
package main
import (
"sync"
"time"
"github.com/btcsuite/btclog"
"github.com/btcsuite/btcutil"
)
// blockProgressLogger provides periodic logging for other services in order
// to show users progress of certain "actions" involving some or all current
// blocks. Ex: syncing to best chain, indexing all blocks, etc.
type blockProgressLogger struct {
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime time.Time
subsystemLogger btclog.Logger
progressAction string
sync.Mutex
}
// newBlockProgressLogger returns a new block progress logger.
// The progress message is templated as follows:
// {progressAction} {numProcessed} {blocks|block} in the last {timePeriod}
// ({numTxs}, height {lastBlockHeight}, {lastBlockTimeStamp})
func newBlockProgressLogger(progressMessage string, logger btclog.Logger) *blockProgressLogger {
return &blockProgressLogger{
lastBlockLogTime: time.Now(),
progressAction: progressMessage,
subsystemLogger: logger,
}
}
// LogBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockProgressLogger) LogBlockHeight(block *btcutil.Block) {
b.Lock()
defer b.Unlock()
b.receivedLogBlocks++
b.receivedLogTx += int64(len(block.MsgBlock().Transactions))
now := time.Now()
duration := now.Sub(b.lastBlockLogTime)
if duration < time.Second*10 {
return
}
// Truncate the duration to 10s of milliseconds.
durationMillis := int64(duration / time.Millisecond)
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
// Log information about new block height.
blockStr := "blocks"
if b.receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if b.receivedLogTx == 1 {
txStr = "transaction"
}
b.subsystemLogger.Infof("%s %d %s in the last %s (%d %s, height %d, %s)",
b.progressAction, b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
txStr, block.Height(), block.MsgBlock().Header.Timestamp)
b.receivedLogBlocks = 0
b.receivedLogTx = 0
b.lastBlockLogTime = now
}
func (b *blockProgressLogger) SetLastLogTime(time time.Time) {
b.lastBlockLogTime = time
}
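With those placeholders filled in, a single emitted line might look like this (all values illustrative): `Processed 1250 blocks in the last 10.02s (35123 transactions, height 337856, 2015-01-03 19:42:01 -0600 CST)`.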


@@ -166,9 +166,9 @@ type blockManager struct {
blockChain *blockchain.BlockChain
requestedTxns map[wire.ShaHash]struct{}
requestedBlocks map[wire.ShaHash]struct{}
progressLogger *blockProgressLogger
receivedLogBlocks int64
receivedLogTx int64
lastBlockLogTime time.Time
processingReqs bool
syncPeer *peer
msgChan chan interface{}
@@ -436,41 +436,6 @@ func (b *blockManager) handleDonePeerMsg(peers *list.List, p *peer) {
}
}
// logBlockHeight logs a new block height as an information message to show
// progress to the user. In order to prevent spam, it limits logging to one
// message every 10 seconds with duration and totals included.
func (b *blockManager) logBlockHeight(block *btcutil.Block) {
b.receivedLogBlocks++
b.receivedLogTx += int64(len(block.MsgBlock().Transactions))
now := time.Now()
duration := now.Sub(b.lastBlockLogTime)
if duration < time.Second*10 {
return
}
// Truncate the duration to 10s of milliseconds.
durationMillis := int64(duration / time.Millisecond)
tDuration := 10 * time.Millisecond * time.Duration(durationMillis/10)
// Log information about new block height.
blockStr := "blocks"
if b.receivedLogBlocks == 1 {
blockStr = "block"
}
txStr := "transactions"
if b.receivedLogTx == 1 {
txStr = "transaction"
}
bmgrLog.Infof("Processed %d %s in the last %s (%d %s, height %d, %s)",
b.receivedLogBlocks, blockStr, tDuration, b.receivedLogTx,
txStr, block.Height(), block.MsgBlock().Header.Timestamp)
b.receivedLogBlocks = 0
b.receivedLogTx = 0
b.lastBlockLogTime = now
}
// handleTxMsg handles transaction messages from all peers.
func (b *blockManager) handleTxMsg(tmsg *txMsg) {
// NOTE: BitcoinJ, and possibly other wallets, don't follow the spec of
@@ -628,7 +593,7 @@ func (b *blockManager) handleBlockMsg(bmsg *blockMsg) {
// When the block is not an orphan, log information about it and
// update the chain state.
b.logBlockHeight(bmsg.block)
b.progressLogger.LogBlockHeight(bmsg.block)
// Query the db for the latest best block since the block
// that was processed could be on a side chain or have caused
@@ -834,7 +799,7 @@ func (b *blockManager) handleHeadersMsg(hmsg *headersMsg) {
b.headerList.Remove(b.headerList.Front())
bmgrLog.Infof("Received %v block headers: Fetching blocks",
b.headerList.Len())
b.lastBlockLogTime = time.Now()
b.progressLogger.SetLastLogTime(time.Now())
b.fetchHeaderBlocks()
return
}
@@ -1167,6 +1132,12 @@ func (b *blockManager) handleNotifyMsg(notification *blockchain.Notification) {
r.ntfnMgr.NotifyBlockConnected(block)
}
// If we're maintaining the address index and it is up to date,
// then update it based off this new block.
if cfg.AddrIndex && b.server.addrIndexer.IsCaughtUp() {
b.server.addrIndexer.UpdateAddressIndex(block)
}
// A block has been disconnected from the main block chain.
case blockchain.NTBlockDisconnected:
block, ok := notification.Data.(*btcutil.Block)
@@ -1347,11 +1318,12 @@ func newBlockManager(s *server) (*blockManager, error) {
server: s,
requestedTxns: make(map[wire.ShaHash]struct{}),
requestedBlocks: make(map[wire.ShaHash]struct{}),
lastBlockLogTime: time.Now(),
progressLogger: newBlockProgressLogger("Processed", bmgrLog),
msgChan: make(chan interface{}, cfg.MaxPeers*3),
headerList: list.New(),
quit: make(chan struct{}),
}
bm.progressLogger = newBlockProgressLogger("Processed", bmgrLog)
bm.blockChain = blockchain.New(s.db, s.netParams, bm.handleNotifyMsg)
bm.blockChain.DisableCheckpoints(cfg.DisableCheckpoints)
if !cfg.DisableCheckpoints {

btcd.go

@@ -81,6 +81,17 @@ func btcdMain(serverChan chan<- *server) error {
}
defer db.Close()
if cfg.DropAddrIndex {
btcdLog.Info("Deleting entire addrindex.")
err := db.DeleteAddrIndex()
if err != nil {
btcdLog.Errorf("Unable to delete the addrindex: %v", err)
return err
}
btcdLog.Info("Successfully deleted addrindex, exiting")
return nil
}
// Ensure the database is sync'd and closed on Ctrl+C.
addInterruptHandler(func() {
btcdLog.Infof("Gracefully shutting down the database...")

chainindexer.go (new file)

@@ -0,0 +1,487 @@
// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"container/heap"
"runtime"
"sync"
"sync/atomic"
"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/database"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"golang.org/x/crypto/ripemd160"
)
type indexState int
const (
// Our two operating modes.
// We go into "CatchUp" mode when, on boot, the current best
// chain height is greater than the last block we've indexed.
// "CatchUp" mode is characterized by several concurrent worker
// goroutines indexing blocks organized by a manager goroutine.
// When in "CatchUp" mode, incoming requests to index newly solved
// blocks are backed up for later processing. Once we've finished
// catching up, we process these queued jobs, and then enter into
// "maintainence" mode.
indexCatchUp indexState = iota
// When in "maintainence" mode, we have a single worker serially
// processing incoming jobs to index newly solved blocks.
indexMaintain
)
// Limit the number of goroutines that concurrently
// build the index to catch up based on the number
// of processor cores. This helps ensure the system
// stays reasonably responsive under heavy load.
var numCatchUpWorkers = runtime.NumCPU() * 3
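For example, on a machine with four logical CPUs this works out to at most 12 concurrent catch-up workers.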
// indexBlockMsg packages a request to have the addresses of a block indexed.
type indexBlockMsg struct {
blk *btcutil.Block
done chan struct{}
}
// writeIndexReq represents a request to have a completed address index
// committed to the database.
type writeIndexReq struct {
blk *btcutil.Block
addrIndex database.BlockAddrIndex
}
// addrIndexer provides a concurrent service for indexing the transactions of
// target blocks based on the addresses involved in the transaction.
type addrIndexer struct {
server *server
started int32
shutdown int32
state indexState
quit chan struct{}
wg sync.WaitGroup
addrIndexJobs chan *indexBlockMsg
writeRequests chan *writeIndexReq
progressLogger *blockProgressLogger
currentIndexTip int64
chainTip int64
sync.Mutex
}
// newAddrIndexer creates a new block address indexer.
// Use Start to begin processing incoming index jobs.
func newAddrIndexer(s *server) (*addrIndexer, error) {
_, chainHeight, err := s.db.NewestSha()
if err != nil {
return nil, err
}
_, lastIndexedHeight, err := s.db.FetchAddrIndexTip()
if err != nil && err != database.ErrAddrIndexDoesNotExist {
return nil, err
}
var state indexState
if chainHeight == lastIndexedHeight {
state = indexMaintain
} else {
state = indexCatchUp
}
ai := &addrIndexer{
server: s,
quit: make(chan struct{}),
state: state,
addrIndexJobs: make(chan *indexBlockMsg),
writeRequests: make(chan *writeIndexReq, numCatchUpWorkers),
currentIndexTip: lastIndexedHeight,
chainTip: chainHeight,
progressLogger: newBlockProgressLogger("Indexed addresses of",
adxrLog),
}
return ai, nil
}
// Start begins processing of incoming indexing jobs.
func (a *addrIndexer) Start() {
// Already started?
if atomic.AddInt32(&a.started, 1) != 1 {
return
}
adxrLog.Trace("Starting address indexer")
a.wg.Add(2)
go a.indexManager()
go a.indexWriter()
}
// Stop gracefully shuts down the address indexer by stopping all ongoing
// worker goroutines, waiting for them to finish their current task.
func (a *addrIndexer) Stop() error {
if atomic.AddInt32(&a.shutdown, 1) != 1 {
adxrLog.Warnf("Address indexer is already in the process of " +
"shutting down")
return nil
}
adxrLog.Infof("Address indexer shutting down")
close(a.quit)
a.wg.Wait()
return nil
}
// IsCaughtUp returns a bool representing if the address indexer has
// caught up with the best height on the main chain.
func (a *addrIndexer) IsCaughtUp() bool {
a.Lock()
defer a.Unlock()
return a.state == indexMaintain
}
// indexManager creates, and oversees worker index goroutines.
// indexManager is the main goroutine for the addresses indexer.
// It creates, and oversees worker goroutines to index incoming blocks, with
// the exact behavior depending on the current index state
// (catch up vs. maintain). Completion of catch-up mode is always followed by
// a graceful transition into "maintain" mode.
// NOTE: Must be run as a goroutine.
func (a *addrIndexer) indexManager() {
if a.state == indexCatchUp {
adxrLog.Infof("Building up address index from height %v to %v.",
a.currentIndexTip+1, a.chainTip)
// Quit semaphores to gracefully shut down our worker tasks.
runningWorkers := make([]chan struct{}, 0, numCatchUpWorkers)
shutdownWorkers := func() {
for _, quit := range runningWorkers {
close(quit)
}
}
criticalShutdown := func() {
shutdownWorkers()
a.server.Stop()
}
// Spin up all of our "catch up" worker goroutines, giving them
// a quit channel and WaitGroup so we can gracefully exit if
// needed.
var workerWg sync.WaitGroup
catchUpChan := make(chan *indexBlockMsg)
for i := 0; i < numCatchUpWorkers; i++ {
quit := make(chan struct{})
runningWorkers = append(runningWorkers, quit)
workerWg.Add(1)
go a.indexCatchUpWorker(catchUpChan, &workerWg, quit)
}
// Starting from the next block after our current index tip,
// feed our workers each successive block to index until we've
// caught up to the current highest block height.
lastBlockIdxHeight := a.currentIndexTip + 1
for lastBlockIdxHeight <= a.chainTip {
targetSha, err := a.server.db.FetchBlockShaByHeight(lastBlockIdxHeight)
if err != nil {
adxrLog.Errorf("Unable to look up the sha of the "+
"next target block (height %v): %v",
lastBlockIdxHeight, err)
criticalShutdown()
goto fin
}
targetBlock, err := a.server.db.FetchBlockBySha(targetSha)
if err != nil {
// Unable to locate a target block by sha, this
// is a critical error, we may have an
// inconsistency in the DB.
adxrLog.Errorf("Unable to look up the next "+
"target block (sha %v): %v", targetSha, err)
criticalShutdown()
goto fin
}
// Send off the next job, ready to exit if a shutdown is
// signalled.
indexJob := &indexBlockMsg{blk: targetBlock}
select {
case catchUpChan <- indexJob:
lastBlockIdxHeight++
case <-a.quit:
shutdownWorkers()
goto fin
}
_, a.chainTip, err = a.server.db.NewestSha()
if err != nil {
adxrLog.Errorf("Unable to get latest block height: %v", err)
criticalShutdown()
goto fin
}
}
a.Lock()
a.state = indexMaintain
a.Unlock()
// We've finished catching up. Signal our workers to quit, and
// wait until they've all finished.
shutdownWorkers()
workerWg.Wait()
}
adxrLog.Infof("Address indexer has caught up to best height, entering " +
"maintainence mode")
// We're all caught up at this point. We now serially process new jobs
// coming in.
for {
select {
case indexJob := <-a.addrIndexJobs:
addrIndex, err := a.indexBlockAddrs(indexJob.blk)
if err != nil {
adxrLog.Errorf("Unable to index transactions of"+
" block %v", err)
a.server.Stop()
goto fin
}
a.writeRequests <- &writeIndexReq{blk: indexJob.blk,
addrIndex: addrIndex}
case <-a.quit:
goto fin
}
}
fin:
a.wg.Done()
}
// UpdateAddressIndex asynchronously queues a newly solved block to have its
// transactions indexed by address.
func (a *addrIndexer) UpdateAddressIndex(block *btcutil.Block) {
go func() {
job := &indexBlockMsg{blk: block}
a.addrIndexJobs <- job
}()
}
// pendingWriteQueue is a priority queue which is used to ensure the
// address index of the block height N+1 is written when our address tip is at
// height N. This ordering is necessary to maintain index consistency in face
// of our concurrent workers, which may not necessarily finish in the order the
// jobs are handed out.
type pendingWriteQueue []*writeIndexReq
// Len returns the number of items in the priority queue. It is part of the
// heap.Interface implementation.
func (pq pendingWriteQueue) Len() int { return len(pq) }
// Less returns whether the item in the priority queue with index i should sort
// before the item with index j. It is part of the heap.Interface implementation.
func (pq pendingWriteQueue) Less(i, j int) bool {
return pq[i].blk.Height() < pq[j].blk.Height()
}
// Swap swaps the items at the passed indices in the priority queue. It is
// part of the heap.Interface implementation.
func (pq pendingWriteQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] }
// Push pushes the passed item onto the priority queue. It is part of the
// heap.Interface implementation.
func (pq *pendingWriteQueue) Push(x interface{}) {
*pq = append(*pq, x.(*writeIndexReq))
}
// Pop removes the highest priority item (according to Less) from the priority
// queue and returns it. It is part of the heap.Interface implementation.
func (pq *pendingWriteQueue) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
(*pq)[n-1] = nil
*pq = (*pq)[0 : n-1]
return item
}
// indexWriter commits the populated address indexes created by the
// catch up workers to the database. Since we have concurrent workers, the writer
// ensures indexes are written in ascending order to avoid a possible gap in the
// address index triggered by an unexpected shutdown.
// NOTE: Must be run as a goroutine
func (a *addrIndexer) indexWriter() {
var pendingWrites pendingWriteQueue
minHeightWrite := make(chan *writeIndexReq)
workerQuit := make(chan struct{})
writeFinished := make(chan struct{}, 1)
// Spawn a goroutine to feed our writer address indexes such
// that, if our address tip is at N, the index for block N+1 is always
// written first. We use a priority queue to enforce this condition
// while accepting new write requests.
go func() {
for {
top:
select {
case incomingWrite := <-a.writeRequests:
heap.Push(&pendingWrites, incomingWrite)
// Check if we've found a write request that
// satisfies our condition. If we have, then
// chances are we have some backed up requests
// which wouldn't be written until a previous
// request showed up. If this is the case we'll
// quickly flush our heap of now available in
// order writes. We also accept write requests
// with a block height *before* the current
// index tip, in order to re-index new prior
// blocks added to the main chain during a
// re-org.
writeReq := heap.Pop(&pendingWrites).(*writeIndexReq)
_, addrTip, _ := a.server.db.FetchAddrIndexTip()
for writeReq.blk.Height() == (addrTip+1) ||
writeReq.blk.Height() <= addrTip {
minHeightWrite <- writeReq
// Wait for write to finish so we get a
// fresh view of the addrtip.
<-writeFinished
// Break to grab a new write request
if pendingWrites.Len() == 0 {
break top
}
writeReq = heap.Pop(&pendingWrites).(*writeIndexReq)
_, addrTip, _ = a.server.db.FetchAddrIndexTip()
}
// We haven't found the proper write request yet,
// push back onto our heap and wait for the next
// request which may be our target write.
heap.Push(&pendingWrites, writeReq)
case <-workerQuit:
return
}
}
}()
out:
// Our main writer loop. Here we actually commit the populated address
// indexes to the database.
for {
select {
case nextWrite := <-minHeightWrite:
sha, _ := nextWrite.blk.Sha() // Can never fail.
height := nextWrite.blk.Height()
err := a.server.db.UpdateAddrIndexForBlock(sha, height,
nextWrite.addrIndex)
if err != nil {
adxrLog.Errorf("Unable to write index for block, "+
"sha %v, height %v", sha, height)
a.server.Stop()
break out
}
writeFinished <- struct{}{}
a.progressLogger.LogBlockHeight(nextWrite.blk)
case <-a.quit:
break out
}
}
close(workerQuit)
a.wg.Done()
}
// indexCatchUpWorker indexes the transactions of previously validated and
// stored blocks.
// NOTE: Must be run as a goroutine
func (a *addrIndexer) indexCatchUpWorker(workChan chan *indexBlockMsg,
wg *sync.WaitGroup, quit chan struct{}) {
out:
for {
select {
case indexJob := <-workChan:
addrIndex, err := a.indexBlockAddrs(indexJob.blk)
if err != nil {
adxrLog.Errorf("Unable to index transactions of"+
" block %v", err)
a.server.Stop()
break out
}
a.writeRequests <- &writeIndexReq{blk: indexJob.blk,
addrIndex: addrIndex}
case <-quit:
break out
}
}
wg.Done()
}
// indexScriptPubKey indexes all data pushes of 8 bytes or more within the
// passed SPK. Our "address" index is actually a hash160 index, where in the
// ideal case the data push is either the hash160 of a publicKey (P2PKH) or
// a Script (P2SH).
func indexScriptPubKey(addrIndex database.BlockAddrIndex, scriptPubKey []byte,
locInBlock *wire.TxLoc) error {
dataPushes, err := txscript.PushedData(scriptPubKey)
if err != nil {
adxrLog.Tracef("Couldn't get pushes: %v", err)
return err
}
for _, data := range dataPushes {
// Only index pushes of 8 bytes or more.
if len(data) < 8 {
continue
}
var indexKey [ripemd160.Size]byte
// A perfect little hash160.
if len(data) <= 20 {
copy(indexKey[:], data)
// Otherwise, could be a payToPubKey or an OP_RETURN, so we'll
// make a hash160 out of it.
} else {
copy(indexKey[:], btcutil.Hash160(data))
}
addrIndex[indexKey] = append(addrIndex[indexKey], locInBlock)
}
return nil
}
// indexBlockAddrs returns a populated index of all the transactions in the
// passed block based on the addresses involved in each transaction.
func (a *addrIndexer) indexBlockAddrs(blk *btcutil.Block) (database.BlockAddrIndex, error) {
addrIndex := make(database.BlockAddrIndex)
txLocs, err := blk.TxLoc()
if err != nil {
return nil, err
}
for txIdx, tx := range blk.Transactions() {
// Tx's offset and length in the block.
locInBlock := &txLocs[txIdx]
// Coinbases don't have any inputs.
if !blockchain.IsCoinBase(tx) {
// Index the SPK's of each input's previous outpoint
// transaction.
for _, txIn := range tx.MsgTx().TxIn {
// Lookup and fetch the referenced output's tx.
prevOut := txIn.PreviousOutPoint
txList, err := a.server.db.FetchTxBySha(&prevOut.Hash)
if err != nil || len(txList) == 0 {
adxrLog.Errorf("Couldn't get referenced "+
"txOut (%v): %v", prevOut, err)
return nil, err
}
prevOutTx := txList[len(txList)-1]
inputOutPoint := prevOutTx.Tx.TxOut[prevOut.Index]
indexScriptPubKey(addrIndex, inputOutPoint.PkScript, locInBlock)
}
}
for _, txOut := range tx.MsgTx().TxOut {
indexScriptPubKey(addrIndex, txOut.PkScript, locInBlock)
}
}
return addrIndex, nil
}


@@ -105,6 +105,7 @@ var commandHandlers = map[string]*handlerData{
"lockunspent": {1, 2, displayJSONDump, []conversionHandler{toBool, nil}, makeLockUnspent, "<unlock> " + outpointArrayStr},
"ping": {0, 0, displayGeneric, nil, makePing, ""},
"renameaccount": {2, 0, displayGeneric, nil, makeRenameAccount, "<oldaccount> <newaccount>"},
"searchrawtransactions": {1, 3, displayJSONDump, []conversionHandler{nil, toInt, toInt, toInt}, makeSearchRawTransactions, "<address> [verbose=1] [skip=0] [count=100]"},
"sendfrom": {3, 3, displayGeneric, []conversionHandler{nil, nil, toSatoshi, toInt, nil, nil},
makeSendFrom, "<account> <address> <amount> [minconf=1] [comment] [comment-to]"},
"sendmany": {2, 2, displayGeneric, []conversionHandler{nil, nil, toInt, nil}, makeSendMany, "<account> <{\"address\":amount,...}> [minconf=1] [comment]"},
@@ -730,6 +731,24 @@ func makeRenameAccount(args []interface{}) (btcjson.Cmd, error) {
args[1].(string)), nil
}
// makeSearchRawTransactions generates the cmd structure for
// searchrawtransactions commands.
func makeSearchRawTransactions(args []interface{}) (btcjson.Cmd, error) {
optArgs := make([]interface{}, 0, 3)
if len(args) > 1 {
optArgs = append(optArgs, args[1].(int))
}
if len(args) > 2 {
optArgs = append(optArgs, args[2].(int))
}
if len(args) > 3 {
optArgs = append(optArgs, args[3].(int))
}
return btcjson.NewSearchRawTransactionsCmd("btcctl", args[0].(string),
optArgs...)
}
// makeSendFrom generates the cmd structure for sendfrom commands.
func makeSendFrom(args []interface{}) (btcjson.Cmd, error) {
var optargs = make([]interface{}, 0, 3)


@@ -44,6 +44,7 @@ const (
blockMaxSizeMax = wire.MaxBlockPayload - 1000
defaultBlockPrioritySize = 50000
defaultGenerate = false
defaultAddrIndex = false
)
var (
@@ -108,6 +109,8 @@ type config struct {
BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"`
BlockPrioritySize uint32 `long:"blockprioritysize" description:"Size in bytes for high-priority/low-fee transactions when creating a block"`
GetWorkKeys []string `long:"getworkkey" description:"DEPRECATED -- Use the --miningaddr option instead"`
AddrIndex bool `long:"addrindex" description:"Build and maintain a full address index. Currently only supported by leveldb."`
DropAddrIndex bool `long:"dropaddrindex" description:"Deletes the address-based transaction index from the database on start up, and then exits."`
onionlookup func(string) ([]net.IP, error)
lookup func(string) ([]net.IP, error)
oniondial func(string, string) (net.Conn, error)
@@ -314,6 +317,7 @@ func loadConfig() (*config, []string, error) {
BlockMaxSize: defaultBlockMaxSize,
BlockPrioritySize: defaultBlockPrioritySize,
Generate: defaultGenerate,
AddrIndex: defaultAddrIndex,
}
// Service options which are only added on Windows.
@@ -474,6 +478,22 @@ func loadConfig() (*config, []string, error) {
return nil, nil, err
}
if cfg.AddrIndex && cfg.DropAddrIndex {
err := fmt.Errorf("addrindex and dropaddrindex cannot be " +
"activated at the same")
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Memdb does not currently support the addrindex.
if cfg.DbType == "memdb" && cfg.AddrIndex {
err := fmt.Errorf("memdb does not currently support the addrindex")
fmt.Fprintln(os.Stderr, err)
fmt.Fprintln(os.Stderr, usageMessage)
return nil, nil, err
}
// Validate profile port number
if cfg.Profile != "" {
profilePort, err := strconv.Atoi(cfg.Profile)

doc.go

@@ -91,6 +91,10 @@ Application Options:
--blockprioritysize= Size in bytes for high-priority/low-fee transactions
when creating a block (50000)
--getworkkey= DEPRECATED -- Use the --miningaddr option instead
--addrindex= Build and maintain a full address index. Currently
only supported by leveldb.
--dropaddrindex= Deletes the address-based transaction index from the
database on start up, and then exits.
Help Options:
-h, --help Show this help message


@@ -549,6 +549,7 @@ The following is an overview of the RPC methods which are implemented by btcd, b
|1|[debuglevel](#debuglevel)|Dynamically changes the debug logging level.|
|2|[getbestblock](#getbestblock)|Get block height and hash of best block in the main chain.|None|
|3|[getcurrentnet](#getcurrentnet)|Get bitcoin network btcd is running on.|None|
|4|[searchrawtransactions](#searchrawtransactions)|Query for transactions related to a particular address.|None|
<a name="ExtMethodDetails" />
**6.2 Method Details**<br />
@@ -559,10 +560,10 @@ The following is an overview of the RPC methods which are implemented by btcd, b
|---|---|
|Method|debuglevel|
|Parameters|1. _levelspec_ (string)|
|Description|Dynamically changes the debug logging level.<br />The levelspec can either a debug level or of the form `<subsystem>=<level>,<subsystem2>=<level2>,...`<br />The valid debug levels are `trace`, `debug`, `info`, `warn`, `error`, and `critical`.<br />The valid subsystems are `AMGR`, `BCDB`, `BMGR`, `BTCD`, `CHAN`, `DISC`, `PEER`, `RPCS`, `SCRP`, `SRVR`, and `TXMP`.<br />Additionally, the special keyword `show` can be used to get a list of the available subsystems.|
|Description|Dynamically changes the debug logging level.<br />The levelspec can either a debug level or of the form `<subsystem>=<level>,<subsystem2>=<level2>,...`<br />The valid debug levels are `trace`, `debug`, `info`, `warn`, `error`, and `critical`.<br />The valid subsystems are `AMGR`, `ADXR`, `BCDB`, `BMGR`, `BTCD`, `CHAN`, `DISC`, `PEER`, `RPCS`, `SCRP`, `SRVR`, and `TXMP`.<br />Additionally, the special keyword `show` can be used to get a list of the available subsystems.|
|Returns|string|
|Example Return|`Done.`|
|Example `show` Return|`Supported subsystems [AMGR BCDB BMGR BTCD CHAN DISC PEER RPCS SCRP SRVR TXMP]`|
|Example `show` Return|`Supported subsystems [AMGR ADXR BCDB BMGR BTCD CHAN DISC PEER RPCS SCRP SRVR TXMP]`|
[Return to Overview](#ExtMethodOverview)<br />
***
@@ -592,6 +593,19 @@ The following is an overview of the RPC methods which are implemented by btcd, b
***
<a name="searchrawtransactions"/>
| | |
|---|---|
|Method|searchrawtransactions|
|Parameters|1. address (string, required) - bitcoin address <br /> 2. verbose (int, optional, default=true) - specifies the transaction is returned as a JSON object instead of hex-encoded string <br />3. skip (int, optional, default=0) - the number of leading transactions to leave out of the final response <br /> 4. count (int, optional, default=100) - the maximum number of transactions to return|
|Description|Returns raw data for transactions involving the passed address. Returned transactions are pulled from both the database, and transactions currently in the mempool. Transactions pulled from the mempool will have the `"confirmations"` field set to 0. Usage of this RPC requires the optional `--addrindex` flag to be activated, otherwise all responses will simply return with an error stating the address index has not yet been built up. Similarly, until the address index has caught up with the current best height, all requests will return an error response in order to avoid serving stale data.|
|Returns (verbose=0)|`"data" (string) hex-encoded bytes of the serialized transaction`|
|Returns (verbose=1)|`{ (json object)`<br />&nbsp;&nbsp;`"hex": "data", (string) hex-encoded transaction`<br />&nbsp;&nbsp;`"txid": "hash", (string) the hash of the transaction`<br />&nbsp;&nbsp;`"version": n, (numeric) the transaction version`<br />&nbsp;&nbsp;`"locktime": n, (numeric) the transaction lock time`<br />&nbsp;&nbsp;`"vin": [ (array of json objects) the transaction inputs as json objects`<br />&nbsp;&nbsp;<font color="orange">For coinbase transactions:</font><br />&nbsp;&nbsp;&nbsp;&nbsp;`{ (json object)`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"coinbase": "data", (string) the hex-encoded bytes of the signature script`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"sequence": n, (numeric) the script sequence number`<br />&nbsp;&nbsp;&nbsp;&nbsp;`}`<br />&nbsp;&nbsp;<font color="orange">For non-coinbase transactions:</font><br />&nbsp;&nbsp;&nbsp;&nbsp;`{ (json object)`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"txid": "hash", (string) the hash of the origin transaction`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"vout": n, (numeric) the index of the output being redeemed from the origin transaction`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"scriptSig": { (json object) the signature script used to redeem the origin transaction`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"asm": "asm", (string) disassembly of the script`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"hex": "data", (string) hex-encoded bytes of the script`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`}`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"sequence": n, (numeric) the script sequence number`<br />&nbsp;&nbsp;&nbsp;&nbsp;`}, ...`<br />&nbsp;&nbsp;`]`<br />&nbsp;&nbsp;`"vout": [ (array of json objects) the transaction outputs as json objects`<br />&nbsp;&nbsp;&nbsp;&nbsp;`{ (json object)`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"value": n, (numeric) the value in BTC`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"n": n, (numeric) the index of this transaction output`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"scriptPubKey": { (json object) the public key script used to pay coins`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"asm": "asm", (string) disassembly of the script`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"hex": "data", (string) hex-encoded bytes of the script`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"reqSigs": n, (numeric) the number of required signatures`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"type": "scripttype" (string) the type of the script (e.g. 'pubkeyhash')`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"addresses": [ (json array of string) the bitcoin addresses associated with this output`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`"address", (string) the bitcoin address`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`...`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`]`<br />&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;`}`<br />&nbsp;&nbsp;&nbsp;&nbsp;`}, ...`<br /> &nbsp;&nbsp;&nbsp;`]`<br />&nbsp;&nbsp; `"blockhash":"hash" Hash of the block the transaction is part of.` <br /> &nbsp;&nbsp; `"confirmations":n, Number of numeric confirmations of block.` <br /> &nbsp;&nbsp;&nbsp;`"time":t, Transaction time in seconds since the epoch.` <br /> &nbsp;&nbsp;&nbsp;`"blocktime":t, Block time in seconds since the epoch.`<br /> `}`|
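A hedged sketch of building this request programmatically with the btcjson constructor used by btcctl in this commit (the import path, id, and address below are illustrative assumptions):

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcjson"
)

func main() {
	// Build a searchrawtransactions command: verbose=1, skip=0, count=100.
	cmd, err := btcjson.NewSearchRawTransactionsCmd("btcctl",
		"1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa", 1, 0, 100)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cmd)
}
```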
[Return to Overview](#ExtMethodOverview)<br />
***
<a name="WSExtMethods" /> <a name="WSExtMethods" />
### 7. Websocket Extension Methods (Websocket-specific) ### 7. Websocket Extension Methods (Websocket-specific)

log.go

@@ -39,6 +39,7 @@ const (
// function.
var (
backendLog = seelog.Disabled
adxrLog = btclog.Disabled
amgrLog = btclog.Disabled
bcdbLog = btclog.Disabled
bmgrLog = btclog.Disabled
@@ -55,6 +56,7 @@ var (
// subsystemLoggers maps each subsystem identifier to its associated logger.
var subsystemLoggers = map[string]btclog.Logger{
"ADXR": adxrLog,
"AMGR": amgrLog,
"BCDB": bcdbLog,
"BMGR": bmgrLog,
@@ -94,6 +96,9 @@ func useLogger(subsystemID string, logger btclog.Logger) {
subsystemLoggers[subsystemID] = logger
switch subsystemID {
case "ADXR":
adxrLog = logger
case "AMGR":
amgrLog = logger
addrmgr.UseLogger(logger)


@@ -97,6 +97,7 @@ type txMemPool struct {
pool map[wire.ShaHash]*TxDesc
orphans map[wire.ShaHash]*btcutil.Tx
orphansByPrev map[wire.ShaHash]*list.List
addrindex map[string]map[*btcutil.Tx]struct{} // maps address to txs
outpoints map[wire.OutPoint]*btcutil.Tx
lastUpdated time.Time // last time pool was updated
pennyTotal float64 // exponentially decaying total for penny spends.
@@ -610,12 +611,59 @@ func (mp *txMemPool) removeTransaction(tx *btcutil.Tx) {
// Remove the transaction and mark the referenced outpoints as unspent
// by the pool.
if txDesc, exists := mp.pool[*txHash]; exists {
if cfg.AddrIndex {
mp.removeTransactionFromAddrIndex(tx)
}
for _, txIn := range txDesc.Tx.MsgTx().TxIn {
delete(mp.outpoints, txIn.PreviousOutPoint)
}
delete(mp.pool, *txHash)
mp.lastUpdated = time.Now()
}
}
// removeTransactionFromAddrIndex removes the passed transaction from our
// address based index.
//
// This function MUST be called with the mempool lock held (for writes).
func (mp *txMemPool) removeTransactionFromAddrIndex(tx *btcutil.Tx) error {
previousOutputScripts, err := mp.fetchReferencedOutputScripts(tx)
if err != nil {
txmpLog.Errorf("Unable to obtain referenced output scripts for "+
"the passed tx (addrindex): %v", err)
return err
}
for _, pkScript := range previousOutputScripts {
mp.removeScriptFromAddrIndex(pkScript, tx)
}
for _, txOut := range tx.MsgTx().TxOut {
mp.removeScriptFromAddrIndex(txOut.PkScript, tx)
}
return nil
}
// removeScriptFromAddrIndex dissociates the address encoded by the
// passed pkScript from the passed tx in our address based tx index.
//
// This function MUST be called with the mempool lock held (for writes).
func (mp *txMemPool) removeScriptFromAddrIndex(pkScript []byte, tx *btcutil.Tx) error {
_, addresses, _, err := txscript.ExtractPkScriptAddrs(pkScript,
activeNetParams.Params)
if err != nil {
txmpLog.Errorf("Unable to extract encoded addresses from script "+
"for addrindex (addrindex): %v", err)
return err
}
for _, addr := range addresses {
delete(mp.addrindex[addr.EncodeAddress()], tx)
}
return nil
}
// RemoveTransaction removes the passed transaction and any transactions which
@@ -669,6 +717,79 @@ func (mp *txMemPool) addTransaction(tx *btcutil.Tx, height, fee int64) {
mp.outpoints[txIn.PreviousOutPoint] = tx
}
mp.lastUpdated = time.Now()
if cfg.AddrIndex {
mp.addTransactionToAddrIndex(tx)
}
}
// addTransactionToAddrIndex adds all addresses related to the transaction to
// our in-memory address index. Note that this index is only populated when
// we're running with the optional address index activated.
//
// This function MUST be called with the mempool lock held (for writes).
func (mp *txMemPool) addTransactionToAddrIndex(tx *btcutil.Tx) error {
previousOutScripts, err := mp.fetchReferencedOutputScripts(tx)
if err != nil {
txmpLog.Errorf("Unable to obtain referenced output scripts for "+
"the passed tx (addrindex): %v", err)
return err
}
// Index addresses of all referenced previous output tx's.
for _, pkScript := range previousOutScripts {
mp.indexScriptAddressToTx(pkScript, tx)
}
// Index addresses of all created outputs.
for _, txOut := range tx.MsgTx().TxOut {
mp.indexScriptAddressToTx(txOut.PkScript, tx)
}
return nil
}
// fetchReferencedOutputScripts looks up and returns all the scriptPubKeys
// referenced by inputs of the passed transaction.
//
// This function MUST be called with the mempool lock held (for reads).
func (mp *txMemPool) fetchReferencedOutputScripts(tx *btcutil.Tx) ([][]byte, error) {
txStore, err := mp.fetchInputTransactions(tx)
if err != nil || len(txStore) == 0 {
return nil, err
}
previousOutScripts := make([][]byte, 0, len(tx.MsgTx().TxIn))
for _, txIn := range tx.MsgTx().TxIn {
outPoint := txIn.PreviousOutPoint
if txStore[outPoint.Hash].Err == nil {
referencedOutPoint := txStore[outPoint.Hash].Tx.MsgTx().TxOut[outPoint.Index]
previousOutScripts = append(previousOutScripts, referencedOutPoint.PkScript)
}
}
return previousOutScripts, nil
}
// indexScriptAddressToTx alters our address index by indexing the payment address
// encoded by the passed scriptPubKey to the passed transaction.
//
// This function MUST be called with the mempool lock held (for writes).
func (mp *txMemPool) indexScriptAddressToTx(pkScript []byte, tx *btcutil.Tx) error {
_, addresses, _, err := txscript.ExtractPkScriptAddrs(pkScript,
activeNetParams.Params)
if err != nil {
txmpLog.Errorf("Unable to extract encoded addresses from script "+
"for addrindex: %v", err)
return err
}
for _, addr := range addresses {
if mp.addrindex[addr.EncodeAddress()] == nil {
mp.addrindex[addr.EncodeAddress()] = make(map[*btcutil.Tx]struct{})
}
mp.addrindex[addr.EncodeAddress()][tx] = struct{}{}
}
return nil
}
// calcInputValueAge is a helper function used to calculate the input age of
@@ -795,6 +916,25 @@ func (mp *txMemPool) FetchTransaction(txHash *wire.ShaHash) (*btcutil.Tx, error)
return nil, fmt.Errorf("transaction is not in the pool")
}
// FilterTransactionsByAddress returns all transactions currently in the
// mempool that either create an output to the passed address or spend a
// previously created output to the address.
func (mp *txMemPool) FilterTransactionsByAddress(addr btcutil.Address) ([]*btcutil.Tx, error) {
// Protect concurrent access.
mp.RLock()
defer mp.RUnlock()
if txs, exists := mp.addrindex[addr.EncodeAddress()]; exists {
addressTxs := make([]*btcutil.Tx, 0, len(txs))
for tx := range txs {
addressTxs = append(addressTxs, tx)
}
return addressTxs, nil
}
return nil, fmt.Errorf("address does not have any transactions in the pool")
}
// maybeAcceptTransaction is the internal function which implements the public
// MaybeAcceptTransaction. See the comment for MaybeAcceptTransaction for
// more details.
@@ -1276,11 +1416,15 @@ func (mp *txMemPool) LastUpdated() time.Time {
// newTxMemPool returns a new memory pool for validating and storing standalone
// transactions until they are mined into a block.
func newTxMemPool(server *server) *txMemPool {
return &txMemPool{
memPool := &txMemPool{
server: server,
pool: make(map[wire.ShaHash]*TxDesc),
orphans: make(map[wire.ShaHash]*btcutil.Tx),
orphansByPrev: make(map[wire.ShaHash]*list.List),
outpoints: make(map[wire.OutPoint]*btcutil.Tx),
}
if cfg.AddrIndex {
memPool.addrindex = make(map[string]map[*btcutil.Tx]struct{})
}
return memPool
}


@@ -150,6 +150,7 @@ var rpcHandlersBeforeInit = map[string]commandHandler{
"getwork": handleGetWork,
"help": handleHelp,
"ping": handlePing,
"searchrawtransactions": handleSearchRawTransactions,
"sendrawtransaction": handleSendRawTransaction,
"setgenerate": handleSetGenerate,
"stop": handleStop,
@@ -2947,6 +2948,122 @@ func handlePing(s *rpcServer, cmd btcjson.Cmd, closeChan <-chan struct{}) (inter
return nil, nil
}
// handleSearchRawTransactions implements the searchrawtransactions command.
func handleSearchRawTransactions(s *rpcServer, cmd btcjson.Cmd, closeChan <-chan struct{}) (interface{}, error) {
if !cfg.AddrIndex {
return nil, btcjson.Error{
Code: btcjson.ErrMisc.Code,
Message: "addrindex is not currently enabled",
}
}
if !s.server.addrIndexer.IsCaughtUp() {
return nil, btcjson.Error{
Code: btcjson.ErrMisc.Code,
Message: "Address index has not yet caught up to the current " +
"best height",
}
}
c := cmd.(*btcjson.SearchRawTransactionsCmd)
// Attempt to decode the supplied address.
addr, err := btcutil.DecodeAddress(c.Address, s.server.netParams)
if err != nil {
return nil, btcjson.Error{
Code: btcjson.ErrInvalidAddressOrKey.Code,
Message: fmt.Sprintf("%s: %v",
btcjson.ErrInvalidAddressOrKey.Message, err),
}
}
var addressTxs []*database.TxListReply
// First check the mempool for relevant transactions.
memPoolTxs, err := s.server.txMemPool.FilterTransactionsByAddress(addr)
if err == nil && len(memPoolTxs) != 0 {
for _, tx := range memPoolTxs {
txReply := &database.TxListReply{Tx: tx.MsgTx(), Sha: tx.Sha()}
addressTxs = append(addressTxs, txReply)
}
}
if len(addressTxs) >= c.Count {
// Tx's in the mempool exceed the requested number of tx's.
// Slice off any possible overflow.
addressTxs = addressTxs[:c.Count]
} else {
// Otherwise, we'll also take a look into the database.
dbTxs, err := s.server.db.FetchTxsForAddr(addr, c.Skip,
c.Count-len(addressTxs))
if err == nil && len(dbTxs) != 0 {
for _, txReply := range dbTxs {
addressTxs = append(addressTxs, txReply)
}
}
}
// If neither source yielded any results, then the address has never
// been used.
if len(addressTxs) == 0 {
return nil, btcjson.ErrNoTxInfo
}
// When not in verbose mode, simply return a list of serialized txs.
if c.Verbose == 0 {
serializedTxs := make([]string, len(addressTxs), len(addressTxs))
for i, txReply := range addressTxs {
serializedTxs[i], err = messageToHex(txReply.Tx)
if err != nil {
return nil, err
}
}
return serializedTxs, nil
}
// Otherwise, we'll need to populate raw tx results.
// Grab the current best height for tx confirmation calculation.
_, maxidx, err := s.server.db.NewestSha()
if err != nil {
rpcsLog.Errorf("Cannot get newest sha: %v", err)
return nil, btcjson.ErrBlockNotFound
}
rawTxns := make([]btcjson.TxRawResult, len(addressTxs), len(addressTxs))
for i, txReply := range addressTxs {
txSha := txReply.Sha.String()
mtx := txReply.Tx
// Transactions grabbed from the mempool aren't yet
// within a block. So we conditionally fetch a tx's
// embedded block here. This will be reflected in the
// final JSON output (mempool won't have confirmations).
var blk *btcutil.Block
if txReply.BlkSha != nil {
blk, err = s.server.db.FetchBlockBySha(txReply.BlkSha)
if err != nil {
rpcsLog.Errorf("Error fetching sha: %v", err)
return nil, btcjson.ErrBlockNotFound
}
}
var blkSha *wire.ShaHash
if blk != nil {
blkSha, _ = blk.Sha()
}
rawTxn, err := createTxRawResult(s.server.netParams,
txSha, mtx, blk, maxidx, blkSha)
if err != nil {
rpcsLog.Errorf("Cannot create TxRawResult for "+
"transaction %s: %v", txSha, err)
return nil, err
}
rawTxns[i] = *rawTxn
}
return rawTxns, nil
}
// handleSendRawTransaction implements the sendrawtransaction command.
func handleSendRawTransaction(s *rpcServer, cmd btcjson.Cmd, closeChan <-chan struct{}) (interface{}, error) {
c := cmd.(*btcjson.SendRawTransactionCmd)


@@ -199,6 +199,14 @@
; the default).
; notls=1
; ------------------------------------------------------------------------------
; Optional Transaction Indexes
; ------------------------------------------------------------------------------
; Build and maintain a full address-based transaction index.
; addrindex=1
; Delete the entire address index on start up, then exit.
; dropaddrindex=0
; ------------------------------------------------------------------------------
; Coin Generation (Mining) Settings - The following options control the


@@ -85,6 +85,7 @@ type server struct {
addrManager *addrmgr.AddrManager
rpcServer *rpcServer
blockManager *blockManager
addrIndexer *addrIndexer
txMemPool *txMemPool
cpuMiner *CPUMiner
modifyRebroadcastInv chan interface{}
@@ -706,6 +707,9 @@ out:
}
}
if cfg.AddrIndex {
s.addrIndexer.Stop()
}
s.blockManager.Stop()
s.addrManager.Stop()
s.wg.Done()
@@ -910,6 +914,10 @@ func (s *server) Start() {
if cfg.Generate {
s.cpuMiner.Start()
}
if cfg.AddrIndex {
s.addrIndexer.Start()
}
}
// Stop gracefully shuts down the server by stopping and disconnecting all
@@ -1248,6 +1256,14 @@ func newServer(listenAddrs []string, db database.Db, netParams *btcnet.Params) (
s.txMemPool = newTxMemPool(&s)
s.cpuMiner = newCPUMiner(&s)
if cfg.AddrIndex {
ai, err := newAddrIndexer(&s)
if err != nil {
return nil, err
}
s.addrIndexer = ai
}
if !cfg.DisableRPC {
s.rpcServer, err = newRPCServer(cfg.RPCListeners, &s)
if err != nil {