// Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package ldb

import (
	"encoding/binary"
	"fmt"
	"os"
	"strconv"
	"sync"

	"github.com/btcsuite/btcd/database"
	"github.com/btcsuite/btclog"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcwire"
	"github.com/btcsuite/goleveldb/leveldb"
	"github.com/btcsuite/goleveldb/leveldb/opt"
)

const (
	dbVersion     int = 2
	dbMaxTransCnt     = 20000
	dbMaxTransMem     = 64 * 1024 * 1024 // 64 MB
)

var log = btclog.Disabled

type tTxInsertData struct {
	txsha   *btcwire.ShaHash
	blockid int64
	txoff   int
	txlen   int
	usedbuf []byte
}

// LevelDb holds internal state for the database.
type LevelDb struct {
	// lock preventing multiple entry
	dbLock sync.Mutex

	// leveldb pieces
	lDb *leveldb.DB
	ro  *opt.ReadOptions
	wo  *opt.WriteOptions

	lbatch *leveldb.Batch

	nextBlock int64

	lastBlkShaCached bool
	lastBlkSha       btcwire.ShaHash
	lastBlkIdx       int64

	lastAddrIndexBlkSha btcwire.ShaHash
	lastAddrIndexBlkIdx int64

	txUpdateMap      map[btcwire.ShaHash]*txUpdateObj
	txSpentUpdateMap map[btcwire.ShaHash]*spentTxUpdate
}

var self = database.DriverDB{DbType: "leveldb", CreateDB: CreateDB, OpenDB: OpenDB}

func init() {
	database.AddDBDriver(self)
}

// parseArgs parses the arguments from the database package Open/Create methods.
func parseArgs(funcName string, args ...interface{}) (string, error) {
	if len(args) != 1 {
		return "", fmt.Errorf("Invalid arguments to ldb.%s -- "+
			"expected database path string", funcName)
	}
	dbPath, ok := args[0].(string)
	if !ok {
		return "", fmt.Errorf("First argument to ldb.%s is invalid -- "+
			"expected database path string", funcName)
	}
	return dbPath, nil
}

// OpenDB opens an existing database for use.
func OpenDB(args ...interface{}) (database.Db, error) {
	dbpath, err := parseArgs("OpenDB", args...)
	if err != nil {
		return nil, err
	}

	log = database.GetLog()

	db, err := openDB(dbpath, false)
	if err != nil {
		return nil, err
	}

	// Need to find last block and tx.
	var lastknownblock, nextunknownblock, testblock int64

	increment := int64(100000)
	ldb := db.(*LevelDb)

	var lastSha *btcwire.ShaHash
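	// Find the last stored block by probing heights in steps of
	// 'increment' until a height is missing, then binary searching the
	// remaining range below the first missing height.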
	// forward scan
blockforward:
	for {

		sha, err := ldb.fetchBlockShaByHeight(testblock)
		if err == nil {
			// block is found
			lastSha = sha
			lastknownblock = testblock
			testblock += increment
		} else {
			if testblock == 0 {
				// no blocks in db, odd but ok.
				lastknownblock = -1
				nextunknownblock = 0
				var emptysha btcwire.ShaHash
				lastSha = &emptysha
			} else {
				nextunknownblock = testblock
			}
			break blockforward
		}
	}

	// narrow search
blocknarrow:
	for lastknownblock != -1 {
		testblock = (lastknownblock + nextunknownblock) / 2
		sha, err := ldb.fetchBlockShaByHeight(testblock)
		if err == nil {
			lastknownblock = testblock
			lastSha = sha
		} else {
			nextunknownblock = testblock
		}
		if lastknownblock+1 == nextunknownblock {
			break blocknarrow
		}
	}

	// Load the last block whose transactions have been indexed by address.
	if sha, idx, err := ldb.fetchAddrIndexTip(); err == nil {
		ldb.lastAddrIndexBlkSha = *sha
		ldb.lastAddrIndexBlkIdx = idx
	} else {
		ldb.lastAddrIndexBlkIdx = -1
	}

	ldb.lastBlkSha = *lastSha
	ldb.lastBlkIdx = lastknownblock
	ldb.nextBlock = lastknownblock + 1

	return db, nil
}

// CurrentDBVersion is the database version.
var CurrentDBVersion int32 = 1

func openDB(dbpath string, create bool) (pbdb database.Db, err error) {
	var db LevelDb
	var tlDb *leveldb.DB
	var dbversion int32

	defer func() {
		if err == nil {
			db.lDb = tlDb

			db.txUpdateMap = map[btcwire.ShaHash]*txUpdateObj{}
			db.txSpentUpdateMap = make(map[btcwire.ShaHash]*spentTxUpdate)

			pbdb = &db
		}
	}()

	if create {
		err = os.Mkdir(dbpath, 0750)
		if err != nil {
			log.Errorf("mkdir failed %v %v", dbpath, err)
			return
		}
	} else {
		_, err = os.Stat(dbpath)
		if err != nil {
			err = database.ErrDbDoesNotExist
			return
		}
	}
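
	// The database version is stored alongside the database in a
	// "<dbpath>.ver" file as a little endian int32.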
	needVersionFile := false
	verfile := dbpath + ".ver"
	fi, ferr := os.Open(verfile)
	if ferr == nil {
		defer fi.Close()

		ferr = binary.Read(fi, binary.LittleEndian, &dbversion)
		if ferr != nil {
			dbversion = ^0
		}
	} else {
		if create {
			needVersionFile = true
			dbversion = CurrentDBVersion
		}
	}

	opts := &opt.Options{
		BlockCacher:     opt.DefaultBlockCacher,
		Compression:     opt.NoCompression,
		OpenFilesCacher: opt.DefaultOpenFilesCacher,
	}

	switch dbversion {
	case 0:
		opts = &opt.Options{}
	case 1:
		// uses defaults from above
	default:
		err = fmt.Errorf("unsupported db version %v", dbversion)
		return
	}

	tlDb, err = leveldb.OpenFile(dbpath, opts)
	if err != nil {
		return
	}

	// If we opened the database successfully on 'create',
	// write the version file.
	if needVersionFile {
		fo, ferr := os.Create(verfile)
		if ferr != nil {
			// TODO(design) close and delete database?
			err = ferr
			return
		}
		defer fo.Close()
		err = binary.Write(fo, binary.LittleEndian, dbversion)
		if err != nil {
			return
		}
	}

	return
}

// CreateDB creates, initializes and opens a database for use.
func CreateDB(args ...interface{}) (database.Db, error) {
	dbpath, err := parseArgs("Create", args...)
	if err != nil {
		return nil, err
	}

	log = database.GetLog()

	// No special setup needed, just openDB with create set.
	db, err := openDB(dbpath, true)
	if err == nil {
		ldb := db.(*LevelDb)
		ldb.lastBlkIdx = -1
		ldb.lastAddrIndexBlkIdx = -1
		ldb.nextBlock = 0
	}
	return db, err
}

func (db *LevelDb) close() error {
	return db.lDb.Close()
}

// Sync verifies that the database is coherent on disk
// and that no outstanding transactions are in flight.
func (db *LevelDb) Sync() error {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	// While specified by the API, this does nothing.  It does, however,
	// grab the lock to ensure it does not return until other operations
	// are complete.
	return nil
}

// Close cleanly shuts down database, syncing all data.
func (db *LevelDb) Close() error {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	return db.close()
}

// DropAfterBlockBySha will remove any blocks from the database after
// the given block.
func (db *LevelDb) DropAfterBlockBySha(sha *btcwire.ShaHash) (rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	startheight := db.nextBlock - 1

	keepidx, err := db.getBlkLoc(sha)
	if err != nil {
		// should the error here be normalized ?
		log.Tracef("block loc failed %v ", sha)
		return err
	}
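
	// Walk backward from the current tip down to the block being kept,
	// unspending each block's inputs, queueing its transactions for
	// deletion, and removing the block's sha and height keys.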
	for height := startheight; height > keepidx; height = height - 1 {
		var blk *btcutil.Block
		blksha, buf, err := db.getBlkByHeight(height)
		if err != nil {
			return err
		}
		blk, err = btcutil.NewBlockFromBytes(buf)
		if err != nil {
			return err
		}

		for _, tx := range blk.MsgBlock().Transactions {
			err = db.unSpend(tx)
			if err != nil {
				return err
			}
		}
		// Rather than iterating the list of transactions backward,
		// iterate it twice.
		for _, tx := range blk.Transactions() {
			var txUo txUpdateObj
			txUo.delete = true
			db.txUpdateMap[*tx.Sha()] = &txUo
		}
		db.lBatch().Delete(shaBlkToKey(blksha))
		db.lBatch().Delete(int64ToKey(height))
	}

	db.nextBlock = keepidx + 1

	return nil
}

// InsertBlock inserts raw block and transaction data from a block into the
// database.  The first block inserted into the database will be treated as the
// genesis block.  Every subsequent block insert requires the referenced parent
// block to already exist.
func (db *LevelDb) InsertBlock(block *btcutil.Block) (height int64, rerr error) {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()
	defer func() {
		if rerr == nil {
			rerr = db.processBatches()
		} else {
			db.lBatch().Reset()
		}
	}()

	blocksha, err := block.Sha()
	if err != nil {
		log.Warnf("Failed to compute block sha %v", blocksha)
		return 0, err
	}
	mblock := block.MsgBlock()
	rawMsg, err := block.Bytes()
	if err != nil {
		log.Warnf("Failed to obtain raw bytes for block %v", blocksha)
		return 0, err
	}
	txloc, err := block.TxLoc()
	if err != nil {
		log.Warnf("Failed to obtain tx locations for block %v", blocksha)
		return 0, err
	}

	// Insert block into database
	newheight, err := db.insertBlockData(blocksha, &mblock.Header.PrevBlock,
		rawMsg)
	if err != nil {
		log.Warnf("Failed to insert block %v %v %v", blocksha,
			&mblock.Header.PrevBlock, err)
		return 0, err
	}

	// At least two blocks in the long past were generated by faulty
	// miners: the sha of the transaction exists in a previous block.
	// Detect this condition and 'accept' the block.
	for txidx, tx := range mblock.Transactions {
		txsha, err := block.TxSha(txidx)
		if err != nil {
			log.Warnf("failed to compute tx name block %v idx %v err %v", blocksha, txidx, err)
			return 0, err
		}
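
		// spentbuf tracks spent outputs with one bit per TxOut.  Pad
		// bits in the final partial byte are pre-set so a fully spent
		// transaction reads as all ones.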
		spentbuflen := (len(tx.TxOut) + 7) / 8
		spentbuf := make([]byte, spentbuflen, spentbuflen)
		if len(tx.TxOut)%8 != 0 {
			for i := uint(len(tx.TxOut) % 8); i < 8; i++ {
				spentbuf[spentbuflen-1] |= (byte(1) << i)
			}
		}

		err = db.insertTx(txsha, newheight, txloc[txidx].TxStart, txloc[txidx].TxLen, spentbuf)
		if err != nil {
			log.Warnf("block %v idx %v failed to insert tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
			return 0, err
		}

		// Some old blocks contain duplicate transactions.
		// Attempt to cleanly bypass this problem by marking the
		// first as fully spent.
		// http://blockexplorer.com/b/91812 dup in 91842
		// http://blockexplorer.com/b/91722 dup in 91880
		if newheight == 91812 {
			dupsha, err := btcwire.NewShaHashFromStr("d5d27987d2a3dfc724e359870c6644b40e497bdc0589a033220fe15429d88599")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// marking TxOut[0] as spent
				po := btcwire.NewOutPoint(dupsha, 0)
				txI := btcwire.NewTxIn(po, []byte("garbage"))

				var spendtx btcwire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
				}
			}
		}
		if newheight == 91722 {
			dupsha, err := btcwire.NewShaHashFromStr("e3bf3d07d4b0375638d5f1db5255fe07ba2c4cb067cd81b84ee974b6585fb468")
			if err != nil {
				panic("invalid sha string in source")
			}
			if txsha.IsEqual(dupsha) {
				// marking TxOut[0] as spent
				po := btcwire.NewOutPoint(dupsha, 0)
				txI := btcwire.NewTxIn(po, []byte("garbage"))

				var spendtx btcwire.MsgTx
				spendtx.AddTxIn(txI)
				err = db.doSpend(&spendtx)
				if err != nil {
					log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, &txsha, txidx, err)
				}
			}
		}

		err = db.doSpend(tx)
		if err != nil {
			log.Warnf("block %v idx %v failed to spend tx %v %v err %v", blocksha, newheight, txsha, txidx, err)
			return 0, err
		}
	}
	return newheight, nil
}

// doSpend iterates all TxIn in a bitcoin transaction marking each associated
// TxOut as spent.
func (db *LevelDb) doSpend(tx *btcwire.MsgTx) error {
	for txinidx := range tx.TxIn {
		txin := tx.TxIn[txinidx]

		inTxSha := txin.PreviousOutPoint.Hash
		inTxidx := txin.PreviousOutPoint.Index
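
		// A previous outpoint index of ^uint32(0) marks a coinbase
		// input, which references no output to mark as spent.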
		if inTxidx == ^uint32(0) {
			continue
		}

		//log.Infof("spending %v %v", &inTxSha, inTxidx)

		err := db.setSpentData(&inTxSha, inTxidx)
		if err != nil {
			return err
		}
	}
	return nil
}

// unSpend iterates all TxIn in a bitcoin transaction marking each associated
// TxOut as unspent.
func (db *LevelDb) unSpend(tx *btcwire.MsgTx) error {
	for txinidx := range tx.TxIn {
		txin := tx.TxIn[txinidx]

		inTxSha := txin.PreviousOutPoint.Hash
		inTxidx := txin.PreviousOutPoint.Index

		if inTxidx == ^uint32(0) {
			continue
		}

		err := db.clearSpentData(&inTxSha, inTxidx)
		if err != nil {
			return err
		}
	}
	return nil
}

func (db *LevelDb) setSpentData(sha *btcwire.ShaHash, idx uint32) error {
	return db.setclearSpentData(sha, idx, true)
}

func (db *LevelDb) clearSpentData(sha *btcwire.ShaHash, idx uint32) error {
	return db.setclearSpentData(sha, idx, false)
}

func (db *LevelDb) setclearSpentData(txsha *btcwire.ShaHash, idx uint32, set bool) error {
	var txUo *txUpdateObj
	var ok bool

	if txUo, ok = db.txUpdateMap[*txsha]; !ok {
		// not cached, load from db
		var txU txUpdateObj
		blkHeight, txOff, txLen, spentData, err := db.getTxData(txsha)
		if err != nil {
			// setting a fully spent tx is an error.
			if set {
				return err
			}
			// if we are clearing a tx and it wasn't found
			// in the tx table, it could be in the fully spent
			// (duplicates) table.
			spentTxList, err := db.getTxFullySpent(txsha)
			if err != nil {
				return err
			}

			// need to reslice the list to exclude the most recent.
			sTx := spentTxList[len(spentTxList)-1]
			spentTxList[len(spentTxList)-1] = nil
			if len(spentTxList) == 1 {
				// write entry to delete tx from spent pool
				// XXX
			} else {
				spentTxList = spentTxList[:len(spentTxList)-1]
				// XXX format sTxList and set update Table
			}

			// Create 'new' Tx update data.
			blkHeight = sTx.blkHeight
			txOff = sTx.txoff
			txLen = sTx.txlen
			spentbuflen := (sTx.numTxO + 7) / 8
			spentData = make([]byte, spentbuflen, spentbuflen)
			for i := range spentData {
				spentData[i] = ^byte(0)
			}
		}

		txU.txSha = txsha
		txU.blkHeight = blkHeight
		txU.txoff = txOff
		txU.txlen = txLen
		txU.spentData = spentData

		txUo = &txU
	}
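
	// Locate the bit for this output index: each byte of spentData covers
	// eight outputs, least significant bit first.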
	byteidx := idx / 8
	byteoff := idx % 8

	if set {
		txUo.spentData[byteidx] |= (byte(1) << byteoff)
	} else {
		txUo.spentData[byteidx] &= ^(byte(1) << byteoff)
	}

	// check for fully spent Tx
	fullySpent := true
	for _, val := range txUo.spentData {
		if val != ^byte(0) {
			fullySpent = false
			break
		}
	}
	if fullySpent {
		var txSu *spentTxUpdate
		// Look up Tx in fully spent table
		if txSuOld, ok := db.txSpentUpdateMap[*txsha]; ok {
			txSu = txSuOld
		} else {
			var txSuStore spentTxUpdate
			txSu = &txSuStore

			txSuOld, err := db.getTxFullySpent(txsha)
			if err == nil {
				txSu.txl = txSuOld
			}
		}

		// Fill in spentTx
		var sTx spentTx
		sTx.blkHeight = txUo.blkHeight
		sTx.txoff = txUo.txoff
		sTx.txlen = txUo.txlen
		// XXX -- there is no way to compute the real number of TxOuts
		// from the spent array.
		sTx.numTxO = 8 * len(txUo.spentData)

		// append this txdata to fully spent txlist
		txSu.txl = append(txSu.txl, &sTx)

		// mark txsha as deleted in the txUpdateMap
		log.Tracef("***tx %v is fully spent\n", txsha)

		db.txSpentUpdateMap[*txsha] = txSu

		txUo.delete = true
		db.txUpdateMap[*txsha] = txUo
	} else {
		db.txUpdateMap[*txsha] = txUo
	}

	return nil
}
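
// Key encodings used by this backend: block-by-height keys are the decimal
// string of the height, block-by-sha keys are the raw sha bytes, transaction
// keys are the sha bytes followed by "tx", and fully spent transaction keys
// are the sha bytes followed by "sx".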

func int64ToKey(keyint int64) []byte {
	key := strconv.FormatInt(keyint, 10)
	return []byte(key)
}

func shaBlkToKey(sha *btcwire.ShaHash) []byte {
	shaB := sha.Bytes()
	return shaB
}

func shaTxToKey(sha *btcwire.ShaHash) []byte {
	shaB := sha.Bytes()
	shaB = append(shaB, "tx"...)
	return shaB
}

func shaSpentTxToKey(sha *btcwire.ShaHash) []byte {
	shaB := sha.Bytes()
	shaB = append(shaB, "sx"...)
	return shaB
}

func (db *LevelDb) lBatch() *leveldb.Batch {
	if db.lbatch == nil {
		db.lbatch = new(leveldb.Batch)
	}
	return db.lbatch
}
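
// processBatches writes any queued transaction updates and fully spent
// transaction records into the pending leveldb batch, commits the batch in a
// single write, and clears the in-memory update maps.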
func (db *LevelDb) processBatches() error {
	var err error

	if len(db.txUpdateMap) != 0 || len(db.txSpentUpdateMap) != 0 || db.lbatch != nil {
		if db.lbatch == nil {
			db.lbatch = new(leveldb.Batch)
		}

		defer db.lbatch.Reset()

		for txSha, txU := range db.txUpdateMap {
			key := shaTxToKey(&txSha)
			if txU.delete {
				//log.Tracef("deleting tx %v", txSha)
				db.lbatch.Delete(key)
			} else {
				//log.Tracef("inserting tx %v", txSha)
				txdat := db.formatTx(txU)
				db.lbatch.Put(key, txdat)
			}
		}
		for txSha, txSu := range db.txSpentUpdateMap {
			key := shaSpentTxToKey(&txSha)
			if txSu.delete {
				//log.Tracef("deleting tx %v", txSha)
				db.lbatch.Delete(key)
			} else {
				//log.Tracef("inserting tx %v", txSha)
				txdat := db.formatTxFullySpent(txSu.txl)
				db.lbatch.Put(key, txdat)
			}
		}

		err = db.lDb.Write(db.lbatch, db.wo)
		if err != nil {
			log.Tracef("batch failed %v\n", err)
			return err
		}
		db.txUpdateMap = map[btcwire.ShaHash]*txUpdateObj{}
		db.txSpentUpdateMap = make(map[btcwire.ShaHash]*spentTxUpdate)
	}

	return nil
}

// RollbackClose is part of the database.Db interface and should discard
// recent changes to the db and then close the db.  This currently just does
// a clean shutdown.
func (db *LevelDb) RollbackClose() error {
	db.dbLock.Lock()
	defer db.dbLock.Unlock()

	return db.close()
}