[lbry] fees: port estimatesmartfee from DCRD

1. logger: slog -> btclog
2. block height: int64 -> int32
3. dcrutil -> lbcutil
4. MaxConfirmations: 32 -> 42
5. MinBucketFee: mempool.MinRelayFee (default 1000)
6. BucketFee spacing: 1.1 -> 1.05

Note:
  The DCRD implementation of estimatesmartfee is based on Bitcoin Core 0.14,
  while Lbrycrd (0.17) includes the updates from Bitcoin Core 0.15.
  They are slightly different, but the difference shouldn't matter much.
  (A configuration sketch follows below.)
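
A minimal sketch of what the retuned configuration looks like from the caller's side. Only the EstimatorConfig fields and the two defaults shown in the diffs below are real; the surrounding program and the MaxBucketFee value are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/fees"
	"github.com/lbryio/lbcutil"
)

func main() {
	// Values from the change list above. MaxBucketFee is an illustrative
	// choice; this commit does not pin a particular value for it.
	cfg := fees.EstimatorConfig{
		MinBucketFee: lbcutil.Amount(1000),       // mempool min relay fee default
		MaxBucketFee: lbcutil.Amount(1000) * 400, // assumed upper bound for the sketch
		FeeRateStep:  fees.DefaultFeeRateStep,    // 1.05 after this change
	}

	// DefaultMaxConfirmations is now 42, so the estimator tracks confirmation
	// targets of up to 42 blocks.
	fmt.Printf("config: %+v, max confirmations: %d\n", cfg, fees.DefaultMaxConfirmations)
}
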
Roy Lee 2022-01-26 20:42:17 -08:00
parent 324c443c64
commit d126d0c10e
4 changed files with 54 additions and 53 deletions


@@ -12,20 +12,24 @@ import (
 	"os"
 	"path"
 
-	"github.com/decred/dcrd/dcrutil/v4"
-	"github.com/decred/dcrd/internal/fees"
+	"github.com/btcsuite/btclog"
 	flags "github.com/jessevdk/go-flags"
+	"github.com/lbryio/lbcd/fees"
+	"github.com/lbryio/lbcutil"
 )
 
 type config struct {
 	DB string `short:"b" long:"db" description:"Path to fee database"`
 }
 
+var feesLog = btclog.NewBackend(os.Stdout).Logger("FEES")
+
 func main() {
 	cfg := config{
-		DB: path.Join(dcrutil.AppDataDir("dcrd", false), "data", "mainnet", "feesdb"),
+		DB: path.Join(lbcutil.AppDataDir("lbcd", false), "data", "mainnet", "feesdb"),
 	}
 
+	fees.UseLogger(feesLog)
 	parser := flags.NewParser(&cfg, flags.Default)
 	_, err := parser.Parse()
 	if err != nil {


@@ -13,9 +13,8 @@ import (
 	"sort"
 	"sync"
 
-	"github.com/decred/dcrd/blockchain/stake/v4"
-	"github.com/decred/dcrd/chaincfg/chainhash"
-	"github.com/decred/dcrd/dcrutil/v4"
+	"github.com/lbryio/lbcd/chaincfg/chainhash"
+	"github.com/lbryio/lbcutil"
 	"github.com/syndtr/goleveldb/leveldb"
 	ldbutil "github.com/syndtr/goleveldb/leveldb/util"
 )
@@ -27,11 +26,11 @@ const (
 
 	// DefaultMaxConfirmations is the default number of confirmation ranges to
 	// track in the estimator.
-	DefaultMaxConfirmations uint32 = 32
+	DefaultMaxConfirmations uint32 = 42
 
 	// DefaultFeeRateStep is the default multiplier between two consecutive fee
 	// rate buckets.
-	DefaultFeeRateStep float64 = 1.1
+	DefaultFeeRateStep float64 = 1.05
 
 	// defaultDecay is the default value used to decay old transactions from the
 	// estimator.
@@ -102,13 +101,13 @@ type EstimatorConfig struct {
 
 	// MinBucketFee is the value of the fee rate of the lowest bucket for which
 	// estimation is tracked.
-	MinBucketFee dcrutil.Amount
+	MinBucketFee lbcutil.Amount
 
 	// MaxBucketFee is the value of the fee for the highest bucket for which
 	// estimation is tracked.
 	//
 	// It MUST be higher than MinBucketFee.
-	MaxBucketFee dcrutil.Amount
+	MaxBucketFee lbcutil.Amount
 
 	// ExtraBucketFee is an additional bucket fee rate to include in the
 	// database for tracking transactions. Specifying this can be useful when
@@ -118,7 +117,7 @@ type EstimatorConfig struct {
 	//
 	// It MUST have a value between MinBucketFee and MaxBucketFee, otherwise
 	// it's ignored.
-	ExtraBucketFee dcrutil.Amount
+	ExtraBucketFee lbcutil.Amount
 
 	// FeeRateStep is the multiplier to generate the fee rate buckets (each
 	// bucket is higher than the previous one by this factor).
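
The FeeRateStep comment above is the knob this commit retunes: bucket bounds grow geometrically from MinBucketFee by that factor. A standalone sketch (not the package's internal generator) of what dropping the step from 1.1 to 1.05 means in practice; the MaxBucketFee value is an assumption:

package main

import "fmt"

func main() {
	const (
		minBucketFee = 1000.0 // new MinBucketFee default (atoms/KB)
		maxBucketFee = 1e8    // illustrative upper bound
		feeRateStep  = 1.05   // DefaultFeeRateStep after this change
	)

	var bounds []float64
	for f := minBucketFee; f <= maxBucketFee; f *= feeRateStep {
		bounds = append(bounds, f)
	}

	// With a 1.05 step this range yields roughly 236 buckets versus about 121
	// with the old 1.1 step: finer-grained estimates, more counters to track.
	fmt.Printf("%d buckets, first few: %v\n", len(bounds), bounds[:4])
}
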
@@ -138,7 +137,7 @@ type EstimatorConfig struct {
 
 // memPoolTxDesc is an aux structure used to track the local estimator mempool.
 type memPoolTxDesc struct {
-	addedHeight int64
+	addedHeight int32
 	bucketIndex int32
 	fees feeRate
 }
@@ -161,7 +160,7 @@ type Estimator struct {
 	maxConfirms int32
 	decay float64
 
-	bestHeight int64
+	bestHeight int32
 	db *leveldb.DB
 	lock sync.RWMutex
 }
@@ -324,7 +323,7 @@ func (stats *Estimator) loadFromDatabase(replaceBuckets bool) error {
 	dbByteOrder.PutUint64(bestHeightBytes[:], uint64(stats.bestHeight))
 	batch.Put(dbKeyBestHeight, bestHeightBytes[:])
 
-	err := binary.Write(b, dbByteOrder, stats.bucketFeeBounds)
+	err = binary.Write(b, dbByteOrder, stats.bucketFeeBounds)
 	if err != nil {
 		return fmt.Errorf("error writing bucket fees to db: %v", err)
 	}
@@ -534,7 +533,7 @@ func (stats *Estimator) confirmRange(blocksToConfirm int32) int32 {
 // statistics and increases the confirmation ranges for mempool txs. This is
 // meant to be called when a new block is mined, so that we discount older
 // information.
-func (stats *Estimator) updateMovingAverages(newHeight int64) {
+func (stats *Estimator) updateMovingAverages(newHeight int32) {
 	log.Debugf("Updated moving averages into block %d", newHeight)
 
 	// decay the existing stats so that, over time, we rely on more up to date
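
The comment above is the heart of the moving-average scheme: each new block scales the accumulated per-bucket statistics by a decay factor slightly below one, so recent observations dominate. A toy sketch of just that step, with invented field names (the real method also shifts mempool transactions into higher confirmation ranges):

package example

// bucketStats is an invented stand-in for the estimator's per-bucket counters.
type bucketStats struct {
	seen      float64 // decayed count of transactions observed in this fee bucket
	confirmed float64 // decayed count of those confirmed within the target range
}

// decayAll applies one block's worth of exponential decay.
func decayAll(buckets []bucketStats, decay float64) {
	for i := range buckets {
		buckets[i].seen *= decay      // decay is a little below 1.0
		buckets[i].confirmed *= decay // so old blocks fade out of the ratio
	}
}
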
@@ -718,7 +717,7 @@ func (stats *Estimator) estimateMedianFee(targetConfs int32, successPct float64)
 //
 // This function is safe to be called from multiple goroutines but might block
 // until concurrent modifications to the internal database state are complete.
-func (stats *Estimator) EstimateFee(targetConfs int32) (dcrutil.Amount, error) {
+func (stats *Estimator) EstimateFee(targetConfs int32) (lbcutil.Amount, error) {
 	stats.lock.RLock()
 	rate, err := stats.estimateMedianFee(targetConfs, 0.95)
 	stats.lock.RUnlock()
@@ -734,13 +733,13 @@ func (stats *Estimator) EstimateFee(targetConfs int32) (dcrutil.Amount, error) {
 		rate = stats.bucketFeeBounds[0]
 	}
 
-	return dcrutil.Amount(rate), nil
+	return lbcutil.Amount(rate), nil
 }
 
 // Enable establishes the current best height of the blockchain after
 // initializing the chain. All new mempool transactions will be added at this
 // block height.
-func (stats *Estimator) Enable(bestHeight int64) {
+func (stats *Estimator) Enable(bestHeight int32) {
 	log.Debugf("Setting best height as %d", bestHeight)
 	stats.lock.Lock()
 	stats.bestHeight = bestHeight
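
Together, the two signatures above are the read side of the ported API: Enable pins the best height (now an int32) once the chain is ready, and EstimateFee returns an lbcutil.Amount per KB clamped to the lowest tracked bucket. A hedged usage sketch; the helper functions are illustrative, not part of this commit:

package example

import (
	"fmt"

	"github.com/lbryio/lbcd/fees"
)

// startFeeEstimation would be called once, when the chain is synced.
func startFeeEstimation(e *fees.Estimator, tipHeight int32) {
	e.Enable(tipHeight)
}

// feeForTarget asks for a fee rate expected to confirm within targetConfs blocks.
func feeForTarget(e *fees.Estimator, targetConfs int32) {
	feeRate, err := e.EstimateFee(targetConfs)
	if err != nil {
		fmt.Printf("no estimate for target %d: %v\n", targetConfs, err)
		return
	}
	fmt.Printf("estimated fee rate for %d confirmations: %v per KB\n",
		targetConfs, feeRate)
}
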
@@ -762,7 +761,7 @@ func (stats *Estimator) IsEnabled() bool {
 // total fee amount (in atoms) and with the provided size (in bytes).
 //
 // This is safe to be called from multiple goroutines.
-func (stats *Estimator) AddMemPoolTransaction(txHash *chainhash.Hash, fee, size int64, txType stake.TxType) {
+func (stats *Estimator) AddMemPoolTransaction(txHash *chainhash.Hash, fee, size int64) {
 	stats.lock.Lock()
 	defer stats.lock.Unlock()
@@ -775,13 +774,6 @@ func (stats *Estimator) AddMemPoolTransaction(txHash *chainhash.Hash, fee, size
 		return
 	}
 
-	// Ignore tspends for the purposes of fee estimation, since they remain
-	// in the mempool for a long time and have special rules about when
-	// they can be included in blocks.
-	if txType == stake.TxTypeTSpend {
-		return
-	}
-
 	// Note that we use this less exact version instead of fee * 1000 / size
 	// (using ints) because it naturally "downsamples" the fee rates towards the
 	// minimum at values less than 0.001 DCR/KB. This is needed because due to
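
The "less exact version" mentioned above divides before scaling, so integer truncation pulls low fee rates down toward the minimum bucket. The adopted expression itself is outside this hunk; fee / size * 1000 in integer math is how the upstream dcrd code spells it, assumed unchanged here. A worked example:

package example

const (
	fee  int64 = 500 // total fee paid, in atoms
	size int64 = 226 // serialized transaction size, in bytes
)

var (
	exactRate       = fee * 1000 / size // 2212 atoms/KB, the rejected "exact" form
	downsampledRate = fee / size * 1000 // 2000 atoms/KB, truncated toward the minimum
)
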
@@ -828,7 +820,7 @@ func (stats *Estimator) RemoveMemPoolTransaction(txHash *chainhash.Hash) {
 	log.Debugf("Removing tx %s from mempool", txHash)
 
-	stats.removeFromMemPool(int32(stats.bestHeight-desc.addedHeight), desc.fees)
+	stats.removeFromMemPool(stats.bestHeight-desc.addedHeight, desc.fees)
 	delete(stats.memPoolTxs, *txHash)
 }
@@ -836,7 +828,7 @@ func (stats *Estimator) RemoveMemPoolTransaction(txHash *chainhash.Hash) {
 // tracked mempool into a mined state.
 //
 // This function is *not* safe to be called from multiple goroutines.
-func (stats *Estimator) processMinedTransaction(blockHeight int64, txh *chainhash.Hash) {
+func (stats *Estimator) processMinedTransaction(blockHeight int32, txh *chainhash.Hash) {
 	desc, exists := stats.memPoolTxs[*txh]
 	if !exists {
 		// We cannot use transactions that we didn't know about to estimate
@@ -850,7 +842,7 @@ func (stats *Estimator) processMinedTransaction(blockHeight int64, txh *chainhas
 		return
 	}
 
-	stats.removeFromMemPool(int32(blockHeight-desc.addedHeight), desc.fees)
+	stats.removeFromMemPool(blockHeight-desc.addedHeight, desc.fees)
 	delete(stats.memPoolTxs, *txh)
 
 	if blockHeight <= desc.addedHeight {
@@ -863,7 +855,7 @@ func (stats *Estimator) processMinedTransaction(blockHeight int64, txh *chainhas
 		return
 	}
 
-	mineDelay := int32(blockHeight - desc.addedHeight)
+	mineDelay := blockHeight - desc.addedHeight
 	log.Debugf("Processing mined tx %s (rate %.8f, delay %d)", txh,
 		desc.fees/1e8, mineDelay)
 	stats.newMinedTx(mineDelay, desc.fees)
@@ -872,7 +864,7 @@ func (stats *Estimator) processMinedTransaction(blockHeight int64, txh *chainhas
 // ProcessBlock processes all mined transactions in the provided block.
 //
 // This function is safe to be called from multiple goroutines.
-func (stats *Estimator) ProcessBlock(block *dcrutil.Block) error {
+func (stats *Estimator) ProcessBlock(block *lbcutil.Block) error {
 	stats.lock.Lock()
 	defer stats.lock.Unlock()
@@ -895,10 +887,6 @@ func (stats *Estimator) ProcessBlock(block *dcrutil.Block) error {
 		stats.processMinedTransaction(blockHeight, tx.Hash())
 	}
 
-	for _, tx := range block.STransactions() {
-		stats.processMinedTransaction(blockHeight, tx.Hash())
-	}
-
 	if stats.db != nil {
 		return stats.updateDatabase()
 	}
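
End to end, the methods changed above are the write side of the estimator. A hedged sketch of the caller-side hooks lbcd's mempool and block-connection code would need; the hook names are invented, while the estimator methods and signatures are the ones in this file:

package example

import (
	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/fees"
	"github.com/lbryio/lbcutil"
)

// onTxAccepted would be called when a transaction enters the mempool.
func onTxAccepted(e *fees.Estimator, txHash *chainhash.Hash, fee, sizeBytes int64) {
	e.AddMemPoolTransaction(txHash, fee, sizeBytes)
}

// onTxEvicted would be called when a transaction leaves the mempool unmined.
func onTxEvicted(e *fees.Estimator, txHash *chainhash.Hash) {
	e.RemoveMemPoolTransaction(txHash)
}

// onBlockConnected feeds every mined transaction back into the estimator.
func onBlockConnected(e *fees.Estimator, block *lbcutil.Block) error {
	return e.ProcessBlock(block)
}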


@@ -5,17 +5,23 @@
 package fees
 
 import (
-	"github.com/decred/slog"
+	"github.com/btcsuite/btclog"
 )
 
 // log is a logger that is initialized with no output filters. This means the
 // package will not perform any logging by default until the caller requests it.
 // The default amount of logging is none.
-var log = slog.Disabled
+var log btclog.Logger
 
-// UseLogger uses a specified Logger to output fee estimator logging info. This
-// should be used in preference to SetLogWriter if the caller is also using
-// slog.
-func UseLogger(logger slog.Logger) {
+// DisableLog disables all library log output. Logging output is disabled
+// by default until either UseLogger or SetLogWriter are called.
+func DisableLog() {
+	log = btclog.Disabled
+}
+
+// UseLogger uses a specified Logger to output package logging info.
+// This should be used in preference to SetLogWriter if the caller is also
+// using btclog.
+func UseLogger(logger btclog.Logger) {
 	log = logger
 }
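
The rewritten log.go follows the standard btcd/btclog pattern, which is exactly what the fee-database dump tool above and the daemon's log.go below now do. A minimal standalone sketch of the same wiring:

package main

import (
	"os"

	"github.com/btcsuite/btclog"
	"github.com/lbryio/lbcd/fees"
)

func main() {
	backend := btclog.NewBackend(os.Stdout)
	feesLog := backend.Logger("FEES")
	feesLog.SetLevel(btclog.LevelDebug) // surface the estimator's Debugf calls

	fees.UseLogger(feesLog)
	defer fees.DisableLog() // back to the silent default on shutdown
}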

log.go

@@ -16,6 +16,7 @@ import (
 	"github.com/lbryio/lbcd/claimtrie/node"
 	"github.com/lbryio/lbcd/connmgr"
 	"github.com/lbryio/lbcd/database"
+	"github.com/lbryio/lbcd/fees"
 	"github.com/lbryio/lbcd/mempool"
 	"github.com/lbryio/lbcd/mining"
 	"github.com/lbryio/lbcd/mining/cpuminer"
@@ -57,13 +58,14 @@ var (
 	adxrLog = backendLog.Logger("ADXR")
 	amgrLog = backendLog.Logger("AMGR")
-	cmgrLog = backendLog.Logger("CMGR")
 	bcdbLog = backendLog.Logger("BCDB")
 	btcdLog = backendLog.Logger("MAIN")
 	chanLog = backendLog.Logger("CHAN")
-	lbryLog = backendLog.Logger("LBRY")
+	cmgrLog = backendLog.Logger("CMGR")
 	discLog = backendLog.Logger("DISC")
+	feesLog = backendLog.Logger("FEES")
 	indxLog = backendLog.Logger("INDX")
+	lbryLog = backendLog.Logger("LBRY")
 	minrLog = backendLog.Logger("MINR")
 	peerLog = backendLog.Logger("PEER")
 	rpcsLog = backendLog.Logger("RPCS")
@@ -76,30 +78,31 @@ var (
 // Initialize package-global logger variables.
 func init() {
 	addrmgr.UseLogger(amgrLog)
-	connmgr.UseLogger(cmgrLog)
-	database.UseLogger(bcdbLog)
 	blockchain.UseLogger(chanLog)
-	node.UseLogger(lbryLog)
-	indexers.UseLogger(indxLog)
-	mining.UseLogger(minrLog)
+	connmgr.UseLogger(cmgrLog)
 	cpuminer.UseLogger(minrLog)
+	database.UseLogger(bcdbLog)
+	fees.UseLogger(feesLog)
+	indexers.UseLogger(indxLog)
+	mempool.UseLogger(txmpLog)
+	mining.UseLogger(minrLog)
+	netsync.UseLogger(syncLog)
+	node.UseLogger(lbryLog)
 	peer.UseLogger(peerLog)
 	txscript.UseLogger(scrpLog)
-	netsync.UseLogger(syncLog)
-	mempool.UseLogger(txmpLog)
 }
 
 // subsystemLoggers maps each subsystem identifier to its associated logger.
 var subsystemLoggers = map[string]btclog.Logger{
 	"ADXR": adxrLog,
 	"AMGR": amgrLog,
-	"CMGR": cmgrLog,
 	"BCDB": bcdbLog,
-	"MAIN": btcdLog,
 	"CHAN": chanLog,
-	"LBRY": lbryLog,
+	"CMGR": cmgrLog,
 	"DISC": discLog,
 	"INDX": indxLog,
+	"LBRY": lbryLog,
+	"MAIN": btcdLog,
 	"MINR": minrLog,
 	"PEER": peerLog,
 	"RPCS": rpcsLog,