Port estimatesmartfee
from dcrd #18
11 changed files with 1233 additions and 82 deletions
23
fees/README.md
Normal file
23
fees/README.md
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
fees
|
||||||
|
====
|
||||||
|
|
||||||
|
|
||||||
|
[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions)
|
||||||
|
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
|
||||||
|
[![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/internal/fees)
|
||||||
|
|
||||||
|
Package fees provides decred-specific methods for tracking and estimating fee
|
||||||
|
rates for new transactions to be mined into the network. Fee rate estimation has
|
||||||
|
two main goals:
|
||||||
|
|
||||||
|
- Ensuring transactions are mined within a target _confirmation range_
|
||||||
|
(expressed in blocks);
|
||||||
|
- Attempting to minimize fees while maintaining be above restriction.
|
||||||
|
|
||||||
|
This package was started in order to resolve issue decred/dcrd#1412 and related.
|
||||||
|
See that issue for discussion of the selected approach.
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Package dcrutil is licensed under the [copyfree](http://copyfree.org) ISC
|
||||||
|
License.
|
56
fees/cmd/dumpfeedb/dumpfeedb.go
Normal file
56
fees/cmd/dumpfeedb/dumpfeedb.go
Normal file
|
@ -0,0 +1,56 @@
|
||||||
|
// Copyright (c) 2018-2020 The Decred developers
|
||||||
|
// Use of this source code is governed by an ISC
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Tool dumpfeedb can be used to dump the internal state of the buckets of an
|
||||||
|
// estimator's feedb so that it can be externally analyzed.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
|
||||||
|
"github.com/btcsuite/btclog"
|
||||||
|
flags "github.com/jessevdk/go-flags"
|
||||||
|
"github.com/lbryio/lbcd/fees"
|
||||||
|
"github.com/lbryio/lbcutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
type config struct {
|
||||||
|
DB string `short:"b" long:"db" description:"Path to fee database"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var feesLog = btclog.NewBackend(os.Stdout).Logger("FEES")
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cfg := config{
|
||||||
|
DB: path.Join(lbcutil.AppDataDir("lbcd", false), "data", "mainnet", "feesdb"),
|
||||||
|
}
|
||||||
|
|
||||||
|
fees.UseLogger(feesLog)
|
||||||
|
parser := flags.NewParser(&cfg, flags.Default)
|
||||||
|
_, err := parser.Parse()
|
||||||
|
if err != nil {
|
||||||
|
var e *flags.Error
|
||||||
|
if !errors.As(err, &e) || e.Type != flags.ErrHelp {
|
||||||
|
parser.WriteHelp(os.Stderr)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ecfg := fees.EstimatorConfig{
|
||||||
|
DatabaseFile: cfg.DB,
|
||||||
|
ReplaceBucketsOnLoad: true,
|
||||||
|
MinBucketFee: 1,
|
||||||
|
MaxBucketFee: 2,
|
||||||
|
FeeRateStep: fees.DefaultFeeRateStep,
|
||||||
|
}
|
||||||
|
est, err := fees.NewEstimator(&ecfg)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(est.DumpBuckets())
|
||||||
|
}
|
107
fees/doc.go
Normal file
107
fees/doc.go
Normal file
|
@ -0,0 +1,107 @@
|
||||||
|
// Copyright (c) 2018-2020 The Decred developers
|
||||||
|
// Use of this source code is governed by an ISC
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package fees provides decred-specific methods for tracking and estimating fee
|
||||||
|
rates for new transactions to be mined into the network. Fee rate estimation has
|
||||||
|
two main goals:
|
||||||
|
|
||||||
|
- Ensuring transactions are mined within a target _confirmation range_
|
||||||
|
(expressed in blocks);
|
||||||
|
- Attempting to minimize fees while maintaining be above restriction.
|
||||||
|
|
||||||
|
Preliminaries
|
||||||
|
|
||||||
|
There are two main regimes against which fee estimation needs to be evaluated
|
||||||
|
according to how full blocks being mined are (and consequently how important fee
|
||||||
|
rates are): _low contention_ and _high contention_:
|
||||||
|
|
||||||
|
In a low contention regime, the mempool sits mostly empty, transactions are
|
||||||
|
usually mined very soon after being published and transaction fees are mostly
|
||||||
|
sent using the minimum relay fee.
|
||||||
|
|
||||||
|
In a high contention regime, the mempool is usually filled with unmined
|
||||||
|
transactions, there is active dispute for space in a block (by transactions
|
||||||
|
using higher fees) and blocks are usually full.
|
||||||
|
|
||||||
|
The exact point of where these two regimes intersect is arbitrary, but it should
|
||||||
|
be clear in the examples and simulations which of these is being discussed.
|
||||||
|
|
||||||
|
Note: a very high contention scenario (> 90% of blocks being full and
|
||||||
|
transactions remaining in the mempool indefinitely) is one in which stakeholders
|
||||||
|
should be discussing alternative solutions (increase block size, provide other
|
||||||
|
second layer alternatives, etc). Also, the current fill rate of blocks in decred
|
||||||
|
is low, so while we try to account for this regime, I personally expect that the
|
||||||
|
implementation will need more tweaks as it approaches this.
|
||||||
|
|
||||||
|
The current approach to implement this estimation is based on bitcoin core's
|
||||||
|
algorithm. References [1] and [2] provide a high level description of how it
|
||||||
|
works there. Actual code is linked in references [3] and [4].
|
||||||
|
|
||||||
|
Outline of the Algorithm
|
||||||
|
|
||||||
|
The algorithm is currently based in fee estimation as used in v0.14 of bitcoin
|
||||||
|
core (which is also the basis for the v0.15+ method). A more comprehensive
|
||||||
|
overview is available in reference [1].
|
||||||
|
|
||||||
|
This particular version was chosen because it's simpler to implement and should
|
||||||
|
be sufficient for low contention regimes. It probably overestimates fees in
|
||||||
|
higher contention regimes and longer target confirmation windows, but as pointed
|
||||||
|
out earlier should be sufficient for current fill rate of decred's network.
|
||||||
|
|
||||||
|
The basic algorithm is as follows (as executed by a single full node):
|
||||||
|
|
||||||
|
Stats building stage:
|
||||||
|
|
||||||
|
- For each transaction observed entering mempool, record the block at which it
|
||||||
|
was first seen
|
||||||
|
- For each mined transaction which was previously observed to enter the mempool,
|
||||||
|
record how long (in blocks) it took to be mined and its fee rate
|
||||||
|
- Group mined transactions into fee rate _buckets_ and _confirmation ranges_,
|
||||||
|
creating a table of how many transactions were mined at each confirmation
|
||||||
|
range and fee rate bucket and their total committed fee
|
||||||
|
- Whenever a new block is mined, decay older transactions to account for a
|
||||||
|
dynamic fee environment
|
||||||
|
|
||||||
|
Estimation stage:
|
||||||
|
|
||||||
|
- Input a target confirmation range (how many blocks to wait for the tx to be
|
||||||
|
mined)
|
||||||
|
- Starting at the highest fee bucket, look for buckets where the chance of
|
||||||
|
confirmation within the desired confirmation window is > 95%
|
||||||
|
- Average all such buckets to get the estimated fee rate
|
||||||
|
|
||||||
|
Simulation
|
||||||
|
|
||||||
|
Development of the estimator was originally performed and simulated using the
|
||||||
|
code in [5]. Simulation of the current code can be performed by using the
|
||||||
|
dcrfeesim tool available in [6].
|
||||||
|
|
||||||
|
Acknowledgements
|
||||||
|
|
||||||
|
Thanks to @davecgh for providing the initial review of the results and the
|
||||||
|
original developers of the bitcoin core code (the brunt of which seems to have
|
||||||
|
been made by @morcos).
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
[1] Introduction to Bitcoin Core Estimation:
|
||||||
|
https://bitcointechtalk.com/an-introduction-to-bitcoin-core-fee-estimation-27920880ad0
|
||||||
|
|
||||||
|
[2] Proposed Changes to Fee Estimation in version 0.15:
|
||||||
|
https://gist.github.com/morcos/d3637f015bc4e607e1fd10d8351e9f41
|
||||||
|
|
||||||
|
[3] Source for fee estimation in v0.14:
|
||||||
|
https://github.com/bitcoin/bitcoin/blob/v0.14.2/src/policy/fees.cpp
|
||||||
|
|
||||||
|
[4] Source for fee estimation in version 0.16.2:
|
||||||
|
https://github.com/bitcoin/bitcoin/blob/v0.16.2/src/policy/fees.cpp
|
||||||
|
|
||||||
|
[5] Source for the original dcrfeesim and estimator work:
|
||||||
|
https://github.com/matheusd/dcrfeesim_dev
|
||||||
|
|
||||||
|
[6] Source for the current dcrfeesim, using this module:
|
||||||
|
https://github.com/matheusd/dcrfeesim
|
||||||
|
*/
|
||||||
|
package fees
|
908
fees/estimator.go
Normal file
908
fees/estimator.go
Normal file
|
@ -0,0 +1,908 @@
|
||||||
|
// Copyright (c) 2018-2020 The Decred developers
|
||||||
|
// Use of this source code is governed by an ISC
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package fees
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||||
|
"github.com/lbryio/lbcutil"
|
||||||
|
"github.com/syndtr/goleveldb/leveldb"
|
||||||
|
ldbutil "github.com/syndtr/goleveldb/leveldb/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultMaxBucketFeeMultiplier is the default multiplier used to find the
|
||||||
|
// largest fee bucket, starting at the minimum fee.
|
||||||
|
DefaultMaxBucketFeeMultiplier int = 100
|
||||||
|
|
||||||
|
// DefaultMaxConfirmations is the default number of confirmation ranges to
|
||||||
|
// track in the estimator.
|
||||||
|
DefaultMaxConfirmations uint32 = 42
|
||||||
|
|
||||||
|
// DefaultFeeRateStep is the default multiplier between two consecutive fee
|
||||||
|
// rate buckets.
|
||||||
|
DefaultFeeRateStep float64 = 1.05
|
||||||
|
|
||||||
|
// defaultDecay is the default value used to decay old transactions from the
|
||||||
|
// estimator.
|
||||||
|
defaultDecay float64 = 0.998
|
||||||
|
|
||||||
|
// maxAllowedBucketFees is an upper bound of how many bucket fees can be
|
||||||
|
// used in the estimator. This is verified during estimator initialization
|
||||||
|
// and database loading.
|
||||||
|
maxAllowedBucketFees = 2000
|
||||||
|
|
||||||
|
// maxAllowedConfirms is an upper bound of how many confirmation ranges can
|
||||||
|
// be used in the estimator. This is verified during estimator
|
||||||
|
// initialization and database loading.
|
||||||
|
maxAllowedConfirms = 788
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNoSuccessPctBucketFound is the error returned when no bucket has been
|
||||||
|
// found with the minimum required percentage success.
|
||||||
|
ErrNoSuccessPctBucketFound = errors.New("no bucket with the minimum " +
|
||||||
|
"required success percentage found")
|
||||||
|
|
||||||
|
// ErrNotEnoughTxsForEstimate is the error returned when not enough
|
||||||
|
// transactions have been seen by the fee generator to give an estimate.
|
||||||
|
ErrNotEnoughTxsForEstimate = errors.New("not enough transactions seen for " +
|
||||||
|
"estimation")
|
||||||
|
|
||||||
|
dbByteOrder = binary.BigEndian
|
||||||
|
|
||||||
|
dbKeyVersion = []byte("version")
|
||||||
|
dbKeyBucketFees = []byte("bucketFeeBounds")
|
||||||
|
dbKeyMaxConfirms = []byte("maxConfirms")
|
||||||
|
dbKeyBestHeight = []byte("bestHeight")
|
||||||
|
dbKeyBucketPrefix = []byte{0x01, 0x70, 0x1d, 0x00}
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrTargetConfTooLarge is the type of error returned when an user of the
|
||||||
|
// estimator requested a confirmation range higher than tracked by the estimator.
|
||||||
|
type ErrTargetConfTooLarge struct {
|
||||||
|
MaxConfirms int32
|
||||||
|
ReqConfirms int32
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e ErrTargetConfTooLarge) Error() string {
|
||||||
|
return fmt.Sprintf("target confirmation requested (%d) higher than "+
|
||||||
|
"maximum confirmation range tracked by estimator (%d)", e.ReqConfirms,
|
||||||
|
e.MaxConfirms)
|
||||||
|
}
|
||||||
|
|
||||||
|
type feeRate float64
|
||||||
|
|
||||||
|
type txConfirmStatBucketCount struct {
|
||||||
|
txCount float64
|
||||||
|
feeSum float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type txConfirmStatBucket struct {
|
||||||
|
confirmed []txConfirmStatBucketCount
|
||||||
|
confirmCount float64
|
||||||
|
feeSum float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// EstimatorConfig stores the configuration parameters for a given fee
|
||||||
|
// estimator. It is used to initialize an empty fee estimator.
|
||||||
|
type EstimatorConfig struct {
|
||||||
|
// MaxConfirms is the maximum number of confirmation ranges to check.
|
||||||
|
MaxConfirms uint32
|
||||||
|
|
||||||
|
// MinBucketFee is the value of the fee rate of the lowest bucket for which
|
||||||
|
// estimation is tracked.
|
||||||
|
MinBucketFee lbcutil.Amount
|
||||||
|
|
||||||
|
// MaxBucketFee is the value of the fee for the highest bucket for which
|
||||||
|
// estimation is tracked.
|
||||||
|
//
|
||||||
|
// It MUST be higher than MinBucketFee.
|
||||||
|
MaxBucketFee lbcutil.Amount
|
||||||
|
|
||||||
|
// ExtraBucketFee is an additional bucket fee rate to include in the
|
||||||
|
// database for tracking transactions. Specifying this can be useful when
|
||||||
|
// the default relay fee of the network is undergoing change (due to a new
|
||||||
|
// release of the software for example), so that the older fee can be
|
||||||
|
// tracked exactly.
|
||||||
|
//
|
||||||
|
// It MUST have a value between MinBucketFee and MaxBucketFee, otherwise
|
||||||
|
// it's ignored.
|
||||||
|
ExtraBucketFee lbcutil.Amount
|
||||||
|
|
||||||
|
// FeeRateStep is the multiplier to generate the fee rate buckets (each
|
||||||
|
// bucket is higher than the previous one by this factor).
|
||||||
|
//
|
||||||
|
// It MUST have a value > 1.0.
|
||||||
|
FeeRateStep float64
|
||||||
|
|
||||||
|
// DatabaseFile is the location of the estimator database file. If empty,
|
||||||
|
// updates to the estimator state are not backed by the filesystem.
|
||||||
|
DatabaseFile string
|
||||||
|
|
||||||
|
// ReplaceBucketsOnLoad indicates whether to replace the buckets in the
|
||||||
|
// current estimator by those stored in the feesdb file instead of
|
||||||
|
// validating that they are both using the same set of fees.
|
||||||
|
ReplaceBucketsOnLoad bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// memPoolTxDesc is an aux structure used to track the local estimator mempool.
|
||||||
|
type memPoolTxDesc struct {
|
||||||
|
addedHeight int32
|
||||||
|
bucketIndex int32
|
||||||
|
fees feeRate
|
||||||
|
}
|
||||||
|
|
||||||
|
// Estimator tracks historical data for published and mined transactions in
|
||||||
|
// order to estimate fees to be used in new transactions for confirmation
|
||||||
|
// within a target block window.
|
||||||
|
type Estimator struct {
|
||||||
|
// bucketFeeBounds are the upper bounds for each individual fee bucket.
|
||||||
|
bucketFeeBounds []feeRate
|
||||||
|
|
||||||
|
// buckets are the confirmed tx count and fee sum by bucket fee.
|
||||||
|
buckets []txConfirmStatBucket
|
||||||
|
|
||||||
|
// memPool are the mempool transaction count and fee sum by bucket fee.
|
||||||
|
memPool []txConfirmStatBucket
|
||||||
|
|
||||||
|
// memPoolTxs is the map of transaction hashes and data of known mempool txs.
|
||||||
|
memPoolTxs map[chainhash.Hash]memPoolTxDesc
|
||||||
|
|
||||||
|
maxConfirms int32
|
||||||
|
decay float64
|
||||||
|
bestHeight int32
|
||||||
|
db *leveldb.DB
|
||||||
|
lock sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEstimator returns an empty estimator given a config. This estimator
|
||||||
|
// then needs to be fed data for published and mined transactions before it can
|
||||||
|
// be used to estimate fees for new transactions.
|
||||||
|
func NewEstimator(cfg *EstimatorConfig) (*Estimator, error) {
|
||||||
|
// Sanity check the config.
|
||||||
|
if cfg.MaxBucketFee <= cfg.MinBucketFee {
|
||||||
|
return nil, errors.New("maximum bucket fee should not be lower than " +
|
||||||
|
"minimum bucket fee")
|
||||||
|
}
|
||||||
|
if cfg.FeeRateStep <= 1.0 {
|
||||||
|
return nil, errors.New("fee rate step should not be <= 1.0")
|
||||||
|
}
|
||||||
|
if cfg.MinBucketFee <= 0 {
|
||||||
|
return nil, errors.New("minimum bucket fee rate cannot be <= 0")
|
||||||
|
}
|
||||||
|
if cfg.MaxConfirms > maxAllowedConfirms {
|
||||||
|
return nil, fmt.Errorf("confirmation count requested (%d) larger than "+
|
||||||
|
"maximum allowed (%d)", cfg.MaxConfirms, maxAllowedConfirms)
|
||||||
|
}
|
||||||
|
|
||||||
|
decay := defaultDecay
|
||||||
|
maxConfirms := cfg.MaxConfirms
|
||||||
|
max := float64(cfg.MaxBucketFee)
|
||||||
|
var bucketFees []feeRate
|
||||||
|
prevF := 0.0
|
||||||
|
extraBucketFee := float64(cfg.ExtraBucketFee)
|
||||||
|
for f := float64(cfg.MinBucketFee); f < max; f *= cfg.FeeRateStep {
|
||||||
|
if (f > extraBucketFee) && (prevF < extraBucketFee) {
|
||||||
|
// Add the extra bucket fee for tracking.
|
||||||
|
bucketFees = append(bucketFees, feeRate(extraBucketFee))
|
||||||
|
}
|
||||||
|
bucketFees = append(bucketFees, feeRate(f))
|
||||||
|
prevF = f
|
||||||
|
}
|
||||||
|
|
||||||
|
// The last bucket catches everything else, so it uses an upper bound of
|
||||||
|
// +inf which any rate must be lower than.
|
||||||
|
bucketFees = append(bucketFees, feeRate(math.Inf(1)))
|
||||||
|
|
||||||
|
nbBuckets := len(bucketFees)
|
||||||
|
res := &Estimator{
|
||||||
|
bucketFeeBounds: bucketFees,
|
||||||
|
buckets: make([]txConfirmStatBucket, nbBuckets),
|
||||||
|
memPool: make([]txConfirmStatBucket, nbBuckets),
|
||||||
|
maxConfirms: int32(maxConfirms),
|
||||||
|
decay: decay,
|
||||||
|
memPoolTxs: make(map[chainhash.Hash]memPoolTxDesc),
|
||||||
|
bestHeight: -1,
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range bucketFees {
|
||||||
|
res.buckets[i] = txConfirmStatBucket{
|
||||||
|
confirmed: make([]txConfirmStatBucketCount, maxConfirms),
|
||||||
|
}
|
||||||
|
res.memPool[i] = txConfirmStatBucket{
|
||||||
|
confirmed: make([]txConfirmStatBucketCount, maxConfirms),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.DatabaseFile != "" {
|
||||||
|
db, err := leveldb.OpenFile(cfg.DatabaseFile, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error opening estimator database: %v", err)
|
||||||
|
}
|
||||||
|
res.db = db
|
||||||
|
|
||||||
|
err = res.loadFromDatabase(cfg.ReplaceBucketsOnLoad)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error loading estimator data from db: %v",
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DumpBuckets returns the internal estimator state as a string.
|
||||||
|
func (stats *Estimator) DumpBuckets() string {
|
||||||
|
res := " |"
|
||||||
|
for c := 0; c < int(stats.maxConfirms); c++ {
|
||||||
|
if c == int(stats.maxConfirms)-1 {
|
||||||
|
res += fmt.Sprintf(" %15s", "+Inf")
|
||||||
|
} else {
|
||||||
|
res += fmt.Sprintf(" %15d|", c+1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res += "\n"
|
||||||
|
|
||||||
|
l := len(stats.bucketFeeBounds)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
res += fmt.Sprintf("%10.8f", stats.bucketFeeBounds[i]/1e8)
|
||||||
|
for c := 0; c < int(stats.maxConfirms); c++ {
|
||||||
|
avg := float64(0)
|
||||||
|
count := stats.buckets[i].confirmed[c].txCount
|
||||||
|
if stats.buckets[i].confirmed[c].txCount > 0 {
|
||||||
|
avg = stats.buckets[i].confirmed[c].feeSum /
|
||||||
|
stats.buckets[i].confirmed[c].txCount / 1e8
|
||||||
|
}
|
||||||
|
|
||||||
|
res += fmt.Sprintf("| %.8f %6.1f", avg, count)
|
||||||
|
}
|
||||||
|
res += "\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadFromDatabase loads the estimator data from the currently opened database
|
||||||
|
// and performs any db upgrades if required. After loading, it updates the db
|
||||||
|
// with the current estimator configuration.
|
||||||
|
//
|
||||||
|
// Argument replaceBuckets indicates if the buckets in the current stats should
|
||||||
|
// be completely replaced by what is stored in the database or if the data
|
||||||
|
// should be validated against what is current in the estimator.
|
||||||
|
//
|
||||||
|
// The database should *not* be used while loading is taking place.
|
||||||
|
//
|
||||||
|
// The current code does not support loading from a database created with a
|
||||||
|
// different set of configuration parameters (fee rate buckets, max confirmation
|
||||||
|
// range, etc) than the current estimator is configured with. If an incompatible
|
||||||
|
// file is detected during loading, an error is returned and the user must
|
||||||
|
// either reconfigure the estimator to use the same parameters to allow the
|
||||||
|
// database to be loaded or they must ignore the database file (possibly by
|
||||||
|
// deleting it) so that the new parameters are used. In the future it might be
|
||||||
|
// possible to load from a different set of configuration parameters.
|
||||||
|
//
|
||||||
|
// The current code does not currently save mempool information, since saving
|
||||||
|
// information in the estimator without saving the corresponding data in the
|
||||||
|
// mempool itself could result in transactions lingering in the mempool
|
||||||
|
// estimator forever.
|
||||||
|
func (stats *Estimator) loadFromDatabase(replaceBuckets bool) error {
|
||||||
|
if stats.db == nil {
|
||||||
|
return errors.New("estimator database is not open")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Database version is currently hardcoded here as this is the only
|
||||||
|
// place that uses it.
|
||||||
|
currentDbVersion := []byte{1}
|
||||||
|
|
||||||
|
version, err := stats.db.Get(dbKeyVersion, nil)
|
||||||
|
if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
|
||||||
|
return fmt.Errorf("error reading version from db: %v", err)
|
||||||
|
}
|
||||||
|
if len(version) < 1 {
|
||||||
|
// No data in the file. Fill with the current config.
|
||||||
|
batch := new(leveldb.Batch)
|
||||||
|
b := bytes.NewBuffer(nil)
|
||||||
|
var maxConfirmsBytes [4]byte
|
||||||
|
var bestHeightBytes [8]byte
|
||||||
|
|
||||||
|
batch.Put(dbKeyVersion, currentDbVersion)
|
||||||
|
|
||||||
|
dbByteOrder.PutUint32(maxConfirmsBytes[:], uint32(stats.maxConfirms))
|
||||||
|
batch.Put(dbKeyMaxConfirms, maxConfirmsBytes[:])
|
||||||
|
|
||||||
|
dbByteOrder.PutUint64(bestHeightBytes[:], uint64(stats.bestHeight))
|
||||||
|
batch.Put(dbKeyBestHeight, bestHeightBytes[:])
|
||||||
|
|
||||||
|
err = binary.Write(b, dbByteOrder, stats.bucketFeeBounds)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error writing bucket fees to db: %v", err)
|
||||||
|
}
|
||||||
|
batch.Put(dbKeyBucketFees, b.Bytes())
|
||||||
|
|
||||||
|
err = stats.db.Write(batch, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error writing initial estimator db file: %v",
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = stats.updateDatabase()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error adding initial estimator data to db: %v",
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("Initialized fee estimator database")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(currentDbVersion, version) {
|
||||||
|
return fmt.Errorf("incompatible database version: %d", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
maxConfirmsBytes, err := stats.db.Get(dbKeyMaxConfirms, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading max confirmation range from db file: "+
|
||||||
|
"%v", err)
|
||||||
|
}
|
||||||
|
if len(maxConfirmsBytes) != 4 {
|
||||||
|
return errors.New("wrong number of bytes in stored maxConfirms")
|
||||||
|
}
|
||||||
|
fileMaxConfirms := int32(dbByteOrder.Uint32(maxConfirmsBytes))
|
||||||
|
if fileMaxConfirms > maxAllowedConfirms {
|
||||||
|
return fmt.Errorf("confirmation count stored in database (%d) larger "+
|
||||||
|
"than maximum allowed (%d)", fileMaxConfirms, maxAllowedConfirms)
|
||||||
|
}
|
||||||
|
|
||||||
|
feesBytes, err := stats.db.Get(dbKeyBucketFees, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error reading fee bounds from db file: %v", err)
|
||||||
|
}
|
||||||
|
if feesBytes == nil {
|
||||||
|
return errors.New("fee bounds not found in database file")
|
||||||
|
}
|
||||||
|
fileNbBucketFees := len(feesBytes) / 8
|
||||||
|
if fileNbBucketFees > maxAllowedBucketFees {
|
||||||
|
return fmt.Errorf("more fee buckets stored in file (%d) than allowed "+
|
||||||
|
"(%d)", fileNbBucketFees, maxAllowedBucketFees)
|
||||||
|
}
|
||||||
|
fileBucketFees := make([]feeRate, fileNbBucketFees)
|
||||||
|
err = binary.Read(bytes.NewReader(feesBytes), dbByteOrder,
|
||||||
|
&fileBucketFees)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error decoding file bucket fees: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !replaceBuckets {
|
||||||
|
if stats.maxConfirms != fileMaxConfirms {
|
||||||
|
return errors.New("max confirmation range in database file different " +
|
||||||
|
"than currently configured max confirmation")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(stats.bucketFeeBounds) != len(fileBucketFees) {
|
||||||
|
return errors.New("number of bucket fees stored in database file " +
|
||||||
|
"different than currently configured bucket fees")
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, f := range fileBucketFees {
|
||||||
|
if stats.bucketFeeBounds[i] != f {
|
||||||
|
return errors.New("bucket fee rates stored in database file " +
|
||||||
|
"different than currently configured fees")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fileBuckets := make([]txConfirmStatBucket, fileNbBucketFees)
|
||||||
|
|
||||||
|
iter := stats.db.NewIterator(ldbutil.BytesPrefix(dbKeyBucketPrefix), nil)
|
||||||
|
err = nil
|
||||||
|
var fbytes [8]byte
|
||||||
|
for iter.Next() {
|
||||||
|
key := iter.Key()
|
||||||
|
if len(key) != 8 {
|
||||||
|
err = fmt.Errorf("bucket key read from db has wrong length (%d)",
|
||||||
|
len(key))
|
||||||
|
break
|
||||||
|
}
|
||||||
|
idx := int(int32(dbByteOrder.Uint32(key[4:])))
|
||||||
|
if (idx >= len(fileBuckets)) || (idx < 0) {
|
||||||
|
err = fmt.Errorf("wrong bucket index read from db (%d vs %d)",
|
||||||
|
idx, len(fileBuckets))
|
||||||
|
break
|
||||||
|
}
|
||||||
|
value := iter.Value()
|
||||||
|
if len(value) != 8+8+int(fileMaxConfirms)*16 {
|
||||||
|
err = errors.New("wrong size of data in bucket read from db")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
b := bytes.NewBuffer(value)
|
||||||
|
readf := func() float64 {
|
||||||
|
// We ignore the error here because the only possible one is EOF and
|
||||||
|
// we already previously checked the length of the source byte array
|
||||||
|
// for consistency.
|
||||||
|
b.Read(fbytes[:])
|
||||||
|
return math.Float64frombits(dbByteOrder.Uint64(fbytes[:]))
|
||||||
|
}
|
||||||
|
|
||||||
|
fileBuckets[idx].confirmCount = readf()
|
||||||
|
fileBuckets[idx].feeSum = readf()
|
||||||
|
fileBuckets[idx].confirmed = make([]txConfirmStatBucketCount, fileMaxConfirms)
|
||||||
|
for i := range fileBuckets[idx].confirmed {
|
||||||
|
fileBuckets[idx].confirmed[i].txCount = readf()
|
||||||
|
fileBuckets[idx].confirmed[i].feeSum = readf()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iter.Release()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = iter.Error()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error on bucket iterator: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.bucketFeeBounds = fileBucketFees
|
||||||
|
stats.buckets = fileBuckets
|
||||||
|
stats.maxConfirms = fileMaxConfirms
|
||||||
|
log.Debug("Loaded fee estimator database")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateDatabase updates the current database file with the current bucket
|
||||||
|
// data. This is called during normal operation after processing mined
|
||||||
|
// transactions, so it only updates data that might have changed.
|
||||||
|
func (stats *Estimator) updateDatabase() error {
|
||||||
|
if stats.db == nil {
|
||||||
|
return errors.New("estimator database is closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
batch := new(leveldb.Batch)
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
|
||||||
|
var key [8]byte
|
||||||
|
copy(key[:], dbKeyBucketPrefix)
|
||||||
|
var fbytes [8]byte
|
||||||
|
writef := func(f float64) {
|
||||||
|
dbByteOrder.PutUint64(fbytes[:], math.Float64bits(f))
|
||||||
|
_, err := buf.Write(fbytes[:])
|
||||||
|
if err != nil {
|
||||||
|
panic(err) // only possible error is ErrTooLarge
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, b := range stats.buckets {
|
||||||
|
dbByteOrder.PutUint32(key[4:], uint32(i))
|
||||||
|
buf.Reset()
|
||||||
|
writef(b.confirmCount)
|
||||||
|
writef(b.feeSum)
|
||||||
|
for _, c := range b.confirmed {
|
||||||
|
writef(c.txCount)
|
||||||
|
writef(c.feeSum)
|
||||||
|
}
|
||||||
|
batch.Put(key[:], buf.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
var bestHeightBytes [8]byte
|
||||||
|
|
||||||
|
dbByteOrder.PutUint64(bestHeightBytes[:], uint64(stats.bestHeight))
|
||||||
|
batch.Put(dbKeyBestHeight, bestHeightBytes[:])
|
||||||
|
|
||||||
|
err := stats.db.Write(batch, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error writing update to estimator db file: %v",
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// lowerBucket returns the bucket that has the highest upperBound such that it
|
||||||
|
// is still lower than rate.
|
||||||
|
func (stats *Estimator) lowerBucket(rate feeRate) int32 {
|
||||||
|
res := sort.Search(len(stats.bucketFeeBounds), func(i int) bool {
|
||||||
|
return stats.bucketFeeBounds[i] >= rate
|
||||||
|
})
|
||||||
|
return int32(res)
|
||||||
|
}
|
||||||
|
|
||||||
|
// confirmRange returns the confirmation range index to be used for the given
|
||||||
|
// number of blocks to confirm. The last confirmation range has an upper bound
|
||||||
|
// of +inf to mean that it represents all confirmations higher than the second
|
||||||
|
// to last bucket.
|
||||||
|
func (stats *Estimator) confirmRange(blocksToConfirm int32) int32 {
|
||||||
|
idx := blocksToConfirm - 1
|
||||||
|
if idx >= stats.maxConfirms {
|
||||||
|
return stats.maxConfirms - 1
|
||||||
|
}
|
||||||
|
return idx
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateMovingAverages updates the moving averages for the existing confirmed
|
||||||
|
// statistics and increases the confirmation ranges for mempool txs. This is
|
||||||
|
// meant to be called when a new block is mined, so that we discount older
|
||||||
|
// information.
|
||||||
|
func (stats *Estimator) updateMovingAverages(newHeight int32) {
|
||||||
|
log.Debugf("Updated moving averages into block %d", newHeight)
|
||||||
|
|
||||||
|
// decay the existing stats so that, over time, we rely on more up to date
|
||||||
|
// information regarding fees.
|
||||||
|
for b := 0; b < len(stats.buckets); b++ {
|
||||||
|
bucket := &stats.buckets[b]
|
||||||
|
bucket.feeSum *= stats.decay
|
||||||
|
bucket.confirmCount *= stats.decay
|
||||||
|
for c := 0; c < len(bucket.confirmed); c++ {
|
||||||
|
conf := &bucket.confirmed[c]
|
||||||
|
conf.feeSum *= stats.decay
|
||||||
|
conf.txCount *= stats.decay
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// For unconfirmed (mempool) transactions, every transaction will now take
|
||||||
|
// at least one additional block to confirm. So for every fee bucket, we
|
||||||
|
// move the stats up one confirmation range.
|
||||||
|
for b := 0; b < len(stats.memPool); b++ {
|
||||||
|
bucket := &stats.memPool[b]
|
||||||
|
|
||||||
|
// The last confirmation range represents all txs confirmed at >= than
|
||||||
|
// the initial maxConfirms, so we *add* the second to last range into
|
||||||
|
// the last range.
|
||||||
|
c := len(bucket.confirmed) - 1
|
||||||
|
bucket.confirmed[c].txCount += bucket.confirmed[c-1].txCount
|
||||||
|
bucket.confirmed[c].feeSum += bucket.confirmed[c-1].feeSum
|
||||||
|
|
||||||
|
// For the other ranges, just move up the stats.
|
||||||
|
for c--; c > 0; c-- {
|
||||||
|
bucket.confirmed[c] = bucket.confirmed[c-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// and finally, the very first confirmation range (ie, what will enter
|
||||||
|
// the mempool now that a new block has been mined) is zeroed so we can
|
||||||
|
// start tracking brand new txs.
|
||||||
|
bucket.confirmed[0].txCount = 0
|
||||||
|
bucket.confirmed[0].feeSum = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.bestHeight = newHeight
|
||||||
|
}
|
||||||
|
|
||||||
|
// newMemPoolTx records a new memPool transaction into the stats. A brand new
|
||||||
|
// mempool transaction has a minimum confirmation range of 1, so it is inserted
|
||||||
|
// into the very first confirmation range bucket of the appropriate fee rate
|
||||||
|
// bucket.
|
||||||
|
func (stats *Estimator) newMemPoolTx(bucketIdx int32, fees feeRate) {
|
||||||
|
conf := &stats.memPool[bucketIdx].confirmed[0]
|
||||||
|
conf.feeSum += float64(fees)
|
||||||
|
conf.txCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
// newMinedTx moves a mined tx from the mempool into the confirmed statistics.
|
||||||
|
// Note that this should only be called if the transaction had been seen and
|
||||||
|
// previously tracked by calling newMemPoolTx for it. Failing to observe that
|
||||||
|
// will result in undefined statistical results.
|
||||||
|
func (stats *Estimator) newMinedTx(blocksToConfirm int32, rate feeRate) {
|
||||||
|
bucketIdx := stats.lowerBucket(rate)
|
||||||
|
confirmIdx := stats.confirmRange(blocksToConfirm)
|
||||||
|
bucket := &stats.buckets[bucketIdx]
|
||||||
|
|
||||||
|
// increase the counts for all confirmation ranges starting at the first
|
||||||
|
// confirmIdx because it took at least `blocksToConfirm` for this tx to be
|
||||||
|
// mined. This is used to simplify the bucket selection during estimation,
|
||||||
|
// so that we only need to check a single confirmation range (instead of
|
||||||
|
// iterating to sum all confirmations with <= `minConfs`).
|
||||||
|
for c := int(confirmIdx); c < len(bucket.confirmed); c++ {
|
||||||
|
conf := &bucket.confirmed[c]
|
||||||
|
conf.feeSum += float64(rate)
|
||||||
|
conf.txCount++
|
||||||
|
}
|
||||||
|
bucket.confirmCount++
|
||||||
|
bucket.feeSum += float64(rate)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stats *Estimator) removeFromMemPool(blocksInMemPool int32, rate feeRate) {
|
||||||
|
bucketIdx := stats.lowerBucket(rate)
|
||||||
|
confirmIdx := stats.confirmRange(blocksInMemPool + 1)
|
||||||
|
bucket := &stats.memPool[bucketIdx]
|
||||||
|
conf := &bucket.confirmed[confirmIdx]
|
||||||
|
conf.feeSum -= float64(rate)
|
||||||
|
conf.txCount--
|
||||||
|
if conf.txCount < 0 {
|
||||||
|
// If this happens, it means a transaction has been called on this
|
||||||
|
// function but not on a previous newMemPoolTx. This leaves the fee db
|
||||||
|
// in an undefined state and should never happen in regular use. If this
|
||||||
|
// happens, then there is a logic or coding error somewhere, either in
|
||||||
|
// the estimator itself or on its hooking to the mempool/network sync
|
||||||
|
// manager. Either way, the easiest way to fix this is to completely
|
||||||
|
// delete the database and start again. During development, you can use
|
||||||
|
// a panic() here and we might return it after being confident that the
|
||||||
|
// estimator is completely bug free.
|
||||||
|
log.Errorf("Transaction count in bucket index %d and confirmation "+
|
||||||
|
"index %d became < 0", bucketIdx, confirmIdx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// estimateMedianFee estimates the median fee rate for the current recorded
|
||||||
|
// statistics such that at least successPct transactions have been mined on all
|
||||||
|
// tracked fee rate buckets with fee >= to the median.
|
||||||
|
// In other words, this is the median fee of the lowest bucket such that it and
|
||||||
|
// all higher fee buckets have >= successPct transactions confirmed in at most
|
||||||
|
// `targetConfs` confirmations.
|
||||||
|
// Note that sometimes the requested combination of targetConfs and successPct is
|
||||||
|
// not achievable (hypothetical example: 99% of txs confirmed within 1 block)
|
||||||
|
// or there are not enough recorded statistics to derive a successful estimate
|
||||||
|
// (eg: confirmation tracking has only started or there was a period of very few
|
||||||
|
// transactions). In those situations, the appropriate error is returned.
|
||||||
|
func (stats *Estimator) estimateMedianFee(targetConfs int32, successPct float64) (feeRate, error) {
|
||||||
|
if targetConfs <= 0 {
|
||||||
|
return 0, errors.New("target confirmation range cannot be <= 0")
|
||||||
|
}
|
||||||
|
|
||||||
|
const minTxCount float64 = 1
|
||||||
|
|
||||||
|
if (targetConfs - 1) >= stats.maxConfirms {
|
||||||
|
// We might want to add support to use a targetConf at +infinity to
|
||||||
|
// allow us to make estimates at confirmation interval higher than what
|
||||||
|
// we currently track.
|
||||||
|
return 0, ErrTargetConfTooLarge{MaxConfirms: stats.maxConfirms,
|
||||||
|
ReqConfirms: targetConfs}
|
||||||
|
}
|
||||||
|
|
||||||
|
startIdx := len(stats.buckets) - 1
|
||||||
|
confirmRangeIdx := stats.confirmRange(targetConfs)
|
||||||
|
|
||||||
|
var totalTxs, confirmedTxs float64
|
||||||
|
bestBucketsStt := startIdx
|
||||||
|
bestBucketsEnd := startIdx
|
||||||
|
curBucketsEnd := startIdx
|
||||||
|
|
||||||
|
for b := startIdx; b >= 0; b-- {
|
||||||
|
totalTxs += stats.buckets[b].confirmCount
|
||||||
|
confirmedTxs += stats.buckets[b].confirmed[confirmRangeIdx].txCount
|
||||||
|
|
||||||
|
// Add the mempool (unconfirmed) transactions to the total tx count
|
||||||
|
// since a very large mempool for the given bucket might mean that
|
||||||
|
// miners are reluctant to include these in their mined blocks.
|
||||||
|
totalTxs += stats.memPool[b].confirmed[confirmRangeIdx].txCount
|
||||||
|
|
||||||
|
if totalTxs > minTxCount {
|
||||||
|
if confirmedTxs/totalTxs < successPct {
|
||||||
|
if curBucketsEnd == startIdx {
|
||||||
|
return 0, ErrNoSuccessPctBucketFound
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
bestBucketsStt = b
|
||||||
|
bestBucketsEnd = curBucketsEnd
|
||||||
|
curBucketsEnd = b - 1
|
||||||
|
totalTxs = 0
|
||||||
|
confirmedTxs = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
txCount := float64(0)
|
||||||
|
for b := bestBucketsStt; b <= bestBucketsEnd; b++ {
|
||||||
|
txCount += stats.buckets[b].confirmCount
|
||||||
|
}
|
||||||
|
if txCount <= 0 {
|
||||||
|
return 0, ErrNotEnoughTxsForEstimate
|
||||||
|
}
|
||||||
|
txCount /= 2
|
||||||
|
for b := bestBucketsStt; b <= bestBucketsEnd; b++ {
|
||||||
|
if stats.buckets[b].confirmCount < txCount {
|
||||||
|
txCount -= stats.buckets[b].confirmCount
|
||||||
|
} else {
|
||||||
|
median := stats.buckets[b].feeSum / stats.buckets[b].confirmCount
|
||||||
|
return feeRate(median), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, errors.New("this isn't supposed to be reached")
|
||||||
|
}
|
||||||
|
|
||||||
|
// EstimateFee is the public version of estimateMedianFee. It calculates the
|
||||||
|
// suggested fee for a transaction to be confirmed in at most `targetConf`
|
||||||
|
// blocks after publishing with a high degree of certainty.
|
||||||
|
//
|
||||||
|
// This function is safe to be called from multiple goroutines but might block
|
||||||
|
// until concurrent modifications to the internal database state are complete.
|
||||||
|
func (stats *Estimator) EstimateFee(targetConfs int32) (lbcutil.Amount, error) {
|
||||||
|
stats.lock.RLock()
|
||||||
|
rate, err := stats.estimateMedianFee(targetConfs, 0.95)
|
||||||
|
stats.lock.RUnlock()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
rate = feeRate(math.Round(float64(rate)))
|
||||||
|
if rate < stats.bucketFeeBounds[0] {
|
||||||
|
// Prevent our public facing api to ever return something lower than the
|
||||||
|
// minimum fee
|
||||||
|
rate = stats.bucketFeeBounds[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
return lbcutil.Amount(rate), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enable establishes the current best height of the blockchain after
|
||||||
|
// initializing the chain. All new mempool transactions will be added at this
|
||||||
|
// block height.
|
||||||
|
func (stats *Estimator) Enable(bestHeight int32) {
|
||||||
|
log.Debugf("Setting best height as %d", bestHeight)
|
||||||
|
stats.lock.Lock()
|
||||||
|
stats.bestHeight = bestHeight
|
||||||
|
stats.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEnabled returns whether the fee estimator is ready to accept new mined and
|
||||||
|
// mempool transactions.
|
||||||
|
func (stats *Estimator) IsEnabled() bool {
|
||||||
|
stats.lock.RLock()
|
||||||
|
enabled := stats.bestHeight > -1
|
||||||
|
stats.lock.RUnlock()
|
||||||
|
return enabled
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMemPoolTransaction adds a mempool transaction to the estimator in order to
|
||||||
|
// account for it in the estimations. It assumes that this transaction is
|
||||||
|
// entering the mempool at the currently recorded best chain hash, using the
|
||||||
|
// total fee amount (in atoms) and with the provided size (in bytes).
|
||||||
|
//
|
||||||
|
// This is safe to be called from multiple goroutines.
|
||||||
|
func (stats *Estimator) AddMemPoolTransaction(txHash *chainhash.Hash, fee, size int64) {
|
||||||
|
stats.lock.Lock()
|
||||||
|
defer stats.lock.Unlock()
|
||||||
|
|
||||||
|
if stats.bestHeight < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, exists := stats.memPoolTxs[*txHash]; exists {
|
||||||
|
// we should not double count transactions
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that we use this less exact version instead of fee * 1000 / size
|
||||||
|
// (using ints) because it naturally "downsamples" the fee rates towards the
|
||||||
|
// minimum at values less than 0.001 DCR/KB. This is needed because due to
|
||||||
|
// how the wallet estimates the final fee given an input rate and the final
|
||||||
|
// tx size, there's usually a small discrepancy towards a higher effective
|
||||||
|
// rate in the published tx.
|
||||||
|
rate := feeRate(fee / size * 1000)
|
||||||
|
|
||||||
|
if rate < stats.bucketFeeBounds[0] {
|
||||||
|
// Transactions paying less than the current relaying fee can only
|
||||||
|
// possibly be included in the high priority/zero fee area of blocks,
|
||||||
|
// which are usually of limited size, so we explicitly don't track
|
||||||
|
// those.
|
||||||
|
// This also naturally handles votes (SSGen transactions) which don't
|
||||||
|
// carry a tx fee and are required for inclusion in blocks. Note that
|
||||||
|
// the test is explicitly < instead of <= so that we *can* track
|
||||||
|
// transactions that pay *exactly* the minimum fee.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Adding mempool tx %s using fee rate %.8f", txHash, rate/1e8)
|
||||||
|
|
||||||
|
tx := memPoolTxDesc{
|
||||||
|
addedHeight: stats.bestHeight,
|
||||||
|
bucketIndex: stats.lowerBucket(rate),
|
||||||
|
fees: rate,
|
||||||
|
}
|
||||||
|
stats.memPoolTxs[*txHash] = tx
|
||||||
|
stats.newMemPoolTx(tx.bucketIndex, rate)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveMemPoolTransaction removes a mempool transaction from statistics
|
||||||
|
// tracking.
|
||||||
|
//
|
||||||
|
// This is safe to be called from multiple goroutines.
|
||||||
|
func (stats *Estimator) RemoveMemPoolTransaction(txHash *chainhash.Hash) {
|
||||||
|
stats.lock.Lock()
|
||||||
|
defer stats.lock.Unlock()
|
||||||
|
|
||||||
|
desc, exists := stats.memPoolTxs[*txHash]
|
||||||
|
if !exists {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("Removing tx %s from mempool", txHash)
|
||||||
|
|
||||||
|
stats.removeFromMemPool(stats.bestHeight-desc.addedHeight, desc.fees)
|
||||||
|
delete(stats.memPoolTxs, *txHash)
|
||||||
|
}
|
||||||
|
|
||||||
|
// processMinedTransaction moves the transaction that exist in the currently
|
||||||
|
// tracked mempool into a mined state.
|
||||||
|
//
|
||||||
|
// This function is *not* safe to be called from multiple goroutines.
|
||||||
|
func (stats *Estimator) processMinedTransaction(blockHeight int32, txh *chainhash.Hash) {
|
||||||
|
desc, exists := stats.memPoolTxs[*txh]
|
||||||
|
if !exists {
|
||||||
|
// We cannot use transactions that we didn't know about to estimate
|
||||||
|
// because that opens up the possibility of miners introducing dummy,
|
||||||
|
// high fee transactions which would tend to then increase the average
|
||||||
|
// fee estimate.
|
||||||
|
// Tracking only previously known transactions forces miners trying to
|
||||||
|
// pull off this attack to broadcast their transactions and possibly
|
||||||
|
// forfeit their coins by having the transaction mined by a competitor.
|
||||||
|
log.Tracef("Processing previously unknown mined tx %s", txh)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.removeFromMemPool(blockHeight-desc.addedHeight, desc.fees)
|
||||||
|
delete(stats.memPoolTxs, *txh)
|
||||||
|
|
||||||
|
if blockHeight <= desc.addedHeight {
|
||||||
|
// This shouldn't usually happen but we need to explicitly test for
|
||||||
|
// because we can't account for non positive confirmation ranges in
|
||||||
|
// mined transactions.
|
||||||
|
log.Errorf("Mined transaction %s (%d) that was known from "+
|
||||||
|
"mempool at a higher block height (%d)", txh, blockHeight,
|
||||||
|
desc.addedHeight)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
mineDelay := blockHeight - desc.addedHeight
|
||||||
|
log.Debugf("Processing mined tx %s (rate %.8f, delay %d)", txh,
|
||||||
|
desc.fees/1e8, mineDelay)
|
||||||
|
stats.newMinedTx(mineDelay, desc.fees)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessBlock processes all mined transactions in the provided block.
|
||||||
|
//
|
||||||
|
// This function is safe to be called from multiple goroutines.
|
||||||
|
func (stats *Estimator) ProcessBlock(block *lbcutil.Block) error {
|
||||||
|
stats.lock.Lock()
|
||||||
|
defer stats.lock.Unlock()
|
||||||
|
|
||||||
|
if stats.bestHeight < 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
blockHeight := block.Height()
|
||||||
|
if blockHeight <= stats.bestHeight {
|
||||||
|
// we don't explicitly track reorgs right now
|
||||||
|
log.Warnf("Trying to process mined transactions at block %d when "+
|
||||||
|
"previous best block was at height %d", blockHeight,
|
||||||
|
stats.bestHeight)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.updateMovingAverages(blockHeight)
|
||||||
|
|
||||||
|
for _, tx := range block.Transactions() {
|
||||||
|
stats.processMinedTransaction(blockHeight, tx.Hash())
|
||||||
|
}
|
||||||
|
|
||||||
|
if stats.db != nil {
|
||||||
|
return stats.updateDatabase()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the database (if it is currently opened).
|
||||||
|
func (stats *Estimator) Close() {
|
||||||
|
stats.lock.Lock()
|
||||||
|
|
||||||
|
if stats.db != nil {
|
||||||
|
log.Trace("Closing fee estimator database")
|
||||||
|
stats.db.Close()
|
||||||
|
stats.db = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.lock.Unlock()
|
||||||
|
}
|
27
fees/log.go
Normal file
27
fees/log.go
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
// Copyright (c) 2018-2019 The Decred developers
|
||||||
|
// Use of this source code is governed by an ISC
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package fees
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/btcsuite/btclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// log is a logger that is initialized with no output filters. This means the
|
||||||
|
// package will not perform any logging by default until the caller requests it.
|
||||||
|
// The default amount of logging is none.
|
||||||
|
var log btclog.Logger
|
||||||
|
|
||||||
|
// DisableLog disables all library log output. Logging output is disabled
|
||||||
|
// by default until either UseLogger or SetLogWriter are called.
|
||||||
|
func DisableLog() {
|
||||||
|
log = btclog.Disabled
|
||||||
|
}
|
||||||
|
|
||||||
|
// UseLogger uses a specified Logger to output package logging info.
|
||||||
|
// This should be used in preference to SetLogWriter if the caller is also
|
||||||
|
// using btclog.
|
||||||
|
func UseLogger(logger btclog.Logger) {
|
||||||
|
log = logger
|
||||||
|
}
|
27
log.go
27
log.go
|
@ -16,6 +16,7 @@ import (
|
||||||
"github.com/lbryio/lbcd/claimtrie/node"
|
"github.com/lbryio/lbcd/claimtrie/node"
|
||||||
"github.com/lbryio/lbcd/connmgr"
|
"github.com/lbryio/lbcd/connmgr"
|
||||||
"github.com/lbryio/lbcd/database"
|
"github.com/lbryio/lbcd/database"
|
||||||
|
"github.com/lbryio/lbcd/fees"
|
||||||
"github.com/lbryio/lbcd/mempool"
|
"github.com/lbryio/lbcd/mempool"
|
||||||
"github.com/lbryio/lbcd/mining"
|
"github.com/lbryio/lbcd/mining"
|
||||||
"github.com/lbryio/lbcd/mining/cpuminer"
|
"github.com/lbryio/lbcd/mining/cpuminer"
|
||||||
|
@ -57,13 +58,14 @@ var (
|
||||||
|
|
||||||
adxrLog = backendLog.Logger("ADXR")
|
adxrLog = backendLog.Logger("ADXR")
|
||||||
amgrLog = backendLog.Logger("AMGR")
|
amgrLog = backendLog.Logger("AMGR")
|
||||||
cmgrLog = backendLog.Logger("CMGR")
|
|
||||||
bcdbLog = backendLog.Logger("BCDB")
|
bcdbLog = backendLog.Logger("BCDB")
|
||||||
btcdLog = backendLog.Logger("MAIN")
|
btcdLog = backendLog.Logger("MAIN")
|
||||||
chanLog = backendLog.Logger("CHAN")
|
chanLog = backendLog.Logger("CHAN")
|
||||||
lbryLog = backendLog.Logger("LBRY")
|
cmgrLog = backendLog.Logger("CMGR")
|
||||||
discLog = backendLog.Logger("DISC")
|
discLog = backendLog.Logger("DISC")
|
||||||
|
feesLog = backendLog.Logger("FEES")
|
||||||
indxLog = backendLog.Logger("INDX")
|
indxLog = backendLog.Logger("INDX")
|
||||||
|
lbryLog = backendLog.Logger("LBRY")
|
||||||
minrLog = backendLog.Logger("MINR")
|
minrLog = backendLog.Logger("MINR")
|
||||||
peerLog = backendLog.Logger("PEER")
|
peerLog = backendLog.Logger("PEER")
|
||||||
rpcsLog = backendLog.Logger("RPCS")
|
rpcsLog = backendLog.Logger("RPCS")
|
||||||
|
@ -76,30 +78,31 @@ var (
|
||||||
// Initialize package-global logger variables.
|
// Initialize package-global logger variables.
|
||||||
func init() {
|
func init() {
|
||||||
addrmgr.UseLogger(amgrLog)
|
addrmgr.UseLogger(amgrLog)
|
||||||
connmgr.UseLogger(cmgrLog)
|
|
||||||
database.UseLogger(bcdbLog)
|
|
||||||
blockchain.UseLogger(chanLog)
|
blockchain.UseLogger(chanLog)
|
||||||
node.UseLogger(lbryLog)
|
connmgr.UseLogger(cmgrLog)
|
||||||
indexers.UseLogger(indxLog)
|
|
||||||
mining.UseLogger(minrLog)
|
|
||||||
cpuminer.UseLogger(minrLog)
|
cpuminer.UseLogger(minrLog)
|
||||||
|
database.UseLogger(bcdbLog)
|
||||||
|
fees.UseLogger(feesLog)
|
||||||
|
indexers.UseLogger(indxLog)
|
||||||
|
mempool.UseLogger(txmpLog)
|
||||||
|
mining.UseLogger(minrLog)
|
||||||
|
netsync.UseLogger(syncLog)
|
||||||
|
node.UseLogger(lbryLog)
|
||||||
peer.UseLogger(peerLog)
|
peer.UseLogger(peerLog)
|
||||||
txscript.UseLogger(scrpLog)
|
txscript.UseLogger(scrpLog)
|
||||||
netsync.UseLogger(syncLog)
|
|
||||||
mempool.UseLogger(txmpLog)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// subsystemLoggers maps each subsystem identifier to its associated logger.
|
// subsystemLoggers maps each subsystem identifier to its associated logger.
|
||||||
var subsystemLoggers = map[string]btclog.Logger{
|
var subsystemLoggers = map[string]btclog.Logger{
|
||||||
"ADXR": adxrLog,
|
"ADXR": adxrLog,
|
||||||
"AMGR": amgrLog,
|
"AMGR": amgrLog,
|
||||||
"CMGR": cmgrLog,
|
|
||||||
"BCDB": bcdbLog,
|
"BCDB": bcdbLog,
|
||||||
"MAIN": btcdLog,
|
|
||||||
"CHAN": chanLog,
|
"CHAN": chanLog,
|
||||||
"LBRY": lbryLog,
|
"CMGR": cmgrLog,
|
||||||
"DISC": discLog,
|
"DISC": discLog,
|
||||||
"INDX": indxLog,
|
"INDX": indxLog,
|
||||||
|
"LBRY": lbryLog,
|
||||||
|
"MAIN": btcdLog,
|
||||||
"MINR": minrLog,
|
"MINR": minrLog,
|
||||||
"PEER": peerLog,
|
"PEER": peerLog,
|
||||||
"RPCS": rpcsLog,
|
"RPCS": rpcsLog,
|
||||||
|
|
|
@ -100,9 +100,15 @@ type Config struct {
|
||||||
// This can be nil if the address index is not enabled.
|
// This can be nil if the address index is not enabled.
|
||||||
AddrIndex *indexers.AddrIndex
|
AddrIndex *indexers.AddrIndex
|
||||||
|
|
||||||
// FeeEstimatator provides a feeEstimator. If it is not nil, the mempool
|
// AddTxToFeeEstimation defines an optional function to be called whenever a
|
||||||
// records all new transactions it observes into the feeEstimator.
|
// new transaction is added to the mempool, which can be used to track fees
|
||||||
FeeEstimator *FeeEstimator
|
// for the purposes of smart fee estimation.
|
||||||
|
AddTxToFeeEstimation func(txHash *chainhash.Hash, fee, size int64)
|
||||||
|
|
||||||
|
// RemoveTxFromFeeEstimation defines an optional function to be called
|
||||||
|
// whenever a transaction is removed from the mempool in order to track fee
|
||||||
|
// estimation.
|
||||||
|
RemoveTxFromFeeEstimation func(txHash *chainhash.Hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Policy houses the policy (configuration parameters) which is used to
|
// Policy houses the policy (configuration parameters) which is used to
|
||||||
|
@ -491,6 +497,13 @@ func (mp *TxPool) removeTransaction(tx *btcutil.Tx, removeRedeemers bool) {
|
||||||
delete(mp.outpoints, txIn.PreviousOutPoint)
|
delete(mp.outpoints, txIn.PreviousOutPoint)
|
||||||
}
|
}
|
||||||
delete(mp.pool, *txHash)
|
delete(mp.pool, *txHash)
|
||||||
|
|
||||||
|
// Inform associated fee estimator that the transaction has been removed
|
||||||
|
// from the mempool
|
||||||
|
if mp.cfg.RemoveTxFromFeeEstimation != nil {
|
||||||
|
mp.cfg.RemoveTxFromFeeEstimation(txHash)
|
||||||
|
}
|
||||||
|
|
||||||
atomic.StoreInt64(&mp.lastUpdated, time.Now().Unix())
|
atomic.StoreInt64(&mp.lastUpdated, time.Now().Unix())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -559,9 +572,11 @@ func (mp *TxPool) addTransaction(utxoView *blockchain.UtxoViewpoint, tx *btcutil
|
||||||
mp.cfg.AddrIndex.AddUnconfirmedTx(tx, utxoView)
|
mp.cfg.AddrIndex.AddUnconfirmedTx(tx, utxoView)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Record this tx for fee estimation if enabled.
|
// Inform the associated fee estimator that a new transaction has been added
|
||||||
if mp.cfg.FeeEstimator != nil {
|
// to the mempool.
|
||||||
mp.cfg.FeeEstimator.ObserveTransaction(txD)
|
size := GetTxVirtualSize(txD.Tx)
|
||||||
|
if mp.cfg.AddTxToFeeEstimation != nil {
|
||||||
|
mp.cfg.AddTxToFeeEstimation(txD.Tx.Hash(), txD.Fee, size)
|
||||||
}
|
}
|
||||||
|
|
||||||
return txD
|
return txD
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
"github.com/lbryio/lbcd/blockchain"
|
"github.com/lbryio/lbcd/blockchain"
|
||||||
"github.com/lbryio/lbcd/chaincfg"
|
"github.com/lbryio/lbcd/chaincfg"
|
||||||
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
"github.com/lbryio/lbcd/chaincfg/chainhash"
|
||||||
|
"github.com/lbryio/lbcd/fees"
|
||||||
"github.com/lbryio/lbcd/mempool"
|
"github.com/lbryio/lbcd/mempool"
|
||||||
"github.com/lbryio/lbcd/peer"
|
"github.com/lbryio/lbcd/peer"
|
||||||
"github.com/lbryio/lbcd/wire"
|
"github.com/lbryio/lbcd/wire"
|
||||||
|
@ -37,5 +38,5 @@ type Config struct {
|
||||||
DisableCheckpoints bool
|
DisableCheckpoints bool
|
||||||
MaxPeers int
|
MaxPeers int
|
||||||
|
|
||||||
FeeEstimator *mempool.FeeEstimator
|
FeeEstimator *fees.Estimator
|
||||||
}
|
}
|
||||||
|
|
|
@@ -16,6 +16,7 @@ import (
 	"github.com/lbryio/lbcd/chaincfg"
 	"github.com/lbryio/lbcd/chaincfg/chainhash"
 	"github.com/lbryio/lbcd/database"
+	"github.com/lbryio/lbcd/fees"
 	"github.com/lbryio/lbcd/mempool"
 	peerpkg "github.com/lbryio/lbcd/peer"
 	"github.com/lbryio/lbcd/wire"
@@ -205,7 +206,7 @@ type SyncManager struct {
 	nextCheckpoint *chaincfg.Checkpoint

 	// An optional fee estimator.
-	feeEstimator *mempool.FeeEstimator
+	feeEstimator *fees.Estimator
 }

 // resetHeaderState sets the headers-first mode state to values appropriate for
@@ -1414,6 +1415,13 @@ func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Not
 		iv := wire.NewInvVect(wire.InvTypeBlock, block.Hash())
 		sm.peerNotifier.RelayInventory(iv, block.MsgBlock().Header)

+		if !sm.feeEstimator.IsEnabled() {
+			// Fee estimation can only start after we have performed an initial
+			// sync, otherwise we'll start adding mempool transactions at the
+			// wrong height.
+			sm.feeEstimator.Enable(block.Height())
+		}
+
 	// A block has been connected to the main block chain.
 	case blockchain.NTBlockConnected:
 		block, ok := notification.Data.(*btcutil.Block)
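Enabling the estimator only once a block is accepted near the tip matters because the estimator measures how many blocks elapse between a transaction entering the mempool and being mined; if it started during initial block download, the recorded entry heights would not line up with real chain heights. A small sketch of the enable-once gate in isolation, using an assumed interface to stand in for *fees.Estimator:

package example

// estimator captures only the two calls the sync manager relies on above; the
// concrete type in this diff is *fees.Estimator.
type estimator interface {
	IsEnabled() bool
	Enable(height int32)
}

// maybeEnable turns fee estimation on exactly once, at the height where the
// node considers itself synced, so that mempool entry heights recorded from
// this point on match real chain heights.
func maybeEnable(est estimator, bestHeight int32) {
	if !est.IsEnabled() {
		est.Enable(bestHeight)
	}
}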
@@ -1422,6 +1430,12 @@ func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Not
 			break
 		}

+		// Account for transactions mined in the newly connected block for fee
+		// estimation. This must be done before attempting to remove
+		// transactions from the mempool because the mempool will alert the
+		// estimator of the txs that are leaving.
+		sm.feeEstimator.ProcessBlock(block)
+
 		// Remove all of the transactions (except the coinbase) in the
 		// connected block from the transaction pool. Secondly, remove any
 		// transactions which are now double spends as a result of these
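The ordering constraint called out in that comment is the important part: ProcessBlock lets the estimator mark the block's transactions as mined before the mempool's removal path fires RemoveTxFromFeeEstimation for the same hashes, which would otherwise make them look evicted rather than confirmed. A compressed sketch of the required order, again with assumed interfaces standing in for the concrete lbcd types:

package example

import "github.com/lbryio/lbcutil"

// blockObserver and txPool capture only the calls used in the hunk above; the
// concrete types in this diff are *fees.Estimator and *mempool.TxPool.
type blockObserver interface {
	ProcessBlock(block *lbcutil.Block) error
}

type txPool interface {
	RemoveTransaction(tx *lbcutil.Tx, removeRedeemers bool)
}

// onBlockConnected shows the ordering the comment requires: the estimator sees
// the block before the mempool purge triggers RemoveTxFromFeeEstimation for
// the same transactions, so they are counted as mined rather than evicted.
func onBlockConnected(est blockObserver, pool txPool, block *lbcutil.Block) {
	est.ProcessBlock(block) // account for mined transactions first
	for _, tx := range block.Transactions()[1:] { // skip the coinbase
		pool.RemoveTransaction(tx, false)
		// double-spend and orphan handling elided
	}
}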
@@ -1438,20 +1452,6 @@ func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Not
 			sm.peerNotifier.AnnounceNewTransactions(acceptedTxs)
 		}

-		// Register block with the fee estimator, if it exists.
-		if sm.feeEstimator != nil {
-			err := sm.feeEstimator.RegisterBlock(block)
-
-			// If an error is somehow generated then the fee estimator
-			// has entered an invalid state. Since it doesn't know how
-			// to recover, create a new one.
-			if err != nil {
-				sm.feeEstimator = mempool.NewFeeEstimator(
-					mempool.DefaultEstimateFeeMaxRollback,
-					mempool.DefaultEstimateFeeMinRegisteredBlocks)
-			}
-		}
-
 	// A block has been disconnected from the main block chain.
 	case blockchain.NTBlockDisconnected:
 		block, ok := notification.Data.(*btcutil.Block)
@@ -1473,10 +1473,6 @@ func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Not
 			}
 		}

-		// Rollback previous block recorded by the fee estimator.
-		if sm.feeEstimator != nil {
-			sm.feeEstimator.Rollback(block.Hash())
-		}
 	}
 }
35
rpcserver.go
@@ -36,6 +36,7 @@ import (
 	"github.com/lbryio/lbcd/chaincfg"
 	"github.com/lbryio/lbcd/chaincfg/chainhash"
 	"github.com/lbryio/lbcd/database"
+	"github.com/lbryio/lbcd/fees"
 	"github.com/lbryio/lbcd/mempool"
 	"github.com/lbryio/lbcd/mining"
 	"github.com/lbryio/lbcd/mining/cpuminer"
@@ -893,7 +894,7 @@ func handleEstimateFee(s *rpcServer, cmd interface{}, closeChan <-chan struct{})
 		}
 	}

-	feeRate, err := s.cfg.FeeEstimator.EstimateFee(uint32(c.NumBlocks))
+	feeRate, err := s.cfg.FeeEstimator.EstimateFee(int32(c.NumBlocks))

 	if err != nil {
 		return nil, &btcjson.RPCError{
@@ -906,12 +907,36 @@ func handleEstimateFee(s *rpcServer, cmd interface{}, closeChan <-chan struct{})
 	return float64(feeRate), nil
 }

+// handleEstimateSmartFee implements the estimatesmartfee command.
+//
+// The default estimation mode when unset is assumed to be "conservative". As
+// of 2018-12, the only supported mode is "conservative".
 func handleEstimateSmartFee(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
 	c := cmd.(*btcjson.EstimateSmartFeeCmd)

-	rpcsLog.Debugf("EstimateSmartFee is not implemented; falling back to EstimateFee. Requested mode: %s", c.EstimateMode)
-	return handleEstimateFee(s, &btcjson.EstimateFeeCmd{NumBlocks: c.ConfTarget}, closeChan)
+	mode := btcjson.EstimateModeConservative
+	if c.EstimateMode != nil {
+		mode = *c.EstimateMode
+	}
+
+	if mode != btcjson.EstimateModeConservative {
+		return nil, &btcjson.RPCError{
+			Code: btcjson.ErrRPCInvalidParameter,
+			Message: "Only the default and conservative modes " +
+				"are supported for smart fee estimation at the moment",
+		}
+	}
+
+	fee, err := s.cfg.FeeEstimator.EstimateFee(int32(c.ConfTarget))
+	if err != nil {
+		return nil, internalRPCError(err.Error(), "Could not estimate fee")
+	}
+
+	feeRate := float64(fee) / btcutil.SatoshiPerBitcoin
+	return &btcjson.EstimateSmartFeeResult{
+		FeeRate: &feeRate,
+		Blocks:  c.ConfTarget,
+	}, nil
 }

 func handleGenerate(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) {
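The division by SatoshiPerBitcoin suggests the estimator reports its rate in satoshis per kilobyte, which the handler converts to whole coins per kilobyte before placing it in EstimateSmartFeeResult.FeeRate. A worked example of that conversion, with an invented rate for illustration:

package main

import (
	"fmt"

	btcutil "github.com/lbryio/lbcutil"
)

func main() {
	// Suppose the estimator answers a 6-block confirmation target with a rate
	// of 123000 satoshis per kilobyte (an invented figure for illustration).
	estimated := btcutil.Amount(123000)

	// handleEstimateSmartFee divides by SatoshiPerBitcoin (1e8), so the value
	// reported to RPC clients is expressed in coins per kilobyte.
	feeRate := float64(estimated) / btcutil.SatoshiPerBitcoin
	fmt.Printf("feerate: %.8f coins/kB for a 6-block target\n", feeRate) // 0.00123000
}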
@@ -4012,6 +4037,7 @@ type rpcServer struct {
 	gbtWorkState           *gbtWorkState
 	helpCacher             *helpCacher
 	requestProcessShutdown chan struct{}
+	feeEstimator           *fees.Estimator
 	quit                   chan int
 }
|
@ -4845,7 +4871,7 @@ type rpcserverConfig struct {
|
||||||
|
|
||||||
// The fee estimator keeps track of how long transactions are left in
|
// The fee estimator keeps track of how long transactions are left in
|
||||||
// the mempool before they are mined into blocks.
|
// the mempool before they are mined into blocks.
|
||||||
FeeEstimator *mempool.FeeEstimator
|
FeeEstimator *fees.Estimator
|
||||||
|
|
||||||
// Services represents the services supported by this node.
|
// Services represents the services supported by this node.
|
||||||
Services wire.ServiceFlag
|
Services wire.ServiceFlag
|
||||||
|
@@ -4859,6 +4885,7 @@ func newRPCServer(config *rpcserverConfig) (*rpcServer, error) {
 		gbtWorkState:           newGbtWorkState(config.TimeSource),
 		helpCacher:             newHelpCacher(),
 		requestProcessShutdown: make(chan struct{}),
+		feeEstimator:           config.FeeEstimator,
 		quit:                   make(chan int),
 	}
 	if cfg.RPCUser != "" && cfg.RPCPass != "" {
68
server.go
@@ -14,6 +14,7 @@ import (
 	"fmt"
 	"math"
 	"net"
+	"path"
 	"runtime"
 	"sort"
 	"strconv"
@@ -31,6 +32,7 @@ import (
 	claimtrieconfig "github.com/lbryio/lbcd/claimtrie/config"
 	"github.com/lbryio/lbcd/connmgr"
 	"github.com/lbryio/lbcd/database"
+	"github.com/lbryio/lbcd/fees"
 	"github.com/lbryio/lbcd/mempool"
 	"github.com/lbryio/lbcd/mining"
 	"github.com/lbryio/lbcd/mining/cpuminer"
@@ -38,6 +40,7 @@ import (
 	"github.com/lbryio/lbcd/peer"
 	"github.com/lbryio/lbcd/txscript"
 	"github.com/lbryio/lbcd/wire"
+	"github.com/lbryio/lbcutil"
 	btcutil "github.com/lbryio/lbcutil"
 	"github.com/lbryio/lbcutil/bloom"
 )
@@ -241,7 +244,7 @@ type server struct {

 	// The fee estimator keeps track of how long transactions are left in
 	// the mempool before they are mined into blocks.
-	feeEstimator *mempool.FeeEstimator
+	feeEstimator *fees.Estimator

 	// cfCheckptCaches stores a cached slice of filter headers for cfcheckpt
 	// messages for each filter type.
@@ -2417,13 +2420,7 @@ func (s *server) Stop() error {
 		s.rpcServer.Stop()
 	}

-	// Save fee estimator state in the database.
-	s.db.Update(func(tx database.Tx) error {
-		metadata := tx.Metadata()
-		metadata.Put(mempool.EstimateFeeDatabaseKey, s.feeEstimator.Save())
-
-		return nil
-	})
+	s.feeEstimator.Close()

 	// Signal the remaining goroutines to quit.
 	close(s.quit)
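The shutdown path no longer serializes estimator state into the block database's metadata bucket; the estimator now owns its persistence, and Close is presumably what flushes its state to the dedicated feesdb file configured below. A minimal lifecycle sketch under that assumption (the config values are illustrative, not the ones lbcd ships with):

package example

import (
	"log"
	"path/filepath"

	"github.com/lbryio/lbcd/fees"
)

// runWithEstimator creates an estimator backed by a feesdb file in dataDir and
// closes it on return, which is assumed to persist the bucket state so the
// estimator can resume warm after a restart.
func runWithEstimator(dataDir string) error {
	est, err := fees.NewEstimator(&fees.EstimatorConfig{
		MinBucketFee: 1000,   // illustrative, sat/kB
		MaxBucketFee: 100000, // illustrative, sat/kB
		MaxConfirms:  fees.DefaultMaxConfirmations,
		FeeRateStep:  fees.DefaultFeeRateStep,
		DatabaseFile: filepath.Join(dataDir, "feesdb"), // same layout server.go uses
	})
	if err != nil {
		return err
	}
	defer est.Close()

	// ... feed mempool and block events to est while the node runs ...
	log.Println("estimator running")
	return nil
}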
@@ -2755,35 +2752,25 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
 		return nil, err
 	}

-	// Search for a FeeEstimator state in the database. If none can be found
-	// or if it cannot be loaded, create a new one.
-	db.Update(func(tx database.Tx) error {
-		metadata := tx.Metadata()
-		feeEstimationData := metadata.Get(mempool.EstimateFeeDatabaseKey)
-		if feeEstimationData != nil {
-			// delete it from the database so that we don't try to restore the
-			// same thing again somehow.
-			metadata.Delete(mempool.EstimateFeeDatabaseKey)
-
-			// If there is an error, log it and make a new fee estimator.
-			var err error
-			s.feeEstimator, err = mempool.RestoreFeeEstimator(feeEstimationData)
-
-			if err != nil {
-				peerLog.Errorf("Failed to restore fee estimator %v", err)
-			}
-		}
-
-		return nil
-	})
-
-	// If no feeEstimator has been found, or if the one that has been found
-	// is behind somehow, create a new one and start over.
-	if s.feeEstimator == nil || s.feeEstimator.LastKnownHeight() != s.chain.BestSnapshot().Height {
-		s.feeEstimator = mempool.NewFeeEstimator(
-			mempool.DefaultEstimateFeeMaxRollback,
-			mempool.DefaultEstimateFeeMinRegisteredBlocks)
-	}
+	feC := fees.EstimatorConfig{
+		MinBucketFee: cfg.minRelayTxFee,
+		MaxBucketFee: lbcutil.Amount(fees.DefaultMaxBucketFeeMultiplier) * cfg.minRelayTxFee,
+		MaxConfirms:  fees.DefaultMaxConfirmations,
+		FeeRateStep:  fees.DefaultFeeRateStep,
+		DatabaseFile: path.Join(cfg.DataDir, "feesdb"),
+
+		// 1e5 is the previous (up to 1.1.0) mempool.DefaultMinRelayTxFee that
+		// un-upgraded wallets will be using, so track this particular rate
+		// explicitly. Note that bumping this value will cause the existing fees
+		// database to become invalid and will force nodes to explicitly delete
+		// it.
+		ExtraBucketFee: 1e5,
+	}
+	fe, err := fees.NewEstimator(&feC)
+	if err != nil {
+		return nil, err
+	}
+	s.feeEstimator = fe

 	txC := mempool.Config{
 		Policy: mempool.Policy{
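The bucket range is derived from the node's relay policy: the lowest bucket sits at the minimum relay fee, the highest at DefaultMaxBucketFeeMultiplier times that, with FeeRateStep controlling the spacing in between and ExtraBucketFee pinning one additional bucket at the legacy 1e5 sat/kB rate. The sketch below reconstructs that derivation under the assumption of multiplicative bucket spacing; the constants are stand-ins, and the authoritative logic lives in the fees package:

package main

import "fmt"

// bucketFees builds fee-rate bucket boundaries (sat/kB) from min to max using
// an assumed multiplicative step, which is how the FeeRateStep name reads.
func bucketFees(minFee, maxFee, step float64) []float64 {
	var buckets []float64
	for f := minFee; f < maxFee; f *= step {
		buckets = append(buckets, f)
	}
	return append(buckets, maxFee)
}

func main() {
	const (
		minRelayTxFee = 1000 // example minimum relay fee, sat/kB
		maxMultiplier = 100  // stand-in for fees.DefaultMaxBucketFeeMultiplier
		feeRateStep   = 1.1  // stand-in for fees.DefaultFeeRateStep
	)
	b := bucketFees(minRelayTxFee, minRelayTxFee*maxMultiplier, feeRateStep)
	fmt.Printf("%d buckets from %.0f to %.0f sat/kB\n", len(b), b[0], b[len(b)-1])
}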
@@ -2804,11 +2791,12 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string,
 		CalcSequenceLock: func(tx *btcutil.Tx, view *blockchain.UtxoViewpoint) (*blockchain.SequenceLock, error) {
 			return s.chain.CalcSequenceLock(tx, view, true)
 		},
 		IsDeploymentActive: s.chain.IsDeploymentActive,
 		SigCache:           s.sigCache,
 		HashCache:          s.hashCache,
 		AddrIndex:          s.addrIndex,
-		FeeEstimator:       s.feeEstimator,
+		AddTxToFeeEstimation:      s.feeEstimator.AddMemPoolTransaction,
+		RemoveTxFromFeeEstimation: s.feeEstimator.RemoveMemPoolTransaction,
 	}
 	s.txMemPool = mempool.New(&txC)