2014-08-26 22:28:32 +02:00
// Copyright (c) 2009-2010 Satoshi Nakamoto
2016-12-31 19:01:21 +01:00
// Copyright (c) 2009-2016 The Bitcoin Core developers
2014-08-26 22:28:32 +02:00
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
# include "policy/fees.h"
2015-11-16 21:21:51 +01:00
# include "policy/policy.h"
2014-08-26 22:28:32 +02:00
# include "amount.h"
2017-02-15 21:48:48 +01:00
# include "clientversion.h"
2014-08-26 22:28:32 +02:00
# include "primitives/transaction.h"
2016-02-12 21:57:15 +01:00
# include "random.h"
2014-08-26 22:28:32 +02:00
# include "streams.h"
# include "txmempool.h"
# include "util.h"
2017-03-09 20:02:00 +01:00
// Sentinel feerate used as the upper bound of the catch-all top bucket.
static constexpr double INF_FEERATE = 1e99;
2017-02-16 23:27:20 +01:00
/**
* We will instantiate an instance of this class to track transactions that were
* included in a block . We will lump transactions into a bucket according to their
* approximate feerate and then track how long it took for those txs to be included in a block
*
* The tracking of unconfirmed ( mempool ) transactions is completely independent of the
* historical tracking of transactions that have been confirmed in a block .
*/
class TxConfirmStats
{
private :
//Define the buckets we will group transactions into
2017-02-23 21:13:41 +01:00
const std : : vector < double > & buckets ; // The upper-bound of the range for the bucket (inclusive)
const std : : map < double , unsigned int > & bucketMap ; // Map of bucket upper-bound to index into all vectors by bucket
2017-02-16 23:27:20 +01:00
// For each bucket X:
// Count the total # of txs in each bucket
// Track the historical moving average of this total over blocks
std : : vector < double > txCtAvg ;
// Count the total # of txs confirmed within Y blocks in each bucket
// Track the historical moving average of theses totals over blocks
2017-02-23 21:13:41 +01:00
std : : vector < std : : vector < double > > confAvg ; // confAvg[Y][X]
2017-03-09 21:26:05 +01:00
// Track moving avg of txs which have been evicted from the mempool
// after failing to be confirmed within Y blocks
std : : vector < std : : vector < double > > failAvg ; // failAvg[Y][X]
2017-02-16 23:27:20 +01:00
// Sum the total feerate of all tx's in each bucket
// Track the historical moving average of this total over blocks
std : : vector < double > avg ;
// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
// Combine the total value with the tx counts to calculate the avg feerate per bucket
double decay ;
2017-04-03 17:31:27 +02:00
// Resolution (# of blocks) with which confirmations are tracked
2017-02-23 21:13:41 +01:00
unsigned int scale ;
2017-02-16 23:27:20 +01:00
// Mempool counts of outstanding transactions
// For each bucket X, track the number of transactions in the mempool
// that are unconfirmed for each possible confirmation value Y
std : : vector < std : : vector < int > > unconfTxs ; //unconfTxs[Y][X]
2017-04-03 17:31:27 +02:00
// transactions still unconfirmed after GetMaxConfirms for each bucket
2017-02-16 23:27:20 +01:00
std : : vector < int > oldUnconfTxs ;
2017-02-23 21:13:41 +01:00
void resizeInMemoryCounters ( size_t newbuckets ) ;
2017-02-16 23:27:20 +01:00
public :
/**
* Create new TxConfirmStats . This is called by BlockPolicyEstimator ' s
* constructor with default values .
* @ param defaultBuckets contains the upper limits for the bucket boundaries
* @ param maxConfirms max number of confirms to track
* @ param decay how much to decay the historical moving average per block
*/
2017-02-23 21:13:41 +01:00
TxConfirmStats ( const std : : vector < double > & defaultBuckets , const std : : map < double , unsigned int > & defaultBucketMap ,
2017-04-03 17:31:27 +02:00
unsigned int maxPeriods , double decay , unsigned int scale ) ;
2017-02-16 23:27:20 +01:00
2017-02-28 23:29:42 +01:00
/** Roll the circular buffer for unconfirmed txs*/
2017-02-16 23:27:20 +01:00
void ClearCurrent ( unsigned int nBlockHeight ) ;
/**
* Record a new transaction data point in the current block stats
* @ param blocksToConfirm the number of blocks it took this transaction to confirm
* @ param val the feerate of the transaction
* @ warning blocksToConfirm is 1 - based and has to be > = 1
*/
void Record ( int blocksToConfirm , double val ) ;
/** Record a new transaction entering the mempool*/
unsigned int NewTx ( unsigned int nBlockHeight , double val ) ;
/** Remove a transaction from mempool tracking stats*/
void removeTx ( unsigned int entryHeight , unsigned int nBestSeenHeight ,
2017-03-09 21:26:05 +01:00
unsigned int bucketIndex , bool inBlock ) ;
2017-02-16 23:27:20 +01:00
/** Update our estimates by decaying our historical moving average and updating
with the data gathered from the current block */
void UpdateMovingAverages ( ) ;
/**
* Calculate a feerate estimate . Find the lowest value bucket ( or range of buckets
* to make sure we have enough data points ) whose transactions still have sufficient likelihood
* of being confirmed within the target number of confirmations
* @ param confTarget target number of confirmations
* @ param sufficientTxVal required average number of transactions per block in a bucket range
* @ param minSuccess the success probability we require
* @ param requireGreater return the lowest feerate such that all higher values pass minSuccess OR
* return the highest feerate such that all lower values fail minSuccess
* @ param nBlockHeight the current block height
*/
double EstimateMedianVal ( int confTarget , double sufficientTxVal ,
2017-01-24 22:30:03 +01:00
double minSuccess , bool requireGreater , unsigned int nBlockHeight ,
EstimationResult * result = nullptr ) const ;
2017-02-16 23:27:20 +01:00
/** Return the max number of confirms we're tracking */
2017-04-03 17:31:27 +02:00
unsigned int GetMaxConfirms ( ) const { return scale * confAvg . size ( ) ; }
2017-02-16 23:27:20 +01:00
/** Write state of estimation data to a file*/
void Write ( CAutoFile & fileout ) const ;
/**
* Read saved state of estimation data from a file and replace all internal data structures and
* variables with this state .
*/
2017-02-23 21:13:41 +01:00
void Read ( CAutoFile & filein , int nFileVersion , size_t numBuckets ) ;
2017-02-16 23:27:20 +01:00
} ;
2017-02-16 22:23:15 +01:00
// Construct tracking structures sized to the bucket list; all averages start at zero.
TxConfirmStats::TxConfirmStats(const std::vector<double>& defaultBuckets,
                               const std::map<double, unsigned int>& defaultBucketMap,
                               unsigned int maxPeriods, double _decay, unsigned int _scale)
    : buckets(defaultBuckets), bucketMap(defaultBucketMap)
{
    decay = _decay;
    scale = _scale;
    // One row of per-bucket counters for each tracked confirmation period
    confAvg.resize(maxPeriods);
    failAvg.resize(maxPeriods);
    for (unsigned int period = 0; period < maxPeriods; period++) {
        confAvg[period].resize(buckets.size());
        failAvg[period].resize(buckets.size());
    }
    txCtAvg.resize(buckets.size());
    avg.resize(buckets.size());
    resizeInMemoryCounters(buckets.size());
}
void TxConfirmStats : : resizeInMemoryCounters ( size_t newbuckets ) {
// newbuckets must be passed in because the buckets referred to during Read have not been updated yet.
unconfTxs . resize ( GetMaxConfirms ( ) ) ;
for ( unsigned int i = 0 ; i < unconfTxs . size ( ) ; i + + ) {
unconfTxs [ i ] . resize ( newbuckets ) ;
}
oldUnconfTxs . resize ( newbuckets ) ;
2014-08-26 22:28:32 +02:00
}
2017-02-28 23:29:42 +01:00
// Roll the unconfirmed txs circular buffer
2014-08-26 22:28:32 +02:00
void TxConfirmStats : : ClearCurrent ( unsigned int nBlockHeight )
{
for ( unsigned int j = 0 ; j < buckets . size ( ) ; j + + ) {
oldUnconfTxs [ j ] + = unconfTxs [ nBlockHeight % unconfTxs . size ( ) ] [ j ] ;
unconfTxs [ nBlockHeight % unconfTxs . size ( ) ] [ j ] = 0 ;
}
}
void TxConfirmStats : : Record ( int blocksToConfirm , double val )
{
// blocksToConfirm is 1-based
if ( blocksToConfirm < 1 )
return ;
2017-04-03 17:31:27 +02:00
int periodsToConfirm = ( blocksToConfirm + scale - 1 ) / scale ;
2014-08-26 22:28:32 +02:00
unsigned int bucketindex = bucketMap . lower_bound ( val ) - > second ;
2017-04-03 17:31:27 +02:00
for ( size_t i = periodsToConfirm ; i < = confAvg . size ( ) ; i + + ) {
2017-02-28 23:29:42 +01:00
confAvg [ i - 1 ] [ bucketindex ] + + ;
2014-08-26 22:28:32 +02:00
}
2017-02-28 23:29:42 +01:00
txCtAvg [ bucketindex ] + + ;
avg [ bucketindex ] + = val ;
2014-08-26 22:28:32 +02:00
}
void TxConfirmStats : : UpdateMovingAverages ( )
{
for ( unsigned int j = 0 ; j < buckets . size ( ) ; j + + ) {
for ( unsigned int i = 0 ; i < confAvg . size ( ) ; i + + )
2017-02-28 23:29:42 +01:00
confAvg [ i ] [ j ] = confAvg [ i ] [ j ] * decay ;
2017-03-09 21:26:05 +01:00
for ( unsigned int i = 0 ; i < failAvg . size ( ) ; i + + )
failAvg [ i ] [ j ] = failAvg [ i ] [ j ] * decay ;
2017-02-28 23:29:42 +01:00
avg [ j ] = avg [ j ] * decay ;
txCtAvg [ j ] = txCtAvg [ j ] * decay ;
2014-08-26 22:28:32 +02:00
}
}
// returns -1 on error conditions
double TxConfirmStats : : EstimateMedianVal ( int confTarget , double sufficientTxVal ,
double successBreakPoint , bool requireGreater ,
2017-01-24 22:30:03 +01:00
unsigned int nBlockHeight , EstimationResult * result ) const
2014-08-26 22:28:32 +02:00
{
// Counters for a bucket (or range of buckets)
double nConf = 0 ; // Number of tx's confirmed within the confTarget
double totalNum = 0 ; // Total number of tx's that were ever confirmed
int extraNum = 0 ; // Number of tx's still in mempool for confTarget or longer
2017-03-09 21:26:05 +01:00
double failNum = 0 ; // Number of tx's that were never confirmed but removed from the mempool after confTarget
2017-04-03 17:31:27 +02:00
int periodTarget = ( confTarget + scale - 1 ) / scale ;
2014-08-26 22:28:32 +02:00
int maxbucketindex = buckets . size ( ) - 1 ;
2016-03-21 18:04:40 +01:00
// requireGreater means we are looking for the lowest feerate such that all higher
// values pass, so we start at maxbucketindex (highest feerate) and look at successively
2014-08-26 22:28:32 +02:00
// smaller buckets until we reach failure. Otherwise, we are looking for the highest
2016-03-21 18:04:40 +01:00
// feerate such that all lower values fail, and we go in the opposite direction.
2014-08-26 22:28:32 +02:00
unsigned int startbucket = requireGreater ? maxbucketindex : 0 ;
int step = requireGreater ? - 1 : 1 ;
// We'll combine buckets until we have enough samples.
// The near and far variables will define the range we've combined
// The best variables are the last range we saw which still had a high
// enough confirmation rate to count as success.
// The cur variables are the current range we're counting.
unsigned int curNearBucket = startbucket ;
unsigned int bestNearBucket = startbucket ;
unsigned int curFarBucket = startbucket ;
unsigned int bestFarBucket = startbucket ;
bool foundAnswer = false ;
unsigned int bins = unconfTxs . size ( ) ;
2017-02-22 04:18:13 +01:00
bool newBucketRange = true ;
2017-01-24 22:30:03 +01:00
bool passing = true ;
EstimatorBucket passBucket ;
EstimatorBucket failBucket ;
2014-08-26 22:28:32 +02:00
2016-03-21 18:04:40 +01:00
// Start counting from highest(default) or lowest feerate transactions
2014-08-26 22:28:32 +02:00
for ( int bucket = startbucket ; bucket > = 0 & & bucket < = maxbucketindex ; bucket + = step ) {
2017-02-22 04:18:13 +01:00
if ( newBucketRange ) {
curNearBucket = bucket ;
newBucketRange = false ;
}
2014-08-26 22:28:32 +02:00
curFarBucket = bucket ;
2017-04-03 17:31:27 +02:00
nConf + = confAvg [ periodTarget - 1 ] [ bucket ] ;
2014-08-26 22:28:32 +02:00
totalNum + = txCtAvg [ bucket ] ;
2017-04-03 17:31:27 +02:00
failNum + = failAvg [ periodTarget - 1 ] [ bucket ] ;
2014-08-26 22:28:32 +02:00
for ( unsigned int confct = confTarget ; confct < GetMaxConfirms ( ) ; confct + + )
extraNum + = unconfTxs [ ( nBlockHeight - confct ) % bins ] [ bucket ] ;
extraNum + = oldUnconfTxs [ bucket ] ;
// If we have enough transaction data points in this range of buckets,
// we can test for success
// (Only count the confirmed data points, so that each confirmation count
// will be looking at the same amount of data and same bucket breaks)
if ( totalNum > = sufficientTxVal / ( 1 - decay ) ) {
2017-03-09 21:26:05 +01:00
double curPct = nConf / ( totalNum + failNum + extraNum ) ;
2014-08-26 22:28:32 +02:00
// Check to see if we are no longer getting confirmed at the success rate
2017-03-02 16:08:25 +01:00
if ( ( requireGreater & & curPct < successBreakPoint ) | | ( ! requireGreater & & curPct > successBreakPoint ) ) {
2017-01-24 22:30:03 +01:00
if ( passing = = true ) {
// First time we hit a failure record the failed bucket
unsigned int failMinBucket = std : : min ( curNearBucket , curFarBucket ) ;
unsigned int failMaxBucket = std : : max ( curNearBucket , curFarBucket ) ;
failBucket . start = failMinBucket ? buckets [ failMinBucket - 1 ] : 0 ;
failBucket . end = buckets [ failMaxBucket ] ;
failBucket . withinTarget = nConf ;
failBucket . totalConfirmed = totalNum ;
failBucket . inMempool = extraNum ;
2017-03-09 21:26:05 +01:00
failBucket . leftMempool = failNum ;
2017-01-24 22:30:03 +01:00
passing = false ;
}
2017-03-02 16:08:25 +01:00
continue ;
}
2014-08-26 22:28:32 +02:00
// Otherwise update the cumulative stats, and the bucket variables
// and reset the counters
else {
2017-01-24 22:30:03 +01:00
failBucket = EstimatorBucket ( ) ; // Reset any failed bucket, currently passing
2014-08-26 22:28:32 +02:00
foundAnswer = true ;
2017-01-24 22:30:03 +01:00
passing = true ;
passBucket . withinTarget = nConf ;
2014-08-26 22:28:32 +02:00
nConf = 0 ;
2017-01-24 22:30:03 +01:00
passBucket . totalConfirmed = totalNum ;
2014-08-26 22:28:32 +02:00
totalNum = 0 ;
2017-01-24 22:30:03 +01:00
passBucket . inMempool = extraNum ;
2017-03-09 21:26:05 +01:00
passBucket . leftMempool = failNum ;
failNum = 0 ;
2014-08-26 22:28:32 +02:00
extraNum = 0 ;
bestNearBucket = curNearBucket ;
bestFarBucket = curFarBucket ;
2017-02-22 04:18:13 +01:00
newBucketRange = true ;
2014-08-26 22:28:32 +02:00
}
}
}
double median = - 1 ;
double txSum = 0 ;
2016-03-21 18:04:40 +01:00
// Calculate the "average" feerate of the best bucket range that met success conditions
// Find the bucket with the median transaction and then report the average feerate from that bucket
2014-08-26 22:28:32 +02:00
// This is a compromise between finding the median which we can't since we don't save all tx's
// and reporting the average which is less accurate
2017-01-24 22:30:03 +01:00
unsigned int minBucket = std : : min ( bestNearBucket , bestFarBucket ) ;
unsigned int maxBucket = std : : max ( bestNearBucket , bestFarBucket ) ;
2014-08-26 22:28:32 +02:00
for ( unsigned int j = minBucket ; j < = maxBucket ; j + + ) {
txSum + = txCtAvg [ j ] ;
}
if ( foundAnswer & & txSum ! = 0 ) {
txSum = txSum / 2 ;
for ( unsigned int j = minBucket ; j < = maxBucket ; j + + ) {
if ( txCtAvg [ j ] < txSum )
txSum - = txCtAvg [ j ] ;
else { // we're in the right bucket
median = avg [ j ] / txCtAvg [ j ] ;
break ;
}
}
2017-01-24 22:30:03 +01:00
passBucket . start = minBucket ? buckets [ minBucket - 1 ] : 0 ;
passBucket . end = buckets [ maxBucket ] ;
}
// If we were passing until we reached last few buckets with insufficient data, then report those as failed
if ( passing & & ! newBucketRange ) {
unsigned int failMinBucket = std : : min ( curNearBucket , curFarBucket ) ;
unsigned int failMaxBucket = std : : max ( curNearBucket , curFarBucket ) ;
failBucket . start = failMinBucket ? buckets [ failMinBucket - 1 ] : 0 ;
failBucket . end = buckets [ failMaxBucket ] ;
failBucket . withinTarget = nConf ;
failBucket . totalConfirmed = totalNum ;
failBucket . inMempool = extraNum ;
2017-03-09 21:26:05 +01:00
failBucket . leftMempool = failNum ;
2014-08-26 22:28:32 +02:00
}
2017-03-10 22:57:21 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " FeeEst: %d %s%.0f%% decay %.5f: feerate: %g from (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) \n " ,
2017-01-24 22:30:03 +01:00
confTarget , requireGreater ? " > " : " < " , 100.0 * successBreakPoint , decay ,
median , passBucket . start , passBucket . end ,
2017-03-09 21:26:05 +01:00
100 * passBucket . withinTarget / ( passBucket . totalConfirmed + passBucket . inMempool + passBucket . leftMempool ) ,
passBucket . withinTarget , passBucket . totalConfirmed , passBucket . inMempool , passBucket . leftMempool ,
2017-01-24 22:30:03 +01:00
failBucket . start , failBucket . end ,
2017-03-09 21:26:05 +01:00
100 * failBucket . withinTarget / ( failBucket . totalConfirmed + failBucket . inMempool + failBucket . leftMempool ) ,
failBucket . withinTarget , failBucket . totalConfirmed , failBucket . inMempool , failBucket . leftMempool ) ;
2014-08-26 22:28:32 +02:00
2017-01-24 22:30:03 +01:00
if ( result ) {
result - > pass = passBucket ;
result - > fail = failBucket ;
result - > decay = decay ;
2017-04-03 17:31:27 +02:00
result - > scale = scale ;
2017-01-24 22:30:03 +01:00
}
2014-08-26 22:28:32 +02:00
return median ;
}
2017-02-15 21:48:48 +01:00
void TxConfirmStats : : Write ( CAutoFile & fileout ) const
2014-08-26 22:28:32 +02:00
{
fileout < < decay ;
2017-02-23 21:13:41 +01:00
fileout < < scale ;
2014-08-26 22:28:32 +02:00
fileout < < avg ;
fileout < < txCtAvg ;
fileout < < confAvg ;
2017-02-23 21:13:41 +01:00
fileout < < failAvg ;
2014-08-26 22:28:32 +02:00
}
2017-02-23 21:13:41 +01:00
void TxConfirmStats : : Read ( CAutoFile & filein , int nFileVersion , size_t numBuckets )
2014-08-26 22:28:32 +02:00
{
2017-02-23 21:13:41 +01:00
// Read data file and do some very basic sanity checking
// buckets and bucketMap are not updated yet, so don't access them
// If there is a read failure, we'll just discard this entire object anyway
2017-04-03 17:31:27 +02:00
size_t maxConfirms , maxPeriods ;
2017-02-23 21:13:41 +01:00
// The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
if ( nFileVersion > = 149900 ) {
filein > > decay ;
if ( decay < = 0 | | decay > = 1 ) {
throw std : : runtime_error ( " Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive) " ) ;
}
2017-04-03 17:31:27 +02:00
filein > > scale ;
2017-02-23 21:13:41 +01:00
}
filein > > avg ;
if ( avg . size ( ) ! = numBuckets ) {
2016-03-21 18:04:40 +01:00
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in feerate average bucket count " ) ;
2017-02-23 21:13:41 +01:00
}
filein > > txCtAvg ;
if ( txCtAvg . size ( ) ! = numBuckets ) {
2014-08-26 22:28:32 +02:00
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in tx count bucket count " ) ;
2017-02-23 21:13:41 +01:00
}
filein > > confAvg ;
2017-04-03 17:31:27 +02:00
maxPeriods = confAvg . size ( ) ;
maxConfirms = scale * maxPeriods ;
2017-02-23 21:13:41 +01:00
if ( maxConfirms < = 0 | | maxConfirms > 6 * 24 * 7 ) { // one week
2014-08-26 22:28:32 +02:00
throw std : : runtime_error ( " Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms " ) ;
}
2017-04-03 17:31:27 +02:00
for ( unsigned int i = 0 ; i < maxPeriods ; i + + ) {
2017-02-23 21:13:41 +01:00
if ( confAvg [ i ] . size ( ) ! = numBuckets ) {
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in feerate conf average bucket count " ) ;
}
2014-08-26 22:28:32 +02:00
}
2017-02-23 21:13:41 +01:00
if ( nFileVersion > = 149900 ) {
filein > > failAvg ;
2017-04-03 17:31:27 +02:00
if ( maxPeriods ! = failAvg . size ( ) ) {
2017-03-09 21:26:05 +01:00
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in confirms tracked for failures " ) ;
}
2017-04-03 17:31:27 +02:00
for ( unsigned int i = 0 ; i < maxPeriods ; i + + ) {
2017-03-09 21:26:05 +01:00
if ( failAvg [ i ] . size ( ) ! = numBuckets ) {
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in one of failure average bucket counts " ) ;
}
}
} else {
failAvg . resize ( confAvg . size ( ) ) ;
for ( unsigned int i = 0 ; i < failAvg . size ( ) ; i + + ) {
failAvg [ i ] . resize ( numBuckets ) ;
}
2014-08-26 22:28:32 +02:00
}
2017-02-23 21:13:41 +01:00
// Resize the current block variables which aren't stored in the data file
// to match the number of confirms and buckets
resizeInMemoryCounters ( numBuckets ) ;
2014-08-26 22:28:32 +02:00
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Reading estimates: %u buckets counting confirms up to %u blocks \n " ,
2016-03-21 18:04:40 +01:00
numBuckets , maxConfirms ) ;
2014-08-26 22:28:32 +02:00
}
unsigned int TxConfirmStats : : NewTx ( unsigned int nBlockHeight , double val )
{
unsigned int bucketindex = bucketMap . lower_bound ( val ) - > second ;
unsigned int blockIndex = nBlockHeight % unconfTxs . size ( ) ;
unconfTxs [ blockIndex ] [ bucketindex ] + + ;
return bucketindex ;
}
2017-03-09 21:26:05 +01:00
void TxConfirmStats : : removeTx ( unsigned int entryHeight , unsigned int nBestSeenHeight , unsigned int bucketindex , bool inBlock )
2014-08-26 22:28:32 +02:00
{
//nBestSeenHeight is not updated yet for the new block
int blocksAgo = nBestSeenHeight - entryHeight ;
if ( nBestSeenHeight = = 0 ) // the BlockPolicyEstimator hasn't seen any blocks yet
blocksAgo = 0 ;
if ( blocksAgo < 0 ) {
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, blocks ago is negative for mempool tx \n " ) ;
2015-08-09 01:17:27 +02:00
return ; //This can't happen because we call this with our best seen height, no entries can have higher
2014-08-26 22:28:32 +02:00
}
if ( blocksAgo > = ( int ) unconfTxs . size ( ) ) {
2016-12-25 21:19:40 +01:00
if ( oldUnconfTxs [ bucketindex ] > 0 ) {
2014-08-26 22:28:32 +02:00
oldUnconfTxs [ bucketindex ] - - ;
2016-12-25 21:19:40 +01:00
} else {
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already \n " ,
2014-08-26 22:28:32 +02:00
bucketindex ) ;
2016-12-25 21:19:40 +01:00
}
2014-08-26 22:28:32 +02:00
}
else {
unsigned int blockIndex = entryHeight % unconfTxs . size ( ) ;
2016-12-25 21:19:40 +01:00
if ( unconfTxs [ blockIndex ] [ bucketindex ] > 0 ) {
2014-08-26 22:28:32 +02:00
unconfTxs [ blockIndex ] [ bucketindex ] - - ;
2016-12-25 21:19:40 +01:00
} else {
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already \n " ,
2014-08-26 22:28:32 +02:00
blockIndex , bucketindex ) ;
2016-12-25 21:19:40 +01:00
}
2014-08-26 22:28:32 +02:00
}
2017-04-03 17:31:27 +02:00
if ( ! inBlock & & ( unsigned int ) blocksAgo > = scale ) { // Only counts as a failure if not confirmed for entire period
unsigned int periodsAgo = blocksAgo / scale ;
for ( size_t i = 0 ; i < periodsAgo & & i < failAvg . size ( ) ; i + + ) {
2017-03-09 21:26:05 +01:00
failAvg [ i ] [ bucketindex ] + + ;
}
}
2014-08-26 22:28:32 +02:00
}
2016-11-29 19:55:26 +01:00
// This function is called from CTxMemPool::removeUnchecked to ensure
// txs removed from the mempool for any reason are no longer
// tracked. Txs that were part of a block have already been removed in
// processBlockTx to ensure they are never double tracked, but it is
// of no harm to try to remove them again.
2017-03-09 21:26:05 +01:00
bool CBlockPolicyEstimator : : removeTx ( uint256 hash , bool inBlock )
2014-08-26 22:28:32 +02:00
{
2017-02-15 15:24:11 +01:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
std : : map < uint256 , TxStatsInfo > : : iterator pos = mapMemPoolTxs . find ( hash ) ;
2016-11-10 20:16:42 +01:00
if ( pos ! = mapMemPoolTxs . end ( ) ) {
2017-03-09 21:26:05 +01:00
feeStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex , inBlock ) ;
shortStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex , inBlock ) ;
longStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex , inBlock ) ;
2016-11-10 20:16:42 +01:00
mapMemPoolTxs . erase ( hash ) ;
return true ;
} else {
return false ;
2014-08-26 22:28:32 +02:00
}
}
2017-01-13 22:25:15 +01:00
// Build the shared feerate bucket table and the three horizon trackers.
CBlockPolicyEstimator::CBlockPolicyEstimator()
    : nBestSeenHeight(0), firstRecordedHeight(0), historicalFirst(0), historicalBest(0), trackedTxs(0), untrackedTxs(0)
{
    static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
    minTrackedFee = CFeeRate(MIN_BUCKET_FEERATE);

    // Geometrically-spaced buckets from the minimum tracked feerate up to the maximum
    size_t bucketIndex = 0;
    for (double bucketBoundary = minTrackedFee.GetFeePerK(); bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING, bucketIndex++) {
        buckets.push_back(bucketBoundary);
        bucketMap[bucketBoundary] = bucketIndex;
    }
    // Catch-all bucket for anything above the maximum tracked feerate
    buckets.push_back(INF_FEERATE);
    bucketMap[INF_FEERATE] = bucketIndex;
    assert(bucketMap.size() == buckets.size());

    feeStats = new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE);
    shortStats = new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE);
    longStats = new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE);
}
// Release the horizon trackers allocated in the constructor.
CBlockPolicyEstimator::~CBlockPolicyEstimator()
{
    delete feeStats;
    delete shortStats;
    delete longStats;
}
2016-11-11 18:48:01 +01:00
void CBlockPolicyEstimator : : processTransaction ( const CTxMemPoolEntry & entry , bool validFeeEstimate )
2014-08-26 22:28:32 +02:00
{
2017-02-15 15:24:11 +01:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
unsigned int txHeight = entry . GetHeight ( ) ;
uint256 hash = entry . GetTx ( ) . GetHash ( ) ;
2016-03-21 18:04:40 +01:00
if ( mapMemPoolTxs . count ( hash ) ) {
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error mempool tx %s already being tracked \n " ,
2014-08-26 22:28:32 +02:00
hash . ToString ( ) . c_str ( ) ) ;
return ;
}
2016-11-29 21:40:03 +01:00
if ( txHeight ! = nBestSeenHeight ) {
2014-08-26 22:28:32 +02:00
// Ignore side chains and re-orgs; assuming they are random they don't
// affect the estimate. We'll potentially double count transactions in 1-block reorgs.
2016-11-29 21:40:03 +01:00
// Ignore txs if BlockPolicyEstimator is not in sync with chainActive.Tip().
// It will be synced next time a block is processed.
2014-08-26 22:28:32 +02:00
return ;
}
// Only want to be updating estimates when our blockchain is synced,
// otherwise we'll miscalculate how many blocks its taking to get included.
2016-11-11 19:40:27 +01:00
if ( ! validFeeEstimate ) {
untrackedTxs + + ;
2014-08-26 22:28:32 +02:00
return ;
2016-11-11 19:40:27 +01:00
}
trackedTxs + + ;
2014-08-26 22:28:32 +02:00
2016-03-21 18:04:40 +01:00
// Feerates are stored and reported as BTC-per-kb:
2014-08-26 22:28:32 +02:00
CFeeRate feeRate ( entry . GetFee ( ) , entry . GetTxSize ( ) ) ;
mapMemPoolTxs [ hash ] . blockHeight = txHeight ;
2017-03-09 20:02:00 +01:00
unsigned int bucketIndex = feeStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
mapMemPoolTxs [ hash ] . bucketIndex = bucketIndex ;
unsigned int bucketIndex2 = shortStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
assert ( bucketIndex = = bucketIndex2 ) ;
unsigned int bucketIndex3 = longStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
assert ( bucketIndex = = bucketIndex3 ) ;
2014-08-26 22:28:32 +02:00
}
2016-11-11 19:40:27 +01:00
bool CBlockPolicyEstimator : : processBlockTx ( unsigned int nBlockHeight , const CTxMemPoolEntry * entry )
2014-08-26 22:28:32 +02:00
{
2017-03-09 21:26:05 +01:00
if ( ! removeTx ( entry - > GetTx ( ) . GetHash ( ) , true ) ) {
2016-11-11 17:57:51 +01:00
// This transaction wasn't being tracked for fee estimation
2016-11-11 19:40:27 +01:00
return false ;
2014-08-26 22:28:32 +02:00
}
// How many blocks did it take for miners to include this transaction?
// blocksToConfirm is 1-based, so a transaction included in the earliest
// possible block has confirmation count of 1
2016-11-11 20:16:42 +01:00
int blocksToConfirm = nBlockHeight - entry - > GetHeight ( ) ;
2014-08-26 22:28:32 +02:00
if ( blocksToConfirm < = 0 ) {
// This can't happen because we don't process transactions from a block with a height
// lower than our greatest seen height
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error Transaction had negative blocksToConfirm \n " ) ;
2016-11-11 19:40:27 +01:00
return false ;
2014-08-26 22:28:32 +02:00
}
2016-03-21 18:04:40 +01:00
// Feerates are stored and reported as BTC-per-kb:
2016-11-11 20:16:42 +01:00
CFeeRate feeRate ( entry - > GetFee ( ) , entry - > GetTxSize ( ) ) ;
2014-08-26 22:28:32 +02:00
2017-02-16 22:23:15 +01:00
feeStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
2017-03-09 20:02:00 +01:00
shortStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
longStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
2016-11-11 19:40:27 +01:00
return true ;
2014-08-26 22:28:32 +02:00
}
void CBlockPolicyEstimator : : processBlock ( unsigned int nBlockHeight ,
2016-11-11 20:16:42 +01:00
std : : vector < const CTxMemPoolEntry * > & entries )
2014-08-26 22:28:32 +02:00
{
2017-02-15 15:24:11 +01:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
if ( nBlockHeight < = nBestSeenHeight ) {
// Ignore side chains and re-orgs; assuming they are random
// they don't affect the estimate.
// And if an attacker can re-org the chain at will, then
// you've got much bigger problems than "attacker can influence
// transaction fees."
return ;
}
2016-11-11 17:57:51 +01:00
// Must update nBestSeenHeight in sync with ClearCurrent so that
// calls to removeTx (via processBlockTx) correctly calculate age
// of unconfirmed txs to remove from tracking.
nBestSeenHeight = nBlockHeight ;
2017-02-28 23:29:42 +01:00
// Update unconfirmed circular buffer
2017-02-16 22:23:15 +01:00
feeStats - > ClearCurrent ( nBlockHeight ) ;
2017-03-09 20:02:00 +01:00
shortStats - > ClearCurrent ( nBlockHeight ) ;
longStats - > ClearCurrent ( nBlockHeight ) ;
2014-08-26 22:28:32 +02:00
2017-02-28 23:29:42 +01:00
// Decay all exponential averages
feeStats - > UpdateMovingAverages ( ) ;
shortStats - > UpdateMovingAverages ( ) ;
longStats - > UpdateMovingAverages ( ) ;
2016-11-11 19:40:27 +01:00
unsigned int countedTxs = 0 ;
2017-02-28 23:29:42 +01:00
// Update averages with data points from current block
2016-11-11 19:40:27 +01:00
for ( unsigned int i = 0 ; i < entries . size ( ) ; i + + ) {
if ( processBlockTx ( nBlockHeight , entries [ i ] ) )
countedTxs + + ;
}
2014-08-26 22:28:32 +02:00
2017-03-07 21:01:50 +01:00
if ( firstRecordedHeight = = 0 & & countedTxs > 0 ) {
firstRecordedHeight = nBestSeenHeight ;
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy first recorded height %u \n " , firstRecordedHeight ) ;
}
2014-08-26 22:28:32 +02:00
2017-03-10 22:57:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy estimates updated by %u of %u block txs, since last block %u of %u tracked, mempool map size %u, max target %u from %s \n " ,
countedTxs , entries . size ( ) , trackedTxs , trackedTxs + untrackedTxs , mapMemPoolTxs . size ( ) ,
MaxUsableEstimate ( ) , HistoricalBlockSpan ( ) > BlockSpan ( ) ? " historical " : " current " ) ;
2016-11-11 19:40:27 +01:00
trackedTxs = 0 ;
untrackedTxs = 0 ;
2014-08-26 22:28:32 +02:00
}
2017-02-15 21:23:34 +01:00
// Legacy entry point: a medium-horizon estimate at the default success threshold.
CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget) const
{
    // It's not possible to get reasonable estimates for confTarget of 1
    if (confTarget <= 1)
        return CFeeRate(0);
    return estimateRawFee(confTarget, DOUBLE_SUCCESS_PCT, FeeEstimateHorizon::MED_HALFLIFE);
}
/** Query a single tracking horizon directly for a median feerate estimate.
 *  @param confTarget        desired number of blocks to confirmation
 *  @param successThreshold  required fraction of txs confirmed within target (must be <= 1)
 *  @param horizon           which TxConfirmStats instance to consult
 *  @param[out] result       optional detailed estimation data
 *  @return CFeeRate(0) on any failure, otherwise the median feerate. */
CFeeRate CBlockPolicyEstimator::estimateRawFee(int confTarget, double successThreshold, FeeEstimateHorizon horizon, EstimationResult* result) const
{
    TxConfirmStats* horizonStats = nullptr;
    // Only the short horizon uses a different data-sufficiency threshold.
    double requiredTxs = SUFFICIENT_FEETXS;
    switch (horizon) {
    case FeeEstimateHorizon::SHORT_HALFLIFE:
        horizonStats = shortStats;
        requiredTxs = SUFFICIENT_TXS_SHORT;
        break;
    case FeeEstimateHorizon::MED_HALFLIFE:
        horizonStats = feeStats;
        break;
    case FeeEstimateHorizon::LONG_HALFLIFE:
        horizonStats = longStats;
        break;
    default:
        return CFeeRate(0);
    }

    LOCK(cs_feeEstimator);

    // Fail when asked about a target beyond what this horizon tracks,
    // or given a threshold that no bucket could ever satisfy.
    if (confTarget <= 0 || (unsigned int)confTarget > horizonStats->GetMaxConfirms())
        return CFeeRate(0);
    if (successThreshold > 1)
        return CFeeRate(0);

    const double median = horizonStats->EstimateMedianVal(confTarget, requiredTxs, successThreshold, true, nBestSeenHeight, result);
    return (median < 0) ? CFeeRate(0) : CFeeRate(median);
}
2017-03-10 22:57:40 +01:00
unsigned int CBlockPolicyEstimator : : BlockSpan ( ) const
{
if ( firstRecordedHeight = = 0 ) return 0 ;
assert ( nBestSeenHeight > = firstRecordedHeight ) ;
return nBestSeenHeight - firstRecordedHeight ;
}
unsigned int CBlockPolicyEstimator : : HistoricalBlockSpan ( ) const
{
if ( historicalFirst = = 0 ) return 0 ;
assert ( historicalBest > = historicalFirst ) ;
if ( nBestSeenHeight - historicalBest > OLDEST_ESTIMATE_HISTORY ) return 0 ;
return historicalBest - historicalFirst ;
}
unsigned int CBlockPolicyEstimator : : MaxUsableEstimate ( ) const
{
// Block spans are divided by 2 to make sure there are enough potential failing data points for the estimate
return std : : min ( longStats - > GetMaxConfirms ( ) , std : : max ( BlockSpan ( ) , HistoricalBlockSpan ( ) ) / 2 ) ;
}
2017-03-07 17:33:44 +01:00
/** Return a fee estimate at the required successThreshold from the shortest
* time horizon which tracks confirmations up to the desired target . If
* checkShorterHorizon is requested , also allow short time horizon estimates
* for a lower target to reduce the given answer */
double CBlockPolicyEstimator : : estimateCombinedFee ( unsigned int confTarget , double successThreshold , bool checkShorterHorizon ) const
{
double estimate = - 1 ;
if ( confTarget > = 1 & & confTarget < = longStats - > GetMaxConfirms ( ) ) {
// Find estimate from shortest time horizon possible
if ( confTarget < = shortStats - > GetMaxConfirms ( ) ) { // short horizon
estimate = shortStats - > EstimateMedianVal ( confTarget , SUFFICIENT_TXS_SHORT , successThreshold , true , nBestSeenHeight ) ;
}
else if ( confTarget < = feeStats - > GetMaxConfirms ( ) ) { // medium horizon
estimate = feeStats - > EstimateMedianVal ( confTarget , SUFFICIENT_FEETXS , successThreshold , true , nBestSeenHeight ) ;
}
else { // long horizon
estimate = longStats - > EstimateMedianVal ( confTarget , SUFFICIENT_FEETXS , successThreshold , true , nBestSeenHeight ) ;
}
if ( checkShorterHorizon ) {
// If a lower confTarget from a more recent horizon returns a lower answer use it.
if ( confTarget > feeStats - > GetMaxConfirms ( ) ) {
double medMax = feeStats - > EstimateMedianVal ( feeStats - > GetMaxConfirms ( ) , SUFFICIENT_FEETXS , successThreshold , true , nBestSeenHeight ) ;
if ( medMax > 0 & & ( estimate = = - 1 | | medMax < estimate ) )
estimate = medMax ;
}
if ( confTarget > shortStats - > GetMaxConfirms ( ) ) {
double shortMax = shortStats - > EstimateMedianVal ( shortStats - > GetMaxConfirms ( ) , SUFFICIENT_TXS_SHORT , successThreshold , true , nBestSeenHeight ) ;
if ( shortMax > 0 & & ( estimate = = - 1 | | shortMax < estimate ) )
estimate = shortMax ;
}
}
}
return estimate ;
}
double CBlockPolicyEstimator : : estimateConservativeFee ( unsigned int doubleTarget ) const
{
double estimate = - 1 ;
if ( doubleTarget < = shortStats - > GetMaxConfirms ( ) ) {
estimate = feeStats - > EstimateMedianVal ( doubleTarget , SUFFICIENT_FEETXS , DOUBLE_SUCCESS_PCT , true , nBestSeenHeight ) ;
}
if ( doubleTarget < = feeStats - > GetMaxConfirms ( ) ) {
double longEstimate = longStats - > EstimateMedianVal ( doubleTarget , SUFFICIENT_FEETXS , DOUBLE_SUCCESS_PCT , true , nBestSeenHeight ) ;
if ( longEstimate > estimate ) {
estimate = longEstimate ;
}
}
return estimate ;
}
// Smart fee estimation: combines several success thresholds and horizons,
// clamps the target to what the collected data can support, and floors the
// answer at the mempool's current minimum feerate.
// @param confTarget           desired confirmation target (clamped internally)
// @param[out] answerFoundAtTarget  the target actually answered for, if non-null
// @param pool                 mempool, consulted for its min fee floor
// @param conservative         if true, prefer higher (safer) estimates
// @return CFeeRate(0) on failure.
CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool, bool conservative) const
{
    // Report the requested target up front so early failure paths still set it.
    if (answerFoundAtTarget)
        *answerFoundAtTarget = confTarget;

    double median = -1;
    {
        LOCK(cs_feeEstimator);

        // Return failure if trying to analyze a target we're not tracking
        if (confTarget <= 0 || (unsigned int)confTarget > longStats->GetMaxConfirms())
            return CFeeRate(0);

        // It's not possible to get reasonable estimates for confTarget of 1
        if (confTarget == 1)
            confTarget = 2;

        // Clamp the target to the span actually backed by data; with <= 1
        // usable block of data there is nothing meaningful to answer.
        unsigned int maxUsableEstimate = MaxUsableEstimate();
        if (maxUsableEstimate <= 1)
            return CFeeRate(0);
        if ((unsigned int)confTarget > maxUsableEstimate) {
            confTarget = maxUsableEstimate;
        }

        assert(confTarget > 0); //estimateCombinedFee and estimateConservativeFee take unsigned ints
        /** true is passed to estimateCombined fee for target/2 and target so
         * that we check the max confirms for shorter time horizons as well.
         * This is necessary to preserve monotonically increasing estimates.
         * For non-conservative estimates we do the same thing for 2*target, but
         * for conservative estimates we want to skip these shorter horizons
         * checks for 2*target because we are taking the max over all time
         * horizons so we already have monotonically increasing estimates and
         * the purpose of conservative estimates is not to let short term
         * fluctuations lower our estimates by too much.
         */
        double halfEst = estimateCombinedFee(confTarget/2, HALF_SUCCESS_PCT, true);
        double actualEst = estimateCombinedFee(confTarget, SUCCESS_PCT, true);
        double doubleEst = estimateCombinedFee(2 * confTarget, DOUBLE_SUCCESS_PCT, !conservative);
        // Take the maximum of the three thresholds (monotonicity).
        median = halfEst;
        if (actualEst > median) {
            median = actualEst;
        }
        if (doubleEst > median) {
            median = doubleEst;
        }

        // Conservative mode (or total failure above) also consults the
        // conservative cross-horizon estimate for double the target.
        if (conservative || median == -1) {
            double consEst = estimateConservativeFee(2 * confTarget);
            if (consEst > median) {
                median = consEst;
            }
        }
    } // Must unlock cs_feeEstimator before taking mempool locks

    // Report the (possibly clamped) target that was actually answered.
    if (answerFoundAtTarget)
        *answerFoundAtTarget = confTarget;

    // If mempool is limiting txs, return at least the min feerate from the mempool
    CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
    if (minPoolFee > 0 && minPoolFee > median)
        return CFeeRate(minPoolFee);

    if (median < 0)
        return CFeeRate(0);

    return CFeeRate(median);
}
2017-03-07 17:33:44 +01:00
2017-02-15 21:48:48 +01:00
bool CBlockPolicyEstimator : : Write ( CAutoFile & fileout ) const
2014-08-26 22:28:32 +02:00
{
2017-02-15 21:48:48 +01:00
try {
LOCK ( cs_feeEstimator ) ;
2017-02-23 21:13:41 +01:00
fileout < < 149900 ; // version required to read: 0.14.99 or later
2017-02-15 21:48:48 +01:00
fileout < < CLIENT_VERSION ; // version that wrote the file
fileout < < nBestSeenHeight ;
2017-03-10 22:57:40 +01:00
if ( BlockSpan ( ) > HistoricalBlockSpan ( ) / 2 ) {
fileout < < firstRecordedHeight < < nBestSeenHeight ;
}
else {
fileout < < historicalFirst < < historicalBest ;
}
2017-02-23 21:13:41 +01:00
fileout < < buckets ;
2017-02-16 22:23:15 +01:00
feeStats - > Write ( fileout ) ;
2017-02-23 21:13:41 +01:00
shortStats - > Write ( fileout ) ;
longStats - > Write ( fileout ) ;
2017-02-15 21:48:48 +01:00
}
catch ( const std : : exception & ) {
LogPrintf ( " CBlockPolicyEstimator::Write(): unable to read policy estimator data (non-fatal) \n " ) ;
return false ;
}
return true ;
2014-08-26 22:28:32 +02:00
}
// Deserialize estimator state from disk.
// All data is parsed into temporaries first; the estimator's own members are
// only mutated after the whole file has been read successfully, so an
// exception mid-file leaves the in-memory state untouched.
// @return false (non-fatal) on any parse error or version mismatch.
bool CBlockPolicyEstimator::Read(CAutoFile& filein)
{
    try {
        LOCK(cs_feeEstimator);
        int nVersionRequired, nVersionThatWrote;
        unsigned int nFileBestSeenHeight, nFileHistoricalFirst, nFileHistoricalBest;
        filein >> nVersionRequired >> nVersionThatWrote;
        // Refuse files written by a future format this code can't parse.
        if (nVersionRequired > CLIENT_VERSION)
            return error("CBlockPolicyEstimator::Read(): up-version (%d) fee estimate file", nVersionRequired);

        // Read fee estimates file into temporary variables so existing data
        // structures aren't corrupted if there is an exception.
        filein >> nFileBestSeenHeight;

        if (nVersionThatWrote < 149900) {
            // Read the old fee estimates file for temporary use, but then discard.  Will start collecting data from scratch.
            // decay is stored before buckets in old versions, so pre-read decay and pass into TxConfirmStats constructor
            double tempDecay;
            filein >> tempDecay;
            if (tempDecay <= 0 || tempDecay >= 1)
                throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
            std::vector<double> tempBuckets;
            filein >> tempBuckets;
            size_t tempNum = tempBuckets.size();
            if (tempNum <= 1 || tempNum > 1000)
                throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
            std::map<double, unsigned int> tempMap;
            // Parse (and validate) the old-format stats purely to advance the
            // stream; the data itself is intentionally discarded.
            std::unique_ptr<TxConfirmStats> tempFeeStats(new TxConfirmStats(tempBuckets, tempMap, MED_BLOCK_PERIODS, tempDecay, 1));
            tempFeeStats->Read(filein, nVersionThatWrote, tempNum);
            // if nVersionThatWrote < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
            tempMap.clear();
            for (unsigned int i = 0; i < tempBuckets.size(); i++) {
                tempMap[tempBuckets[i]] = i;
            }
        }
        else { // nVersionThatWrote >= 149900
            filein >> nFileHistoricalFirst >> nFileHistoricalBest;
            if (nFileHistoricalFirst > nFileHistoricalBest || nFileHistoricalBest > nFileBestSeenHeight) {
                throw std::runtime_error("Corrupt estimates file. Historical block range for estimates is invalid");
            }
            std::vector<double> fileBuckets;
            filein >> fileBuckets;
            size_t numBuckets = fileBuckets.size();
            if (numBuckets <= 1 || numBuckets > 1000)
                throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");

            // New stats objects reference the estimator's own buckets/bucketMap,
            // which are refreshed from the file below before these are installed.
            std::unique_ptr<TxConfirmStats> fileFeeStats(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE));
            std::unique_ptr<TxConfirmStats> fileShortStats(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
            std::unique_ptr<TxConfirmStats> fileLongStats(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));
            fileFeeStats->Read(filein, nVersionThatWrote, numBuckets);
            fileShortStats->Read(filein, nVersionThatWrote, numBuckets);
            fileLongStats->Read(filein, nVersionThatWrote, numBuckets);

            // Fee estimates file parsed correctly
            // Copy buckets from file and refresh our bucketmap
            buckets = fileBuckets;
            bucketMap.clear();
            for (unsigned int i = 0; i < buckets.size(); i++) {
                bucketMap[buckets[i]] = i;
            }
            // Destroy old TxConfirmStats and point to new ones that already reference buckets and bucketMap
            delete feeStats;
            delete shortStats;
            delete longStats;
            feeStats = fileFeeStats.release();
            shortStats = fileShortStats.release();
            longStats = fileLongStats.release();
            nBestSeenHeight = nFileBestSeenHeight;
            historicalFirst = nFileHistoricalFirst;
            historicalBest = nFileHistoricalBest;
        }
    }
    catch (const std::exception& e) {
        LogPrintf("CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal): %s\n", e.what());
        return false;
    }
    return true;
}
2016-02-12 21:57:15 +01:00
2017-03-09 21:26:05 +01:00
void CBlockPolicyEstimator : : FlushUnconfirmed ( CTxMemPool & pool ) {
int64_t startclear = GetTimeMicros ( ) ;
std : : vector < uint256 > txids ;
pool . queryHashes ( txids ) ;
LOCK ( cs_feeEstimator ) ;
for ( auto & txid : txids ) {
removeTx ( txid , false ) ;
}
int64_t endclear = GetTimeMicros ( ) ;
LogPrint ( BCLog : : ESTIMATEFEE , " Recorded %u unconfirmed txs from mempool in %ld micros \n " , txids . size ( ) , endclear - startclear ) ;
}
2016-02-12 21:57:15 +01:00
FeeFilterRounder : : FeeFilterRounder ( const CFeeRate & minIncrementalFee )
{
2016-12-06 03:46:08 +01:00
CAmount minFeeLimit = std : : max ( CAmount ( 1 ) , minIncrementalFee . GetFeePerK ( ) / 2 ) ;
2016-02-12 21:57:15 +01:00
feeset . insert ( 0 ) ;
2017-03-09 20:02:00 +01:00
for ( double bucketBoundary = minFeeLimit ; bucketBoundary < = MAX_FILTER_FEERATE ; bucketBoundary * = FEE_FILTER_SPACING ) {
2016-02-12 21:57:15 +01:00
feeset . insert ( bucketBoundary ) ;
}
}
/** Round a feerate down to a boundary in feeset, randomly stepping down one
 *  extra boundary ~2/3 of the time (and always when past the largest one) to
 *  obfuscate the exact value. */
CAmount FeeFilterRounder::round(CAmount currentMinFee)
{
    auto pos = feeset.lower_bound(currentMinFee);
    // Keep the original short-circuit order: rand32() is consumed only when
    // pos != begin, so the RNG stream advances identically.
    if ((pos != feeset.begin() && insecure_rand.rand32() % 3 != 0) || pos == feeset.end()) {
        --pos;
    }
    return *pos;
}