2014-08-26 22:28:32 +02:00
// Copyright (c) 2009-2010 Satoshi Nakamoto
2016-12-31 19:01:21 +01:00
// Copyright (c) 2009-2016 The Bitcoin Core developers
2014-08-26 22:28:32 +02:00
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
# include "policy/fees.h"
2015-11-16 21:21:51 +01:00
# include "policy/policy.h"
2014-08-26 22:28:32 +02:00
# include "amount.h"
2017-02-15 21:48:48 +01:00
# include "clientversion.h"
2014-08-26 22:28:32 +02:00
# include "primitives/transaction.h"
2016-02-12 21:57:15 +01:00
# include "random.h"
2014-08-26 22:28:32 +02:00
# include "streams.h"
# include "txmempool.h"
# include "util.h"
/** Sentinel feerate used as the upper bound of the final (catch-all) bucket. */
static constexpr double INF_FEERATE = 1e99;
2017-02-16 23:27:20 +01:00
/**
* We will instantiate an instance of this class to track transactions that were
* included in a block . We will lump transactions into a bucket according to their
* approximate feerate and then track how long it took for those txs to be included in a block
*
* The tracking of unconfirmed ( mempool ) transactions is completely independent of the
* historical tracking of transactions that have been confirmed in a block .
*/
class TxConfirmStats
{
private :
//Define the buckets we will group transactions into
2017-02-23 21:13:41 +01:00
const std : : vector < double > & buckets ; // The upper-bound of the range for the bucket (inclusive)
const std : : map < double , unsigned int > & bucketMap ; // Map of bucket upper-bound to index into all vectors by bucket
2017-02-16 23:27:20 +01:00
// For each bucket X:
// Count the total # of txs in each bucket
// Track the historical moving average of this total over blocks
std : : vector < double > txCtAvg ;
// Count the total # of txs confirmed within Y blocks in each bucket
// Track the historical moving average of theses totals over blocks
2017-02-23 21:13:41 +01:00
std : : vector < std : : vector < double > > confAvg ; // confAvg[Y][X]
std : : vector < std : : vector < double > > failAvg ; // future use
2017-02-16 23:27:20 +01:00
// Sum the total feerate of all tx's in each bucket
// Track the historical moving average of this total over blocks
std : : vector < double > avg ;
// Combine the conf counts with tx counts to calculate the confirmation % for each Y,X
// Combine the total value with the tx counts to calculate the avg feerate per bucket
double decay ;
2017-02-23 21:13:41 +01:00
unsigned int scale ;
2017-02-16 23:27:20 +01:00
// Mempool counts of outstanding transactions
// For each bucket X, track the number of transactions in the mempool
// that are unconfirmed for each possible confirmation value Y
std : : vector < std : : vector < int > > unconfTxs ; //unconfTxs[Y][X]
// transactions still unconfirmed after MAX_CONFIRMS for each bucket
std : : vector < int > oldUnconfTxs ;
2017-02-23 21:13:41 +01:00
void resizeInMemoryCounters ( size_t newbuckets ) ;
2017-02-16 23:27:20 +01:00
public :
/**
* Create new TxConfirmStats . This is called by BlockPolicyEstimator ' s
* constructor with default values .
* @ param defaultBuckets contains the upper limits for the bucket boundaries
* @ param maxConfirms max number of confirms to track
* @ param decay how much to decay the historical moving average per block
*/
2017-02-23 21:13:41 +01:00
TxConfirmStats ( const std : : vector < double > & defaultBuckets , const std : : map < double , unsigned int > & defaultBucketMap ,
unsigned int maxConfirms , double decay ) ;
2017-02-16 23:27:20 +01:00
2017-02-28 23:29:42 +01:00
/** Roll the circular buffer for unconfirmed txs*/
2017-02-16 23:27:20 +01:00
void ClearCurrent ( unsigned int nBlockHeight ) ;
/**
* Record a new transaction data point in the current block stats
* @ param blocksToConfirm the number of blocks it took this transaction to confirm
* @ param val the feerate of the transaction
* @ warning blocksToConfirm is 1 - based and has to be > = 1
*/
void Record ( int blocksToConfirm , double val ) ;
/** Record a new transaction entering the mempool*/
unsigned int NewTx ( unsigned int nBlockHeight , double val ) ;
/** Remove a transaction from mempool tracking stats*/
void removeTx ( unsigned int entryHeight , unsigned int nBestSeenHeight ,
unsigned int bucketIndex ) ;
/** Update our estimates by decaying our historical moving average and updating
with the data gathered from the current block */
void UpdateMovingAverages ( ) ;
/**
* Calculate a feerate estimate . Find the lowest value bucket ( or range of buckets
* to make sure we have enough data points ) whose transactions still have sufficient likelihood
* of being confirmed within the target number of confirmations
* @ param confTarget target number of confirmations
* @ param sufficientTxVal required average number of transactions per block in a bucket range
* @ param minSuccess the success probability we require
* @ param requireGreater return the lowest feerate such that all higher values pass minSuccess OR
* return the highest feerate such that all lower values fail minSuccess
* @ param nBlockHeight the current block height
*/
double EstimateMedianVal ( int confTarget , double sufficientTxVal ,
double minSuccess , bool requireGreater , unsigned int nBlockHeight ) const ;
/** Return the max number of confirms we're tracking */
unsigned int GetMaxConfirms ( ) const { return confAvg . size ( ) ; }
/** Write state of estimation data to a file*/
void Write ( CAutoFile & fileout ) const ;
/**
* Read saved state of estimation data from a file and replace all internal data structures and
* variables with this state .
*/
2017-02-23 21:13:41 +01:00
void Read ( CAutoFile & filein , int nFileVersion , size_t numBuckets ) ;
2017-02-16 23:27:20 +01:00
} ;
2017-02-16 22:23:15 +01:00
TxConfirmStats : : TxConfirmStats ( const std : : vector < double > & defaultBuckets ,
2017-02-23 21:13:41 +01:00
const std : : map < double , unsigned int > & defaultBucketMap ,
2017-02-16 22:23:15 +01:00
unsigned int maxConfirms , double _decay )
2017-02-23 21:13:41 +01:00
: buckets ( defaultBuckets ) , bucketMap ( defaultBucketMap )
2014-08-26 22:28:32 +02:00
{
decay = _decay ;
2017-02-23 21:13:41 +01:00
scale = 1 ;
2014-08-26 22:28:32 +02:00
confAvg . resize ( maxConfirms ) ;
for ( unsigned int i = 0 ; i < maxConfirms ; i + + ) {
confAvg [ i ] . resize ( buckets . size ( ) ) ;
}
txCtAvg . resize ( buckets . size ( ) ) ;
avg . resize ( buckets . size ( ) ) ;
2017-02-23 21:13:41 +01:00
resizeInMemoryCounters ( buckets . size ( ) ) ;
}
void TxConfirmStats : : resizeInMemoryCounters ( size_t newbuckets ) {
// newbuckets must be passed in because the buckets referred to during Read have not been updated yet.
unconfTxs . resize ( GetMaxConfirms ( ) ) ;
for ( unsigned int i = 0 ; i < unconfTxs . size ( ) ; i + + ) {
unconfTxs [ i ] . resize ( newbuckets ) ;
}
oldUnconfTxs . resize ( newbuckets ) ;
2014-08-26 22:28:32 +02:00
}
2017-02-28 23:29:42 +01:00
// Roll the unconfirmed txs circular buffer
2014-08-26 22:28:32 +02:00
void TxConfirmStats : : ClearCurrent ( unsigned int nBlockHeight )
{
for ( unsigned int j = 0 ; j < buckets . size ( ) ; j + + ) {
oldUnconfTxs [ j ] + = unconfTxs [ nBlockHeight % unconfTxs . size ( ) ] [ j ] ;
unconfTxs [ nBlockHeight % unconfTxs . size ( ) ] [ j ] = 0 ;
}
}
void TxConfirmStats : : Record ( int blocksToConfirm , double val )
{
// blocksToConfirm is 1-based
if ( blocksToConfirm < 1 )
return ;
unsigned int bucketindex = bucketMap . lower_bound ( val ) - > second ;
2017-02-28 23:29:42 +01:00
for ( size_t i = blocksToConfirm ; i < = confAvg . size ( ) ; i + + ) {
confAvg [ i - 1 ] [ bucketindex ] + + ;
2014-08-26 22:28:32 +02:00
}
2017-02-28 23:29:42 +01:00
txCtAvg [ bucketindex ] + + ;
avg [ bucketindex ] + = val ;
2014-08-26 22:28:32 +02:00
}
void TxConfirmStats : : UpdateMovingAverages ( )
{
for ( unsigned int j = 0 ; j < buckets . size ( ) ; j + + ) {
for ( unsigned int i = 0 ; i < confAvg . size ( ) ; i + + )
2017-02-28 23:29:42 +01:00
confAvg [ i ] [ j ] = confAvg [ i ] [ j ] * decay ;
avg [ j ] = avg [ j ] * decay ;
txCtAvg [ j ] = txCtAvg [ j ] * decay ;
2014-08-26 22:28:32 +02:00
}
}
// returns -1 on error conditions
double TxConfirmStats : : EstimateMedianVal ( int confTarget , double sufficientTxVal ,
double successBreakPoint , bool requireGreater ,
2017-02-15 21:23:34 +01:00
unsigned int nBlockHeight ) const
2014-08-26 22:28:32 +02:00
{
// Counters for a bucket (or range of buckets)
double nConf = 0 ; // Number of tx's confirmed within the confTarget
double totalNum = 0 ; // Total number of tx's that were ever confirmed
int extraNum = 0 ; // Number of tx's still in mempool for confTarget or longer
int maxbucketindex = buckets . size ( ) - 1 ;
2016-03-21 18:04:40 +01:00
// requireGreater means we are looking for the lowest feerate such that all higher
// values pass, so we start at maxbucketindex (highest feerate) and look at successively
2014-08-26 22:28:32 +02:00
// smaller buckets until we reach failure. Otherwise, we are looking for the highest
2016-03-21 18:04:40 +01:00
// feerate such that all lower values fail, and we go in the opposite direction.
2014-08-26 22:28:32 +02:00
unsigned int startbucket = requireGreater ? maxbucketindex : 0 ;
int step = requireGreater ? - 1 : 1 ;
// We'll combine buckets until we have enough samples.
// The near and far variables will define the range we've combined
// The best variables are the last range we saw which still had a high
// enough confirmation rate to count as success.
// The cur variables are the current range we're counting.
unsigned int curNearBucket = startbucket ;
unsigned int bestNearBucket = startbucket ;
unsigned int curFarBucket = startbucket ;
unsigned int bestFarBucket = startbucket ;
bool foundAnswer = false ;
unsigned int bins = unconfTxs . size ( ) ;
2016-03-21 18:04:40 +01:00
// Start counting from highest(default) or lowest feerate transactions
2014-08-26 22:28:32 +02:00
for ( int bucket = startbucket ; bucket > = 0 & & bucket < = maxbucketindex ; bucket + = step ) {
curFarBucket = bucket ;
nConf + = confAvg [ confTarget - 1 ] [ bucket ] ;
totalNum + = txCtAvg [ bucket ] ;
for ( unsigned int confct = confTarget ; confct < GetMaxConfirms ( ) ; confct + + )
extraNum + = unconfTxs [ ( nBlockHeight - confct ) % bins ] [ bucket ] ;
extraNum + = oldUnconfTxs [ bucket ] ;
// If we have enough transaction data points in this range of buckets,
// we can test for success
// (Only count the confirmed data points, so that each confirmation count
// will be looking at the same amount of data and same bucket breaks)
if ( totalNum > = sufficientTxVal / ( 1 - decay ) ) {
double curPct = nConf / ( totalNum + extraNum ) ;
// Check to see if we are no longer getting confirmed at the success rate
2017-03-02 16:08:25 +01:00
if ( ( requireGreater & & curPct < successBreakPoint ) | | ( ! requireGreater & & curPct > successBreakPoint ) ) {
continue ;
}
2014-08-26 22:28:32 +02:00
// Otherwise update the cumulative stats, and the bucket variables
// and reset the counters
else {
foundAnswer = true ;
nConf = 0 ;
totalNum = 0 ;
extraNum = 0 ;
bestNearBucket = curNearBucket ;
bestFarBucket = curFarBucket ;
curNearBucket = bucket + step ;
}
}
}
double median = - 1 ;
double txSum = 0 ;
2016-03-21 18:04:40 +01:00
// Calculate the "average" feerate of the best bucket range that met success conditions
// Find the bucket with the median transaction and then report the average feerate from that bucket
2014-08-26 22:28:32 +02:00
// This is a compromise between finding the median which we can't since we don't save all tx's
// and reporting the average which is less accurate
unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket ;
unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket ;
for ( unsigned int j = minBucket ; j < = maxBucket ; j + + ) {
txSum + = txCtAvg [ j ] ;
}
if ( foundAnswer & & txSum ! = 0 ) {
txSum = txSum / 2 ;
for ( unsigned int j = minBucket ; j < = maxBucket ; j + + ) {
if ( txCtAvg [ j ] < txSum )
txSum - = txCtAvg [ j ] ;
else { // we're in the right bucket
median = avg [ j ] / txCtAvg [ j ] ;
break ;
}
}
}
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " %3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool) \n " ,
2016-03-21 18:04:40 +01:00
confTarget , requireGreater ? " > " : " < " , successBreakPoint ,
2014-08-26 22:28:32 +02:00
requireGreater ? " > " : " < " , median , buckets [ minBucket ] , buckets [ maxBucket ] ,
100 * nConf / ( totalNum + extraNum ) , nConf , totalNum , extraNum ) ;
return median ;
}
2017-02-15 21:48:48 +01:00
void TxConfirmStats : : Write ( CAutoFile & fileout ) const
2014-08-26 22:28:32 +02:00
{
fileout < < decay ;
2017-02-23 21:13:41 +01:00
fileout < < scale ;
2014-08-26 22:28:32 +02:00
fileout < < avg ;
fileout < < txCtAvg ;
fileout < < confAvg ;
2017-02-23 21:13:41 +01:00
fileout < < failAvg ;
2014-08-26 22:28:32 +02:00
}
2017-02-23 21:13:41 +01:00
void TxConfirmStats : : Read ( CAutoFile & filein , int nFileVersion , size_t numBuckets )
2014-08-26 22:28:32 +02:00
{
2017-02-23 21:13:41 +01:00
// Read data file and do some very basic sanity checking
// buckets and bucketMap are not updated yet, so don't access them
// If there is a read failure, we'll just discard this entire object anyway
2014-08-26 22:28:32 +02:00
size_t maxConfirms ;
2017-02-23 21:13:41 +01:00
// The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
if ( nFileVersion > = 149900 ) {
filein > > decay ;
if ( decay < = 0 | | decay > = 1 ) {
throw std : : runtime_error ( " Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive) " ) ;
}
filein > > scale ; //Unused for now
}
filein > > avg ;
if ( avg . size ( ) ! = numBuckets ) {
2016-03-21 18:04:40 +01:00
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in feerate average bucket count " ) ;
2017-02-23 21:13:41 +01:00
}
filein > > txCtAvg ;
if ( txCtAvg . size ( ) ! = numBuckets ) {
2014-08-26 22:28:32 +02:00
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in tx count bucket count " ) ;
2017-02-23 21:13:41 +01:00
}
filein > > confAvg ;
maxConfirms = confAvg . size ( ) ;
if ( maxConfirms < = 0 | | maxConfirms > 6 * 24 * 7 ) { // one week
2014-08-26 22:28:32 +02:00
throw std : : runtime_error ( " Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms " ) ;
}
for ( unsigned int i = 0 ; i < maxConfirms ; i + + ) {
2017-02-23 21:13:41 +01:00
if ( confAvg [ i ] . size ( ) ! = numBuckets ) {
throw std : : runtime_error ( " Corrupt estimates file. Mismatch in feerate conf average bucket count " ) ;
}
2014-08-26 22:28:32 +02:00
}
2017-02-23 21:13:41 +01:00
if ( nFileVersion > = 149900 ) {
filein > > failAvg ;
2014-08-26 22:28:32 +02:00
}
2017-02-23 21:13:41 +01:00
// Resize the current block variables which aren't stored in the data file
// to match the number of confirms and buckets
resizeInMemoryCounters ( numBuckets ) ;
2014-08-26 22:28:32 +02:00
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Reading estimates: %u buckets counting confirms up to %u blocks \n " ,
2016-03-21 18:04:40 +01:00
numBuckets , maxConfirms ) ;
2014-08-26 22:28:32 +02:00
}
unsigned int TxConfirmStats : : NewTx ( unsigned int nBlockHeight , double val )
{
unsigned int bucketindex = bucketMap . lower_bound ( val ) - > second ;
unsigned int blockIndex = nBlockHeight % unconfTxs . size ( ) ;
unconfTxs [ blockIndex ] [ bucketindex ] + + ;
return bucketindex ;
}
void TxConfirmStats : : removeTx ( unsigned int entryHeight , unsigned int nBestSeenHeight , unsigned int bucketindex )
{
//nBestSeenHeight is not updated yet for the new block
int blocksAgo = nBestSeenHeight - entryHeight ;
if ( nBestSeenHeight = = 0 ) // the BlockPolicyEstimator hasn't seen any blocks yet
blocksAgo = 0 ;
if ( blocksAgo < 0 ) {
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, blocks ago is negative for mempool tx \n " ) ;
2015-08-09 01:17:27 +02:00
return ; //This can't happen because we call this with our best seen height, no entries can have higher
2014-08-26 22:28:32 +02:00
}
if ( blocksAgo > = ( int ) unconfTxs . size ( ) ) {
2016-12-25 21:19:40 +01:00
if ( oldUnconfTxs [ bucketindex ] > 0 ) {
2014-08-26 22:28:32 +02:00
oldUnconfTxs [ bucketindex ] - - ;
2016-12-25 21:19:40 +01:00
} else {
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already \n " ,
2014-08-26 22:28:32 +02:00
bucketindex ) ;
2016-12-25 21:19:40 +01:00
}
2014-08-26 22:28:32 +02:00
}
else {
unsigned int blockIndex = entryHeight % unconfTxs . size ( ) ;
2016-12-25 21:19:40 +01:00
if ( unconfTxs [ blockIndex ] [ bucketindex ] > 0 ) {
2014-08-26 22:28:32 +02:00
unconfTxs [ blockIndex ] [ bucketindex ] - - ;
2016-12-25 21:19:40 +01:00
} else {
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already \n " ,
2014-08-26 22:28:32 +02:00
blockIndex , bucketindex ) ;
2016-12-25 21:19:40 +01:00
}
2014-08-26 22:28:32 +02:00
}
}
2016-11-29 19:55:26 +01:00
// This function is called from CTxMemPool::removeUnchecked to ensure
// txs removed from the mempool for any reason are no longer
// tracked. Txs that were part of a block have already been removed in
// processBlockTx to ensure they are never double tracked, but it is
// of no harm to try to remove them again.
2016-11-10 20:16:42 +01:00
bool CBlockPolicyEstimator : : removeTx ( uint256 hash )
2014-08-26 22:28:32 +02:00
{
2017-02-15 15:24:11 +01:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
std : : map < uint256 , TxStatsInfo > : : iterator pos = mapMemPoolTxs . find ( hash ) ;
2016-11-10 20:16:42 +01:00
if ( pos ! = mapMemPoolTxs . end ( ) ) {
2017-02-16 22:23:15 +01:00
feeStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex ) ;
2017-03-09 20:02:00 +01:00
shortStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex ) ;
longStats - > removeTx ( pos - > second . blockHeight , nBestSeenHeight , pos - > second . bucketIndex ) ;
2016-11-10 20:16:42 +01:00
mapMemPoolTxs . erase ( hash ) ;
return true ;
} else {
return false ;
2014-08-26 22:28:32 +02:00
}
}
2017-01-13 22:25:15 +01:00
CBlockPolicyEstimator : : CBlockPolicyEstimator ( )
2016-11-11 19:40:27 +01:00
: nBestSeenHeight ( 0 ) , trackedTxs ( 0 ) , untrackedTxs ( 0 )
2014-08-26 22:28:32 +02:00
{
2017-01-13 22:19:25 +01:00
static_assert ( MIN_BUCKET_FEERATE > 0 , " Min feerate must be nonzero " ) ;
2017-01-13 22:25:15 +01:00
minTrackedFee = CFeeRate ( MIN_BUCKET_FEERATE ) ;
2017-02-23 21:13:41 +01:00
size_t bucketIndex = 0 ;
for ( double bucketBoundary = minTrackedFee . GetFeePerK ( ) ; bucketBoundary < = MAX_BUCKET_FEERATE ; bucketBoundary * = FEE_SPACING , bucketIndex + + ) {
buckets . push_back ( bucketBoundary ) ;
bucketMap [ bucketBoundary ] = bucketIndex ;
2014-08-26 22:28:32 +02:00
}
2017-02-23 21:13:41 +01:00
buckets . push_back ( INF_FEERATE ) ;
bucketMap [ INF_FEERATE ] = bucketIndex ;
assert ( bucketMap . size ( ) = = buckets . size ( ) ) ;
2017-03-09 20:02:00 +01:00
feeStats = new TxConfirmStats ( buckets , bucketMap , MED_BLOCK_CONFIRMS , MED_DECAY ) ;
shortStats = new TxConfirmStats ( buckets , bucketMap , SHORT_BLOCK_CONFIRMS , SHORT_DECAY ) ;
longStats = new TxConfirmStats ( buckets , bucketMap , LONG_BLOCK_CONFIRMS , LONG_DECAY ) ;
2017-02-16 22:23:15 +01:00
}
// Release the heap-allocated horizon trackers created in the constructor
// (and possibly replaced in Read()).
CBlockPolicyEstimator::~CBlockPolicyEstimator()
{
    delete feeStats;
    delete shortStats;
    delete longStats;
}
2016-11-11 18:48:01 +01:00
void CBlockPolicyEstimator : : processTransaction ( const CTxMemPoolEntry & entry , bool validFeeEstimate )
2014-08-26 22:28:32 +02:00
{
2017-02-15 15:24:11 +01:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
unsigned int txHeight = entry . GetHeight ( ) ;
uint256 hash = entry . GetTx ( ) . GetHash ( ) ;
2016-03-21 18:04:40 +01:00
if ( mapMemPoolTxs . count ( hash ) ) {
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error mempool tx %s already being tracked \n " ,
2014-08-26 22:28:32 +02:00
hash . ToString ( ) . c_str ( ) ) ;
return ;
}
2016-11-29 21:40:03 +01:00
if ( txHeight ! = nBestSeenHeight ) {
2014-08-26 22:28:32 +02:00
// Ignore side chains and re-orgs; assuming they are random they don't
// affect the estimate. We'll potentially double count transactions in 1-block reorgs.
2016-11-29 21:40:03 +01:00
// Ignore txs if BlockPolicyEstimator is not in sync with chainActive.Tip().
// It will be synced next time a block is processed.
2014-08-26 22:28:32 +02:00
return ;
}
// Only want to be updating estimates when our blockchain is synced,
// otherwise we'll miscalculate how many blocks its taking to get included.
2016-11-11 19:40:27 +01:00
if ( ! validFeeEstimate ) {
untrackedTxs + + ;
2014-08-26 22:28:32 +02:00
return ;
2016-11-11 19:40:27 +01:00
}
trackedTxs + + ;
2014-08-26 22:28:32 +02:00
2016-03-21 18:04:40 +01:00
// Feerates are stored and reported as BTC-per-kb:
2014-08-26 22:28:32 +02:00
CFeeRate feeRate ( entry . GetFee ( ) , entry . GetTxSize ( ) ) ;
mapMemPoolTxs [ hash ] . blockHeight = txHeight ;
2017-03-09 20:02:00 +01:00
unsigned int bucketIndex = feeStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
mapMemPoolTxs [ hash ] . bucketIndex = bucketIndex ;
unsigned int bucketIndex2 = shortStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
assert ( bucketIndex = = bucketIndex2 ) ;
unsigned int bucketIndex3 = longStats - > NewTx ( txHeight , ( double ) feeRate . GetFeePerK ( ) ) ;
assert ( bucketIndex = = bucketIndex3 ) ;
2014-08-26 22:28:32 +02:00
}
2016-11-11 19:40:27 +01:00
bool CBlockPolicyEstimator : : processBlockTx ( unsigned int nBlockHeight , const CTxMemPoolEntry * entry )
2014-08-26 22:28:32 +02:00
{
2016-11-11 20:16:42 +01:00
if ( ! removeTx ( entry - > GetTx ( ) . GetHash ( ) ) ) {
2016-11-11 17:57:51 +01:00
// This transaction wasn't being tracked for fee estimation
2016-11-11 19:40:27 +01:00
return false ;
2014-08-26 22:28:32 +02:00
}
// How many blocks did it take for miners to include this transaction?
// blocksToConfirm is 1-based, so a transaction included in the earliest
// possible block has confirmation count of 1
2016-11-11 20:16:42 +01:00
int blocksToConfirm = nBlockHeight - entry - > GetHeight ( ) ;
2014-08-26 22:28:32 +02:00
if ( blocksToConfirm < = 0 ) {
// This can't happen because we don't process transactions from a block with a height
// lower than our greatest seen height
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy error Transaction had negative blocksToConfirm \n " ) ;
2016-11-11 19:40:27 +01:00
return false ;
2014-08-26 22:28:32 +02:00
}
2016-03-21 18:04:40 +01:00
// Feerates are stored and reported as BTC-per-kb:
2016-11-11 20:16:42 +01:00
CFeeRate feeRate ( entry - > GetFee ( ) , entry - > GetTxSize ( ) ) ;
2014-08-26 22:28:32 +02:00
2017-02-16 22:23:15 +01:00
feeStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
2017-03-09 20:02:00 +01:00
shortStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
longStats - > Record ( blocksToConfirm , ( double ) feeRate . GetFeePerK ( ) ) ;
2016-11-11 19:40:27 +01:00
return true ;
2014-08-26 22:28:32 +02:00
}
void CBlockPolicyEstimator : : processBlock ( unsigned int nBlockHeight ,
2016-11-11 20:16:42 +01:00
std : : vector < const CTxMemPoolEntry * > & entries )
2014-08-26 22:28:32 +02:00
{
2017-02-15 15:24:11 +01:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
if ( nBlockHeight < = nBestSeenHeight ) {
// Ignore side chains and re-orgs; assuming they are random
// they don't affect the estimate.
// And if an attacker can re-org the chain at will, then
// you've got much bigger problems than "attacker can influence
// transaction fees."
return ;
}
2016-11-11 17:57:51 +01:00
// Must update nBestSeenHeight in sync with ClearCurrent so that
// calls to removeTx (via processBlockTx) correctly calculate age
// of unconfirmed txs to remove from tracking.
nBestSeenHeight = nBlockHeight ;
2017-02-28 23:29:42 +01:00
// Update unconfirmed circular buffer
2017-02-16 22:23:15 +01:00
feeStats - > ClearCurrent ( nBlockHeight ) ;
2017-03-09 20:02:00 +01:00
shortStats - > ClearCurrent ( nBlockHeight ) ;
longStats - > ClearCurrent ( nBlockHeight ) ;
2014-08-26 22:28:32 +02:00
2017-02-28 23:29:42 +01:00
// Decay all exponential averages
feeStats - > UpdateMovingAverages ( ) ;
shortStats - > UpdateMovingAverages ( ) ;
longStats - > UpdateMovingAverages ( ) ;
2016-11-11 19:40:27 +01:00
unsigned int countedTxs = 0 ;
2017-02-28 23:29:42 +01:00
// Update averages with data points from current block
2016-11-11 19:40:27 +01:00
for ( unsigned int i = 0 ; i < entries . size ( ) ; i + + ) {
if ( processBlockTx ( nBlockHeight , entries [ i ] ) )
countedTxs + + ;
}
2014-08-26 22:28:32 +02:00
2016-12-25 21:19:40 +01:00
LogPrint ( BCLog : : ESTIMATEFEE , " Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u \n " ,
2016-11-11 19:40:27 +01:00
countedTxs , entries . size ( ) , trackedTxs , trackedTxs + untrackedTxs , mapMemPoolTxs . size ( ) ) ;
trackedTxs = 0 ;
untrackedTxs = 0 ;
2014-08-26 22:28:32 +02:00
}
2017-02-15 21:23:34 +01:00
CFeeRate CBlockPolicyEstimator : : estimateFee ( int confTarget ) const
2014-08-26 22:28:32 +02:00
{
2017-02-15 15:24:11 +01:00
LOCK ( cs_feeEstimator ) ;
2014-08-26 22:28:32 +02:00
// Return failure if trying to analyze a target we're not tracking
2016-11-29 18:18:44 +01:00
// It's not possible to get reasonable estimates for confTarget of 1
2017-02-16 22:23:15 +01:00
if ( confTarget < = 1 | | ( unsigned int ) confTarget > feeStats - > GetMaxConfirms ( ) )
2014-08-26 22:28:32 +02:00
return CFeeRate ( 0 ) ;
2017-03-09 20:02:00 +01:00
double median = feeStats - > EstimateMedianVal ( confTarget , SUFFICIENT_FEETXS , DOUBLE_SUCCESS_PCT , true , nBestSeenHeight ) ;
2014-08-26 22:28:32 +02:00
if ( median < 0 )
return CFeeRate ( 0 ) ;
return CFeeRate ( median ) ;
}
2017-02-15 21:23:34 +01:00
CFeeRate CBlockPolicyEstimator : : estimateSmartFee ( int confTarget , int * answerFoundAtTarget , const CTxMemPool & pool ) const
2015-11-16 21:10:22 +01:00
{
if ( answerFoundAtTarget )
* answerFoundAtTarget = confTarget ;
2016-11-29 18:18:44 +01:00
2015-11-16 21:10:22 +01:00
double median = - 1 ;
2017-02-15 15:24:11 +01:00
{
LOCK ( cs_feeEstimator ) ;
// Return failure if trying to analyze a target we're not tracking
2017-02-16 22:23:15 +01:00
if ( confTarget < = 0 | | ( unsigned int ) confTarget > feeStats - > GetMaxConfirms ( ) )
2017-02-15 15:24:11 +01:00
return CFeeRate ( 0 ) ;
// It's not possible to get reasonable estimates for confTarget of 1
if ( confTarget = = 1 )
confTarget = 2 ;
2017-02-16 22:23:15 +01:00
while ( median < 0 & & ( unsigned int ) confTarget < = feeStats - > GetMaxConfirms ( ) ) {
2017-03-09 20:02:00 +01:00
median = feeStats - > EstimateMedianVal ( confTarget + + , SUFFICIENT_FEETXS , DOUBLE_SUCCESS_PCT , true , nBestSeenHeight ) ;
2017-02-15 15:24:11 +01:00
}
} // Must unlock cs_feeEstimator before taking mempool locks
2015-11-16 21:10:22 +01:00
if ( answerFoundAtTarget )
* answerFoundAtTarget = confTarget - 1 ;
2016-03-21 18:04:40 +01:00
// If mempool is limiting txs , return at least the min feerate from the mempool
2015-11-24 14:53:14 +01:00
CAmount minPoolFee = pool . GetMinFee ( GetArg ( " -maxmempool " , DEFAULT_MAX_MEMPOOL_SIZE ) * 1000000 ) . GetFeePerK ( ) ;
2015-11-16 21:21:51 +01:00
if ( minPoolFee > 0 & & minPoolFee > median )
return CFeeRate ( minPoolFee ) ;
2015-11-16 21:10:22 +01:00
if ( median < 0 )
return CFeeRate ( 0 ) ;
return CFeeRate ( median ) ;
}
2017-02-15 21:48:48 +01:00
bool CBlockPolicyEstimator : : Write ( CAutoFile & fileout ) const
2014-08-26 22:28:32 +02:00
{
2017-02-15 21:48:48 +01:00
try {
LOCK ( cs_feeEstimator ) ;
2017-02-23 21:13:41 +01:00
fileout < < 149900 ; // version required to read: 0.14.99 or later
2017-02-15 21:48:48 +01:00
fileout < < CLIENT_VERSION ; // version that wrote the file
fileout < < nBestSeenHeight ;
2017-02-23 21:13:41 +01:00
unsigned int future1 = 0 , future2 = 0 ;
fileout < < future1 < < future2 ;
fileout < < buckets ;
2017-02-16 22:23:15 +01:00
feeStats - > Write ( fileout ) ;
2017-02-23 21:13:41 +01:00
shortStats - > Write ( fileout ) ;
longStats - > Write ( fileout ) ;
2017-02-15 21:48:48 +01:00
}
catch ( const std : : exception & ) {
LogPrintf ( " CBlockPolicyEstimator::Write(): unable to read policy estimator data (non-fatal) \n " ) ;
return false ;
}
return true ;
2014-08-26 22:28:32 +02:00
}
2017-02-15 21:48:48 +01:00
bool CBlockPolicyEstimator : : Read ( CAutoFile & filein )
2014-08-26 22:28:32 +02:00
{
2017-02-15 21:48:48 +01:00
try {
LOCK ( cs_feeEstimator ) ;
2017-02-23 21:13:41 +01:00
int nVersionRequired , nVersionThatWrote ;
unsigned int nFileBestSeenHeight ;
2017-02-15 21:48:48 +01:00
filein > > nVersionRequired > > nVersionThatWrote ;
if ( nVersionRequired > CLIENT_VERSION )
return error ( " CBlockPolicyEstimator::Read() : up - version ( % d ) fee estimate file " , nVersionRequired) ;
2017-02-23 21:13:41 +01:00
// Read fee estimates file into temporary variables so existing data
// structures aren't corrupted if there is an exception.
2017-02-15 21:48:48 +01:00
filein > > nFileBestSeenHeight ;
2017-02-23 21:13:41 +01:00
if ( nVersionThatWrote < 149900 ) {
// Read the old fee estimates file for temporary use, but then discard. Will start collecting data from scratch.
// decay is stored before buckets in old versions, so pre-read decay and pass into TxConfirmStats constructor
double tempDecay ;
filein > > tempDecay ;
if ( tempDecay < = 0 | | tempDecay > = 1 )
throw std : : runtime_error ( " Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive) " ) ;
std : : vector < double > tempBuckets ;
filein > > tempBuckets ;
size_t tempNum = tempBuckets . size ( ) ;
if ( tempNum < = 1 | | tempNum > 1000 )
throw std : : runtime_error ( " Corrupt estimates file. Must have between 2 and 1000 feerate buckets " ) ;
std : : map < double , unsigned int > tempMap ;
2017-03-09 20:02:00 +01:00
std : : unique_ptr < TxConfirmStats > tempFeeStats ( new TxConfirmStats ( tempBuckets , tempMap , MED_BLOCK_CONFIRMS , tempDecay ) ) ;
2017-02-23 21:13:41 +01:00
tempFeeStats - > Read ( filein , nVersionThatWrote , tempNum ) ;
// if nVersionThatWrote < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
tempMap . clear ( ) ;
for ( unsigned int i = 0 ; i < tempBuckets . size ( ) ; i + + ) {
tempMap [ tempBuckets [ i ] ] = i ;
}
}
else { // nVersionThatWrote >= 149900
unsigned int future1 , future2 ;
filein > > future1 > > future2 ;
std : : vector < double > fileBuckets ;
filein > > fileBuckets ;
size_t numBuckets = fileBuckets . size ( ) ;
if ( numBuckets < = 1 | | numBuckets > 1000 )
throw std : : runtime_error ( " Corrupt estimates file. Must have between 2 and 1000 feerate buckets " ) ;
2017-03-09 20:02:00 +01:00
std : : unique_ptr < TxConfirmStats > fileFeeStats ( new TxConfirmStats ( buckets , bucketMap , MED_BLOCK_CONFIRMS , MED_DECAY ) ) ;
std : : unique_ptr < TxConfirmStats > fileShortStats ( new TxConfirmStats ( buckets , bucketMap , SHORT_BLOCK_CONFIRMS , SHORT_DECAY ) ) ;
std : : unique_ptr < TxConfirmStats > fileLongStats ( new TxConfirmStats ( buckets , bucketMap , LONG_BLOCK_CONFIRMS , LONG_DECAY ) ) ;
2017-02-23 21:13:41 +01:00
fileFeeStats - > Read ( filein , nVersionThatWrote , numBuckets ) ;
fileShortStats - > Read ( filein , nVersionThatWrote , numBuckets ) ;
fileLongStats - > Read ( filein , nVersionThatWrote , numBuckets ) ;
// Fee estimates file parsed correctly
// Copy buckets from file and refresh our bucketmap
buckets = fileBuckets ;
bucketMap . clear ( ) ;
for ( unsigned int i = 0 ; i < buckets . size ( ) ; i + + ) {
bucketMap [ buckets [ i ] ] = i ;
}
// Destroy old TxConfirmStats and point to new ones that already reference buckets and bucketMap
delete feeStats ;
delete shortStats ;
delete longStats ;
feeStats = fileFeeStats . release ( ) ;
shortStats = fileShortStats . release ( ) ;
longStats = fileLongStats . release ( ) ;
nBestSeenHeight = nFileBestSeenHeight ;
}
2017-02-15 21:48:48 +01:00
}
2017-02-23 21:13:41 +01:00
catch ( const std : : exception & e ) {
LogPrintf ( " CBlockPolicyEstimator::Read(): unable to read policy estimator data (non-fatal): %s \n " , e . what ( ) ) ;
2017-02-15 21:48:48 +01:00
return false ;
}
return true ;
2014-08-26 22:28:32 +02:00
}
2016-02-12 21:57:15 +01:00
FeeFilterRounder : : FeeFilterRounder ( const CFeeRate & minIncrementalFee )
{
2016-12-06 03:46:08 +01:00
CAmount minFeeLimit = std : : max ( CAmount ( 1 ) , minIncrementalFee . GetFeePerK ( ) / 2 ) ;
2016-02-12 21:57:15 +01:00
feeset . insert ( 0 ) ;
2017-03-09 20:02:00 +01:00
for ( double bucketBoundary = minFeeLimit ; bucketBoundary < = MAX_FILTER_FEERATE ; bucketBoundary * = FEE_FILTER_SPACING ) {
2016-02-12 21:57:15 +01:00
feeset . insert ( bucketBoundary ) ;
}
}
// Quantize currentMinFee to a precomputed feerate, randomly picking the bucket
// below it 2/3 of the time to obscure the node's exact fee filter.
CAmount FeeFilterRounder::round(CAmount currentMinFee)
{
    auto it = feeset.lower_bound(currentMinFee);
    const bool stepDown = (it != feeset.begin() && insecure_rand.rand32() % 3 != 0) || it == feeset.end();
    if (stepDown) {
        --it;
    }
    return *it;
}