// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "policy/fees.h"
#include "policy/policy.h"

#include "amount.h"
#include "primitives/transaction.h"
#include "random.h"
#include "streams.h"
#include "txmempool.h"
#include "util.h"

void TxConfirmStats::Initialize(std::vector<double>& defaultBuckets,
                                unsigned int maxConfirms, double _decay)
{
    decay = _decay;
    for (unsigned int i = 0; i < defaultBuckets.size(); i++) {
        buckets.push_back(defaultBuckets[i]);
        bucketMap[defaultBuckets[i]] = i;
    }
    confAvg.resize(maxConfirms);
    curBlockConf.resize(maxConfirms);
    unconfTxs.resize(maxConfirms);
    for (unsigned int i = 0; i < maxConfirms; i++) {
        confAvg[i].resize(buckets.size());
        curBlockConf[i].resize(buckets.size());
        unconfTxs[i].resize(buckets.size());
    }

    oldUnconfTxs.resize(buckets.size());
    curBlockTxCt.resize(buckets.size());
    txCtAvg.resize(buckets.size());
    curBlockVal.resize(buckets.size());
    avg.resize(buckets.size());
}
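
// Layout note (added for clarity; an informal summary, not normative):
// confAvg[i][j] holds a decaying count of bucket-j transactions that were
// confirmed within i+1 blocks, unconfTxs is a circular buffer indexed by
// (height % maxConfirms) tracking still-unconfirmed txs per bucket, and
// oldUnconfTxs collects txs that have been unconfirmed longer than the
// buffer covers.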

// Zero out the data for the current block
void TxConfirmStats::ClearCurrent(unsigned int nBlockHeight)
{
    for (unsigned int j = 0; j < buckets.size(); j++) {
        oldUnconfTxs[j] += unconfTxs[nBlockHeight%unconfTxs.size()][j];
        unconfTxs[nBlockHeight%unconfTxs.size()][j] = 0;
        for (unsigned int i = 0; i < curBlockConf.size(); i++)
            curBlockConf[i][j] = 0;
        curBlockTxCt[j] = 0;
        curBlockVal[j] = 0;
    }
}

void TxConfirmStats::Record(int blocksToConfirm, double val)
{
    // blocksToConfirm is 1-based
    if (blocksToConfirm < 1)
        return;
    unsigned int bucketindex = bucketMap.lower_bound(val)->second;
    for (size_t i = blocksToConfirm; i <= curBlockConf.size(); i++) {
        curBlockConf[i - 1][bucketindex]++;
    }
    curBlockTxCt[bucketindex]++;
    curBlockVal[bucketindex] += val;
}
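
// Example (illustrative): a tx recorded with blocksToConfirm = 3 bumps
// curBlockConf[2..maxConfirms-1][bucket], i.e. it counts as a success for
// every confirmation target of 3 or more blocks, since a tx confirmed
// within 3 blocks was also confirmed within any larger window.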

void TxConfirmStats::UpdateMovingAverages()
{
    for (unsigned int j = 0; j < buckets.size(); j++) {
        for (unsigned int i = 0; i < confAvg.size(); i++)
            confAvg[i][j] = confAvg[i][j] * decay + curBlockConf[i][j];
        avg[j] = avg[j] * decay + curBlockVal[j];
        txCtAvg[j] = txCtAvg[j] * decay + curBlockTxCt[j];
    }
}
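
// Decay math (informal sketch): each block multiplies history by decay and
// adds the new block's totals, so a data point's weight after n blocks is
// decay^n and the steady-state total weight is 1/(1-decay). Assuming the
// default decay of .998, history sums to ~500 blocks' worth of data and a
// sample's influence halves roughly every ln(0.5)/ln(0.998) ~= 346 blocks.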

// returns -1 on error conditions
double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
                                         double successBreakPoint, bool requireGreater,
                                         unsigned int nBlockHeight)
{
    // Counters for a bucket (or range of buckets)
    double nConf = 0; // Number of tx's confirmed within the confTarget
    double totalNum = 0; // Total number of tx's that were ever confirmed
    int extraNum = 0;  // Number of tx's still in mempool for confTarget or longer

    int maxbucketindex = buckets.size() - 1;

    // requireGreater means we are looking for the lowest feerate such that all higher
    // values pass, so we start at maxbucketindex (highest feerate) and look at successively
    // smaller buckets until we reach failure. Otherwise, we are looking for the highest
    // feerate such that all lower values fail, and we go in the opposite direction.
    unsigned int startbucket = requireGreater ? maxbucketindex : 0;
    int step = requireGreater ? -1 : 1;

    // We'll combine buckets until we have enough samples.
    // The near and far variables will define the range we've combined
    // The best variables are the last range we saw which still had a high
    // enough confirmation rate to count as success.
    // The cur variables are the current range we're counting.
    unsigned int curNearBucket = startbucket;
    unsigned int bestNearBucket = startbucket;
    unsigned int curFarBucket = startbucket;
    unsigned int bestFarBucket = startbucket;

    bool foundAnswer = false;
    unsigned int bins = unconfTxs.size();

    // Start counting from highest (default) or lowest feerate transactions
    for (int bucket = startbucket; bucket >= 0 && bucket <= maxbucketindex; bucket += step) {
        curFarBucket = bucket;
        nConf += confAvg[confTarget - 1][bucket];
        totalNum += txCtAvg[bucket];
        for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++)
            extraNum += unconfTxs[(nBlockHeight - confct)%bins][bucket];
        extraNum += oldUnconfTxs[bucket];
        // If we have enough transaction data points in this range of buckets,
        // we can test for success
        // (Only count the confirmed data points, so that each confirmation count
        // will be looking at the same amount of data and same bucket breaks)
        if (totalNum >= sufficientTxVal / (1 - decay)) {
            double curPct = nConf / (totalNum + extraNum);

            // Check to see if we are no longer getting confirmed at the success rate
            if (requireGreater && curPct < successBreakPoint)
                break;
            if (!requireGreater && curPct > successBreakPoint)
                break;

            // Otherwise update the cumulative stats, and the bucket variables
            // and reset the counters
            else {
                foundAnswer = true;
                nConf = 0;
                totalNum = 0;
                extraNum = 0;
                bestNearBucket = curNearBucket;
                bestFarBucket = curFarBucket;
                curNearBucket = bucket + step;
            }
        }
    }

    double median = -1;
    double txSum = 0;

    // Calculate the "average" feerate of the best bucket range that met success conditions
    // Find the bucket with the median transaction and then report the average feerate from that bucket
    // This is a compromise between finding the median (which we can't, since we don't save all txs)
    // and reporting the average, which is less accurate
    unsigned int minBucket = bestNearBucket < bestFarBucket ? bestNearBucket : bestFarBucket;
    unsigned int maxBucket = bestNearBucket > bestFarBucket ? bestNearBucket : bestFarBucket;
    for (unsigned int j = minBucket; j <= maxBucket; j++) {
        txSum += txCtAvg[j];
    }
    if (foundAnswer && txSum != 0) {
        txSum = txSum / 2;
        for (unsigned int j = minBucket; j <= maxBucket; j++) {
            if (txCtAvg[j] < txSum)
                txSum -= txCtAvg[j];
            else { // we're in the right bucket
                median = avg[j] / txCtAvg[j];
                break;
            }
        }
    }

    LogPrint("estimatefee", "%3d: For conf success %s %4.2f need feerate %s: %12.5g from buckets %8g - %8g Cur Bucket stats %6.2f%% %8.1f/(%.1f+%d mempool)\n",
             confTarget, requireGreater ? ">" : "<", successBreakPoint,
             requireGreater ? ">" : "<", median, buckets[minBucket], buckets[maxBucket],
             100 * nConf / (totalNum + extraNum), nConf, totalNum, extraNum);

    return median;
}
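
// Median-selection sketch (illustrative numbers): if the winning range is
// buckets 10..12 with txCtAvg of {40, 100, 60}, txSum/2 = 100; bucket 10
// leaves 60 remaining, bucket 11 holds the median tx, so the returned
// "median" is avg[11]/txCtAvg[11] -- the average feerate within the bucket
// containing the median transaction.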

void TxConfirmStats::Write(CAutoFile& fileout)
{
    fileout << decay;
    fileout << buckets;
    fileout << avg;
    fileout << txCtAvg;
    fileout << confAvg;
}
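
// Note: Read() below consumes these fields in the same order (decay,
// buckets, avg, txCtAvg, confAvg); the two must stay in sync.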

void TxConfirmStats::Read(CAutoFile& filein)
{
    // Read data file into temporary variables and do some very basic sanity checking
    std::vector<double> fileBuckets;
    std::vector<double> fileAvg;
    std::vector<std::vector<double> > fileConfAvg;
    std::vector<double> fileTxCtAvg;
    double fileDecay;
    size_t maxConfirms;
    size_t numBuckets;

    filein >> fileDecay;
    if (fileDecay <= 0 || fileDecay >= 1)
        throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
    filein >> fileBuckets;
    numBuckets = fileBuckets.size();
    if (numBuckets <= 1 || numBuckets > 1000)
        throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
    filein >> fileAvg;
    if (fileAvg.size() != numBuckets)
        throw std::runtime_error("Corrupt estimates file. Mismatch in feerate average bucket count");
    filein >> fileTxCtAvg;
    if (fileTxCtAvg.size() != numBuckets)
        throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
    filein >> fileConfAvg;
    maxConfirms = fileConfAvg.size();
    if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) // one week
        throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
    for (unsigned int i = 0; i < maxConfirms; i++) {
        if (fileConfAvg[i].size() != numBuckets)
            throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
    }
    // Now that we've processed the entire feerate estimate data file and not
    // thrown any errors, we can copy it to our data structures
    decay = fileDecay;
    buckets = fileBuckets;
    avg = fileAvg;
    confAvg = fileConfAvg;
    txCtAvg = fileTxCtAvg;
    bucketMap.clear();

    // Resize the current block variables which aren't stored in the data file
    // to match the number of confirms and buckets
    curBlockConf.resize(maxConfirms);
    for (unsigned int i = 0; i < maxConfirms; i++) {
        curBlockConf[i].resize(buckets.size());
    }
    curBlockTxCt.resize(buckets.size());
    curBlockVal.resize(buckets.size());

    unconfTxs.resize(maxConfirms);
    for (unsigned int i = 0; i < maxConfirms; i++) {
        unconfTxs[i].resize(buckets.size());
    }
    oldUnconfTxs.resize(buckets.size());

    for (unsigned int i = 0; i < buckets.size(); i++)
        bucketMap[buckets[i]] = i;

    LogPrint("estimatefee", "Reading estimates: %u buckets counting confirms up to %u blocks\n",
             numBuckets, maxConfirms);
}

unsigned int TxConfirmStats::NewTx(unsigned int nBlockHeight, double val)
{
    unsigned int bucketindex = bucketMap.lower_bound(val)->second;
    unsigned int blockIndex = nBlockHeight % unconfTxs.size();
    unconfTxs[blockIndex][bucketindex]++;
    return bucketindex;
}

void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHeight, unsigned int bucketindex)
{
    //nBestSeenHeight is not updated yet for the new block
    int blocksAgo = nBestSeenHeight - entryHeight;
    if (nBestSeenHeight == 0) // the BlockPolicyEstimator hasn't seen any blocks yet
        blocksAgo = 0;
    if (blocksAgo < 0) {
        LogPrint("estimatefee", "Blockpolicy error, blocks ago is negative for mempool tx\n");
        return; //This can't happen because we call this with our best seen height, no entries can have higher
    }

    if (blocksAgo >= (int)unconfTxs.size()) {
        if (oldUnconfTxs[bucketindex] > 0)
            oldUnconfTxs[bucketindex]--;
        else
            LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from >25 blocks,bucketIndex=%u already\n",
                     bucketindex);
    }
    else {
        unsigned int blockIndex = entryHeight % unconfTxs.size();
        if (unconfTxs[blockIndex][bucketindex] > 0)
            unconfTxs[blockIndex][bucketindex]--;
        else
            LogPrint("estimatefee", "Blockpolicy error, mempool tx removed from blockIndex=%u,bucketIndex=%u already\n",
                     blockIndex, bucketindex);
    }
}
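
// Aging note (informal): unconfTxs only covers the last unconfTxs.size()
// (i.e. GetMaxConfirms()) blocks, so a tx older than that has already been
// swept into oldUnconfTxs by ClearCurrent and must be decremented there
// instead -- hence the ">25 blocks" branch above when MAX_BLOCK_CONFIRMS
// is 25.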

// This function is called from CTxMemPool::removeUnchecked to ensure
// txs removed from the mempool for any reason are no longer
// tracked. Txs that were part of a block have already been removed in
// processBlockTx to ensure they are never double tracked, but it is
// harmless to try to remove them again.
bool CBlockPolicyEstimator::removeTx(uint256 hash)
{
    std::map<uint256, TxStatsInfo>::iterator pos = mapMemPoolTxs.find(hash);
    if (pos != mapMemPoolTxs.end()) {
        feeStats.removeTx(pos->second.blockHeight, nBestSeenHeight, pos->second.bucketIndex);
        mapMemPoolTxs.erase(hash);
        return true;
    } else {
        return false;
    }
}

CBlockPolicyEstimator::CBlockPolicyEstimator()
    : nBestSeenHeight(0), trackedTxs(0), untrackedTxs(0)
{
    static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
    minTrackedFee = CFeeRate(MIN_BUCKET_FEERATE);
    std::vector<double> vfeelist;
    for (double bucketBoundary = minTrackedFee.GetFeePerK(); bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING) {
        vfeelist.push_back(bucketBoundary);
    }
    vfeelist.push_back(INF_FEERATE);
    feeStats.Initialize(vfeelist, MAX_BLOCK_CONFIRMS, DEFAULT_DECAY);
}
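
// Bucket-count sketch (illustrative; assumes this era's constants of
// MIN_BUCKET_FEERATE = 1000 sat/kB, MAX_BUCKET_FEERATE = 1e7 and
// FEE_SPACING = 1.1): the loop produces about
// log(1e7/1000)/log(1.1) ~= 97 geometrically spaced boundaries, plus the
// INF_FEERATE catch-all bucket for anything above the tracked range.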

void CBlockPolicyEstimator::processTransaction(const CTxMemPoolEntry& entry, bool validFeeEstimate)
{
    unsigned int txHeight = entry.GetHeight();
    uint256 hash = entry.GetTx().GetHash();
    if (mapMemPoolTxs.count(hash)) {
        LogPrint("estimatefee", "Blockpolicy error mempool tx %s already being tracked\n",
                 hash.ToString().c_str());
        return;
    }

    if (txHeight != nBestSeenHeight) {
        // Ignore side chains and re-orgs; assuming they are random they don't
        // affect the estimate. We'll potentially double count transactions in 1-block reorgs.
        // Ignore txs if BlockPolicyEstimator is not in sync with chainActive.Tip().
        // It will be synced next time a block is processed.
        return;
    }

    // Only want to be updating estimates when our blockchain is synced,
    // otherwise we'll miscalculate how many blocks it's taking to get included.
    if (!validFeeEstimate) {
        untrackedTxs++;
        return;
    }
    trackedTxs++;

    // Feerates are stored and reported as BTC-per-kb:
    CFeeRate feeRate(entry.GetFee(), entry.GetTxSize());

    mapMemPoolTxs[hash].blockHeight = txHeight;
    mapMemPoolTxs[hash].bucketIndex = feeStats.NewTx(txHeight, (double)feeRate.GetFeePerK());
}
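
// Feerate arithmetic (informal example): CFeeRate(fee, size).GetFeePerK()
// scales the fee to a per-1000-byte rate, so a 250-byte tx paying 50,000
// satoshis is recorded at 200,000 sat/kB and lands in whichever bucket's
// boundary is the lower_bound of that value.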

bool CBlockPolicyEstimator::processBlockTx(unsigned int nBlockHeight, const CTxMemPoolEntry* entry)
{
    if (!removeTx(entry->GetTx().GetHash())) {
        // This transaction wasn't being tracked for fee estimation
        return false;
    }

    // How many blocks did it take for miners to include this transaction?
    // blocksToConfirm is 1-based, so a transaction included in the earliest
    // possible block has confirmation count of 1
    int blocksToConfirm = nBlockHeight - entry->GetHeight();
    if (blocksToConfirm <= 0) {
        // This can't happen because we don't process transactions from a block with a height
        // lower than our greatest seen height
        LogPrint("estimatefee", "Blockpolicy error Transaction had negative blocksToConfirm\n");
        return false;
    }

    // Feerates are stored and reported as BTC-per-kb:
    CFeeRate feeRate(entry->GetFee(), entry->GetTxSize());

    feeStats.Record(blocksToConfirm, (double)feeRate.GetFeePerK());
    return true;
}

void CBlockPolicyEstimator::processBlock(unsigned int nBlockHeight,
                                         std::vector<const CTxMemPoolEntry*>& entries)
{
    if (nBlockHeight <= nBestSeenHeight) {
        // Ignore side chains and re-orgs; assuming they are random
        // they don't affect the estimate.
        // And if an attacker can re-org the chain at will, then
        // you've got much bigger problems than "attacker can influence
        // transaction fees."
        return;
    }

    // Must update nBestSeenHeight in sync with ClearCurrent so that
    // calls to removeTx (via processBlockTx) correctly calculate age
    // of unconfirmed txs to remove from tracking.
    nBestSeenHeight = nBlockHeight;

    // Clear the current block state and update unconfirmed circular buffer
    feeStats.ClearCurrent(nBlockHeight);

    unsigned int countedTxs = 0;
    // Repopulate the current block states
    for (unsigned int i = 0; i < entries.size(); i++) {
        if (processBlockTx(nBlockHeight, entries[i]))
            countedTxs++;
    }

    // Update all exponential averages with the current block state
    feeStats.UpdateMovingAverages();

    LogPrint("estimatefee", "Blockpolicy after updating estimates for %u of %u txs in block, since last block %u of %u tracked, new mempool map size %u\n",
             countedTxs, entries.size(), trackedTxs, trackedTxs + untrackedTxs, mapMemPoolTxs.size());

    trackedTxs = 0;
    untrackedTxs = 0;
}

CFeeRate CBlockPolicyEstimator::estimateFee(int confTarget)
{
    // Return failure if trying to analyze a target we're not tracking
    // It's not possible to get reasonable estimates for confTarget of 1
    if (confTarget <= 1 || (unsigned int)confTarget > feeStats.GetMaxConfirms())
        return CFeeRate(0);

    double median = feeStats.EstimateMedianVal(confTarget, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);

    if (median < 0)
        return CFeeRate(0);

    return CFeeRate(median);
}

CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, int *answerFoundAtTarget, const CTxMemPool& pool)
{
    if (answerFoundAtTarget)
        *answerFoundAtTarget = confTarget;
    // Return failure if trying to analyze a target we're not tracking
    if (confTarget <= 0 || (unsigned int)confTarget > feeStats.GetMaxConfirms())
        return CFeeRate(0);

    // It's not possible to get reasonable estimates for confTarget of 1
    if (confTarget == 1)
        confTarget = 2;

    double median = -1;
    while (median < 0 && (unsigned int)confTarget <= feeStats.GetMaxConfirms()) {
        median = feeStats.EstimateMedianVal(confTarget++, SUFFICIENT_FEETXS, MIN_SUCCESS_PCT, true, nBestSeenHeight);
    }

    if (answerFoundAtTarget)
        *answerFoundAtTarget = confTarget - 1;

    // If mempool is limiting txs, return at least the min feerate from the mempool
    CAmount minPoolFee = pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
    if (minPoolFee > 0 && minPoolFee > median)
        return CFeeRate(minPoolFee);

    if (median < 0)
        return CFeeRate(0);

    return CFeeRate(median);
}
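
// Fallback behavior (informal summary): if, say, confTarget = 2 has too few
// data points, the while loop retries 3, 4, ... up to GetMaxConfirms(),
// and *answerFoundAtTarget reports the target that actually produced the
// estimate, so callers can tell a 2-block answer from a 10-block one.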

void CBlockPolicyEstimator::Write(CAutoFile& fileout)
{
    fileout << nBestSeenHeight;
    feeStats.Write(fileout);
}

void CBlockPolicyEstimator::Read(CAutoFile& filein, int nFileVersion)
{
    int nFileBestSeenHeight;
    filein >> nFileBestSeenHeight;
    feeStats.Read(filein);
    nBestSeenHeight = nFileBestSeenHeight;
    // if nFileVersion < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
}

FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee)
{
    CAmount minFeeLimit = std::max(CAmount(1), minIncrementalFee.GetFeePerK() / 2);
    feeset.insert(0);
    for (double bucketBoundary = minFeeLimit; bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING) {
        feeset.insert(bucketBoundary);
    }
}

CAmount FeeFilterRounder::round(CAmount currentMinFee)
{
    std::set<double>::iterator it = feeset.lower_bound(currentMinFee);
    if ((it != feeset.begin() && insecure_rand.rand32() % 3 != 0) || it == feeset.end()) {
        it--;
    }
    return *it;
}
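
// Rounding behavior (informal): lower_bound picks the first boundary >= the
// fee, then with probability 2/3 (insecure_rand.rand32() % 3 != 0) the code
// steps down one bucket, so broadcast fee filters are quantized to shared
// bucket boundaries and reveal less about the node's exact feerate.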