Introduce a scale factor
For the per-confirmation-number tracking of data, introduce a scale factor so that in the longer horizons confirmations are bucketed together at a resolution of the scale. (Instead of 1008 individual data points for each fee bucket, have 42 data points, each covering 24 different confirmation values: (1-24), (25-48), etc.)
This commit is contained in:
parent
5f1f0c6490
commit
3ee76d6de5
4 changed files with 45 additions and 35 deletions
|
@ -53,13 +53,14 @@ private:
|
||||||
|
|
||||||
double decay;
|
double decay;
|
||||||
|
|
||||||
|
// Resolution (# of blocks) with which confirmations are tracked
|
||||||
unsigned int scale;
|
unsigned int scale;
|
||||||
|
|
||||||
// Mempool counts of outstanding transactions
|
// Mempool counts of outstanding transactions
|
||||||
// For each bucket X, track the number of transactions in the mempool
|
// For each bucket X, track the number of transactions in the mempool
|
||||||
// that are unconfirmed for each possible confirmation value Y
|
// that are unconfirmed for each possible confirmation value Y
|
||||||
std::vector<std::vector<int> > unconfTxs; //unconfTxs[Y][X]
|
std::vector<std::vector<int> > unconfTxs; //unconfTxs[Y][X]
|
||||||
// transactions still unconfirmed after MAX_CONFIRMS for each bucket
|
// transactions still unconfirmed after GetMaxConfirms for each bucket
|
||||||
std::vector<int> oldUnconfTxs;
|
std::vector<int> oldUnconfTxs;
|
||||||
|
|
||||||
void resizeInMemoryCounters(size_t newbuckets);
|
void resizeInMemoryCounters(size_t newbuckets);
|
||||||
|
@ -73,7 +74,7 @@ public:
|
||||||
* @param decay how much to decay the historical moving average per block
|
* @param decay how much to decay the historical moving average per block
|
||||||
*/
|
*/
|
||||||
TxConfirmStats(const std::vector<double>& defaultBuckets, const std::map<double, unsigned int>& defaultBucketMap,
|
TxConfirmStats(const std::vector<double>& defaultBuckets, const std::map<double, unsigned int>& defaultBucketMap,
|
||||||
unsigned int maxConfirms, double decay);
|
unsigned int maxPeriods, double decay, unsigned int scale);
|
||||||
|
|
||||||
/** Roll the circular buffer for unconfirmed txs*/
|
/** Roll the circular buffer for unconfirmed txs*/
|
||||||
void ClearCurrent(unsigned int nBlockHeight);
|
void ClearCurrent(unsigned int nBlockHeight);
|
||||||
|
@ -113,7 +114,7 @@ public:
|
||||||
EstimationResult *result = nullptr) const;
|
EstimationResult *result = nullptr) const;
|
||||||
|
|
||||||
/** Return the max number of confirms we're tracking */
|
/** Return the max number of confirms we're tracking */
|
||||||
unsigned int GetMaxConfirms() const { return confAvg.size(); }
|
unsigned int GetMaxConfirms() const { return scale * confAvg.size(); }
|
||||||
|
|
||||||
/** Write state of estimation data to a file*/
|
/** Write state of estimation data to a file*/
|
||||||
void Write(CAutoFile& fileout) const;
|
void Write(CAutoFile& fileout) const;
|
||||||
|
@ -128,17 +129,17 @@ public:
|
||||||
|
|
||||||
TxConfirmStats::TxConfirmStats(const std::vector<double>& defaultBuckets,
|
TxConfirmStats::TxConfirmStats(const std::vector<double>& defaultBuckets,
|
||||||
const std::map<double, unsigned int>& defaultBucketMap,
|
const std::map<double, unsigned int>& defaultBucketMap,
|
||||||
unsigned int maxConfirms, double _decay)
|
unsigned int maxPeriods, double _decay, unsigned int _scale)
|
||||||
: buckets(defaultBuckets), bucketMap(defaultBucketMap)
|
: buckets(defaultBuckets), bucketMap(defaultBucketMap)
|
||||||
{
|
{
|
||||||
decay = _decay;
|
decay = _decay;
|
||||||
scale = 1;
|
scale = _scale;
|
||||||
confAvg.resize(maxConfirms);
|
confAvg.resize(maxPeriods);
|
||||||
for (unsigned int i = 0; i < maxConfirms; i++) {
|
for (unsigned int i = 0; i < maxPeriods; i++) {
|
||||||
confAvg[i].resize(buckets.size());
|
confAvg[i].resize(buckets.size());
|
||||||
}
|
}
|
||||||
failAvg.resize(maxConfirms);
|
failAvg.resize(maxPeriods);
|
||||||
for (unsigned int i = 0; i < maxConfirms; i++) {
|
for (unsigned int i = 0; i < maxPeriods; i++) {
|
||||||
failAvg[i].resize(buckets.size());
|
failAvg[i].resize(buckets.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -172,8 +173,9 @@ void TxConfirmStats::Record(int blocksToConfirm, double val)
|
||||||
// blocksToConfirm is 1-based
|
// blocksToConfirm is 1-based
|
||||||
if (blocksToConfirm < 1)
|
if (blocksToConfirm < 1)
|
||||||
return;
|
return;
|
||||||
|
int periodsToConfirm = (blocksToConfirm + scale - 1)/scale;
|
||||||
unsigned int bucketindex = bucketMap.lower_bound(val)->second;
|
unsigned int bucketindex = bucketMap.lower_bound(val)->second;
|
||||||
for (size_t i = blocksToConfirm; i <= confAvg.size(); i++) {
|
for (size_t i = periodsToConfirm; i <= confAvg.size(); i++) {
|
||||||
confAvg[i - 1][bucketindex]++;
|
confAvg[i - 1][bucketindex]++;
|
||||||
}
|
}
|
||||||
txCtAvg[bucketindex]++;
|
txCtAvg[bucketindex]++;
|
||||||
|
@ -202,6 +204,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
|
||||||
double totalNum = 0; // Total number of tx's that were ever confirmed
|
double totalNum = 0; // Total number of tx's that were ever confirmed
|
||||||
int extraNum = 0; // Number of tx's still in mempool for confTarget or longer
|
int extraNum = 0; // Number of tx's still in mempool for confTarget or longer
|
||||||
double failNum = 0; // Number of tx's that were never confirmed but removed from the mempool after confTarget
|
double failNum = 0; // Number of tx's that were never confirmed but removed from the mempool after confTarget
|
||||||
|
int periodTarget = (confTarget + scale - 1)/scale;
|
||||||
|
|
||||||
int maxbucketindex = buckets.size() - 1;
|
int maxbucketindex = buckets.size() - 1;
|
||||||
|
|
||||||
|
@ -236,9 +239,9 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
|
||||||
newBucketRange = false;
|
newBucketRange = false;
|
||||||
}
|
}
|
||||||
curFarBucket = bucket;
|
curFarBucket = bucket;
|
||||||
nConf += confAvg[confTarget - 1][bucket];
|
nConf += confAvg[periodTarget - 1][bucket];
|
||||||
totalNum += txCtAvg[bucket];
|
totalNum += txCtAvg[bucket];
|
||||||
failNum += failAvg[confTarget - 1][bucket];
|
failNum += failAvg[periodTarget - 1][bucket];
|
||||||
for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++)
|
for (unsigned int confct = confTarget; confct < GetMaxConfirms(); confct++)
|
||||||
extraNum += unconfTxs[(nBlockHeight - confct)%bins][bucket];
|
extraNum += unconfTxs[(nBlockHeight - confct)%bins][bucket];
|
||||||
extraNum += oldUnconfTxs[bucket];
|
extraNum += oldUnconfTxs[bucket];
|
||||||
|
@ -339,6 +342,7 @@ double TxConfirmStats::EstimateMedianVal(int confTarget, double sufficientTxVal,
|
||||||
result->pass = passBucket;
|
result->pass = passBucket;
|
||||||
result->fail = failBucket;
|
result->fail = failBucket;
|
||||||
result->decay = decay;
|
result->decay = decay;
|
||||||
|
result->scale = scale;
|
||||||
}
|
}
|
||||||
return median;
|
return median;
|
||||||
}
|
}
|
||||||
|
@ -358,7 +362,7 @@ void TxConfirmStats::Read(CAutoFile& filein, int nFileVersion, size_t numBuckets
|
||||||
// Read data file and do some very basic sanity checking
|
// Read data file and do some very basic sanity checking
|
||||||
// buckets and bucketMap are not updated yet, so don't access them
|
// buckets and bucketMap are not updated yet, so don't access them
|
||||||
// If there is a read failure, we'll just discard this entire object anyway
|
// If there is a read failure, we'll just discard this entire object anyway
|
||||||
size_t maxConfirms;
|
size_t maxConfirms, maxPeriods;
|
||||||
|
|
||||||
// The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
|
// The current version will store the decay with each individual TxConfirmStats and also keep a scale factor
|
||||||
if (nFileVersion >= 149900) {
|
if (nFileVersion >= 149900) {
|
||||||
|
@ -366,7 +370,7 @@ void TxConfirmStats::Read(CAutoFile& filein, int nFileVersion, size_t numBuckets
|
||||||
if (decay <= 0 || decay >= 1) {
|
if (decay <= 0 || decay >= 1) {
|
||||||
throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
|
throw std::runtime_error("Corrupt estimates file. Decay must be between 0 and 1 (non-inclusive)");
|
||||||
}
|
}
|
||||||
filein >> scale; //Unused for now
|
filein >> scale;
|
||||||
}
|
}
|
||||||
|
|
||||||
filein >> avg;
|
filein >> avg;
|
||||||
|
@ -378,11 +382,13 @@ void TxConfirmStats::Read(CAutoFile& filein, int nFileVersion, size_t numBuckets
|
||||||
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
|
throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count");
|
||||||
}
|
}
|
||||||
filein >> confAvg;
|
filein >> confAvg;
|
||||||
maxConfirms = confAvg.size();
|
maxPeriods = confAvg.size();
|
||||||
|
maxConfirms = scale * maxPeriods;
|
||||||
|
|
||||||
if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week
|
if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week
|
||||||
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
|
throw std::runtime_error("Corrupt estimates file. Must maintain estimates for between 1 and 1008 (one week) confirms");
|
||||||
}
|
}
|
||||||
for (unsigned int i = 0; i < maxConfirms; i++) {
|
for (unsigned int i = 0; i < maxPeriods; i++) {
|
||||||
if (confAvg[i].size() != numBuckets) {
|
if (confAvg[i].size() != numBuckets) {
|
||||||
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
|
throw std::runtime_error("Corrupt estimates file. Mismatch in feerate conf average bucket count");
|
||||||
}
|
}
|
||||||
|
@ -390,10 +396,10 @@ void TxConfirmStats::Read(CAutoFile& filein, int nFileVersion, size_t numBuckets
|
||||||
|
|
||||||
if (nFileVersion >= 149900) {
|
if (nFileVersion >= 149900) {
|
||||||
filein >> failAvg;
|
filein >> failAvg;
|
||||||
if (maxConfirms != failAvg.size()) {
|
if (maxPeriods != failAvg.size()) {
|
||||||
throw std::runtime_error("Corrupt estimates file. Mismatch in confirms tracked for failures");
|
throw std::runtime_error("Corrupt estimates file. Mismatch in confirms tracked for failures");
|
||||||
}
|
}
|
||||||
for (unsigned int i = 0; i < maxConfirms; i++) {
|
for (unsigned int i = 0; i < maxPeriods; i++) {
|
||||||
if (failAvg[i].size() != numBuckets) {
|
if (failAvg[i].size() != numBuckets) {
|
||||||
throw std::runtime_error("Corrupt estimates file. Mismatch in one of failure average bucket counts");
|
throw std::runtime_error("Corrupt estimates file. Mismatch in one of failure average bucket counts");
|
||||||
}
|
}
|
||||||
|
@ -449,8 +455,9 @@ void TxConfirmStats::removeTx(unsigned int entryHeight, unsigned int nBestSeenHe
|
||||||
blockIndex, bucketindex);
|
blockIndex, bucketindex);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!inBlock && blocksAgo >= 1) {
|
if (!inBlock && (unsigned int)blocksAgo >= scale) { // Only counts as a failure if not confirmed for entire period
|
||||||
for (size_t i = 0; i < blocksAgo && i < failAvg.size(); i++) {
|
unsigned int periodsAgo = blocksAgo / scale;
|
||||||
|
for (size_t i = 0; i < periodsAgo && i < failAvg.size(); i++) {
|
||||||
failAvg[i][bucketindex]++;
|
failAvg[i][bucketindex]++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -490,9 +497,9 @@ CBlockPolicyEstimator::CBlockPolicyEstimator()
|
||||||
bucketMap[INF_FEERATE] = bucketIndex;
|
bucketMap[INF_FEERATE] = bucketIndex;
|
||||||
assert(bucketMap.size() == buckets.size());
|
assert(bucketMap.size() == buckets.size());
|
||||||
|
|
||||||
feeStats = new TxConfirmStats(buckets, bucketMap, MED_BLOCK_CONFIRMS, MED_DECAY);
|
feeStats = new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE);
|
||||||
shortStats = new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_CONFIRMS, SHORT_DECAY);
|
shortStats = new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE);
|
||||||
longStats = new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_CONFIRMS, LONG_DECAY);
|
longStats = new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE);
|
||||||
}
|
}
|
||||||
|
|
||||||
CBlockPolicyEstimator::~CBlockPolicyEstimator()
|
CBlockPolicyEstimator::~CBlockPolicyEstimator()
|
||||||
|
@ -864,7 +871,7 @@ bool CBlockPolicyEstimator::Read(CAutoFile& filein)
|
||||||
|
|
||||||
std::map<double, unsigned int> tempMap;
|
std::map<double, unsigned int> tempMap;
|
||||||
|
|
||||||
std::unique_ptr<TxConfirmStats> tempFeeStats(new TxConfirmStats(tempBuckets, tempMap, MED_BLOCK_CONFIRMS, tempDecay));
|
std::unique_ptr<TxConfirmStats> tempFeeStats(new TxConfirmStats(tempBuckets, tempMap, MED_BLOCK_PERIODS, tempDecay, 1));
|
||||||
tempFeeStats->Read(filein, nVersionThatWrote, tempNum);
|
tempFeeStats->Read(filein, nVersionThatWrote, tempNum);
|
||||||
// if nVersionThatWrote < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
|
// if nVersionThatWrote < 139900 then another TxConfirmStats (for priority) follows but can be ignored.
|
||||||
|
|
||||||
|
@ -884,9 +891,9 @@ bool CBlockPolicyEstimator::Read(CAutoFile& filein)
|
||||||
if (numBuckets <= 1 || numBuckets > 1000)
|
if (numBuckets <= 1 || numBuckets > 1000)
|
||||||
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
|
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
|
||||||
|
|
||||||
std::unique_ptr<TxConfirmStats> fileFeeStats(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_CONFIRMS, MED_DECAY));
|
std::unique_ptr<TxConfirmStats> fileFeeStats(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE));
|
||||||
std::unique_ptr<TxConfirmStats> fileShortStats(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_CONFIRMS, SHORT_DECAY));
|
std::unique_ptr<TxConfirmStats> fileShortStats(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
|
||||||
std::unique_ptr<TxConfirmStats> fileLongStats(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_CONFIRMS, LONG_DECAY));
|
std::unique_ptr<TxConfirmStats> fileLongStats(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));
|
||||||
fileFeeStats->Read(filein, nVersionThatWrote, numBuckets);
|
fileFeeStats->Read(filein, nVersionThatWrote, numBuckets);
|
||||||
fileShortStats->Read(filein, nVersionThatWrote, numBuckets);
|
fileShortStats->Read(filein, nVersionThatWrote, numBuckets);
|
||||||
fileLongStats->Read(filein, nVersionThatWrote, numBuckets);
|
fileLongStats->Read(filein, nVersionThatWrote, numBuckets);
|
||||||
|
|
|
@ -82,6 +82,7 @@ struct EstimationResult
|
||||||
EstimatorBucket pass;
|
EstimatorBucket pass;
|
||||||
EstimatorBucket fail;
|
EstimatorBucket fail;
|
||||||
double decay;
|
double decay;
|
||||||
|
unsigned int scale;
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -93,11 +94,14 @@ class CBlockPolicyEstimator
|
||||||
{
|
{
|
||||||
private:
|
private:
|
||||||
/** Track confirm delays up to 12 blocks medium decay */
|
/** Track confirm delays up to 12 blocks medium decay */
|
||||||
static constexpr unsigned int SHORT_BLOCK_CONFIRMS = 12;
|
static constexpr unsigned int SHORT_BLOCK_PERIODS = 12;
|
||||||
|
static constexpr unsigned int SHORT_SCALE = 1;
|
||||||
/** Track confirm delays up to 48 blocks medium decay */
|
/** Track confirm delays up to 48 blocks medium decay */
|
||||||
static constexpr unsigned int MED_BLOCK_CONFIRMS = 48;
|
static constexpr unsigned int MED_BLOCK_PERIODS = 24;
|
||||||
|
static constexpr unsigned int MED_SCALE = 2;
|
||||||
/** Track confirm delays up to 1008 blocks for longer decay */
|
/** Track confirm delays up to 1008 blocks for longer decay */
|
||||||
static constexpr unsigned int LONG_BLOCK_CONFIRMS = 1008;
|
static constexpr unsigned int LONG_BLOCK_PERIODS = 42;
|
||||||
|
static constexpr unsigned int LONG_SCALE = 24;
|
||||||
/** Historical estimates that are older than this aren't valid */
|
/** Historical estimates that are older than this aren't valid */
|
||||||
static const unsigned int OLDEST_ESTIMATE_HISTORY = 6 * 1008;
|
static const unsigned int OLDEST_ESTIMATE_HISTORY = 6 * 1008;
|
||||||
|
|
||||||
|
|
|
@ -895,6 +895,7 @@ UniValue estimaterawfee(const JSONRPCRequest& request)
|
||||||
"{\n"
|
"{\n"
|
||||||
" \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in BTC)\n"
|
" \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in BTC)\n"
|
||||||
" \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n"
|
" \"decay\" : x.x, (numeric) exponential decay (per block) for historical moving average of confirmation data\n"
|
||||||
|
" \"scale\" : x, (numeric) The resolution of confirmation targets at this time horizon\n"
|
||||||
" \"pass.\" information about the lowest range of feerates to succeed in meeting the threshold\n"
|
" \"pass.\" information about the lowest range of feerates to succeed in meeting the threshold\n"
|
||||||
" \"fail.\" information about the highest range of feerates to fail to meet the threshold\n"
|
" \"fail.\" information about the highest range of feerates to fail to meet the threshold\n"
|
||||||
" \"startrange\" : x.x, (numeric) start of feerate range\n"
|
" \"startrange\" : x.x, (numeric) start of feerate range\n"
|
||||||
|
@ -932,6 +933,7 @@ UniValue estimaterawfee(const JSONRPCRequest& request)
|
||||||
|
|
||||||
result.push_back(Pair("feerate", feeRate == CFeeRate(0) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK())));
|
result.push_back(Pair("feerate", feeRate == CFeeRate(0) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK())));
|
||||||
result.push_back(Pair("decay", buckets.decay));
|
result.push_back(Pair("decay", buckets.decay));
|
||||||
|
result.push_back(Pair("scale", (int)buckets.scale));
|
||||||
result.push_back(Pair("pass.startrange", round(buckets.pass.start)));
|
result.push_back(Pair("pass.startrange", round(buckets.pass.start)));
|
||||||
result.push_back(Pair("pass.endrange", round(buckets.pass.end)));
|
result.push_back(Pair("pass.endrange", round(buckets.pass.end)));
|
||||||
result.push_back(Pair("pass.withintarget", round(buckets.pass.withinTarget * 100.0) / 100.0));
|
result.push_back(Pair("pass.withintarget", round(buckets.pass.withinTarget * 100.0) / 100.0));
|
||||||
|
|
|
@ -99,13 +99,10 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
|
||||||
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
|
BOOST_CHECK(origFeeEst[i-1] <= origFeeEst[i-2]);
|
||||||
}
|
}
|
||||||
int mult = 11-i;
|
int mult = 11-i;
|
||||||
if (i > 1) {
|
if (i % 2 == 0) { //At scale 2, test logic is only correct for even targets
|
||||||
BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
|
BOOST_CHECK(origFeeEst[i-1] < mult*baseRate.GetFeePerK() + deltaFee);
|
||||||
BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
|
BOOST_CHECK(origFeeEst[i-1] > mult*baseRate.GetFeePerK() - deltaFee);
|
||||||
}
|
}
|
||||||
else {
|
|
||||||
BOOST_CHECK(origFeeEst[i-1] == CFeeRate(0).GetFeePerK());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// Fill out rest of the original estimates
|
// Fill out rest of the original estimates
|
||||||
for (int i = 10; i <= 48; i++) {
|
for (int i = 10; i <= 48; i++) {
|
||||||
|
@ -177,7 +174,7 @@ BOOST_AUTO_TEST_CASE(BlockPolicyEstimates)
|
||||||
block.clear();
|
block.clear();
|
||||||
}
|
}
|
||||||
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
|
BOOST_CHECK(feeEst.estimateFee(1) == CFeeRate(0));
|
||||||
for (int i = 2; i < 10; i++) {
|
for (int i = 2; i < 9; i++) { // At 9, the original estimate was already at the bottom (b/c scale = 2)
|
||||||
BOOST_CHECK(feeEst.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
|
BOOST_CHECK(feeEst.estimateFee(i).GetFeePerK() < origFeeEst[i-1] - deltaFee);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue