#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""

import random
from decimal import Decimal

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN

# Construct two trivial P2SH scripts and the ScriptSigs that spend them,
# so we can create many transactions without spending time signing.
redeem_script_1 = CScript([OP_1, OP_DROP])
redeem_script_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(redeem_script_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(redeem_script_2), OP_EQUAL])
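# The redeem scripts are effectively no-ops: OP_1/OP_2 is pushed and immediately
# dropped (the two opcodes only serve to make the scripts distinct), so the OP_TRUE
# supplied by the ScriptSig is what remains on the stack and satisfies them.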

# Associated ScriptSigs that satisfy (spend) P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])]
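
# log is bound to the framework's logger in EstimateFeeTest.run_test() so that
# the check_estimates helper below can write to the test log.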
global log

def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
    """
    Create and send a transaction with a random fee.
    The transaction pays to a trivial P2SH script, and assumes that its inputs
    are of the same form.
    The function takes a list of confirmed outputs and unconfirmed outputs
    and attempts to use the confirmed list first for its inputs.
    It adds the newly created outputs to the unconfirmed list.
    Returns (raw transaction, fee)
    """
    # It's best to exponentially distribute our random fees
    # because the buckets are exponentially spaced.
    # Exponentially distributed from 1-128 * fee_increment
    rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
    # Total fee ranges from min_fee to min_fee + 127*fee_increment
    fee = min_fee - fee_increment + satoshi_round(rand_fee)
    tx = CTransaction()
    total_in = Decimal("0.00000000")
    while total_in <= (amount + fee) and len(conflist) > 0:
        t = conflist.pop(0)
        total_in += t["amount"]
        tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
    if total_in <= amount + fee:
        while total_in <= (amount + fee) and len(unconflist) > 0:
            t = unconflist.pop(0)
            total_in += t["amount"]
            tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
        if total_in <= amount + fee:
            raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
    tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
    tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
    # These transactions don't need to be signed, but we still have to insert
    # the ScriptSig that will satisfy the ScriptPubKey.
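    # Every transaction in this test pays to P2SH_1 at vout 0 and P2SH_2 at vout 1,
    # so the prevout index doubles as an index into SCRIPT_SIG.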
    for inp in tx.vin:
        inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
    txid = from_node.sendrawtransaction(ToHex(tx), True)
    unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
    unconflist.append({"txid": txid, "vout": 1, "amount": amount})

    return (ToHex(tx), fee)

def split_inputs(from_node, txins, txouts, initial_split=False):
    """
    We need to generate a lot of inputs so we can generate a ton of transactions.
    This function takes an input from txins, and creates and sends a transaction
    which splits the value into 2 outputs which are appended to txouts.
    Previously this was designed to use small inputs so they wouldn't have
    a high coin age when the notion of priority still existed.
    """
    prevtxout = txins.pop()
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))

    half_change = satoshi_round(prevtxout["amount"] / 2)
    rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
    tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
    tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))

    # If this is the initial split we actually need to sign the transaction.
    # Otherwise we just need to insert the proper ScriptSig.
    if initial_split:
        completetx = from_node.signrawtransaction(ToHex(tx))["hex"]
    else:
        tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
        completetx = ToHex(tx)
    txid = from_node.sendrawtransaction(completetx, True)
    txouts.append({"txid": txid, "vout": 0, "amount": half_change})
    txouts.append({"txid": txid, "vout": 1, "amount": rem_change})

def check_estimates(node, fees_seen, max_invalid, print_estimates=True):
    """
    This function calls estimatefee and verifies that the estimates
    meet certain invariants: each estimate must lie within the range of
    fees actually paid, and the estimates must decrease monotonically as
    the confirmation target increases.
    """
    all_estimates = [node.estimatefee(i) for i in range(1, 26)]
    if print_estimates:
        log.info([str(all_estimates[e - 1]) for e in [1, 2, 3, 6, 15, 25]])
    delta = 1.0e-6  # account for rounding error
    last_e = max(fees_seen)
    for e in [x for x in all_estimates if x >= 0]:
        # Estimates should be within the bounds of what transaction fees actually were:
        if float(e) + delta < min(fees_seen) or float(e) - delta > max(fees_seen):
            raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                 % (float(e), min(fees_seen), max(fees_seen)))
        # Estimates should be monotonically decreasing
        if float(e) - delta > last_e:
            raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
                                 % (float(e), float(last_e)))
        last_e = e
    valid_estimate = False
    invalid_estimates = 0
    for i, e in enumerate(all_estimates):  # estimate is for i+1
        if e >= 0:
            valid_estimate = True
            if i >= 13:  # for n>=14 estimatesmartfee(n/2) should be at least as high as estimatefee(n)
                assert(node.estimatesmartfee((i + 1) // 2)["feerate"] > float(e) - delta)
        else:
            invalid_estimates += 1

            # estimatesmartfee should still be valid
            approx_estimate = node.estimatesmartfee(i + 1)["feerate"]
            answer_found = node.estimatesmartfee(i + 1)["blocks"]
            assert(approx_estimate > 0)
            assert(answer_found > i + 1)

            # Once we're at a high enough confirmation count that we can give an estimate,
            # we should have estimates for all higher confirmation counts
            if valid_estimate:
                raise AssertionError("Invalid estimate appears at higher confirm count than valid estimate")

    # Check on the expected number of different confirmation counts
    # that we might not have valid estimates for
    if invalid_estimates > max_invalid:
        raise AssertionError("More than (%d) invalid estimates" % (max_invalid))
    return all_estimates

class EstimateFeeTest(BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 3
        self.setup_clean_chain = False

    def setup_network(self):
|
2017-01-18 00:34:40 +01:00
|
|
|
"""
|
2014-08-26 22:28:32 +02:00
|
|
|
We'll setup the network to have 3 nodes that all mine with different parameters.
|
2017-01-20 04:46:50 +01:00
|
|
|
But first we need to use one node to create a lot of outputs
|
2014-08-26 22:28:32 +02:00
|
|
|
which we will use to generate our transactions.
|
2017-01-18 00:34:40 +01:00
|
|
|
"""
|
2014-10-20 14:14:04 +02:00
|
|
|
self.nodes = []
|
2014-08-26 22:28:32 +02:00
|
|
|
# Use node0 to mine blocks for input splitting
|
|
|
|
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000",
|
2016-12-04 00:03:51 +01:00
|
|
|
"-whitelist=127.0.0.1"]))
|
2014-08-26 22:28:32 +02:00
|
|
|
|
2017-03-08 00:46:17 +01:00
|
|
|
self.log.info("This test is time consuming, please be patient")
|
|
|
|
self.log.info("Splitting inputs so we can generate tx's")
|
2014-08-26 22:28:32 +02:00
|
|
|
self.txouts = []
|
|
|
|
self.txouts2 = []
|
|
|
|
# Split a coinbase into two transaction puzzle outputs
|
|
|
|
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
|
|
|
|
|
|
|
|
# Mine
|
|
|
|
while (len(self.nodes[0].getrawmempool()) > 0):
|
|
|
|
self.nodes[0].generate(1)

        # Repeatedly split those 2 outputs, doubling twice for each rep
        # Use txouts to monitor the available utxos, since these won't be tracked in the wallet
        reps = 0
        while reps < 5:
            # Double txouts to txouts2
            while len(self.txouts) > 0:
                split_inputs(self.nodes[0], self.txouts, self.txouts2)
            while len(self.nodes[0].getrawmempool()) > 0:
                self.nodes[0].generate(1)
            # Double txouts2 to txouts
            while len(self.txouts2) > 0:
                split_inputs(self.nodes[0], self.txouts2, self.txouts)
            while len(self.nodes[0].getrawmempool()) > 0:
                self.nodes[0].generate(1)
            reps += 1
        self.log.info("Finished splitting")

        # Now we can connect the other nodes; we didn't want to connect them earlier
        # so the estimates would not be affected by the splitting transactions.
        # Node1 mines blocks that are small, but still bigger than the expected transaction rate.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
        # so 17k leaves room for 110 or so transactions.
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-blockmaxsize=17000", "-maxorphantx=1000"]))
        connect_nodes(self.nodes[1], 0)

        # Node2 is a stingy miner that produces blocks which are too small
        # (room for only 55 or so transactions)
        node2args = ["-blockmaxsize=8000", "-maxorphantx=1000"]
        self.nodes.append(start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[2], 1)

        self.sync_all()

    def transact_and_mine(self, numblocks, mining_node):
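        """
        Mine numblocks blocks with mining_node, sending roughly 100 random-fee
        transactions from node1/node2 before each block, recording the fee rate
        paid by each transaction in self.fees_per_kb and tracking which outputs
        get confirmed.
        """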
        min_fee = Decimal("0.00001")
        # We will now mine numblocks blocks generating on average 100 transactions between each block
        # We shuffle our confirmed txout set before each set of transactions
        # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
        # resorting to tx's that depend on the mempool when those run out
        for i in range(numblocks):
            random.shuffle(self.confutxo)
            for j in range(random.randrange(100 - 50, 100 + 50)):
                from_index = random.randint(1, 2)
                (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
                                                      self.memutxo, Decimal("0.005"), min_fee, min_fee)
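                # The raw transaction hex string encodes two characters per byte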
                tx_kbytes = (len(txhex) // 2) / 1000.0
                self.fees_per_kb.append(float(fee) / tx_kbytes)
            sync_mempools(self.nodes[0:3], wait=.1)
            mined = mining_node.getblock(mining_node.generate(1)[0], True)["tx"]
            sync_blocks(self.nodes[0:3], wait=.1)
            # update which txouts are confirmed
            newmem = []
            for utx in self.memutxo:
                if utx["txid"] in mined:
                    self.confutxo.append(utx)
                else:
                    newmem.append(utx)
            self.memutxo = newmem

    def run_test(self):
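        """
        Generate random-fee transactions while mining with two different block
        sizes, then verify the node's fee estimates against the fees actually paid.
        """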
        # Make log handler available to helper functions
        global log
        log = self.log
        self.fees_per_kb = []
        self.memutxo = []
        self.confutxo = self.txouts  # Start with the set of confirmed txouts after splitting
        self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")

        for i in range(2):
            self.log.info("Creating transactions and mining them with a block size that can't keep up")
            # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
            self.transact_and_mine(10, self.nodes[2])
            check_estimates(self.nodes[1], self.fees_per_kb, 14)

            self.log.info("Creating transactions and mining them at a block size that is just big enough")
            # Generate transactions while mining 10 more blocks, this time with node1
            # which mines blocks with capacity just above the rate that transactions are being created
            self.transact_and_mine(10, self.nodes[1])
            check_estimates(self.nodes[1], self.fees_per_kb, 2)

        # Finish by mining a normal-sized block:
        while len(self.nodes[1].getrawmempool()) > 0:
            self.nodes[1].generate(1)

        sync_blocks(self.nodes[0:3], wait=.1)
        self.log.info("Final estimates after emptying mempools")
        check_estimates(self.nodes[1], self.fees_per_kb, 2)

if __name__ == '__main__':
    EstimateFeeTest().main()