Merge pull request #6715
60de0d5 Fix mempool package tracking edge case (Suhas Daftuar)
598b25d Add test showing bug in mempool packages (Suhas Daftuar)

commit 82d2aef7b3
3 changed files with 121 additions and 27 deletions
qa/rpc-tests/mempool_packages.py

@@ -15,22 +15,24 @@ class MempoolPackagesTest(BitcoinTestFramework):
 
     def setup_network(self):
         self.nodes = []
-        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0"]))
+        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-debug"]))
+        self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-limitancestorcount=5", "-debug"]))
+        connect_nodes(self.nodes[0], 1)
         self.is_network_split = False
         self.sync_all()
 
     # Build a transaction that spends parent_txid:vout
     # Return amount sent
-    def chain_transaction(self, parent_txid, vout, value, fee, num_outputs):
+    def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
         send_value = satoshi_round((value - fee)/num_outputs)
         inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
         outputs = {}
         for i in xrange(num_outputs):
-            outputs[self.nodes[0].getnewaddress()] = send_value
-        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
-        signedtx = self.nodes[0].signrawtransaction(rawtx)
-        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
-        fulltx = self.nodes[0].getrawtransaction(txid, 1)
+            outputs[node.getnewaddress()] = send_value
+        rawtx = node.createrawtransaction(inputs, outputs)
+        signedtx = node.signrawtransaction(rawtx)
+        txid = node.sendrawtransaction(signedtx['hex'])
+        fulltx = node.getrawtransaction(txid, 1)
         assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
         return (txid, send_value)
 
@@ -46,7 +48,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
         # 100 transactions off a confirmed tx should be fine
         chain = []
         for i in xrange(100):
-            (txid, sent_value) = self.chain_transaction(txid, 0, value, fee, 1)
+            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
             value = sent_value
             chain.append(txid)
 
@@ -69,10 +71,12 @@ class MempoolPackagesTest(BitcoinTestFramework):
 
         # Adding one more transaction on to the chain should fail.
         try:
-            self.chain_transaction(txid, vout, value, fee, 1)
+            self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
         except JSONRPCException as e:
            print "too-long-ancestor-chain successfully rejected"
 
+        # TODO: check that node1's mempool is as expected
+
         # TODO: test ancestor size limits
 
         # Now test descendant chain limits
@@ -82,7 +86,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
 
         transaction_package = []
         # First create one parent tx with 10 children
-        (txid, sent_value) = self.chain_transaction(txid, vout, value, fee, 10)
+        (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
         parent_transaction = txid
         for i in xrange(10):
             transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
@@ -90,7 +94,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
         for i in xrange(1000):
            utxo = transaction_package.pop(0)
            try:
-                (txid, sent_value) = self.chain_transaction(utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
+                (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
                 for j in xrange(10):
                     transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
                 if i == 998:
@@ -101,7 +105,74 @@ class MempoolPackagesTest(BitcoinTestFramework):
         assert_equal(i, 999)
         print "tx that would create too large descendant package successfully rejected"
 
+        # TODO: check that node1's mempool is as expected
+
         # TODO: test descendant size limits
 
+        # Test reorg handling
+        # First, the basics:
+        self.nodes[0].generate(1)
+        sync_blocks(self.nodes)
+        self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
+        self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
+
+        # Now test the case where node1 has a transaction T in its mempool that
+        # depends on transactions A and B which are in a mined block, and the
+        # block containing A and B is disconnected, AND B is not accepted back
+        # into node1's mempool because its ancestor count is too high.
+
+        # Create 8 transactions, like so:
+        # Tx0 -> Tx1 (vout0)
+        #   \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
+        #
+        # Mine them in the next block, then generate a new tx8 that spends
+        # Tx1 and Tx7, and add to node1's mempool, then disconnect the
+        # last block.
+
+        # Create tx0 with 2 outputs
+        utxo = self.nodes[0].listunspent()
+        txid = utxo[0]['txid']
+        value = utxo[0]['amount']
+        vout = utxo[0]['vout']
+
+        send_value = satoshi_round((value - fee)/2)
+        inputs = [ {'txid' : txid, 'vout' : vout} ]
+        outputs = {}
+        for i in xrange(2):
+            outputs[self.nodes[0].getnewaddress()] = send_value
+        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+        signedtx = self.nodes[0].signrawtransaction(rawtx)
+        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
+        tx0_id = txid
+        value = send_value
+
+        # Create tx1
+        (tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
+
+        # Create tx2-7
+        vout = 1
+        txid = tx0_id
+        for i in xrange(6):
+            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
+            vout = 0
+            value = sent_value
+
+        # Mine these in a block
+        self.nodes[0].generate(1)
+        self.sync_all()
+
+        # Now generate tx8, with a big fee
+        inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
+        outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
+        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
+        signedtx = self.nodes[0].signrawtransaction(rawtx)
+        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
+        sync_mempools(self.nodes)
+
+        # Now try to disconnect the tip on each node...
+        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
+        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+        sync_blocks(self.nodes)
+
 if __name__ == '__main__':
     MempoolPackagesTest().main()
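The new node1 setting ("-limitancestorcount=5") and the too-long-ancestor-chain rejection exercised above both bound how many unconfirmed ancestors a transaction may bring into the mempool. As a rough illustration of what that limit means, and not Bitcoin Core's actual CTxMemPool code, a standalone sketch with hypothetical types could count a candidate's distinct in-mempool ancestors like this:

// Minimal sketch (not Bitcoin Core code): count distinct in-mempool ancestors
// of a candidate transaction and reject it once a limit would be exceeded.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using TxId = std::string;
// Hypothetical view of the mempool: each txid maps to its in-mempool parents.
using ParentMap = std::map<TxId, std::vector<TxId>>;

bool ExceedsAncestorLimit(const ParentMap& mempool, const std::vector<TxId>& candidateParents,
                          size_t limitAncestorCount)
{
    std::set<TxId> ancestors;
    std::vector<TxId> stack(candidateParents);
    while (!stack.empty()) {
        TxId cur = stack.back();
        stack.pop_back();
        if (!ancestors.insert(cur).second) continue;                // already visited
        if (ancestors.size() + 1 > limitAncestorCount) return true; // +1 counts the candidate itself
        auto it = mempool.find(cur);
        if (it == mempool.end()) continue;                          // parent is confirmed, not in the mempool
        for (const TxId& p : it->second) stack.push_back(p);
    }
    return false;
}

int main()
{
    // Chain tx1 <- tx2 <- ... <- tx5 already in the mempool.
    ParentMap mempool{{"tx1", {}}, {"tx2", {"tx1"}}, {"tx3", {"tx2"}}, {"tx4", {"tx3"}}, {"tx5", {"tx4"}}};
    // A child of tx5 would have 5 in-mempool ancestors, so a limit of 5 rejects it.
    std::cout << (ExceedsAncestorLimit(mempool, {"tx5"}, 5) ? "rejected" : "accepted") << "\n";
}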
src/txmempool.cpp

@@ -159,17 +159,15 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
     }
 }
 
-bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString)
+bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents /* = true */)
 {
     setEntries parentHashes;
     const CTransaction &tx = entry.GetTx();
 
-    // Get parents of this transaction that are in the mempool
-    // Entry may or may not already be in the mempool, and GetMemPoolParents()
-    // is only valid for entries in the mempool, so we iterate mapTx to find
-    // parents.
-    // TODO: optimize this so that we only check limits and walk
-    // tx.vin when called on entries not already in the mempool.
+    if (fSearchForParents) {
+        // Get parents of this transaction that are in the mempool
+        // GetMemPoolParents() is only valid for entries in the mempool, so we
+        // iterate mapTx to find parents.
     for (unsigned int i = 0; i < tx.vin.size(); i++) {
         txiter piter = mapTx.find(tx.vin[i].prevout.hash);
         if (piter != mapTx.end()) {
@@ -180,6 +178,12 @@ bool CTxMemPool::CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntr
             }
         }
     }
+    } else {
+        // If we're not searching for parents, we require this to be an
+        // entry in the mempool already.
+        txiter it = mapTx.iterator_to(entry);
+        parentHashes = GetMemPoolParents(it);
+    }
 
     size_t totalSizeWithAncestors = entry.GetTxSize();
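The new fSearchForParents flag above chooses between two ways of finding an entry's in-mempool parents: scan the transaction's inputs against mapTx (valid whether or not the entry has been added yet), or read the parent links the mempool already maintains for entries it contains. A simplified, self-contained sketch of the two strategies (hypothetical types, not the real CTxMemPool API):

// Sketch only: two ways to find a transaction's in-mempool parents, mirroring
// the fSearchForParents branch above. Types are hypothetical simplifications.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using TxId = std::string;

struct Tx {
    TxId id;
    std::vector<TxId> vin;  // txids this transaction spends from
};

struct Mempool {
    std::map<TxId, Tx> mapTx;                 // all mempool transactions
    std::map<TxId, std::set<TxId>> mapLinks;  // recorded in-mempool parents per entry
};

// Used when the entry may not be in the mempool yet: walk tx.vin and keep the
// inputs that refer to mempool transactions.
std::set<TxId> ParentsBySearch(const Mempool& pool, const Tx& tx)
{
    std::set<TxId> parents;
    for (const TxId& prev : tx.vin) {
        if (pool.mapTx.count(prev)) parents.insert(prev);
    }
    return parents;
}

// Used when the entry is known to be in the mempool: read the links the
// mempool maintains, without touching tx.vin at all.
std::set<TxId> ParentsByLinks(const Mempool& pool, const TxId& txid)
{
    auto it = pool.mapLinks.find(txid);
    return it != pool.mapLinks.end() ? it->second : std::set<TxId>{};
}

int main()
{
    Mempool pool;
    pool.mapTx["a"] = {"a", {}};
    pool.mapTx["b"] = {"b", {"a", "confirmed_tx"}};  // one mempool parent, one confirmed input
    pool.mapLinks["b"] = {"a"};

    // In a consistent mempool the two strategies agree; the search is just more work.
    std::cout << "by search: " << ParentsBySearch(pool, pool.mapTx["b"]).size() << " parent(s)\n";
    std::cout << "by links:  " << ParentsByLinks(pool, "b").size() << " parent(s)\n";
}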
@@ -249,7 +253,24 @@ void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove)
         setEntries setAncestors;
         const CTxMemPoolEntry &entry = *removeIt;
         std::string dummy;
-        CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy);
+        // Since this is a tx that is already in the mempool, we can call CMPA
+        // with fSearchForParents = false. If the mempool is in a consistent
+        // state, then using true or false should both be correct, though false
+        // should be a bit faster.
+        // However, if we happen to be in the middle of processing a reorg, then
+        // the mempool can be in an inconsistent state. In this case, the set
+        // of ancestors reachable via mapLinks will be the same as the set of
+        // ancestors whose packages include this transaction, because when we
+        // add a new transaction to the mempool in addUnchecked(), we assume it
+        // has no children, and in the case of a reorg where that assumption is
+        // false, the in-mempool children aren't linked to the in-block tx's
+        // until UpdateTransactionsFromBlock() is called.
+        // So if we're being called during a reorg, ie before
+        // UpdateTransactionsFromBlock() has been called, then mapLinks[] will
+        // differ from the set of mempool parents we'd calculate by searching,
+        // and it's important that we use the mapLinks[] notion of ancestor
+        // transactions as the set of things to update for removal.
+        CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit, nNoLimit, dummy, false);
         // Note that UpdateAncestorsOf severs the child links that point to
         // removeIt in the entries for the parents of removeIt. This is
         // fine since we don't need to use the mempool children of any entries
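The long comment above is the heart of the fix: during a reorg, before UpdateTransactionsFromBlock() has run, the parents found by scanning a transaction's inputs can differ from the parents recorded in mapLinks, and the removal bookkeeping must follow the mapLinks view. A toy model of that disagreement (again hypothetical structures, not CTxMemPool):

// Toy model (not CTxMemPool): mid-reorg, a transaction's parents found by
// scanning its inputs can differ from the parents recorded in the link map,
// which is the view the fSearchForParents=false path reads.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

using TxId = std::string;

struct Mempool {
    std::map<TxId, std::vector<TxId>> mapTx;     // txid -> txids it spends from
    std::map<TxId, std::set<TxId>> parentLinks;  // txid -> linked in-mempool parents

    // Mimics addUnchecked()'s assumption: a newly added tx has no in-mempool
    // children yet, so only its own parent links are recorded.
    void AddUnchecked(const TxId& txid, const std::vector<TxId>& vin)
    {
        mapTx[txid] = vin;
        std::set<TxId> parents;
        for (const TxId& prev : vin)
            if (mapTx.count(prev)) parents.insert(prev);
        parentLinks[txid] = parents;
    }
};

std::set<TxId> ParentsBySearch(const Mempool& pool, const TxId& txid)
{
    std::set<TxId> parents;
    for (const TxId& prev : pool.mapTx.at(txid))
        if (pool.mapTx.count(prev)) parents.insert(prev);
    return parents;
}

int main()
{
    Mempool pool;
    // Child "T" entered the mempool while its parent "B" was still in a block,
    // so T has no linked in-mempool parents.
    pool.AddUnchecked("T", {"B"});

    // The block containing B is disconnected and B re-enters the mempool.
    // Nothing updates T's links until something like UpdateTransactionsFromBlock() runs.
    pool.AddUnchecked("B", {});

    std::cout << "T parents by search: " << ParentsBySearch(pool, "T").size() << "\n"; // 1 (finds B)
    std::cout << "T parents by links:  " << pool.parentLinks["T"].size() << "\n";      // 0 (link not yet made)
}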
src/txmempool.h

@@ -392,8 +392,10 @@ public:
      *  limitDescendantCount = max number of descendants any ancestor can have
      *  limitDescendantSize = max size of descendants any ancestor can have
      *  errString = populated with error reason if any limits are hit
+     *  fSearchForParents = whether to search a tx's vin for in-mempool parents, or
+     *    look up parents from mapLinks. Must be true for entries not in the mempool
      */
-    bool CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString);
+    bool CalculateMemPoolAncestors(const CTxMemPoolEntry &entry, setEntries &setAncestors, uint64_t limitAncestorCount, uint64_t limitAncestorSize, uint64_t limitDescendantCount, uint64_t limitDescendantSize, std::string &errString, bool fSearchForParents = true);
 
     unsigned long size()
     {
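Because fSearchForParents defaults to true, existing callers of CalculateMemPoolAncestors compile and behave as before; only the removal path passes false explicitly. A minimal sketch of that defaulted-argument pattern, with a simplified signature rather than the real one:

// Minimal illustration of the defaulted-parameter pattern used for
// fSearchForParents; the signature here is simplified, not CTxMemPool's.
#include <iostream>
#include <string>

bool CalculateAncestorsSketch(const std::string& txid, bool fSearchForParents = true)
{
    std::cout << txid << ": "
              << (fSearchForParents ? "search tx.vin for in-mempool parents" : "use the mempool's own parent links")
              << "\n";
    return true;
}

int main()
{
    CalculateAncestorsSketch("new-tx");            // existing call sites keep the old behavior
    CalculateAncestorsSketch("mempool-tx", false); // removal path opts into the mapLinks lookup
}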