Merge #12643: [qa] util: Remove unused sync_chain

fa1436c42 [qa] util: Remove unused sync_chain (MarcoFalke)

Pull request description:

  The util function `sync_blocks` already checks for equal chains, so we can remove the unused `sync_chain`.

  Also cleaned up the errors that are printed in case of timeout:

  ```
  AssertionError: Block sync timed out:
    '72a3a3e9dcfd0a09204c3447af0f481d19641eeadbe6a91b8e680ed614bc7712'
    '5032af4ae22ae7a21afdc9d9f516877309c4dd8ef2ecadb9354be7088439b4a6'
    '5032af4ae22ae7a21afdc9d9f516877309c4dd8ef2ecadb9354be7088439b4a6'
  ```
  and
  ```
  AssertionError: Mempool sync timed out:
    {'c2af943d9b321c36e0f5a153a9d3d8b11bdd46ceb28e38f5fd2c722e3edb3563'}
    set()
    set()
  ```

Tree-SHA512: cb4ad30e3e3773072f59afa2c81cfa27bd8f389a93f02acb919990298627fcfbaa53a3d3944d827cc8a5d009871e42a47ea09e9bb93e85c22e3af6a24a574e5d
This commit is contained in:
Wladimir J. van der Laan 2018-03-13 17:45:06 +01:00
commit 0f0229d0c3
No known key found for this signature in database
GPG key ID: 1E4AED62986CD25D
2 changed files with 10 additions and 36 deletions

View file

@@ -8,7 +8,6 @@ from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal,
     connect_nodes_bi,
-    sync_chain,
     sync_blocks,
 )
@@ -72,7 +71,7 @@ class PreciousTest(BitcoinTestFramework):
     assert_equal(self.nodes[0].getbestblockhash(), hashC)
     self.log.info("Make Node1 prefer block C")
     self.nodes[1].preciousblock(hashC)
-    sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
+    sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
     assert_equal(self.nodes[1].getbestblockhash(), hashC)
     self.log.info("Make Node1 prefer block G again")
     self.nodes[1].preciousblock(hashG)

View file

@@ -370,54 +370,29 @@ def sync_blocks(rpc_connections, *, wait=1, timeout=60):
     one node already synced to the latest, stable tip, otherwise there's a
     chance it might return before all nodes are stably synced.
     """
-    # Use getblockcount() instead of waitforblockheight() to determine the
-    # initial max height because the two RPCs look at different internal global
-    # variables (chainActive vs latestBlock) and the former gets updated
-    # earlier.
-    maxheight = max(x.getblockcount() for x in rpc_connections)
-    start_time = cur_time = time.time()
-    while cur_time <= start_time + timeout:
-        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
-        if all(t["height"] == maxheight for t in tips):
-            if all(t["hash"] == tips[0]["hash"] for t in tips):
-                return
-            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
-                "".join("\n  {!r}".format(tip) for tip in tips)))
-        cur_time = time.time()
-    raise AssertionError("Block sync to height {} timed out:{}".format(
-        maxheight, "".join("\n  {!r}".format(tip) for tip in tips)))
-
-def sync_chain(rpc_connections, *, wait=1, timeout=60):
-    """
-    Wait until everybody has the same best block
-    """
-    while timeout > 0:
+    stop_time = time.time() + timeout
+    while time.time() <= stop_time:
         best_hash = [x.getbestblockhash() for x in rpc_connections]
-        if best_hash == [best_hash[0]] * len(best_hash):
+        if best_hash.count(best_hash[0]) == len(rpc_connections):
             return
         time.sleep(wait)
-        timeout -= wait
-    raise AssertionError("Chain sync failed: Best block hashes don't match")
+    raise AssertionError("Block sync timed out:{}".format("".join("\n  {!r}".format(b) for b in best_hash)))

 def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
     """
     Wait until everybody has the same transactions in their memory
     pools
     """
-    while timeout > 0:
-        pool = set(rpc_connections[0].getrawmempool())
-        num_match = 1
-        for i in range(1, len(rpc_connections)):
-            if set(rpc_connections[i].getrawmempool()) == pool:
-                num_match = num_match + 1
-        if num_match == len(rpc_connections):
+    stop_time = time.time() + timeout
+    while time.time() <= stop_time:
+        pool = [set(r.getrawmempool()) for r in rpc_connections]
+        if pool.count(pool[0]) == len(rpc_connections):
             if flush_scheduler:
                 for r in rpc_connections:
                     r.syncwithvalidationinterfacequeue()
             return
         time.sleep(wait)
-        timeout -= wait
-    raise AssertionError("Mempool sync failed")
+    raise AssertionError("Mempool sync timed out:{}".format("".join("\n  {!r}".format(m) for m in pool)))

 # Transaction/Block functions
 #############################