#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
|
2014-11-18 22:16:32 +01:00
|
|
|
|
2017-11-16 15:53:35 +01:00
|
|
|
Setup:
|
2014-11-18 22:16:32 +01:00
|
|
|
|
2017-11-17 23:15:28 +01:00
|
|
|
- Two nodes:
|
|
|
|
- node0 is the node-under-test. We create two p2p connections to it. The
|
|
|
|
first p2p connection is a control and should only ever receive inv's. The
|
|
|
|
second p2p connection tests the headers sending logic.
|
|
|
|
- node1 is used to create reorgs.
|
2014-11-18 22:16:32 +01:00
|
|
|
|
2017-11-01 23:57:26 +01:00
|
|
|
test_null_locators
|
|
|
|
==================
|
|
|
|
|
|
|
|
Sends two getheaders requests with null locator values. First request's hashstop
|
|
|
|
value refers to validated block, while second request's hashstop value refers to
|
|
|
|
a block which hasn't been validated. Verifies only the first request returns
|
|
|
|
headers.
|
|
|
|
|
|
|
|
test_nonnull_locators
|
|
|
|
=====================
|
|
|
|
|
2014-11-18 22:16:32 +01:00
|
|
|
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
   send getdata for the block [expect: block]
b. node mines another block [expect: inv]
   send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
   peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]

Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
   peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
   * for announce-type in {inv, header}
     - peer mines N blocks, announces with announce-type
       [ expect: getheaders/getdata or getdata, deliver block(s) ]
     - node mines a block [ expect: 1 header ]

Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
  * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
  * node mines an 8-block reorg [ expect: inv at tip ]
  * peer responds with getblocks/getdata [expect: inv, blocks ]
  * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
  * node mines another block at tip [ expect: inv ]
  * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
  * peer requests block [ expect: block ]
  * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
  * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
  * node mines 1 block [expect: 1 header, peer responds with getdata]

Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
   Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
   Expect: one getdata request for all 3 blocks.
   (Send blocks.)
c. Announce 1 header that forks off the last two blocks.
   Expect: no response.
d. Announce 1 more header that builds on that fork.
   Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
   Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
   Expect: no response.

Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
   1. Announce a header that doesn't connect.
      Expect: getheaders message
   2. Send headers chain.
      Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
   Expect: getheaders message each time.
c. Announce a header that does connect.
   Expect: no response.
d. Announce 49 headers that don't connect.
   Expect: getheaders message each time.
e. Announce one more that doesn't connect.
   Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.mininode import (
    CBlockHeader,
    NODE_WITNESS,
    P2PInterface,
    mininode_lock,
    msg_block,
    msg_getblocks,
    msg_getdata,
    msg_getheaders,
    msg_headers,
    msg_inv,
    msg_sendheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    wait_until,
)

# Timeout (in seconds) used when waiting for a getdata that direct fetch
# should trigger; the node is expected to respond almost immediately.
DIRECT_FETCH_RESPONSE_TIME = 0.05

class BaseNode(P2PInterface):
    def __init__(self):
        super().__init__()

        self.block_announced = False
        self.last_blockhash_announced = None
        self.recent_headers_announced = []

    def send_get_data(self, block_hashes):
        """Request data for a list of block hashes."""
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))  # inv type 2 == "Block"
        self.send_message(msg)

    def send_get_headers(self, locator, hashstop):
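        """Send a getheaders message with the given locator and hashstop."""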
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.send_message(msg)

    def send_block_inv(self, blockhash):
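        """Announce a single block to the node via an inv message."""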
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
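        """Announce the given blocks to the node with a headers message."""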
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def send_getblocks(self, locator):
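        """Send a getblocks message with the given locator."""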
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)

    def wait_for_getdata(self, hash_list, timeout=60):
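        """Wait until the last getdata message received requests exactly hash_list."""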
        if hash_list == []:
            return

        test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_block_announcement(self, block_hash, timeout=60):
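        """Wait until block_hash has been announced to us, via either inv or headers."""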
        test_function = lambda: self.last_blockhash_announced == block_hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def on_inv(self, message):
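        """Remember the last block announced to us via an inv message."""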
        self.block_announced = True
        self.last_blockhash_announced = message.inv[-1].hash

    def on_headers(self, message):
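        """Remember the blocks announced to us via headers messages."""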
        if len(message.headers):
            self.block_announced = True
            for x in message.headers:
                x.calc_sha256()
                # append because headers may be announced over multiple messages.
                self.recent_headers_announced.append(x.sha256)
            self.last_blockhash_announced = message.headers[-1].sha256

    def clear_block_announcements(self):
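        """Reset all state that tracks block announcements from the node."""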
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
            self.recent_headers_announced = []

    def check_last_headers_announcement(self, headers):
        """Test whether the last headers announcements received are right.

        Headers may be announced across more than one message."""
        test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
        wait_until(test_function, timeout=60, lock=mininode_lock)
        with mininode_lock:
            assert_equal(self.recent_headers_announced, headers)
            self.block_announced = False
            self.last_message.pop("headers", None)
            self.recent_headers_announced = []

    def check_last_inv_announcement(self, inv):
        """Test whether the last announcement received had the right inv.

        inv should be a list of block hashes."""

        test_function = lambda: self.block_announced
        wait_until(test_function, timeout=60, lock=mininode_lock)

        with mininode_lock:
            compare_inv = []
            if "inv" in self.last_message:
                compare_inv = [x.hash for x in self.last_message["inv"].inv]
            assert_equal(compare_inv, inv)
            self.block_announced = False
            self.last_message.pop("inv", None)

class SendHeadersTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def mine_blocks(self, count):
        """Mine count blocks and return the new tip."""

        # Clear out block announcements from each p2p listener
        [x.clear_block_announcements() for x in self.nodes[0].p2ps]
        self.nodes[0].generatetoaddress(count, self.nodes[0].get_deterministic_priv_key().address)
        return int(self.nodes[0].getbestblockhash(), 16)

    def mine_reorg(self, length):
        """Mine a reorg that invalidates length blocks (replacing them with length+1 blocks).

        Note: we clear the state of our p2p connections after the
        to-be-reorged-out blocks are mined, so that we don't break later tests.
        Return the list of block hashes newly mined."""

        # make sure all invalidated blocks are node0's
        self.nodes[0].generatetoaddress(length, self.nodes[0].get_deterministic_priv_key().address)
        self.sync_blocks(self.nodes, wait=0.1)
        for x in self.nodes[0].p2ps:
            x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
            x.clear_block_announcements()

        tip_height = self.nodes[1].getblockcount()
        hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
        self.nodes[1].invalidateblock(hash_to_invalidate)
        all_hashes = self.nodes[1].generatetoaddress(length + 1, self.nodes[1].get_deterministic_priv_key().address)  # Must be longer than the orig chain
        self.sync_blocks(self.nodes, wait=0.1)
        return [int(x, 16) for x in all_hashes]

    def run_test(self):
        # Setup the p2p connections
        inv_node = self.nodes[0].add_p2p_connection(BaseNode())
        # Make sure NODE_NETWORK is not set for test_node, so no block download
        # will occur outside of direct fetching
        test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)

        # Ensure verack's have been processed by our peer
        inv_node.sync_with_ping()
        test_node.sync_with_ping()

        self.test_null_locators(test_node, inv_node)
        self.test_nonnull_locators(test_node, inv_node)

    def test_null_locators(self, test_node, inv_node):
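        """Test getheaders with a null locator: headers are returned only when hashstop refers to a validated block."""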
        tip = self.nodes[0].getblockheader(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0])
        tip_hash = int(tip["hash"], 16)

        inv_node.check_last_inv_announcement(inv=[tip_hash])
        test_node.check_last_inv_announcement(inv=[tip_hash])

        self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
        test_node.clear_block_announcements()
        test_node.send_get_headers(locator=[], hashstop=tip_hash)
        test_node.check_last_headers_announcement(headers=[tip_hash])

        self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
        block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
        block.solve()
        test_node.send_header_for_blocks([block])
        test_node.clear_block_announcements()
        test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
        test_node.sync_with_ping()
        assert_equal(test_node.block_announced, False)
        inv_node.clear_block_announcements()
        test_node.send_message(msg_block(block))
        inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])

    def test_nonnull_locators(self, test_node, inv_node):
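        """Run Parts 1-5 of the headers announcement scenario described in the module docstring."""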
        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info("Part 1: headers don't start before sendheaders message...")
        for i in range(4):
            self.log.debug("Part 1.{}: starting...".format(i))
            old_tip = tip
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_inv_announcement(inv=[tip])
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # next try requesting header and block
                test_node.send_get_headers(locator=[old_tip], hashstop=tip)
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_block_announcements()  # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                inv_node.clear_block_announcements()
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height + 1), block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256])
                test_node.send_message(msg_block(new_block))
                test_node.sync_with_ping()  # make sure this block is processed
                wait_until(lambda: inv_node.block_announced, timeout=60, lock=mininode_lock)
                inv_node.clear_block_announcements()
                test_node.clear_block_announcements()

        self.log.info("Part 1: success!")
        self.log.info("Part 2: announce blocks with headers after sendheaders message...")
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(msg_sendheaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.send_get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()

        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        inv_node.check_last_inv_announcement(inv=[tip])
        test_node.check_last_headers_announcement(headers=[tip])

        height = self.nodes[0].getblockcount() + 1
        block_time += 10  # Advance far enough ahead
        for i in range(10):
            self.log.debug("Part 2.{}: starting...".format(i))
            # Mine i+1 blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                self.log.debug("Part 2.{}.{}: starting...".format(i, j))
                blocks = []
                for b in range(i + 1):
                    blocks.append(create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders()
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    [inv_node.send_block_inv(x.sha256) for x in blocks]
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [test_node.send_message(msg_block(x)) for x in blocks]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert "inv" not in inv_node.last_message
                assert "headers" not in inv_node.last_message
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_headers_announcement(headers=[tip])
            height += 1
            block_time += 1

        self.log.info("Part 2: success!")

        self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")

        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            self.log.debug("Part 3.{}: starting...".format(j))
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_headers_announcement(headers=new_block_hashes)

            block_time += 8

            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_inv_announcement(inv=[tip])

            block_time += 9

            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)

            # Use getblocks/getdata
            test_node.send_getblocks(locator=[fork_point])
            test_node.check_last_inv_announcement(inv=new_block_hashes)
            test_node.send_get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])

            for i in range(3):
                self.log.debug("Part 3.{}.{}: starting...".format(j, i))

                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                inv_node.check_last_inv_announcement(inv=[tip])
                test_node.check_last_inv_announcement(inv=[tip])
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                    if j == 0:
                        test_node.send_get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            inv_node.check_last_inv_announcement(inv=[tip])
            test_node.check_last_headers_announcement(headers=[tip])

        self.log.info("Part 3: success!")

        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1

        # Create 2 blocks.  Send the blocks, then send the headers.
        blocks = []
        for b in range(2):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(msg_block(blocks[-1]))

        inv_node.sync_with_ping()  # Make sure blocks are processed
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # This time, direct fetch should work
        blocks = []
        for b in range(3):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)

        [test_node.send_message(msg_block(x)) for x in blocks]

        test_node.sync_with_ping()

        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 2
        blocks = []

        # Create extra blocks for later
        for b in range(20):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)

        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)

        # Announcing 1 more header should not trigger any response
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        self.log.info("Part 4: success!")

        # Now deliver all those blocks we announced.
        [test_node.send_message(msg_block(x)) for x in blocks]

        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            self.log.debug("Part 5.{}: starting...".format(i))
            test_node.last_message.pop("getdata", None)
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders()
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            [test_node.send_message(msg_block(x)) for x in blocks]
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)

        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10  # matches MAX_UNCONNECTING_HEADERS in net_processing
        for j in range(MAX_UNCONNECTING_HEADERS + 1):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders()

        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])

        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]

        # Now try to see how many unconnecting headers we can send
        # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
            test_node.wait_for_getheaders()

        # Eventually this stops working.
        test_node.send_header_for_blocks([blocks[-1]])

        # Should get disconnected
        test_node.wait_for_disconnect()

        self.log.info("Part 5: success!")

        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert "getdata" not in inv_node.last_message

if __name__ == '__main__':
    SendHeadersTest().main()