#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
""" Test behavior of headers messages to announce blocks.
2014-11-18 22:16:32 +01:00
2017-11-16 15:53:35 +01:00
Setup :
2014-11-18 22:16:32 +01:00
2017-11-17 23:15:28 +01:00
- Two nodes :
- node0 is the node - under - test . We create two p2p connections to it . The
first p2p connection is a control and should only ever receive inv ' s. The
second p2p connection tests the headers sending logic .
- node1 is used to create reorgs .
2014-11-18 22:16:32 +01:00
2017-11-01 23:57:26 +01:00
test_null_locators
== == == == == == == == ==
Sends two getheaders requests with null locator values . First request ' s hashstop
value refers to validated block , while second request ' s hashstop value refers to
a block which hasn ' t been validated. Verifies only the first request returns
headers .
test_nonnull_locators
== == == == == == == == == == =
2014-11-18 22:16:32 +01:00
Part 1 : No headers announcements before " sendheaders "
a . node mines a block [ expect : inv ]
send getdata for the block [ expect : block ]
b . node mines another block [ expect : inv ]
send getheaders and getdata [ expect : headers , then block ]
c . node mines another block [ expect : inv ]
peer mines a block , announces with header [ expect : getdata ]
d . node mines another block [ expect : inv ]
Part 2 : After " sendheaders " , headers announcements should generally work .
a . peer sends sendheaders [ expect : no response ]
peer sends getheaders with current tip [ expect : no response ]
b . node mines a block [ expect : tip header ]
c . for N in 1 , . . . , 10 :
* for announce - type in { inv , header }
- peer mines N blocks , announces with announce - type
[ expect : getheaders / getdata or getdata , deliver block ( s ) ]
- node mines a block [ expect : 1 header ]
Part 3 : Headers announcements stop after large reorg and resume after getheaders or inv from peer .
- For response - type in { inv , getheaders }
* node mines a 7 block reorg [ expect : headers announcement of 8 blocks ]
* node mines an 8 - block reorg [ expect : inv at tip ]
* peer responds with getblocks / getdata [ expect : inv , blocks ]
* node mines another block [ expect : inv at tip , peer sends getdata , expect : block ]
* node mines another block at tip [ expect : inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [ expect : headers ]
* peer requests block [ expect : block ]
* node mines another block at tip [ expect : inv , peer sends getdata , expect : block ]
* peer sends response - type [ expect headers if getheaders , getheaders / getdata if mining new block ]
* node mines 1 block [ expect : 1 header , peer responds with getdata ]
Part 4 : Test direct fetch behavior
a . Announce 2 old block headers .
Expect : no getdata requests .
b . Announce 3 new blocks via 1 headers message .
Expect : one getdata request for all 3 blocks .
( Send blocks . )
c . Announce 1 header that forks off the last two blocks .
Expect : no response .
d . Announce 1 more header that builds on that fork .
Expect : one getdata request for two blocks .
e . Announce 16 more headers that build on that fork .
Expect : getdata request for 14 more blocks .
f . Announce 1 more header that builds on that fork .
Expect : no response .
2016-07-07 03:19:32 +02:00
Part 5 : Test handling of headers that don ' t connect.
a . Repeat 10 times :
1. Announce a header that doesn ' t connect.
Expect : getheaders message
2. Send headers chain .
Expect : getdata for the missing blocks , tip update .
b . Then send 9 more headers that don ' t connect.
Expect : getheaders message each time .
c . Announce a header that does connect .
Expect : no response .
d . Announce 49 headers that don ' t connect.
Expect : getheaders message each time .
e . Announce one more that doesn ' t connect.
Expect : disconnect .
2017-01-18 00:34:40 +01:00
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import (
    CBlockHeader,
    CInv,
    NODE_WITNESS,
    network_thread_start,
    P2PInterface,
    mininode_lock,
    msg_block,
    msg_getblocks,
    msg_getdata,
    msg_getheaders,
    msg_headers,
    msg_inv,
    msg_sendheaders,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    sync_blocks,
    wait_until,
)
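
# Time allowed for the node to answer a headers announcement that should
# trigger direct fetch with a getdata; direct fetch is expected to happen
# essentially immediately, so the timeout is kept short to catch regressions.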
DIRECT_FETCH_RESPONSE_TIME = 0.05

class BaseNode(P2PInterface):
    def __init__(self):
        super().__init__()

        self.block_announced = False
        self.last_blockhash_announced = None

    def send_get_data(self, block_hashes):
        """Request data for a list of block hashes."""
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))
        self.send_message(msg)

    def send_get_headers(self, locator, hashstop):
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.send_message(msg)

    def send_block_inv(self, blockhash):
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def send_getblocks(self, locator):
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)

    def wait_for_getdata(self, hash_list, timeout=60):
        if hash_list == []:
            return

        test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
        wait_until(test_function, timeout=timeout, lock=mininode_lock)

    def wait_for_block_announcement(self, block_hash, timeout=60):
        test_function = lambda: self.last_blockhash_announced == block_hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
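
    # The handlers below record the most recent block announcement (whether it
    # arrived via inv or via headers) so tests can wait on it and check how the
    # node chose to announce.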
    def on_inv(self, message):
        self.block_announced = True
        self.last_blockhash_announced = message.inv[-1].hash

    def on_headers(self, message):
        if len(message.headers):
            self.block_announced = True
            message.headers[-1].calc_sha256()
            self.last_blockhash_announced = message.headers[-1].sha256

    def clear_last_announcement(self):
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)

    def check_last_announcement(self, headers=None, inv=None):
        """Test whether the last announcement received had the right header or the right inv.

        inv and headers should be lists of block hashes."""

        test_function = lambda: self.block_announced
        wait_until(test_function, timeout=60, lock=mininode_lock)

        with mininode_lock:
            self.block_announced = False

            compare_inv = []
            if "inv" in self.last_message:
                compare_inv = [x.hash for x in self.last_message["inv"].inv]
            if inv is not None:
                assert_equal(compare_inv, inv)

            compare_headers = []
            if "headers" in self.last_message:
                compare_headers = [x.sha256 for x in self.last_message["headers"].headers]
            if headers is not None:
                assert_equal(compare_headers, headers)

            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)

class SendHeadersTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2

    def mine_blocks(self, count):
        """Mine count blocks and return the new tip."""

        # Clear out last block announcement from each p2p listener
        [x.clear_last_announcement() for x in self.nodes[0].p2ps]
        self.nodes[0].generate(count)
        return int(self.nodes[0].getbestblockhash(), 16)

    def mine_reorg(self, length):
        """Mine a reorg that invalidates length blocks (replacing them with length+1 blocks).

        Note: we clear the state of our p2p connections after the
        to-be-reorged-out blocks are mined, so that we don't break later tests.
        Return the list of block hashes newly mined."""

        self.nodes[0].generate(length)  # make sure all invalidated blocks are node0's
        sync_blocks(self.nodes, wait=0.1)
        for x in self.nodes[0].p2ps:
            x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
            x.clear_last_announcement()

        tip_height = self.nodes[1].getblockcount()
        hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
        self.nodes[1].invalidateblock(hash_to_invalidate)
        all_hashes = self.nodes[1].generate(length + 1)  # Must be longer than the orig chain
        sync_blocks(self.nodes, wait=0.1)
        return [int(x, 16) for x in all_hashes]

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        inv_node = self.nodes[0].add_p2p_connection(BaseNode())
        # Make sure NODE_NETWORK is not set for test_node, so no block download
        # will occur outside of direct fetching
        test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)

        network_thread_start()

        # Test logic begins here
        inv_node.wait_for_verack()
        test_node.wait_for_verack()

        # Ensure verack's have been processed by our peer
        inv_node.sync_with_ping()
        test_node.sync_with_ping()

        self.test_null_locators(test_node, inv_node)
        self.test_nonnull_locators(test_node, inv_node)

    def test_null_locators(self, test_node, inv_node):
        tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
        tip_hash = int(tip["hash"], 16)

        inv_node.check_last_announcement(inv=[tip_hash], headers=[])
        test_node.check_last_announcement(inv=[tip_hash], headers=[])

        self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
        test_node.clear_last_announcement()
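        # With an empty locator, the node is asked for the header of the hashstop
        # block itself; it should respond only if that block has been validated
        # (it has, since it is the current tip).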
        test_node.send_get_headers(locator=[], hashstop=tip_hash)
        test_node.check_last_announcement(headers=[tip_hash])

        self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
        block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
        block.solve()
        test_node.send_header_for_blocks([block])
        test_node.clear_last_announcement()
        test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
        test_node.sync_with_ping()
        assert_equal(test_node.block_announced, False)
        inv_node.clear_last_announcement()
        test_node.send_message(msg_block(block))
        inv_node.check_last_announcement(inv=[int(block.hash, 16)], headers=[])

    def test_nonnull_locators(self, test_node, inv_node):
        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
        # 1. Mine a block; expect inv announcements each time
        self.log.info("Part 1: headers don't start before sendheaders message...")
        for i in range(4):
            old_tip = tip
            tip = self.mine_blocks(1)
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(inv=[tip], headers=[])
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
            elif i == 1:
                # next try requesting header and block
                test_node.send_get_headers(locator=[old_tip], hashstop=tip)
                test_node.send_get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_last_announcement()  # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height + 1), block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256])
                test_node.send_message(msg_block(new_block))
                test_node.sync_with_ping()  # make sure this block is processed
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()

        self.log.info("Part 1: success!")
        self.log.info("Part 2: announce blocks with headers after sendheaders message...")
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
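        # BIP 130: once the node has processed "sendheaders", it should announce
        # new blocks to this peer with a headers message rather than an inv,
        # provided the announced headers connect to blocks the peer is known to have.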
        test_node.send_message(msg_sendheaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.send_get_headers(locator=[prev_tip], hashstop=0)
        test_node.sync_with_ping()

        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        inv_node.check_last_announcement(inv=[tip], headers=[])
        test_node.check_last_announcement(headers=[tip])

        height = self.nodes[0].getblockcount() + 1
        block_time += 10  # Advance far enough ahead
        for i in range(10):
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in range(2):
                blocks = []
                for b in range(i + 1):
                    blocks.append(create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getheaders()
                    # Should have received a getheaders now
                    test_node.send_header_for_blocks(blocks)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    [inv_node.send_block_inv(x.sha256) for x in blocks]
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks])
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [test_node.send_message(msg_block(x)) for x in blocks]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert "inv" not in inv_node.last_message
                assert "headers" not in inv_node.last_message
                tip = self.mine_blocks(1)
                inv_node.check_last_announcement(inv=[tip], headers=[])
                test_node.check_last_announcement(headers=[tip])
                height += 1
                block_time += 1

        self.log.info("Part 2: success!")

        self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
        # PART 3.  Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in range(2):
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7)
            tip = new_block_hashes[-1]
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(headers=new_block_hashes)

            block_time += 8
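
            # The node only announces a reorg with headers if the number of new
            # blocks stays within its announcement limit (MAX_BLOCKS_TO_ANNOUNCE,
            # 8 at the time of writing): the 7-block reorg above adds 8 blocks and
            # is announced with headers, while the 8-block reorg below adds 9 and
            # should fall back to a single inv of the new tip.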
            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8)
            tip = new_block_hashes[-1]
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(inv=[tip], headers=[])

            block_time += 9

            fork_point = self.nodes[0].getblock("%064x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)

            # Use getblocks/getdata
            test_node.send_getblocks(locator=[fork_point])
            test_node.check_last_announcement(inv=new_block_hashes, headers=[])
            test_node.send_get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])

            for i in range(3):
                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                inv_node.check_last_announcement(inv=[tip], headers=[])
                test_node.check_last_announcement(inv=[tip], headers=[])
                if i == 0:
                    self.log.debug("Just get the data -- shouldn't cause headers announcements to resume")
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    self.log.debug("Send a getheaders message that shouldn't trigger headers announcements to resume (best header sent will be too old)")
                    test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 2:
                    test_node.send_get_data([tip])
                    test_node.wait_for_block(tip)
                    self.log.debug("This time, try sending either a getheaders to trigger resumption of headers announcements, or mine a new block and inv it, also triggering resumption of headers announcements.")
                    if j == 0:
                        test_node.send_get_headers(locator=[tip], hashstop=0)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            inv_node.check_last_announcement(inv=[tip], headers=[])
            test_node.check_last_announcement(headers=[tip])

        self.log.info("Part 3: success!")

        self.log.info("Part 4: Testing direct fetch behavior...")
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1
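
        # Part 4a: inv_node delivers these two blocks before test_node announces
        # their headers, so the node already has them and should not respond to
        # the (old) headers with getdata.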
        # Create 2 blocks.  Send the blocks, then send the headers.
        blocks = []
        for b in range(2):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(msg_block(blocks[-1]))

        inv_node.sync_with_ping()  # Make sure blocks are processed
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # This time, direct fetch should work
        blocks = []
        for b in range(3):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)

        [test_node.send_message(msg_block(x)) for x in blocks]

        test_node.sync_with_ping()

        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 1
        blocks = []

        # Create extra blocks for later
        for b in range(20):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
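
        # Only 14 of the next 16 announced blocks should be fetched directly:
        # the two fork blocks above are still in flight, and the node limits how
        # many blocks it will have outstanding from a single peer
        # (MAX_BLOCKS_IN_TRANSIT_PER_PEER, 16 at the time of writing).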
        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)

        # Announcing 1 more header should not trigger any response
        test_node.last_message.pop("getdata", None)
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert "getdata" not in test_node.last_message

        self.log.info("Part 4: success!")

        # Now deliver all those blocks we announced.
        [test_node.send_message(msg_block(x)) for x in blocks]

        self.log.info("Part 5: Testing handling of unconnecting headers")
        # First we test that receipt of an unconnecting header doesn't prevent
        # chain sync.
        for i in range(10):
            test_node.last_message.pop("getdata", None)
            blocks = []
            # Create two more blocks.
            for j in range(2):
                blocks.append(create_block(tip, create_coinbase(height), block_time))
                blocks[-1].solve()
                tip = blocks[-1].sha256
                block_time += 1
                height += 1
            # Send the header of the second block -> this won't connect.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[1]])
            test_node.wait_for_getheaders()
            test_node.send_header_for_blocks(blocks)
            test_node.wait_for_getdata([x.sha256 for x in blocks])
            [test_node.send_message(msg_block(x)) for x in blocks]
            test_node.sync_with_ping()
            assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)

        blocks = []
        # Now we test that if we repeatedly don't send connecting headers, we
        # don't go into an infinite loop trying to get them to connect.
        MAX_UNCONNECTING_HEADERS = 10
        for j in range(MAX_UNCONNECTING_HEADERS + 1):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        for i in range(1, MAX_UNCONNECTING_HEADERS):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i]])
            test_node.wait_for_getheaders()

        # Next header will connect, should re-set our count:
        test_node.send_header_for_blocks([blocks[0]])

        # Remove the first two entries (blocks[1] would connect):
        blocks = blocks[2:]
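
        # Every MAX_UNCONNECTING_HEADERS unconnecting headers messages bumps the
        # peer's misbehavior score; after roughly five such batches the node
        # disconnects us, so 5*MAX_UNCONNECTING_HEADERS - 1 messages should still
        # be tolerated and the one after that should trigger the disconnect.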
        # Now try to see how many unconnecting headers we can send
        # before we get disconnected.  Should be 5*MAX_UNCONNECTING_HEADERS
        for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
            # Send a header that doesn't connect, check that we get a getheaders.
            with mininode_lock:
                test_node.last_message.pop("getheaders", None)
            test_node.send_header_for_blocks([blocks[i % len(blocks)]])
            test_node.wait_for_getheaders()

        # Eventually this stops working.
        test_node.send_header_for_blocks([blocks[-1]])

        # Should get disconnected
        test_node.wait_for_disconnect()

        self.log.info("Part 5: success!")

        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert "getdata" not in inv_node.last_message


if __name__ == '__main__':
    SendHeadersTest().main()