#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
|
2015-05-04 16:50:24 +02:00
|
|
|
|
|
|
|
Since behavior differs when receiving unrequested blocks from whitelisted peers
|
|
|
|
versus non-whitelisted peers, this tests the behavior of both (effectively two
|
|
|
|
separate tests running in parallel).
|
|
|
|
|
|
|
|
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
|
|
|
|
whitelist localhost, but node1 does. They will each be on their own chain for
|
|
|
|
this test.
|
|
|
|
|
|
|
|
We have one NodeConn connection to each, test_node and white_node respectively.
|
|
|
|
|
|
|
|
The test:
|
|
|
|
1. Generate one block on each node, to leave IBD.
|
|
|
|
|
|
|
|
2. Mine a new block on each tip, and deliver to each node from node's peer.
|
|
|
|
The tip should advance.
|
|
|
|
|
|
|
|
3. Mine a block that forks the previous block, and deliver to each node from
|
|
|
|
corresponding peer.
|
|
|
|
Node0 should not process this block (just accept the header), because it is
|
|
|
|
unrequested and doesn't have more work than the tip.
|
|
|
|
Node1 should process because this is coming from a whitelisted peer.
|
|
|
|
|
|
|
|
4. Send another block that builds on the forking block.
|
|
|
|
Node0 should process this block but be stuck on the shorter chain, because
|
|
|
|
it's missing an intermediate block.
|
|
|
|
Node1 should reorg to this longer chain.
|
|
|
|
|
2015-06-02 21:17:36 +02:00
|
|
|
4b.Send 288 more blocks on the longer chain.
|
|
|
|
Node0 should process all but the last block (too far ahead in height).
|
|
|
|
Send all headers to Node1, and then send the last block in that chain.
|
|
|
|
Node1 should accept the block because it's coming from a whitelisted peer.
|
|
|
|
|
2015-05-04 16:50:24 +02:00
|
|
|
5. Send a duplicate of the block in #3 to Node0.
|
|
|
|
Node0 should not process the block because it is unrequested, and stay on
|
|
|
|
the shorter chain.
|
|
|
|
|
|
|
|
6. Send Node0 an inv for the height 3 block produced in #4 above.
|
|
|
|
Node0 should figure out that Node0 has the missing height 2 block and send a
|
|
|
|
getdata.
|
|
|
|
|
|
|
|
7. Send Node0 the missing block again.
|
|
|
|
Node0 should process and the tip should advance.
|
2017-01-18 00:34:40 +01:00
|
|
|
"""

from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import time
from test_framework.blocktools import create_block, create_coinbase

# TestNode: bare-bones "peer".  Used mostly as a conduit for the test to send
# p2p messages to a node, with the messages themselves generated in the main
# testing logic.
class TestNode(NodeConnCB):
    def __init__(self):
        super().__init__()
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()
        self.last_getdata = None

    def add_connection(self, conn):
        self.connection = conn

    # Track the last getdata message we receive (used in the test)
    def on_getdata(self, conn, message):
        self.last_getdata = message

    # Spin until verack message is received from the node.
    # We use this to signal that our test can begin. This
    # is called from the testing thread, so it needs to acquire
    # the global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

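    # A ping/pong round trip is used as a synchronization barrier: the node
    # handles p2p messages from a given peer in order, so once our pong comes
    # back, any block we delivered earlier on this connection should already
    # have been processed.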
    # Sync up with the node after delivery of a block
    def sync_with_ping(self, timeout=30):
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        received_pong = False
        sleep_time = 0.05
        while not received_pong and timeout > 0:
            time.sleep(sleep_time)
            timeout -= sleep_time
            with mininode_lock:
                if self.last_pong.nonce == self.ping_counter:
                    received_pong = True
        self.ping_counter += 1
        return received_pong

class AcceptBlockTest(BitcoinTestFramework):
    def add_options(self, parser):
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to test")

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 2

    def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used for
        # the whitelisted case.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir,
                                     binary=self.options.testbinary))
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-whitelist=127.0.0.1"],
                                     binary=self.options.testbinary))

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()   # connects to node0 (not whitelisted)
        white_node = TestNode()  # connects to node1 (whitelisted)

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
        test_node.add_connection(connections[0])
        white_node.add_connection(connections[1])

        NetworkThread().start()  # Start up network handling in another thread

        # Test logic begins here
        test_node.wait_for_verack()
        white_node.wait_for_verack()

        # 1. Have both nodes mine a block (leave IBD)
        [ n.generate(1) for n in self.nodes ]
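        # getbestblockhash returns a hex string, while create_block() below
        # expects the previous-block hash as an integer, hence the conversion.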
        tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]

        # 2. Send one block that builds on each tip.
        # This should be accepted.
        blocks_h2 = []  # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
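        # Note: create_coinbase(2) builds a coinbase for height 2; the height
        # is encoded in the coinbase scriptSig as required by BIP34.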
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_message(msg_block(blocks_h2[0]))
        white_node.send_message(msg_block(blocks_h2[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 2)
        self.log.info("First height 2 block accepted by both nodes")

        # 3. Send another block that builds on the original tip.
        blocks_h2f = []  # Blocks at height 2 that fork off the main chain
        for i in range(2):
            blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
            blocks_h2f[i].solve()
        test_node.send_message(msg_block(blocks_h2f[0]))
        white_node.send_message(msg_block(blocks_h2f[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
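        # Expected getchaintips status differs per node: node0 only has the
        # header for its forked block ("headers-only"), while node1 has the
        # block data (delivered by the whitelisted peer) but never connected
        # it, since it has no more work than the current tip ("valid-headers").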
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h2f[0].hash:
                assert_equal(x['status'], "headers-only")

        for x in self.nodes[1].getchaintips():
            if x['hash'] == blocks_h2f[1].hash:
                assert_equal(x['status'], "valid-headers")

        self.log.info("Second height 2 block accepted only from whitelisted peer")

        # 4. Now send another block that builds on the forking chain.
        blocks_h3 = []
        for i in range(2):
            blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
            blocks_h3[i].solve()
        test_node.send_message(msg_block(blocks_h3[0]))
        white_node.send_message(msg_block(blocks_h3[1]))

        [ x.sync_with_ping() for x in [test_node, white_node] ]
        # Since the earlier block was not processed by node0, the new block
        # can't be fully validated.
        for x in self.nodes[0].getchaintips():
            if x['hash'] == blocks_h3[0].hash:
                assert_equal(x['status'], "headers-only")

        # But this block should be accepted by node0 since it has more work.
        self.nodes[0].getblock(blocks_h3[0].hash)
        self.log.info("Unrequested more-work block accepted from non-whitelisted peer")

        # Node1 should have accepted and reorged.
        assert_equal(self.nodes[1].getblockcount(), 3)
        self.log.info("Successfully reorged to length 3 chain from whitelisted peer")

        # 4b. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node0. Node1 should process the tip if
        # we give it the headers chain leading to the tip.
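        # The 288-block figure presumably matches MIN_BLOCKS_TO_KEEP in the
        # node's AcceptBlock() logic: an unrequested block more than that many
        # blocks ahead of the active tip is treated as "too far ahead" and not
        # stored, unless it comes from a whitelisted peer.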
        tips = blocks_h3
        headers_message = msg_headers()
        all_blocks = []  # node0's blocks
        for j in range(2):
            for i in range(288):
                next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
                next_block.solve()
                if j == 0:
                    test_node.send_message(msg_block(next_block))
                    all_blocks.append(next_block)
                else:
                    headers_message.headers.append(CBlockHeader(next_block))
                tips[j] = next_block

        time.sleep(2)
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
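        # getblock succeeding implies the block data was stored; the
        # "Block not found on disk" error for the last block implies it was
        # never stored, even though its header may be known.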
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_jsonrpc(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)

        headers_message.headers.pop()  # Ensure the last block is unrequested
        white_node.send_message(headers_message)  # Send headers leading to tip
        white_node.send_message(msg_block(tips[1]))  # Now deliver the tip
        white_node.sync_with_ping()
        self.nodes[1].getblock(tips[1].hash)
        self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")

        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        test_node.send_message(msg_block(blocks_h2f[0]))

        # Here, if the node hasn't processed the block by the time the sync
        # below returns, the test could falsely succeed (the node could process
        # the block afterwards and incorrectly advance the tip).
        # But this would be caught later on, when we verify that an inv triggers
        # a getdata request for this block.
        test_node.sync_with_ping()
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")

        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with mininode_lock:
            # Clear state so we can check the getdata request
            test_node.last_getdata = None
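            # Inv type 2 is MSG_BLOCK, i.e. we announce the height 3 block by
            # hash as if we were relaying it.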
            test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))

        test_node.sync_with_ping()
        with mininode_lock:
            getdata = test_node.last_getdata

        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")

        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_message(msg_block(blocks_h2f[0]))

        test_node.sync_with_ping()
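        # Height 290 = the height 3 fork tip plus the 287 blocks from step 4b
        # that node0 stored (the 288th was never stored).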
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.log.info("Successfully reorged to longer chain from non-whitelisted peer")

        [ c.disconnect_node() for c in connections ]

if __name__ == '__main__':
    AcceptBlockTest().main()