commit 5d5ef3a4cf

Merge pull request #7129

49fb8e8 Documentation updates for BIP 130 (Pieter Wuille)
50262d8 Allow block announcements with headers (Suhas Daftuar)

10 changed files with 791 additions and 14 deletions
doc/bips.md

@@ -18,3 +18,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.12.0**):
* [`BIP 66`](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki): The strict DER rules and associated version 3 blocks have been implemented since **v0.10.0** ([PR #5713](https://github.com/bitcoin/bitcoin/pull/5713)).
* [`BIP 70`](https://github.com/bitcoin/bips/blob/master/bip-0070.mediawiki) [`71`](https://github.com/bitcoin/bips/blob/master/bip-0071.mediawiki) [`72`](https://github.com/bitcoin/bips/blob/master/bip-0072.mediawiki): Payment Protocol support has been available in Bitcoin Core GUI since **v0.9.0** ([PR #5216](https://github.com/bitcoin/bitcoin/pull/5216)).
* [`BIP 111`](https://github.com/bitcoin/bips/blob/master/bip-0111.mediawiki): `NODE_BLOOM` service bit added, but only enforced for peer versions `>=70011` as of **v0.12.0** ([PR #6579](https://github.com/bitcoin/bitcoin/pull/6579)).
* [`BIP 130`](https://github.com/bitcoin/bips/blob/master/bip-0130.mediawiki): direct headers announcement is negotiated with peer versions `>=70012` as of **v0.12.0** ([PR #6494](https://github.com/bitcoin/bitcoin/pull/6494)).
doc/release-notes.md

@@ -206,6 +206,15 @@ Libsecp256k1 has undergone very extensive testing and validation.

A side effect of this change is that libconsensus no longer depends on OpenSSL.

Direct headers announcement (BIP 130)
-------------------------------------

Between compatible peers, BIP 130 direct headers announcement is used. This
means that blocks are advertised by announcing their headers directly, instead
of just announcing the hash. In a reorganization, all new headers are sent,
instead of just the new tip. This can often prevent an extra roundtrip before
the actual block is downloaded.

0.12.0 Change log
=================
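The release-notes paragraph above summarizes the protocol change. As an illustrative aside (not part of this commit), the following minimal sketch shows how a peer built on the test_framework/mininode.py classes touched by this PR could opt in to headers announcements and fetch the announced blocks. The class name HeadersPeer, the rpc=None argument, the regtest port 18444, and the overall wiring are assumptions made for the example; error handling is omitted.

#!/usr/bin/env python2
# Illustrative sketch only, not part of this commit.
from test_framework.mininode import (NodeConnCB, NodeConn, NetworkThread,
                                     msg_sendheaders, msg_getdata, CInv)


class HeadersPeer(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.create_callback_map()   # route incoming messages to on_* handlers
        self.connection = None

    def add_connection(self, conn):
        self.connection = conn

    def on_verack(self, conn, message):
        self.verack_received = True
        # Handshake done: tell the node we prefer BIP 130 headers announcements.
        conn.send_message(msg_sendheaders())

    def on_headers(self, conn, message):
        # A compatible node now announces new blocks with a "headers" message
        # (up to 8 headers after a reorg). Fetch the announced blocks directly.
        getdata = msg_getdata()
        for header in message.headers:
            header.calc_sha256()
            getdata.inv.append(CInv(2, header.sha256))  # 2 = MSG_BLOCK
        if getdata.inv:
            conn.send_message(getdata)


peer = HeadersPeer()
# 18444 is the default regtest p2p port; adjust to the node under test.
conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=peer)
peer.add_connection(conn)
NetworkThread().start()

A peer that never sends "sendheaders" keeps receiving plain inv announcements; that is exactly the control connection (InvNode) used by qa/rpc-tests/sendheaders.py below.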
qa/pull-tester/rpc-tests.py

@@ -91,6 +91,7 @@ testScripts = [
    'p2p-fullblocktest.py',
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py',
]
testScriptsExt = [
    'bip65-cltv.py',
qa/rpc-tests/sendheaders.py (new executable file, 519 additions)

@@ -0,0 +1,519 @@
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#

from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase

'''
SendHeadersTest -- test behavior of headers messages to announce blocks.

Setup:

- Two nodes, two p2p connections to node0. One p2p connection should only ever
  receive inv's (omitted from testing description below, this is our control).
  Second node is used for creating reorgs.

Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
   send getdata for the block [expect: block]
b. node mines another block [expect: inv]
   send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
   peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]

Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
   peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
   * for announce-type in {inv, header}
     - peer mines N blocks, announces with announce-type
       [ expect: getheaders/getdata or getdata, deliver block(s) ]
     - node mines a block [ expect: 1 header ]

Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
  * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
  * node mines an 8-block reorg [ expect: inv at tip ]
  * peer responds with getblocks/getdata [expect: inv, blocks ]
  * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
  * node mines another block at tip [ expect: inv ]
  * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
  * peer requests block [ expect: block ]
  * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
  * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
  * node mines 1 block [expect: 1 header, peer responds with getdata]

Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
   Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
   Expect: one getdata request for all 3 blocks.
   (Send blocks.)
c. Announce 1 header that forks off the last two blocks.
   Expect: no response.
d. Announce 1 more header that builds on that fork.
   Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
   Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
   Expect: no response.
'''

class BaseNode(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.create_callback_map()
        self.connection = None
        self.last_inv = None
        self.last_headers = None
        self.last_block = None
        self.ping_counter = 1
        self.last_pong = msg_pong(0)
        self.last_getdata = None
        self.sleep_time = 0.05
        self.block_announced = False

    def clear_last_announcement(self):
        with mininode_lock:
            self.block_announced = False
            self.last_inv = None
            self.last_headers = None

    def add_connection(self, conn):
        self.connection = conn

    # Request data for a list of block hashes
    def get_data(self, block_hashes):
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))
        self.connection.send_message(msg)

    def get_headers(self, locator, hashstop):
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_block_inv(self, blockhash):
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.connection.send_message(msg)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_inv(self, conn, message):
        self.last_inv = message
        self.block_announced = True

    def on_headers(self, conn, message):
        self.last_headers = message
        self.block_announced = True

    def on_block(self, conn, message):
        self.last_block = message.block
        self.last_block.calc_sha256()

    def on_getdata(self, conn, message):
        self.last_getdata = message

    def on_pong(self, conn, message):
        self.last_pong = message

    # Test whether the last announcement we received had the
    # right header or the right inv
    # inv and headers should be lists of block hashes
    def check_last_announcement(self, headers=None, inv=None):
        expect_headers = headers if headers != None else []
        expect_inv = inv if inv != None else []
        test_function = lambda: self.block_announced
        self.sync(test_function)
        with mininode_lock:
            self.block_announced = False

            success = True
            compare_inv = []
            if self.last_inv != None:
                compare_inv = [x.hash for x in self.last_inv.inv]
            if compare_inv != expect_inv:
                success = False

            hash_headers = []
            if self.last_headers != None:
                # treat headers as a list of block hashes
                hash_headers = [ x.sha256 for x in self.last_headers.headers ]
            if hash_headers != expect_headers:
                success = False

            self.last_inv = None
            self.last_headers = None
        return success

    # Syncing helpers
    def sync(self, test_function, timeout=60):
        while timeout > 0:
            with mininode_lock:
                if test_function():
                    return
            time.sleep(self.sleep_time)
            timeout -= self.sleep_time
        raise AssertionError("Sync failed to complete")

    def sync_with_ping(self, timeout=60):
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_pong.nonce == self.ping_counter
        self.sync(test_function, timeout)
        self.ping_counter += 1
        return

    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
        self.sync(test_function, timeout)
        return

    def wait_for_getdata(self, hash_list, timeout=60):
        if hash_list == []:
            return

        test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
        self.sync(test_function, timeout)
        return

    def send_header_for_blocks(self, new_blocks):
        headers_message = msg_headers()
        headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
        self.send_message(headers_message)

    def send_getblocks(self, locator):
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)

# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
    def __init__(self):
        BaseNode.__init__(self)

# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
    def __init__(self):
        BaseNode.__init__(self)

class SendHeadersTest(BitcoinTestFramework):
    def setup_chain(self):
        initialize_chain_clean(self.options.tmpdir, 2)

    def setup_network(self):
        self.nodes = []
        self.nodes = start_nodes(2, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
        connect_nodes(self.nodes[0], 1)

    # mine count blocks and return the new tip
    def mine_blocks(self, count):
        self.nodes[0].generate(count)
        return int(self.nodes[0].getbestblockhash(), 16)

    # mine a reorg that invalidates length blocks (replacing them with
    # length+1 blocks).
    # peers is the p2p nodes we're using; we clear their state after the
    # to-be-reorged-out blocks are mined, so that we don't break later tests.
    # return the list of block hashes newly mined
    def mine_reorg(self, length, peers):
        self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
        sync_blocks(self.nodes, wait=0.1)
        [x.clear_last_announcement() for x in peers]

        tip_height = self.nodes[1].getblockcount()
        hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
        self.nodes[1].invalidateblock(hash_to_invalidate)
        all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
        sync_blocks(self.nodes, wait=0.1)
        return [int(x, 16) for x in all_hashes]

    def run_test(self):
        # Setup the p2p connections and start up the network thread.
        inv_node = InvNode()
        test_node = TestNode()

        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
        # Set nServices to 0 for test_node, so no block download will occur outside of
        # direct fetching
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
        inv_node.add_connection(connections[0])
        test_node.add_connection(connections[1])

        NetworkThread().start() # Start up network handling in another thread

        # Test logic begins here
        inv_node.wait_for_verack()
        test_node.wait_for_verack()

        tip = int(self.nodes[0].getbestblockhash(), 16)

        # PART 1
        # 1. Mine a block; expect inv announcements each time
        print "Part 1: headers don't start before sendheaders message..."
        for i in xrange(4):
            old_tip = tip
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)
            # Try a few different responses; none should affect next announcement
            if i == 0:
                # first request the block
                test_node.get_data([tip])
                test_node.wait_for_block(tip, timeout=5)
            elif i == 1:
                # next try requesting header and block
                test_node.get_headers(locator=[old_tip], hashstop=tip)
                test_node.get_data([tip])
                test_node.wait_for_block(tip)
                test_node.clear_last_announcement() # since we requested headers...
            elif i == 2:
                # this time announce own block via headers
                height = self.nodes[0].getblockcount()
                last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
                block_time = last_time + 1
                new_block = create_block(tip, create_coinbase(height+1), block_time)
                new_block.solve()
                test_node.send_header_for_blocks([new_block])
                test_node.wait_for_getdata([new_block.sha256], timeout=5)
                test_node.send_message(msg_block(new_block))
                test_node.sync_with_ping() # make sure this block is processed
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()

        print "Part 1: success!"
        print "Part 2: announce blocks with headers after sendheaders message..."
        # PART 2
        # 2. Send a sendheaders message and test that headers announcements
        # commence and keep working.
        test_node.send_message(msg_sendheaders())
        prev_tip = int(self.nodes[0].getbestblockhash(), 16)
        test_node.get_headers(locator=[prev_tip], hashstop=0L)
        test_node.sync_with_ping()
        test_node.clear_last_announcement() # Clear out empty headers response

        # Now that we've synced headers, headers announcements should work
        tip = self.mine_blocks(1)
        assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
        assert_equal(test_node.check_last_announcement(headers=[tip]), True)

        height = self.nodes[0].getblockcount()+1
        block_time += 10 # Advance far enough ahead
        for i in xrange(10):
            # Mine i blocks, and alternate announcing either via
            # inv (of tip) or via headers. After each, new blocks
            # mined by the node should successfully be announced
            # with block header, even though the blocks are never requested
            for j in xrange(2):
                blocks = []
                for b in xrange(i+1):
                    blocks.append(create_block(tip, create_coinbase(height), block_time))
                    blocks[-1].solve()
                    tip = blocks[-1].sha256
                    block_time += 1
                    height += 1
                if j == 0:
                    # Announce via inv
                    test_node.send_block_inv(tip)
                    test_node.wait_for_getdata([tip], timeout=5)
                    # Test that duplicate inv's won't result in duplicate
                    # getdata requests, or duplicate headers announcements
                    inv_node.send_block_inv(tip)
                    # Should have received a getheaders as well!
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
                    [ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
                    inv_node.sync_with_ping()
                else:
                    # Announce via headers
                    test_node.send_header_for_blocks(blocks)
                    test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
                    # Test that duplicate headers won't result in duplicate
                    # getdata requests (the check is further down)
                    inv_node.send_header_for_blocks(blocks)
                    inv_node.sync_with_ping()
                [ test_node.send_message(msg_block(x)) for x in blocks ]
                test_node.sync_with_ping()
                inv_node.sync_with_ping()
                # This block should not be announced to the inv node (since it also
                # broadcast it)
                assert_equal(inv_node.last_inv, None)
                assert_equal(inv_node.last_headers, None)
                inv_node.clear_last_announcement()
                test_node.clear_last_announcement()
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(headers=[tip]), True)
                height += 1
                block_time += 1

        print "Part 2: success!"

        print "Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer..."

        # PART 3. Headers announcements can stop after large reorg, and resume after
        # getheaders or inv from peer.
        for j in xrange(2):
            # First try mining a reorg that can propagate with header announcement
            new_block_hashes = self.mine_reorg(length=7, peers=[test_node, inv_node])
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)

            block_time += 8

            # Mine a too-large reorg, which should be announced with a single inv
            new_block_hashes = self.mine_reorg(length=8, peers=[test_node, inv_node])
            tip = new_block_hashes[-1]
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(inv=[tip]), True)

            block_time += 9

            fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
            fork_point = int(fork_point, 16)

            # Use getblocks/getdata
            test_node.send_getblocks(locator = [fork_point])
            assert_equal(test_node.check_last_announcement(inv=new_block_hashes[0:-1]), True)
            test_node.get_data(new_block_hashes)
            test_node.wait_for_block(new_block_hashes[-1])

            for i in xrange(3):
                # Mine another block, still should get only an inv
                tip = self.mine_blocks(1)
                assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
                assert_equal(test_node.check_last_announcement(inv=[tip]), True)
                if i == 0:
                    # Just get the data -- shouldn't cause headers announcements to resume
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                elif i == 1:
                    # Send a getheaders message that shouldn't trigger headers announcements
                    # to resume (best header sent will be too old)
                    test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                    test_node.clear_last_announcement()
                elif i == 2:
                    test_node.get_data([tip])
                    test_node.wait_for_block(tip)
                    # This time, try sending either a getheaders to trigger resumption
                    # of headers announcements, or mine a new block and inv it, also
                    # triggering resumption of headers announcements.
                    if j == 0:
                        test_node.get_headers(locator=[tip], hashstop=0L)
                        test_node.sync_with_ping()
                    else:
                        test_node.send_block_inv(tip)
                        test_node.sync_with_ping()
            # New blocks should now be announced with header
            tip = self.mine_blocks(1)
            assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
            assert_equal(test_node.check_last_announcement(headers=[tip]), True)

        print "Part 3: success!"

        print "Part 4: Testing direct fetch behavior..."
        tip = self.mine_blocks(1)
        height = self.nodes[0].getblockcount() + 1
        last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
        block_time = last_time + 1

        # Create 2 blocks. Send the blocks, then send the headers.
        blocks = []
        for b in xrange(2):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1
            inv_node.send_message(msg_block(blocks[-1]))

        inv_node.sync_with_ping() # Make sure blocks are processed
        test_node.last_getdata = None
        test_node.send_header_for_blocks(blocks);
        test_node.sync_with_ping()
        # should not have received any getdata messages
        with mininode_lock:
            assert_equal(test_node.last_getdata, None)

        # This time, direct fetch should work
        blocks = []
        for b in xrange(3):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        test_node.send_header_for_blocks(blocks)
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time)

        [ test_node.send_message(msg_block(x)) for x in blocks ]

        test_node.sync_with_ping()

        # Now announce a header that forks the last two blocks
        tip = blocks[0].sha256
        height -= 1
        blocks = []

        # Create extra blocks for later
        for b in xrange(20):
            blocks.append(create_block(tip, create_coinbase(height), block_time))
            blocks[-1].solve()
            tip = blocks[-1].sha256
            block_time += 1
            height += 1

        # Announcing one block on fork should not trigger direct fetch
        # (less work than tip)
        test_node.last_getdata = None
        test_node.send_header_for_blocks(blocks[0:1])
        test_node.sync_with_ping()
        with mininode_lock:
            assert_equal(test_node.last_getdata, None)

        # Announcing one more block on fork should trigger direct fetch for
        # both blocks (same work as tip)
        test_node.send_header_for_blocks(blocks[1:2])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time)

        # Announcing 16 more headers should trigger direct fetch for 14 more
        # blocks
        test_node.send_header_for_blocks(blocks[2:18])
        test_node.sync_with_ping()
        test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time)

        # Announcing 1 more header should not trigger any response
        test_node.last_getdata = None
        test_node.send_header_for_blocks(blocks[18:19])
        test_node.sync_with_ping()
        with mininode_lock:
            assert_equal(test_node.last_getdata, None)

        print "Part 4: success!"

        # Finally, check that the inv node never received a getdata request,
        # throughout the test
        assert_equal(inv_node.last_getdata, None)

if __name__ == '__main__':
    SendHeadersTest().main()
qa/rpc-tests/test_framework/mininode.py

@@ -751,8 +751,8 @@ class msg_inv(object):
class msg_getdata(object):
    command = "getdata"

    def __init__(self):
        self.inv = []
    def __init__(self, inv=None):
        self.inv = inv if inv != None else []

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

@@ -905,6 +905,20 @@ class msg_mempool(object):
    def __repr__(self):
        return "msg_mempool()"

class msg_sendheaders(object):
    command = "sendheaders"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return ""

    def __repr__(self):
        return "msg_sendheaders()"

# getheaders message has
# number of entries

@@ -990,6 +1004,17 @@ class NodeConnCB(object):
    def __init__(self):
        self.verack_received = False

    # Spin until verack message is received from the node.
    # Tests may want to use this as a signal that the test can begin.
    # This can be called from the testing thread, so it needs to acquire the
    # global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    # Derived classes should call this function once to set the message map
    # which associates the derived classes' functions to incoming messages
    def create_callback_map(self):

@@ -1084,7 +1109,7 @@ class NodeConn(asyncore.dispatcher):
        "regtest": "\xfa\xbf\xb5\xda"   # regtest
    }

    def __init__(self, dstaddr, dstport, rpc, callback, net="regtest"):
    def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
        asyncore.dispatcher.__init__(self, map=mininode_socket_map)
        self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
        self.dstaddr = dstaddr

@@ -1102,6 +1127,7 @@ class NodeConn(asyncore.dispatcher):

        # stuff version msg into sendbuf
        vt = msg_version()
        vt.nServices = services
        vt.addrTo.ip = self.dstaddr
        vt.addrTo.port = self.dstport
        vt.addrFrom.ip = "0.0.0.0"
src/chain.cpp

@@ -51,6 +51,9 @@ CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const {
}

const CBlockIndex *CChain::FindFork(const CBlockIndex *pindex) const {
    if (pindex == NULL) {
        return NULL;
    }
    if (pindex->nHeight > Height())
        pindex = pindex->GetAncestor(Height());
    while (pindex && !Contains(pindex))
src/main.cpp (221 lines changed)
@@ -247,6 +247,8 @@ struct CNodeState {
    uint256 hashLastUnknownBlock;
    //! The last full block we both have.
    CBlockIndex *pindexLastCommonBlock;
    //! The best header we have sent our peer.
    CBlockIndex *pindexBestHeaderSent;
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted;
    //! Since when we're stalling block download progress (in microseconds), or 0.

@@ -256,6 +258,8 @@ struct CNodeState {
    int nBlocksInFlightValidHeaders;
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload;
    //! Whether this peer wants invs or headers (when possible) for block announcements.
    bool fPreferHeaders;

    CNodeState() {
        fCurrentlyConnected = false;

@@ -264,11 +268,13 @@ struct CNodeState {
        pindexBestKnownBlock = NULL;
        hashLastUnknownBlock.SetNull();
        pindexLastCommonBlock = NULL;
        pindexBestHeaderSent = NULL;
        fSyncStarted = false;
        nStallingSince = 0;
        nBlocksInFlight = 0;
        nBlocksInFlightValidHeaders = 0;
        fPreferredDownload = false;
        fPreferHeaders = false;
    }
};

@@ -398,6 +404,22 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
    }
}

// Requires cs_main
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
    return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}

// Requires cs_main
bool PeerHasHeader(CNodeState *state, CBlockIndex *pindex)
{
    if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
        return true;
    if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
        return true;
    return false;
}

/** Find the last common ancestor two blocks have.
 * Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {

@@ -2557,16 +2579,17 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c
 * or an activated best chain. pblock is either NULL or a pointer to a block
 * that is already loaded (to avoid loading it again from disk).
 */
bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams, const CBlock* pblock)
{
    CBlockIndex *pindexNewTip = NULL;
bool ActivateBestChain(CValidationState &state, const CChainParams& chainparams, const CBlock *pblock) {
    CBlockIndex *pindexMostWork = NULL;
    do {
        boost::this_thread::interruption_point();

        CBlockIndex *pindexNewTip = NULL;
        const CBlockIndex *pindexFork;
        bool fInitialDownload;
        {
            LOCK(cs_main);
            CBlockIndex *pindexOldTip = chainActive.Tip();
            pindexMostWork = FindMostWorkChain();

            // Whether we have anything to do at all.

@@ -2577,26 +2600,44 @@ bool ActivateBestChain(CValidationState& state, const CChainParams& chainparams,
                return false;

            pindexNewTip = chainActive.Tip();
            pindexFork = chainActive.FindFork(pindexOldTip);
            fInitialDownload = IsInitialBlockDownload();
        }
        // When we reach this point, we switched to a new tip (stored in pindexNewTip).

        // Notifications/callbacks that can run without cs_main
        if (!fInitialDownload) {
            uint256 hashNewTip = pindexNewTip->GetBlockHash();
            // Find the hashes of all blocks that weren't previously in the best chain.
            std::vector<uint256> vHashes;
            CBlockIndex *pindexToAnnounce = pindexNewTip;
            while (pindexToAnnounce != pindexFork) {
                vHashes.push_back(pindexToAnnounce->GetBlockHash());
                pindexToAnnounce = pindexToAnnounce->pprev;
                if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                    // Limit announcements in case of a huge reorganization.
                    // Rely on the peer's synchronization mechanism in that case.
                    break;
                }
            }
            // Relay inventory, but don't relay old inventory during initial block download.
            int nBlockEstimate = 0;
            if (fCheckpointsEnabled)
                nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints());
            {
                LOCK(cs_vNodes);
                BOOST_FOREACH(CNode* pnode, vNodes)
                    if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
                        pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip));
                BOOST_FOREACH(CNode* pnode, vNodes) {
                    if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) {
                        BOOST_REVERSE_FOREACH(const uint256& hash, vHashes) {
                            pnode->PushBlockHash(hash);
                        }
                    }
                }
            }
            // Notify external listeners about the new tip.
            if (!vHashes.empty()) {
                GetMainSignals().UpdatedBlockTip(pindexNewTip);
                uiInterface.NotifyBlockTip(hashNewTip);
                uiInterface.NotifyBlockTip(vHashes.front());
            }
        }
    } while(pindexMostWork != chainActive.Tip());
    CheckBlockIndex(chainparams.GetConsensus());

@@ -4333,6 +4374,14 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
            LOCK(cs_main);
            State(pfrom->GetId())->fCurrentlyConnected = true;
        }

        if (pfrom->nVersion >= SENDHEADERS_VERSION) {
            // Tell our peer we prefer to receive headers rather than inv's
            // We send this to non-NODE NETWORK peers as well, because even
            // non-NODE NETWORK peers can announce blocks (such as pruning
            // nodes)
            pfrom->PushMessage("sendheaders");
        }
    }


@@ -4402,6 +4451,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
        pfrom->fDisconnect = true;
    }


    else if (strCommand == "sendheaders")
    {
        LOCK(cs_main);
        State(pfrom->GetId())->fPreferHeaders = true;
    }


    else if (strCommand == "inv")
    {

@@ -4446,7 +4501,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                // not a direct successor.
                pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
                CNodeState *nodestate = State(pfrom->GetId());
                if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - chainparams.GetConsensus().nPowTargetSpacing * 20 &&
                if (CanDirectFetch(chainparams.GetConsensus()) &&
                    nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                    vToFetch.push_back(inv);
                    // Mark block as in flight already, even though the actual "getdata" message only goes out

@@ -4554,6 +4609,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
            LogPrint("net", "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->id);
            return true;
        }

        CNodeState *nodestate = State(pfrom->GetId());
        CBlockIndex* pindex = NULL;
        if (locator.IsNull())
        {

@@ -4581,6 +4638,11 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
                break;
        }
        // pindex can be NULL either if we sent chainActive.Tip() OR
        // if our peer has chainActive.Tip() (and thus we are sending an empty
        // headers message). In both cases it's safe to update
        // pindexBestHeaderSent to be our tip.
        nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
        pfrom->PushMessage("headers", vHeaders);
    }

@@ -4772,6 +4834,53 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
            pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256());
        }

        bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
        CNodeState *nodestate = State(pfrom->GetId());
        // If this set of headers is valid and ends in a block with at least as
        // much work as our tip, download as much as possible.
        if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
            vector<CBlockIndex *> vToFetch;
            CBlockIndex *pindexWalk = pindexLast;
            // Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
            while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                        !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
                    // We don't have this block, and it's not yet in flight.
                    vToFetch.push_back(pindexWalk);
                }
                pindexWalk = pindexWalk->pprev;
            }
            // If pindexWalk still isn't on our main chain, we're looking at a
            // very large reorg at a time we think we're close to caught up to
            // the main chain -- this shouldn't really happen. Bail out on the
            // direct fetch and rely on parallel download instead.
            if (!chainActive.Contains(pindexWalk)) {
                LogPrint("net", "Large reorg, won't direct fetch to %s (%d)\n",
                        pindexLast->GetBlockHash().ToString(),
                        pindexLast->nHeight);
            } else {
                vector<CInv> vGetData;
                // Download as much as possible, from earliest to latest.
                BOOST_REVERSE_FOREACH(CBlockIndex *pindex, vToFetch) {
                    if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                        // Can't download any more from this peer
                        break;
                    }
                    vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                    MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex);
                    LogPrint("net", "Requesting block %s from peer=%d\n",
                            pindex->GetBlockHash().ToString(), pfrom->id);
                }
                if (vGetData.size() > 1) {
                    LogPrint("net", "Downloading blocks toward %s (%d) via headers direct fetch\n",
                            pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
                }
                if (vGetData.size() > 0) {
                    pfrom->PushMessage("getdata", vGetData);
                }
            }
        }

        CheckBlockIndex(chainparams.GetConsensus());
    }

@@ -5297,6 +5406,100 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
            GetMainSignals().Broadcast(nTimeBestReceived);
        }

        //
        // Try sending block announcements via headers
        //
        {
            // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
            // list of block hashes we're relaying, and our peer wants
            // headers announcements, then find the first header
            // not yet known to our peer but would connect, and send.
            // If no header would connect, or if we have too many
            // blocks, or if the peer doesn't want headers, just
            // add all to the inv queue.
            LOCK(pto->cs_inventory);
            vector<CBlock> vHeaders;
            bool fRevertToInv = (!state.fPreferHeaders || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
            CBlockIndex *pBestIndex = NULL; // last header queued for delivery
            ProcessBlockAvailability(pto->id); // ensure pindexBestKnownBlock is up-to-date

            if (!fRevertToInv) {
                bool fFoundStartingHeader = false;
                // Try to find first header that our peer doesn't have, and
                // then send all headers past that one. If we come across any
                // headers that aren't on chainActive, give up.
                BOOST_FOREACH(const uint256 &hash, pto->vBlockHashesToAnnounce) {
                    BlockMap::iterator mi = mapBlockIndex.find(hash);
                    assert(mi != mapBlockIndex.end());
                    CBlockIndex *pindex = mi->second;
                    if (chainActive[pindex->nHeight] != pindex) {
                        // Bail out if we reorged away from this block
                        fRevertToInv = true;
                        break;
                    }
                    assert(pBestIndex == NULL || pindex->pprev == pBestIndex);
                    pBestIndex = pindex;
                    if (fFoundStartingHeader) {
                        // add this to the headers message
                        vHeaders.push_back(pindex->GetBlockHeader());
                    } else if (PeerHasHeader(&state, pindex)) {
                        continue; // keep looking for the first new block
                    } else if (pindex->pprev == NULL || PeerHasHeader(&state, pindex->pprev)) {
                        // Peer doesn't have this header but they do have the prior one.
                        // Start sending headers.
                        fFoundStartingHeader = true;
                        vHeaders.push_back(pindex->GetBlockHeader());
                    } else {
                        // Peer doesn't have this header or the prior one -- nothing will
                        // connect, so bail out.
                        fRevertToInv = true;
                        break;
                    }
                }
            }
            if (fRevertToInv) {
                // If falling back to using an inv, just try to inv the tip.
                // The last entry in vBlockHashesToAnnounce was our tip at some point
                // in the past.
                if (!pto->vBlockHashesToAnnounce.empty()) {
                    const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
                    BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
                    assert(mi != mapBlockIndex.end());
                    CBlockIndex *pindex = mi->second;

                    // Warn if we're announcing a block that is not on the main chain.
                    // This should be very rare and could be optimized out.
                    // Just log for now.
                    if (chainActive[pindex->nHeight] != pindex) {
                        LogPrint("net", "Announcing block %s not on main chain (tip=%s)\n",
                            hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
                    }

                    // If the peer announced this block to us, don't inv it back.
                    // (Since block announcements may not be via inv's, we can't solely rely on
                    // setInventoryKnown to track this.)
                    if (!PeerHasHeader(&state, pindex)) {
                        pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                        LogPrint("net", "%s: sending inv peer=%d hash=%s\n", __func__,
                            pto->id, hashToAnnounce.ToString());
                    }
                }
            } else if (!vHeaders.empty()) {
                if (vHeaders.size() > 1) {
                    LogPrint("net", "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                            vHeaders.size(),
                            vHeaders.front().GetHash().ToString(),
                            vHeaders.back().GetHash().ToString(), pto->id);
                } else {
                    LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
                            vHeaders.front().GetHash().ToString(), pto->id);
                }
                pto->PushMessage("headers", vHeaders);
                state.pindexBestHeaderSent = pBestIndex;
            }
            pto->vBlockHashesToAnnounce.clear();
        }

        //
        // Message: inventory
        //
src/main.h

@@ -98,6 +98,9 @@ static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100;

static const bool DEFAULT_TESTSAFEMODE = false;

/** Maximum number of headers to announce when relaying blocks with headers message.*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;

struct BlockHasher
{
    size_t operator()(const uint256& hash) const { return hash.GetCheapHash(); }
src/net.h

@@ -390,6 +390,9 @@ public:
    std::vector<CInv> vInventoryToSend;
    CCriticalSection cs_inventory;
    std::multimap<int64_t, CInv> mapAskFor;
    // Used for headers announcements - unfiltered blocks to relay
    // Also protected by cs_inventory
    std::vector<uint256> vBlockHashesToAnnounce;

    // Ping time measurement:
    // The pong reply we're expecting, or 0 if no pong expected.

@@ -504,6 +507,12 @@ public:
        }
    }

    void PushBlockHash(const uint256 &hash)
    {
        LOCK(cs_inventory);
        vBlockHashesToAnnounce.push_back(hash);
    }

    void AskFor(const CInv& inv);

    // TODO: Document the postcondition of this function. Is cs_vSend locked?
src/version.h

@@ -9,7 +9,7 @@
 * network protocol versioning
 */

static const int PROTOCOL_VERSION = 70011;
static const int PROTOCOL_VERSION = 70012;

//! initial proto version, to be increased after version/verack negotiation
static const int INIT_PROTO_VERSION = 209;

@@ -37,4 +37,7 @@ static const int MEMPOOL_GD_VERSION = 60002;
//! "filter*" commands are disabled without NODE_BLOOM after and including this version
static const int NO_BLOOM_VERSION = 70011;

//! "sendheaders" command and announcing blocks with headers starts with this version
static const int SENDHEADERS_VERSION = 70012;

#endif // BITCOIN_VERSION_H