Disconnecting from bad outbound peers in IBD

When in IBD, we'd like to use all our outbound peers to help us
sync the chain.  Disconnect any outbound peers whose headers have
insufficient work.
author Suhas Daftuar, 2017-10-11 08:55:14 -04:00 (committed by Suhas Daftuar)
parent 326a5652e0
commit c60fd71a65
2 changed files with 26 additions and 0 deletions
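
In rough terms, the rule added below can be paraphrased as follows. This is a Python sketch for illustration only; the function and attribute names are stand-ins, not the actual identifiers used in net_processing.cpp:

def should_disconnect_outbound_peer(peer, in_ibd, headers_count,
                                    max_headers_results,
                                    peer_best_known_work,
                                    minimum_chain_work):
    """Sketch of the new IBD disconnection rule (names are illustrative)."""
    # Only applies during initial block download, and only once the peer has
    # sent a short (final) headers batch, so their best chain is known.
    if not in_ibd or headers_count == max_headers_results:
        return False
    # A peer whose best known header chain meets the minimum-work threshold
    # can still help us sync, so keep it.
    if peer_best_known_work is None or peer_best_known_work >= minimum_chain_work:
        return False
    # Reclaim the slot only for ordinary outbound connections; inbound,
    # whitelisted, and manually added (-addnode) peers are left alone.
    return not (peer.is_inbound or peer.is_whitelisted or peer.is_manual)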

src/net_processing.cpp

@@ -2383,6 +2383,24 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
                }
            }
        }
        // If we're in IBD, we want outbound peers that will serve us a useful
        // chain. Disconnect peers that are on chains with insufficient work.
        if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
            // When nCount < MAX_HEADERS_RESULTS, we know we have no more
            // headers to fetch from this peer.
            if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
                // This peer has too little work on their headers chain to help
                // us sync -- disconnect if using an outbound slot (unless
                // whitelisted or addnode).
                // Note: We compare their tip to nMinimumChainWork (rather than
                // chainActive.Tip()) because we won't start block download
                // until we have a headers chain that has at least
                // nMinimumChainWork, even if a peer has a chain past our tip.
                if (!(pfrom->fInbound || pfrom->fWhitelisted || pfrom->m_manual_connection)) {
                    pfrom->fDisconnect = true;
                }
            }
        }
        }
    }

test/functional/minchainwork.py

@@ -27,6 +27,7 @@ class MinimumChainWorkTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
        self.node_min_work = [0, 101, 101]
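
For reference, the hex threshold passed via -minimumchainwork and the decimal values in node_min_work describe the same quantity; a quick check in Python:

# 0x65 hexadecimal equals 101 decimal, matching node_min_work above.
assert int("0x65", 16) == 101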
@@ -74,6 +75,13 @@ class MinimumChainWorkTest(BitcoinTestFramework):
        self.nodes[0].generate(1)
        self.log.info("Verifying nodes are all synced")
        # Because nodes in regtest are all manual connections (eg using
        # addnode), node1 should not have disconnected node0. If not for that,
        # we'd expect node1 to have disconnected node0 for serving an
        # insufficient work chain, in which case we'd need to reconnect them to
        # continue the test.
        self.sync_all()
        self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])