Headers-first synchronization
Many changes:

* Do not use 'getblocks', but 'getheaders', and use it to build a headers tree.
* Blocks are fetched in parallel from all available outbound peers, using a limited moving window. When one peer stalls the movement of the window, it is disconnected.
* No more orphan blocks. At all. We only ever request a block for which we have verified the headers, and store it to disk immediately. This means that a disk-fill attack would require PoW.
* Require protocol version 31800 for every peer (released in December 2010).
* No more syncnode (we sync from everyone we can, though limited to 1 during initial *headers* sync).
* Introduce some extra named constants, comments and asserts.
parent 992ab87114
commit 341735eb8f

9 changed files with 375 additions and 370 deletions
src/chain.h (32 changes)

@ -49,12 +49,29 @@ struct CDiskBlockPos
};

enum BlockStatus {
    // Unused.
    BLOCK_VALID_UNKNOWN      =    0,

    BLOCK_VALID_HEADER       =    1, // parsed, version ok, hash satisfies claimed PoW, 1 <= vtx count <= max, timestamp not in future
    BLOCK_VALID_TREE         =    2, // parent found, difficulty matches, timestamp >= median previous, checkpoint
    BLOCK_VALID_TRANSACTIONS =    3, // only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids, sigops, size, merkle root
    BLOCK_VALID_CHAIN        =    4, // outputs do not overspend inputs, no double spends, coinbase output ok, immature coinbase spends, BIP30
    BLOCK_VALID_SCRIPTS      =    5, // scripts/signatures ok

    // Parsed, version ok, hash satisfies claimed PoW, 1 <= vtx count <= max, timestamp not in future
    BLOCK_VALID_HEADER       =    1,

    // All parent headers found, difficulty matches, timestamp >= median previous, checkpoint. Implies all parents
    // are also at least TREE.
    BLOCK_VALID_TREE         =    2,

    // Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids,
    // sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all
    // parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx will be set.
    BLOCK_VALID_TRANSACTIONS =    3,

    // Outputs do not overspend inputs, no double spends, coinbase output ok, immature coinbase spends, BIP30.
    // Implies all parents are also at least CHAIN.
    BLOCK_VALID_CHAIN        =    4,

    // Scripts & signatures ok. Implies all parents are also at least SCRIPTS.
    BLOCK_VALID_SCRIPTS      =    5,

    // All validity bits.
    BLOCK_VALID_MASK         =    BLOCK_VALID_HEADER | BLOCK_VALID_TREE | BLOCK_VALID_TRANSACTIONS |
                                  BLOCK_VALID_CHAIN | BLOCK_VALID_SCRIPTS,
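The validity levels above form an ordered scale stored in the low bits of nStatus. A minimal sketch of how they are meant to be read and raised (not the actual CBlockIndex methods; the failure bits are an assumption mirroring the BLOCK_FAILED_* flags referenced elsewhere in this commit):

```cpp
// Sketch only: toy names mirror BLOCK_VALID_* / BLOCK_FAILED_* from this diff.
enum ToyStatus : unsigned int {
    TOY_VALID_HEADER = 1, TOY_VALID_TREE = 2, TOY_VALID_TRANSACTIONS = 3,
    TOY_VALID_CHAIN = 4, TOY_VALID_SCRIPTS = 5,
    TOY_VALID_MASK = 7,                           // ORing levels 1..5 yields 7: the low three bits hold the level
    TOY_FAILED_VALID = 32, TOY_FAILED_CHILD = 64, // assumed failure flags
    TOY_FAILED_MASK = TOY_FAILED_VALID | TOY_FAILED_CHILD,
};

// Valid "up to" a level: not failed, and the recorded level is at least nUpTo.
bool IsValidUpTo(unsigned int nStatus, ToyStatus nUpTo) {
    if (nStatus & TOY_FAILED_MASK)
        return false;
    return (nStatus & TOY_VALID_MASK) >= (unsigned int)nUpTo;
}

// Raise the recorded level; returns true only if it actually increased, which is how
// the RaiseValidity call sites in this diff use the return value.
bool RaiseValidityTo(unsigned int& nStatus, ToyStatus nUpTo) {
    if (nStatus & TOY_FAILED_MASK)
        return false;
    if ((nStatus & TOY_VALID_MASK) < (unsigned int)nUpTo) {
        nStatus = (nStatus & ~(unsigned int)TOY_VALID_MASK) | nUpTo;
        return true;
    }
    return false;
}
```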
@ -103,7 +120,8 @@ public:
    // Note: in a potential headers-first mode, this number cannot be relied upon
    unsigned int nTx;

    // (memory only) Number of transactions in the chain up to and including this block
    // (memory only) Number of transactions in the chain up to and including this block.
    // This value will be non-zero if and only if transactions for this block and all its parents are available.
    unsigned int nChainTx; // change to 64-bit type when necessary; won't happen before 2030

    // Verification status of this block. See enum BlockStatus
@ -146,7 +164,7 @@ public:
        SetNull();
    }

    CBlockIndex(CBlockHeader& block)
    CBlockIndex(const CBlockHeader& block)
    {
        SetNull();

src/main.cpp (590 changes)

@ -56,14 +56,6 @@ CFeeRate minRelayTxFee = CFeeRate(1000);
|
|||
|
||||
CTxMemPool mempool(::minRelayTxFee);
|
||||
|
||||
struct COrphanBlock {
|
||||
uint256 hashBlock;
|
||||
uint256 hashPrev;
|
||||
vector<unsigned char> vchBlock;
|
||||
};
|
||||
map<uint256, COrphanBlock*> mapOrphanBlocks;
|
||||
multimap<uint256, COrphanBlock*> mapOrphanBlocksByPrev;
|
||||
|
||||
struct COrphanTx {
|
||||
CTransaction tx;
|
||||
NodeId fromPeer;
|
||||
|
@ -106,6 +98,12 @@ namespace {
|
|||
// The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS or better that are at least
|
||||
// as good as our current tip. Entries may be failed, though.
|
||||
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexValid;
|
||||
// Best header we've seen so far (used for getheaders queries' starting points).
|
||||
CBlockIndex *pindexBestHeader = NULL;
|
||||
// Number of nodes with fSyncStarted.
|
||||
int nSyncStarted = 0;
|
||||
// All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
|
||||
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
|
||||
|
||||
CCriticalSection cs_LastBlockFile;
|
||||
CBlockFileInfo infoLastBlockFile;
|
||||
|
@ -125,11 +123,10 @@ namespace {
|
|||
// Protected by cs_main.
|
||||
struct QueuedBlock {
|
||||
uint256 hash;
|
||||
CBlockIndex *pindex; // Optional.
|
||||
int64_t nTime; // Time of "getdata" request in microseconds.
|
||||
int nQueuedBefore; // Number of blocks in flight at the time of request.
|
||||
};
|
||||
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
|
||||
map<uint256, pair<NodeId, list<uint256>::iterator> > mapBlocksToDownload;
|
||||
|
||||
} // anon namespace
|
||||
|
||||
|
@ -220,22 +217,24 @@ struct CNodeState {
|
|||
CBlockIndex *pindexBestKnownBlock;
|
||||
// The hash of the last unknown block this peer has announced.
|
||||
uint256 hashLastUnknownBlock;
|
||||
// The last full block we both have.
|
||||
CBlockIndex *pindexLastCommonBlock;
|
||||
// Whether we've started headers synchronization with this peer.
|
||||
bool fSyncStarted;
|
||||
// Since when we're stalling block download progress (in microseconds), or 0.
|
||||
int64_t nStallingSince;
|
||||
list<QueuedBlock> vBlocksInFlight;
|
||||
int nBlocksInFlight;
|
||||
list<uint256> vBlocksToDownload;
|
||||
int nBlocksToDownload;
|
||||
int64_t nLastBlockReceive;
|
||||
int64_t nLastBlockProcess;
|
||||
|
||||
CNodeState() {
|
||||
nMisbehavior = 0;
|
||||
fShouldBan = false;
|
||||
pindexBestKnownBlock = NULL;
|
||||
hashLastUnknownBlock = uint256(0);
|
||||
nBlocksToDownload = 0;
|
||||
pindexLastCommonBlock = NULL;
|
||||
fSyncStarted = false;
|
||||
nStallingSince = 0;
|
||||
nBlocksInFlight = 0;
|
||||
nLastBlockReceive = 0;
|
||||
nLastBlockProcess = 0;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -266,64 +265,37 @@ void FinalizeNode(NodeId nodeid) {
|
|||
LOCK(cs_main);
|
||||
CNodeState *state = State(nodeid);
|
||||
|
||||
if (state->fSyncStarted)
|
||||
nSyncStarted--;
|
||||
|
||||
BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight)
|
||||
mapBlocksInFlight.erase(entry.hash);
|
||||
BOOST_FOREACH(const uint256& hash, state->vBlocksToDownload)
|
||||
mapBlocksToDownload.erase(hash);
|
||||
EraseOrphansFor(nodeid);
|
||||
|
||||
mapNodeState.erase(nodeid);
|
||||
}
|
||||
|
||||
// Requires cs_main.
|
||||
void MarkBlockAsReceived(const uint256 &hash, NodeId nodeFrom = -1) {
|
||||
map<uint256, pair<NodeId, list<uint256>::iterator> >::iterator itToDownload = mapBlocksToDownload.find(hash);
|
||||
if (itToDownload != mapBlocksToDownload.end()) {
|
||||
CNodeState *state = State(itToDownload->second.first);
|
||||
state->vBlocksToDownload.erase(itToDownload->second.second);
|
||||
state->nBlocksToDownload--;
|
||||
mapBlocksToDownload.erase(itToDownload);
|
||||
}
|
||||
|
||||
void MarkBlockAsReceived(const uint256& hash) {
|
||||
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
|
||||
if (itInFlight != mapBlocksInFlight.end()) {
|
||||
CNodeState *state = State(itInFlight->second.first);
|
||||
state->vBlocksInFlight.erase(itInFlight->second.second);
|
||||
state->nBlocksInFlight--;
|
||||
if (itInFlight->second.first == nodeFrom)
|
||||
state->nLastBlockReceive = GetTimeMicros();
|
||||
state->nStallingSince = 0;
|
||||
mapBlocksInFlight.erase(itInFlight);
|
||||
}
|
||||
}
|
||||
|
||||
// Requires cs_main.
|
||||
bool AddBlockToQueue(NodeId nodeid, const uint256 &hash) {
|
||||
if (mapBlocksToDownload.count(hash) || mapBlocksInFlight.count(hash))
|
||||
return false;
|
||||
|
||||
CNodeState *state = State(nodeid);
|
||||
if (state == NULL)
|
||||
return false;
|
||||
|
||||
list<uint256>::iterator it = state->vBlocksToDownload.insert(state->vBlocksToDownload.end(), hash);
|
||||
state->nBlocksToDownload++;
|
||||
if (state->nBlocksToDownload > 5000)
|
||||
Misbehaving(nodeid, 10);
|
||||
mapBlocksToDownload[hash] = std::make_pair(nodeid, it);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Requires cs_main.
|
||||
void MarkBlockAsInFlight(NodeId nodeid, const uint256 &hash) {
|
||||
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex *pindex = NULL) {
|
||||
CNodeState *state = State(nodeid);
|
||||
assert(state != NULL);
|
||||
|
||||
// Make sure it's not listed somewhere already.
|
||||
MarkBlockAsReceived(hash);
|
||||
|
||||
QueuedBlock newentry = {hash, GetTimeMicros(), state->nBlocksInFlight};
|
||||
if (state->nBlocksInFlight == 0)
|
||||
state->nLastBlockReceive = newentry.nTime; // Reset when a first request is sent.
|
||||
QueuedBlock newentry = {hash, pindex, GetTimeMicros()};
|
||||
list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
|
||||
state->nBlocksInFlight++;
|
||||
mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
|
||||
|
@ -362,6 +334,103 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
|
|||
}
|
||||
}
|
||||
|
||||
/** Find the last common ancestor two blocks have.
|
||||
* Both pa and pb must be non-NULL. */
|
||||
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
|
||||
if (pa->nHeight > pb->nHeight) {
|
||||
pa = pa->GetAncestor(pb->nHeight);
|
||||
} else if (pb->nHeight > pa->nHeight) {
|
||||
pb = pb->GetAncestor(pa->nHeight);
|
||||
}
|
||||
|
||||
while (pa != pb && pa && pb) {
|
||||
pa = pa->pprev;
|
||||
pb = pb->pprev;
|
||||
}
|
||||
|
||||
// Eventually all chain branches meet at the genesis block.
|
||||
assert(pa == pb);
|
||||
return pa;
|
||||
}
|
||||
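A self-contained check of LastCommonAncestor on a toy index type; the real code walks via the skip-list-backed CBlockIndex::GetAncestor, here replaced by a plain pprev walk:

```cpp
// Toy reimplementation for illustration only; ToyIndex stands in for CBlockIndex.
#include <cassert>
#include <cstdio>

struct ToyIndex {
    ToyIndex* pprev;
    int nHeight;
};

const ToyIndex* GetAncestor(const ToyIndex* p, int height) {
    while (p && p->nHeight > height) p = p->pprev;   // linear walk instead of skip pointers
    return p;
}

const ToyIndex* LastCommonAncestor(const ToyIndex* pa, const ToyIndex* pb) {
    if (pa->nHeight > pb->nHeight)      pa = GetAncestor(pa, pb->nHeight);
    else if (pb->nHeight > pa->nHeight) pb = GetAncestor(pb, pa->nHeight);
    while (pa != pb && pa && pb) { pa = pa->pprev; pb = pb->pprev; }
    assert(pa == pb);                                // all branches meet at the genesis block
    return pa;
}

int main() {
    // genesis <- a1 <- a2 on one branch, genesis <- a1 <- b2 <- b3 on the other.
    ToyIndex genesis{nullptr, 0}, a1{&genesis, 1}, a2{&a1, 2}, b2{&a1, 2}, b3{&b2, 3};
    printf("fork point height = %d\n", LastCommonAncestor(&a2, &b3)->nHeight);   // prints 1
}
```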
|
||||
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
|
||||
* at most count entries. */
|
||||
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
vBlocks.reserve(vBlocks.size() + count);
|
||||
CNodeState *state = State(nodeid);
|
||||
assert(state != NULL);
|
||||
|
||||
// Make sure pindexBestKnownBlock is up to date, we'll need it.
|
||||
ProcessBlockAvailability(nodeid);
|
||||
|
||||
if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
|
||||
// This peer has nothing interesting.
|
||||
return;
|
||||
}
|
||||
|
||||
if (state->pindexLastCommonBlock == NULL) {
|
||||
// Bootstrap quickly by guessing a parent of our best tip is the forking point.
|
||||
// Guessing wrong in either direction is not a problem.
|
||||
state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
|
||||
}
|
||||
|
||||
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
|
||||
// of their current tip anymore. Go back enough to fix that.
|
||||
state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
|
||||
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
|
||||
return;
|
||||
|
||||
std::vector<CBlockIndex*> vToFetch;
|
||||
CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
|
||||
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond our
|
||||
// current tip. The +1 is so we can detect stalling, namely if we would be able to download that next block if the
|
||||
// window were 1 larger.
|
||||
int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, chainActive.Height() + BLOCK_DOWNLOAD_WINDOW + 1);
|
||||
NodeId waitingfor = -1;
|
||||
while (pindexWalk->nHeight < nMaxHeight) {
|
||||
// Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
|
||||
// pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
|
||||
// as iterating over ~100 CBlockIndex* entries anyway.
|
||||
int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
|
||||
vToFetch.resize(nToFetch);
|
||||
pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
|
||||
vToFetch[nToFetch - 1] = pindexWalk;
|
||||
for (unsigned int i = nToFetch - 1; i > 0; i--) {
|
||||
vToFetch[i - 1] = vToFetch[i]->pprev;
|
||||
}
|
||||
|
||||
// Iterate over those blocks in vToFetch (in forward direction), adding the ones that
|
||||
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
|
||||
// pindexLastCommonBlock as long as all ancestors are already downloaded.
|
||||
BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
|
||||
if (pindex->nStatus & BLOCK_HAVE_DATA) {
|
||||
if (pindex->nChainTx)
|
||||
state->pindexLastCommonBlock = pindex;
|
||||
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
|
||||
// The block is not already downloaded, and not yet in flight.
|
||||
if (pindex->nHeight > chainActive.Height() + (int)BLOCK_DOWNLOAD_WINDOW) {
|
||||
// We reached the end of the window.
|
||||
if (vBlocks.size() == 0 && waitingfor != nodeid) {
|
||||
// We aren't able to fetch anything, but we would be if the download window was one larger.
|
||||
nodeStaller = waitingfor;
|
||||
}
|
||||
return;
|
||||
}
|
||||
vBlocks.push_back(pindex);
|
||||
if (vBlocks.size() == count) {
|
||||
return;
|
||||
}
|
||||
} else if (waitingfor == -1) {
|
||||
// This is the first already-in-flight block.
|
||||
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
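The staller bookkeeping above is easiest to see on a toy model: with the whole window already requested from one slow peer, the extra "+1" height is what lets another peer identify who is blocking progress (the real code then sets nStallingSince and disconnects after BLOCK_STALLING_TIMEOUT). A sketch with integer heights and hypothetical peer ids, not the real data structures:

```cpp
// Toy model of the moving block download window.
#include <cstdio>
#include <map>
#include <set>

int main() {
    const int WINDOW = 8;                 // stand-in for BLOCK_DOWNLOAD_WINDOW
    const int tip = 100;                  // our current chain height
    std::set<int> have;                   // blocks past the tip already on disk (none here)
    std::map<int, int> inflight;          // height -> peer it was requested from
    for (int h = tip + 1; h <= tip + WINDOW; h++)
        inflight[h] = 1;                  // slow peer 1 was asked for the entire window

    // Peer 2 asks what it should fetch: walk heights up to tip + WINDOW + 1.
    int staller = -1;
    for (int h = tip + 1; h <= tip + WINDOW + 1; h++) {
        if (have.count(h) || inflight.count(h)) {
            if (staller == -1 && inflight.count(h))
                staller = inflight[h];    // remember who holds the oldest gap
            continue;
        }
        if (h > tip + WINDOW) {
            // Nothing fetchable inside the window, but this height would be fetchable
            // if the window were one block larger: that identifies the staller.
            printf("peer %d is stalling the window\n", staller);
            return 0;
        }
        printf("peer 2 should fetch height %d\n", h);
        return 0;
    }
    return 0;
}
```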
|
||||
} // anon namespace
|
||||
|
||||
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
|
||||
|
@ -1086,46 +1155,6 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex)
|
|||
return true;
|
||||
}
|
||||
|
||||
uint256 static GetOrphanRoot(const uint256& hash)
|
||||
{
|
||||
map<uint256, COrphanBlock*>::iterator it = mapOrphanBlocks.find(hash);
|
||||
if (it == mapOrphanBlocks.end())
|
||||
return hash;
|
||||
|
||||
// Work back to the first block in the orphan chain
|
||||
do {
|
||||
map<uint256, COrphanBlock*>::iterator it2 = mapOrphanBlocks.find(it->second->hashPrev);
|
||||
if (it2 == mapOrphanBlocks.end())
|
||||
return it->first;
|
||||
it = it2;
|
||||
} while(true);
|
||||
}
|
||||
|
||||
// Remove a random orphan block (which does not have any dependent orphans).
|
||||
void static PruneOrphanBlocks()
|
||||
{
|
||||
if (mapOrphanBlocksByPrev.size() <= (size_t)std::max((int64_t)0, GetArg("-maxorphanblocks", DEFAULT_MAX_ORPHAN_BLOCKS)))
|
||||
return;
|
||||
|
||||
// Pick a random orphan block.
|
||||
int pos = insecure_rand() % mapOrphanBlocksByPrev.size();
|
||||
std::multimap<uint256, COrphanBlock*>::iterator it = mapOrphanBlocksByPrev.begin();
|
||||
while (pos--) it++;
|
||||
|
||||
// As long as this block has other orphans depending on it, move to one of those successors.
|
||||
do {
|
||||
std::multimap<uint256, COrphanBlock*>::iterator it2 = mapOrphanBlocksByPrev.find(it->second->hashBlock);
|
||||
if (it2 == mapOrphanBlocksByPrev.end())
|
||||
break;
|
||||
it = it2;
|
||||
} while(1);
|
||||
|
||||
uint256 hash = it->second->hashBlock;
|
||||
delete it->second;
|
||||
mapOrphanBlocksByPrev.erase(it);
|
||||
mapOrphanBlocks.erase(hash);
|
||||
}
|
||||
|
||||
CAmount GetBlockValue(int nHeight, const CAmount& nFees)
|
||||
{
|
||||
int64_t nSubsidy = 50 * COIN;
|
||||
|
@ -1664,11 +1693,6 @@ bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, C
|
|||
if (fJustCheck)
|
||||
return true;
|
||||
|
||||
// Correct transaction counts.
|
||||
pindex->nTx = block.vtx.size();
|
||||
if (pindex->pprev)
|
||||
pindex->nChainTx = pindex->pprev->nChainTx + block.vtx.size();
|
||||
|
||||
// Write undo information to disk
|
||||
if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
|
||||
{
|
||||
|
@ -1900,6 +1924,8 @@ static CBlockIndex* FindMostWorkChain() {
|
|||
CBlockIndex *pindexTest = pindexNew;
|
||||
bool fInvalidAncestor = false;
|
||||
while (pindexTest && !chainActive.Contains(pindexTest)) {
|
||||
assert(pindexTest->nStatus & BLOCK_HAVE_DATA);
|
||||
assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
|
||||
if (pindexTest->nStatus & BLOCK_FAILED_MASK) {
|
||||
// Candidate has an invalid ancestor, remove entire chain from the set.
|
||||
if (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
|
||||
|
@ -2032,7 +2058,7 @@ bool ActivateBestChain(CValidationState &state, CBlock *pblock) {
|
|||
return true;
|
||||
}
|
||||
|
||||
CBlockIndex* AddToBlockIndex(CBlockHeader& block)
|
||||
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
|
||||
{
|
||||
// Check for duplicate
|
||||
uint256 hash = block.GetHash();
|
||||
|
@ -2043,10 +2069,10 @@ CBlockIndex* AddToBlockIndex(CBlockHeader& block)
|
|||
// Construct new block index object
|
||||
CBlockIndex* pindexNew = new CBlockIndex(block);
|
||||
assert(pindexNew);
|
||||
{
|
||||
LOCK(cs_nBlockSequenceId);
|
||||
pindexNew->nSequenceId = nBlockSequenceId++;
|
||||
}
|
||||
// We assign the sequence id to blocks only when the full data is available,
|
||||
// to avoid miners withholding blocks but broadcasting headers, to get a
|
||||
// competitive advantage.
|
||||
pindexNew->nSequenceId = 0;
|
||||
BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
|
||||
pindexNew->phashBlock = &((*mi).first);
|
||||
BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
|
||||
|
@ -2058,6 +2084,11 @@ CBlockIndex* AddToBlockIndex(CBlockHeader& block)
|
|||
}
|
||||
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + pindexNew->GetBlockWork();
|
||||
pindexNew->RaiseValidity(BLOCK_VALID_TREE);
|
||||
if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
|
||||
pindexBestHeader = pindexNew;
|
||||
|
||||
// Ok if it fails, we'll download the header again next time.
|
||||
pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexNew));
|
||||
|
||||
return pindexNew;
|
||||
}
|
||||
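Why nSequenceId is now assigned only once the full block data arrives: the best-chain ordering breaks ties on it. A sketch of that tie-break (the real ordering is CBlockIndexWorkComparator in main.cpp; the names below are illustrative only):

```cpp
// Illustrative only; ToyIndex mirrors two CBlockIndex fields.
struct ToyIndex {
    unsigned long long nChainWork;   // cumulative work (a uint256 in the real code)
    int nSequenceId;                 // order in which the full block data arrived; 0 = header-only
};

// "a is preferred over b": more work wins; on equal work, the block whose data was
// received first wins. Assigning nSequenceId only when the transactions arrive means
// a header-only announcement earns no head start in this tie-break.
bool Preferred(const ToyIndex& a, const ToyIndex& b) {
    if (a.nChainWork != b.nChainWork)
        return a.nChainWork > b.nChainWork;
    return a.nSequenceId < b.nSequenceId;
}
```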
|
@ -2066,30 +2097,45 @@ CBlockIndex* AddToBlockIndex(CBlockHeader& block)
|
|||
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
|
||||
{
|
||||
pindexNew->nTx = block.vtx.size();
|
||||
if (pindexNew->pprev) {
|
||||
// Not the genesis block.
|
||||
if (pindexNew->pprev->nChainTx) {
|
||||
// The parent block's total number of transactions is known, so compute ours.
|
||||
pindexNew->nChainTx = pindexNew->pprev->nChainTx + pindexNew->nTx;
|
||||
} else {
|
||||
// The total number of transactions isn't known yet.
|
||||
// We will compute it when the block is connected.
|
||||
pindexNew->nChainTx = 0;
|
||||
}
|
||||
} else {
|
||||
// Genesis block.
|
||||
pindexNew->nChainTx = pindexNew->nTx;
|
||||
}
|
||||
pindexNew->nFile = pos.nFile;
|
||||
pindexNew->nDataPos = pos.nPos;
|
||||
pindexNew->nUndoPos = 0;
|
||||
pindexNew->nStatus |= BLOCK_HAVE_DATA;
|
||||
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
|
||||
{
|
||||
LOCK(cs_nBlockSequenceId);
|
||||
pindexNew->nSequenceId = nBlockSequenceId++;
|
||||
}
|
||||
|
||||
if (pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS))
|
||||
setBlockIndexValid.insert(pindexNew);
|
||||
if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
|
||||
// If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
|
||||
deque<CBlockIndex*> queue;
|
||||
queue.push_back(pindexNew);
|
||||
|
||||
// Recursively process any descendant blocks that now may be eligible to be connected.
|
||||
while (!queue.empty()) {
|
||||
CBlockIndex *pindex = queue.front();
|
||||
queue.pop_front();
|
||||
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
|
||||
setBlockIndexValid.insert(pindex);
|
||||
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
|
||||
while (range.first != range.second) {
|
||||
std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
|
||||
queue.push_back(it->second);
|
||||
range.first++;
|
||||
mapBlocksUnlinked.erase(it);
|
||||
}
|
||||
if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex)))
|
||||
return state.Abort("Failed to write block index");
|
||||
}
|
||||
} else {
|
||||
if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
|
||||
mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
|
||||
}
|
||||
if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexNew)))
|
||||
return state.Abort("Failed to write block index");
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
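A toy walk-through of the mapBlocksUnlinked bookkeeping above: block data can arrive out of order, and a child whose parent lacks data is parked until the parent's transactions show up, at which point nChainTx is filled in for it and for any parked descendants. Integer heights stand in for CBlockIndex pointers:

```cpp
// Heights 1 <- 2 <- 3; blocks 2 and 3 received their data before block 1 did.
#include <cstdio>
#include <deque>
#include <map>

int main() {
    std::multimap<int, int> unlinked = {{1, 2}, {2, 3}};  // parent -> child waiting for it
    std::map<int, int> nTx = {{1, 5}, {2, 7}, {3, 2}};    // per-block transaction counts
    std::map<int, int> chainTx = {{0, 0}};                // nothing before block 1

    // Block 1's transactions arrive: it and its parked descendants become connectable.
    std::deque<int> queue = {1};
    while (!queue.empty()) {
        int b = queue.front(); queue.pop_front();
        chainTx[b] = chainTx[b - 1] + nTx[b];             // parent is b-1 in this toy chain
        printf("block %d connectable, nChainTx = %d\n", b, chainTx[b]);
        auto range = unlinked.equal_range(b);
        for (auto it = range.first; it != range.second; ) {
            queue.push_back(it->second);
            it = unlinked.erase(it);
        }
    }
    return 0;
}
```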
|
@ -2205,12 +2251,31 @@ bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool f
|
|||
|
||||
bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bool fCheckMerkleRoot)
|
||||
{
|
||||
// These are checks that are independent of context
|
||||
// that can be verified before saving an orphan block.
|
||||
// These are checks that are independent of context.
|
||||
|
||||
if (!CheckBlockHeader(block, state, fCheckPOW))
|
||||
return false;
|
||||
|
||||
// Check the merkle root.
|
||||
if (fCheckMerkleRoot) {
|
||||
bool mutated;
|
||||
uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated);
|
||||
if (block.hashMerkleRoot != hashMerkleRoot2)
|
||||
return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"),
|
||||
REJECT_INVALID, "bad-txnmrklroot", true);
|
||||
|
||||
// Check for merkle tree malleability (CVE-2012-2459): repeating sequences
|
||||
// of transactions in a block without affecting the merkle root of a block,
|
||||
// while still invalidating it.
|
||||
if (mutated)
|
||||
return state.DoS(100, error("CheckBlock() : duplicate transaction"),
|
||||
REJECT_INVALID, "bad-txns-duplicate", true);
|
||||
}
|
||||
|
||||
// All potential-corruption validation must be done before we do any
|
||||
// transaction validation, as otherwise we may mark the header as invalid
|
||||
// because we receive the wrong transactions for it.
|
||||
|
||||
// Size limits
|
||||
if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
|
||||
return state.DoS(100, error("CheckBlock() : size limits failed"),
|
||||
|
@ -2230,15 +2295,6 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo
|
|||
if (!CheckTransaction(tx, state))
|
||||
return error("CheckBlock() : CheckTransaction failed");
|
||||
|
||||
// Check for merkle tree malleability (CVE-2012-2459): repeating sequences
|
||||
// of transactions in a block without affecting the merkle root of a block,
|
||||
// while still invalidating it.
|
||||
bool mutated;
|
||||
uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated);
|
||||
if (mutated)
|
||||
return state.DoS(100, error("CheckBlock() : duplicate transaction"),
|
||||
REJECT_INVALID, "bad-txns-duplicate", true);
|
||||
|
||||
unsigned int nSigOps = 0;
|
||||
BOOST_FOREACH(const CTransaction& tx, block.vtx)
|
||||
{
|
||||
|
@ -2248,15 +2304,10 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW, bo
|
|||
return state.DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"),
|
||||
REJECT_INVALID, "bad-blk-sigops", true);
|
||||
|
||||
// Check merkle root
|
||||
if (fCheckMerkleRoot && block.hashMerkleRoot != hashMerkleRoot2)
|
||||
return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"),
|
||||
REJECT_INVALID, "bad-txnmrklroot", true);
|
||||
|
||||
return true;
|
||||
}
|
||||
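The reordered merkle check exists because of the malleation described in the CVE-2012-2459 comment: Bitcoin's merkle computation duplicates the last hash of any odd-sized level, so a block with transactions [a, b, c] and one with [a, b, c, c] share a root. A toy demonstration, using a 64-bit stand-in hash instead of double-SHA256:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for the double-SHA256 pair hash.
uint64_t H(uint64_t l, uint64_t r) { return l * 1000003u ^ (r + 0x9e3779b97f4a7c15ull); }

uint64_t MerkleRoot(std::vector<uint64_t> level) {
    while (level.size() > 1) {
        if (level.size() % 2)                       // odd level: duplicate the last entry
            level.push_back(level.back());
        std::vector<uint64_t> next;
        for (size_t i = 0; i < level.size(); i += 2)
            next.push_back(H(level[i], level[i + 1]));
        level = next;
    }
    return level[0];
}

int main() {
    std::vector<uint64_t> txids   = {11, 22, 33};
    std::vector<uint64_t> mutated = {11, 22, 33, 33};   // different tx list, same root
    printf("%llx == %llx\n", (unsigned long long)MerkleRoot(txids),
                             (unsigned long long)MerkleRoot(mutated));
}
```

This is why BuildMerkleTree(&mutated) flags repeated sequences, and why that check now runs before any transaction validation.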
|
||||
bool AcceptBlockHeader(CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
|
||||
bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
|
||||
{
|
||||
AssertLockHeld(cs_main);
|
||||
// Check for duplicate
|
||||
|
@ -2264,9 +2315,13 @@ bool AcceptBlockHeader(CBlockHeader& block, CValidationState& state, CBlockIndex
|
|||
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
|
||||
CBlockIndex *pindex = NULL;
|
||||
if (miSelf != mapBlockIndex.end()) {
|
||||
// Block header is already known.
|
||||
pindex = miSelf->second;
|
||||
if (ppindex)
|
||||
*ppindex = pindex;
|
||||
if (pindex->nStatus & BLOCK_FAILED_MASK)
|
||||
return state.Invalid(error("%s : block is marked invalid", __func__), 0, "duplicate");
|
||||
return true;
|
||||
}
|
||||
|
||||
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint();
|
||||
|
@ -2344,6 +2399,12 @@ bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex,
|
|||
if (!AcceptBlockHeader(block, state, &pindex))
|
||||
return false;
|
||||
|
||||
if (pindex->nStatus & BLOCK_HAVE_DATA) {
|
||||
// TODO: deal better with duplicate blocks.
|
||||
// return state.DoS(20, error("AcceptBlock() : already have block %d %s", pindex->nHeight, pindex->GetBlockHash().ToString()), REJECT_DUPLICATE, "duplicate");
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!CheckBlock(block, state)) {
|
||||
if (state.IsInvalid() && !state.CorruptionPossible()) {
|
||||
pindex->nStatus |= BLOCK_FAILED_VALID;
|
||||
|
@ -2456,93 +2517,26 @@ void CBlockIndex::BuildSkip()
|
|||
pskip = pprev->GetAncestor(GetSkipHeight(nHeight));
|
||||
}
|
||||
|
||||
void PushGetBlocks(CNode* pnode, CBlockIndex* pindexBegin, uint256 hashEnd)
|
||||
{
|
||||
AssertLockHeld(cs_main);
|
||||
// Filter out duplicate requests
|
||||
if (pindexBegin == pnode->pindexLastGetBlocksBegin && hashEnd == pnode->hashLastGetBlocksEnd)
|
||||
return;
|
||||
pnode->pindexLastGetBlocksBegin = pindexBegin;
|
||||
pnode->hashLastGetBlocksEnd = hashEnd;
|
||||
|
||||
pnode->PushMessage("getblocks", chainActive.GetLocator(pindexBegin), hashEnd);
|
||||
}
|
||||
|
||||
bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp)
|
||||
{
|
||||
// Check for duplicate
|
||||
uint256 hash = pblock->GetHash();
|
||||
// Preliminary checks
|
||||
bool checked = CheckBlock(*pblock, state);
|
||||
|
||||
{
|
||||
LOCK(cs_main);
|
||||
if (mapBlockIndex.count(hash))
|
||||
return state.Invalid(error("ProcessBlock() : already have block %d %s", mapBlockIndex[hash]->nHeight, hash.ToString()), 0, "duplicate");
|
||||
if (mapOrphanBlocks.count(hash))
|
||||
return state.Invalid(error("ProcessBlock() : already have block (orphan) %s", hash.ToString()), 0, "duplicate");
|
||||
|
||||
// Preliminary checks
|
||||
if (!CheckBlock(*pblock, state))
|
||||
MarkBlockAsReceived(pblock->GetHash());
|
||||
if (!checked) {
|
||||
return error("ProcessBlock() : CheckBlock FAILED");
|
||||
|
||||
// If we don't already have its previous block (with full data), shunt it off to holding area until we get it
|
||||
BlockMap::iterator it = mapBlockIndex.find(pblock->hashPrevBlock);
|
||||
if (pblock->hashPrevBlock != 0 && (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)))
|
||||
{
|
||||
LogPrintf("ProcessBlock: ORPHAN BLOCK %lu, prev=%s\n", (unsigned long)mapOrphanBlocks.size(), pblock->hashPrevBlock.ToString());
|
||||
|
||||
// Accept orphans as long as there is a node to request its parents from
|
||||
if (pfrom) {
|
||||
PruneOrphanBlocks();
|
||||
COrphanBlock* pblock2 = new COrphanBlock();
|
||||
{
|
||||
CDataStream ss(SER_DISK, CLIENT_VERSION);
|
||||
ss << *pblock;
|
||||
pblock2->vchBlock = std::vector<unsigned char>(ss.begin(), ss.end());
|
||||
}
|
||||
pblock2->hashBlock = hash;
|
||||
pblock2->hashPrev = pblock->hashPrevBlock;
|
||||
mapOrphanBlocks.insert(make_pair(hash, pblock2));
|
||||
mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrev, pblock2));
|
||||
|
||||
// Ask this guy to fill in what we're missing
|
||||
PushGetBlocks(pfrom, chainActive.Tip(), GetOrphanRoot(hash));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Store to disk
|
||||
CBlockIndex *pindex = NULL;
|
||||
bool ret = AcceptBlock(*pblock, state, &pindex, dbp);
|
||||
if (pindex && pfrom) {
|
||||
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
|
||||
}
|
||||
if (!ret)
|
||||
return error("ProcessBlock() : AcceptBlock FAILED");
|
||||
|
||||
// Recursively process any orphan blocks that depended on this one
|
||||
vector<uint256> vWorkQueue;
|
||||
vWorkQueue.push_back(hash);
|
||||
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
|
||||
{
|
||||
uint256 hashPrev = vWorkQueue[i];
|
||||
for (multimap<uint256, COrphanBlock*>::iterator mi = mapOrphanBlocksByPrev.lower_bound(hashPrev);
|
||||
mi != mapOrphanBlocksByPrev.upper_bound(hashPrev);
|
||||
++mi)
|
||||
{
|
||||
CBlock block;
|
||||
{
|
||||
CDataStream ss(mi->second->vchBlock, SER_DISK, CLIENT_VERSION);
|
||||
ss >> block;
|
||||
}
|
||||
block.BuildMerkleTree();
|
||||
// Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan resolution (that is, feeding people an invalid block based on LegitBlockX in order to get anyone relaying LegitBlockX banned)
|
||||
CValidationState stateDummy;
|
||||
CBlockIndex *pindexChild = NULL;
|
||||
if (AcceptBlock(block, stateDummy, &pindexChild))
|
||||
vWorkQueue.push_back(mi->second->hashBlock);
|
||||
mapOrphanBlocks.erase(mi->second->hashBlock);
|
||||
delete mi->second;
|
||||
}
|
||||
mapOrphanBlocksByPrev.erase(hashPrev);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if (!ActivateBestChain(state, pblock))
|
||||
|
@ -2808,13 +2802,26 @@ bool static LoadBlockIndexDB()
|
|||
{
|
||||
CBlockIndex* pindex = item.second;
|
||||
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + pindex->GetBlockWork();
|
||||
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
|
||||
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS))
|
||||
if (pindex->nStatus & BLOCK_HAVE_DATA) {
|
||||
if (pindex->pprev) {
|
||||
if (pindex->pprev->nChainTx) {
|
||||
pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
|
||||
} else {
|
||||
pindex->nChainTx = 0;
|
||||
mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
|
||||
}
|
||||
} else {
|
||||
pindex->nChainTx = pindex->nTx;
|
||||
}
|
||||
}
|
||||
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
|
||||
setBlockIndexValid.insert(pindex);
|
||||
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
|
||||
pindexBestInvalid = pindex;
|
||||
if (pindex->pprev)
|
||||
pindex->BuildSkip();
|
||||
if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
|
||||
pindexBestHeader = pindex;
|
||||
}
|
||||
|
||||
// Load block file info
|
||||
|
@ -3226,8 +3233,7 @@ bool static AlreadyHave(const CInv& inv)
|
|||
pcoinsTip->HaveCoins(inv.hash);
|
||||
}
|
||||
case MSG_BLOCK:
|
||||
return mapBlockIndex.count(inv.hash) ||
|
||||
mapOrphanBlocks.count(inv.hash);
|
||||
return mapBlockIndex.count(inv.hash);
|
||||
}
|
||||
// Don't know what it is, just say we already got one
|
||||
return true;
|
||||
|
@ -3375,10 +3381,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
|
|||
return true;
|
||||
}
|
||||
|
||||
{
|
||||
LOCK(cs_main);
|
||||
State(pfrom->GetId())->nLastBlockProcess = GetTimeMicros();
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
@ -3587,6 +3589,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
|
|||
|
||||
LOCK(cs_main);
|
||||
|
||||
std::vector<CInv> vToFetch;
|
||||
|
||||
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
|
||||
{
|
||||
const CInv &inv = vInv[nInv];
|
||||
|
@ -3597,19 +3601,29 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
|
|||
bool fAlreadyHave = AlreadyHave(inv);
|
||||
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
|
||||
|
||||
if (!fAlreadyHave) {
|
||||
if (!fImporting && !fReindex) {
|
||||
if (inv.type == MSG_BLOCK)
|
||||
AddBlockToQueue(pfrom->GetId(), inv.hash);
|
||||
else
|
||||
if (!fAlreadyHave && !fImporting && !fReindex && inv.type != MSG_BLOCK)
|
||||
pfrom->AskFor(inv);
|
||||
}
|
||||
} else if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash)) {
|
||||
PushGetBlocks(pfrom, chainActive.Tip(), GetOrphanRoot(inv.hash));
|
||||
}
|
||||
|
||||
if (inv.type == MSG_BLOCK)
|
||||
if (inv.type == MSG_BLOCK) {
|
||||
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
|
||||
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
|
||||
// First request the headers preceding the announced block. In the normal fully-synced
|
||||
// case where a new block is announced that succeeds the current tip (no reorganization),
|
||||
// there are no such headers.
|
||||
// Secondly, and only when we are close to being synced, we request the announced block directly,
|
||||
// to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
|
||||
// time the block arrives, the header chain leading up to it is already validated. Not
|
||||
// doing this will result in the received block being rejected as an orphan in case it is
|
||||
// not a direct successor.
|
||||
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
|
||||
if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - Params().TargetSpacing() * 20) {
|
||||
vToFetch.push_back(inv);
|
||||
// Mark block as in flight already, even though the actual "getdata" message only goes out
|
||||
// later (within the same cs_main lock, though).
|
||||
MarkBlockAsInFlight(pfrom->GetId(), inv.hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Track requests for our stuff
|
||||
g_signals.Inventory(inv.hash);
|
||||
|
@ -3619,6 +3633,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
|
|||
return error("send buffer size() = %u", pfrom->nSendSize);
|
||||
}
|
||||
}
|
||||
|
||||
if (!vToFetch.empty())
|
||||
pfrom->PushMessage("getdata", vToFetch);
|
||||
}
|
||||
|
||||
|
||||
|
@ -3706,7 +3723,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
|
|||
|
||||
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
|
||||
vector<CBlock> vHeaders;
|
||||
int nLimit = 2000;
|
||||
int nLimit = MAX_HEADERS_RESULTS;
|
||||
LogPrint("net", "getheaders %d to %s\n", (pindex ? pindex->nHeight : -1), hashStop.ToString());
|
||||
for (; pindex; pindex = chainActive.Next(pindex))
|
||||
{
|
||||
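On the comment about using CBlock rather than CBlockHeader: each entry of a "headers" message is an 80-byte header followed by a transaction count, and serializing an empty CBlock is what produces that trailing 0x00 byte. A toy illustration of the framing (not the project's serialization code):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Assumed framing of one 'headers' entry: 80 header bytes plus a compact-size tx count.
std::vector<uint8_t> SerializeHeadersEntry(const std::vector<uint8_t>& header80) {
    std::vector<uint8_t> out = header80;   // the 80 header bytes
    out.push_back(0x00);                   // tx count, always zero in a headers reply
    return out;
}

int main() {
    std::vector<uint8_t> header(80, 0xAB);
    printf("entry size = %zu bytes\n", SerializeHeadersEntry(header).size());   // 81
}
```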
|
@ -3826,22 +3843,66 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
|
|||
}
|
||||
|
||||
|
||||
else if (strCommand == "headers" && !fImporting && !fReindex) // Ignore headers received while importing
|
||||
{
|
||||
std::vector<CBlockHeader> headers;
|
||||
|
||||
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
|
||||
unsigned int nCount = ReadCompactSize(vRecv);
|
||||
if (nCount > MAX_HEADERS_RESULTS) {
|
||||
Misbehaving(pfrom->GetId(), 20);
|
||||
return error("headers message size = %u", nCount);
|
||||
}
|
||||
headers.resize(nCount);
|
||||
for (unsigned int n = 0; n < nCount; n++) {
|
||||
vRecv >> headers[n];
|
||||
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
|
||||
}
|
||||
|
||||
LOCK(cs_main);
|
||||
|
||||
if (nCount == 0) {
|
||||
// Nothing interesting. Stop asking this peer for more headers.
|
||||
return true;
|
||||
}
|
||||
|
||||
CBlockIndex *pindexLast = NULL;
|
||||
BOOST_FOREACH(const CBlockHeader& header, headers) {
|
||||
CValidationState state;
|
||||
if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
|
||||
Misbehaving(pfrom->GetId(), 20);
|
||||
return error("non-continuous headers sequence");
|
||||
}
|
||||
if (!AcceptBlockHeader(header, state, &pindexLast)) {
|
||||
int nDoS;
|
||||
if (state.IsInvalid(nDoS)) {
|
||||
if (nDoS > 0)
|
||||
Misbehaving(pfrom->GetId(), nDoS);
|
||||
return error("invalid header received");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (pindexLast)
|
||||
UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
|
||||
|
||||
if (nCount == MAX_HEADERS_RESULTS && pindexLast) {
|
||||
// Headers message had its maximum size; the peer may have more headers.
|
||||
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
|
||||
// from there instead.
|
||||
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256(0));
|
||||
}
|
||||
}
|
||||
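Seen from the requesting side, the nCount == MAX_HEADERS_RESULTS rule above drives a simple loop; a sketch with integer heights standing in for headers:

```cpp
// Keep asking for headers after the last one received until a batch comes back
// smaller than the maximum, which by assumption means we reached the peer's tip.
#include <cstdio>
#include <vector>

const unsigned int MAX_HEADERS = 2000;   // stand-in for MAX_HEADERS_RESULTS

// Pretend the peer has 'peerHeight' headers and serves them in order.
std::vector<int> GetHeadersFrom(int start, int peerHeight) {
    std::vector<int> batch;
    for (int h = start + 1; h <= peerHeight && batch.size() < MAX_HEADERS; h++)
        batch.push_back(h);
    return batch;
}

int main() {
    int peerHeight = 4500, last = 0, rounds = 0;
    while (true) {
        std::vector<int> headers = GetHeadersFrom(last, peerHeight);   // one getheaders round-trip
        rounds++;
        if (headers.empty()) break;                // nothing new: stop asking
        last = headers.back();                     // accept/connect the headers here
        if (headers.size() < MAX_HEADERS) break;   // fewer than the max: we reached the tip
    }
    printf("synced %d headers in %d round-trips\n", last, rounds);   // 4500 in 3
}
```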
|
||||
else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing
|
||||
{
|
||||
CBlock block;
|
||||
vRecv >> block;
|
||||
|
||||
LogPrint("net", "received block %s peer=%d\n", block.GetHash().ToString(), pfrom->id);
|
||||
|
||||
CInv inv(MSG_BLOCK, block.GetHash());
|
||||
pfrom->AddInventoryKnown(inv);
|
||||
LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id);
|
||||
|
||||
{
|
||||
LOCK(cs_main);
|
||||
// Remember who we got this block from.
|
||||
mapBlockSource[inv.hash] = pfrom->GetId();
|
||||
MarkBlockAsReceived(inv.hash, pfrom->GetId());
|
||||
}
|
||||
pfrom->AddInventoryKnown(inv);
|
||||
|
||||
CValidationState state;
|
||||
ProcessBlock(state, pfrom, &block);
|
||||
|
@ -4323,9 +4384,17 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
|
|||
state.rejects.clear();
|
||||
|
||||
// Start block sync
|
||||
if (pto->fStartSync && !fImporting && !fReindex) {
|
||||
pto->fStartSync = false;
|
||||
PushGetBlocks(pto, chainActive.Tip(), uint256(0));
|
||||
if (pindexBestHeader == NULL)
|
||||
pindexBestHeader = chainActive.Tip();
|
||||
bool fFetch = !pto->fInbound || (pindexBestHeader && (state.pindexLastCommonBlock ? state.pindexLastCommonBlock->nHeight : 0) + 144 > pindexBestHeader->nHeight);
|
||||
if (!state.fSyncStarted && !pto->fClient && fFetch && !fImporting && !fReindex) {
|
||||
// Only actively request headers from a single peer, unless we're close to today.
|
||||
if (nSyncStarted == 0 || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
|
||||
state.fSyncStarted = true;
|
||||
nSyncStarted++;
|
||||
CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader;
|
||||
pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256(0));
|
||||
}
|
||||
}
|
||||
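The fFetch expression above packs the peer-selection policy into one line; a hedged restatement (144 blocks is roughly one day at the ten-minute target spacing):

```cpp
// Sketch of the fFetch condition: always download from outbound peers; only use
// inbound peers once we are within about a day of the best header we know about.
bool ShouldFetchFrom(bool fInbound, int nLastCommonHeight, int nBestHeaderHeight) {
    return !fInbound || nLastCommonHeight + 144 > nBestHeaderHeight;
}
```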
|
||||
// Resend wallet transactions that haven't gotten in a block yet
|
||||
|
@ -4384,35 +4453,32 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
|
|||
if (!vInv.empty())
|
||||
pto->PushMessage("inv", vInv);
|
||||
|
||||
|
||||
// Detect stalled peers. Require that blocks are in flight, we haven't
|
||||
// received a (requested) block in one minute, and that all blocks are
|
||||
// in flight for over two minutes, since we first had a chance to
|
||||
// process an incoming block.
|
||||
// Detect whether we're stalling
|
||||
int64_t nNow = GetTimeMicros();
|
||||
if (!pto->fDisconnect && state.nBlocksInFlight &&
|
||||
state.nLastBlockReceive < state.nLastBlockProcess - BLOCK_DOWNLOAD_TIMEOUT*1000000 &&
|
||||
state.vBlocksInFlight.front().nTime < state.nLastBlockProcess - 2*BLOCK_DOWNLOAD_TIMEOUT*1000000) {
|
||||
LogPrintf("Peer %s is stalling block download, disconnecting\n", state.name);
|
||||
if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
|
||||
// Stalling only triggers when the block download window cannot move. During normal steady state,
|
||||
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
|
||||
// should only happen during initial block download.
|
||||
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
|
||||
pto->fDisconnect = true;
|
||||
}
|
||||
|
||||
// Update knowledge of peer's block availability.
|
||||
ProcessBlockAvailability(pto->GetId());
|
||||
|
||||
//
|
||||
// Message: getdata (blocks)
|
||||
//
|
||||
vector<CInv> vGetData;
|
||||
while (!pto->fDisconnect && state.nBlocksToDownload && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
|
||||
uint256 hash = state.vBlocksToDownload.front();
|
||||
vGetData.push_back(CInv(MSG_BLOCK, hash));
|
||||
MarkBlockAsInFlight(pto->GetId(), hash);
|
||||
LogPrint("net", "Requesting block %s peer=%d\n", hash.ToString(), pto->id);
|
||||
if (vGetData.size() >= 1000)
|
||||
{
|
||||
pto->PushMessage("getdata", vGetData);
|
||||
vGetData.clear();
|
||||
if (!pto->fDisconnect && !pto->fClient && fFetch && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
|
||||
vector<CBlockIndex*> vToDownload;
|
||||
NodeId staller = -1;
|
||||
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
|
||||
BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
|
||||
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
|
||||
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
|
||||
LogPrint("net", "Requesting block %s peer=%d\n", pindex->GetBlockHash().ToString(), pto->id);
|
||||
}
|
||||
if (state.nBlocksInFlight == 0 && staller != -1) {
|
||||
if (State(staller)->nStallingSince == 0)
|
||||
State(staller)->nStallingSince = nNow;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4519,12 +4585,6 @@ public:
|
|||
delete (*it1).second;
|
||||
mapBlockIndex.clear();
|
||||
|
||||
// orphan blocks
|
||||
std::map<uint256, COrphanBlock*>::iterator it2 = mapOrphanBlocks.begin();
|
||||
for (; it2 != mapOrphanBlocks.end(); it2++)
|
||||
delete (*it2).second;
|
||||
mapOrphanBlocks.clear();
|
||||
|
||||
// orphan transactions
|
||||
mapOrphanTransactions.clear();
|
||||
mapOrphanTransactionsByPrev.clear();
|
||||
|
|
src/main.h (21 changes)

@ -72,9 +72,17 @@ static const int MAX_SCRIPTCHECK_THREADS = 16;
|
|||
/** -par default (number of script-checking threads, 0 = auto) */
|
||||
static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
|
||||
/** Number of blocks that can be requested at any given time from a single peer. */
|
||||
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 128;
|
||||
/** Timeout in seconds before considering a block download peer unresponsive. */
|
||||
static const unsigned int BLOCK_DOWNLOAD_TIMEOUT = 60;
|
||||
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
|
||||
/** Timeout in seconds during which a peer must stall block download progress before being disconnected. */
|
||||
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
|
||||
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
|
||||
* less than this number, we reached their tip. Changing this value is a protocol upgrade. */
|
||||
static const unsigned int MAX_HEADERS_RESULTS = 2000;
|
||||
/** Size of the "block download window": how far ahead of our current height do we fetch?
|
||||
* Larger windows tolerate larger download speed differences between peers, but increase the potential
|
||||
* degree of disordering of blocks on disk (which makes reindexing and in the future perhaps pruning
|
||||
* harder). We'll probably want to make this a per-peer adaptive value at some point. */
|
||||
static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
|
||||
|
||||
/** "reject" message codes **/
|
||||
static const unsigned char REJECT_MALFORMED = 0x01;
|
||||
|
@ -137,8 +145,6 @@ void RegisterNodeSignals(CNodeSignals& nodeSignals);
|
|||
/** Unregister a network node */
|
||||
void UnregisterNodeSignals(CNodeSignals& nodeSignals);
|
||||
|
||||
void PushGetBlocks(CNode* pnode, CBlockIndex* pindexBegin, uint256 hashEnd);
|
||||
|
||||
/** Process an incoming block */
|
||||
bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp = NULL);
|
||||
/** Check whether enough disk space is available for an incoming block */
|
||||
|
@ -439,9 +445,6 @@ bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex
|
|||
// Apply the effects of this block (with given index) on the UTXO set represented by coins
|
||||
bool ConnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& coins, bool fJustCheck = false);
|
||||
|
||||
// Add this block to the block index, and if necessary, switch the active block chain to this
|
||||
bool AddToBlockIndex(CBlock& block, CValidationState& state, const CDiskBlockPos& pos);
|
||||
|
||||
// Context-independent validity checks
|
||||
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW = true);
|
||||
bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
|
||||
|
@ -449,7 +452,7 @@ bool CheckBlock(const CBlock& block, CValidationState& state, bool fCheckPOW = t
|
|||
// Store block on disk
|
||||
// if dbp is provided, the file is known to already reside on disk
|
||||
bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex **pindex, CDiskBlockPos* dbp = NULL);
|
||||
bool AcceptBlockHeader(CBlockHeader& block, CValidationState& state, CBlockIndex **ppindex= NULL);
|
||||
bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex **ppindex= NULL);
|
||||
|
||||
|
||||
|
||||
|
|
src/net.cpp (50 changes)

@ -73,7 +73,6 @@ map<CNetAddr, LocalServiceInfo> mapLocalHost;
|
|||
static bool vfReachable[NET_MAX] = {};
|
||||
static bool vfLimited[NET_MAX] = {};
|
||||
static CNode* pnodeLocalHost = NULL;
|
||||
static CNode* pnodeSync = NULL;
|
||||
uint64_t nLocalHostNonce = 0;
|
||||
static std::vector<ListenSocket> vhListenSocket;
|
||||
CAddrMan addrman;
|
||||
|
@ -519,10 +518,6 @@ void CNode::CloseSocketDisconnect()
|
|||
TRY_LOCK(cs_vRecvMsg, lockRecv);
|
||||
if (lockRecv)
|
||||
vRecvMsg.clear();
|
||||
|
||||
// if this was the sync node, we'll need a new one
|
||||
if (this == pnodeSync)
|
||||
pnodeSync = NULL;
|
||||
}
|
||||
|
||||
void CNode::PushVersion()
|
||||
|
@ -615,7 +610,6 @@ void CNode::copyStats(CNodeStats &stats)
|
|||
X(nSendBytes);
|
||||
X(nRecvBytes);
|
||||
X(fWhitelisted);
|
||||
stats.fSyncNode = (this == pnodeSync);
|
||||
|
||||
// It is common for nodes with good ping times to suddenly become lagged,
|
||||
// due to a new block arriving or other large transfer.
|
||||
|
@ -1487,61 +1481,20 @@ bool OpenNetworkConnection(const CAddress& addrConnect, CSemaphoreGrant *grantOu
|
|||
}
|
||||
|
||||
|
||||
// for now, use a very simple selection metric: the node from which we received
|
||||
// most recently
|
||||
static int64_t NodeSyncScore(const CNode *pnode) {
|
||||
return pnode->nLastRecv;
|
||||
}
|
||||
|
||||
void static StartSync(const vector<CNode*> &vNodes) {
|
||||
CNode *pnodeNewSync = NULL;
|
||||
int64_t nBestScore = 0;
|
||||
|
||||
int nBestHeight = g_signals.GetHeight().get_value_or(0);
|
||||
|
||||
// Iterate over all nodes
|
||||
BOOST_FOREACH(CNode* pnode, vNodes) {
|
||||
// check preconditions for allowing a sync
|
||||
if (!pnode->fClient && !pnode->fOneShot &&
|
||||
!pnode->fDisconnect && pnode->fSuccessfullyConnected &&
|
||||
(pnode->nStartingHeight > (nBestHeight - 144)) &&
|
||||
(pnode->nVersion < NOBLKS_VERSION_START || pnode->nVersion >= NOBLKS_VERSION_END)) {
|
||||
// if ok, compare node's score with the best so far
|
||||
int64_t nScore = NodeSyncScore(pnode);
|
||||
if (pnodeNewSync == NULL || nScore > nBestScore) {
|
||||
pnodeNewSync = pnode;
|
||||
nBestScore = nScore;
|
||||
}
|
||||
}
|
||||
}
|
||||
// if a new sync candidate was found, start sync!
|
||||
if (pnodeNewSync) {
|
||||
pnodeNewSync->fStartSync = true;
|
||||
pnodeSync = pnodeNewSync;
|
||||
}
|
||||
}
|
||||
|
||||
void ThreadMessageHandler()
|
||||
{
|
||||
SetThreadPriority(THREAD_PRIORITY_BELOW_NORMAL);
|
||||
while (true)
|
||||
{
|
||||
bool fHaveSyncNode = false;
|
||||
|
||||
vector<CNode*> vNodesCopy;
|
||||
{
|
||||
LOCK(cs_vNodes);
|
||||
vNodesCopy = vNodes;
|
||||
BOOST_FOREACH(CNode* pnode, vNodesCopy) {
|
||||
pnode->AddRef();
|
||||
if (pnode == pnodeSync)
|
||||
fHaveSyncNode = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!fHaveSyncNode)
|
||||
StartSync(vNodesCopy);
|
||||
|
||||
// Poll the connected nodes for messages
|
||||
CNode* pnodeTrickle = NULL;
|
||||
if (!vNodesCopy.empty())
|
||||
|
@ -2078,10 +2031,7 @@ CNode::CNode(SOCKET hSocketIn, CAddress addrIn, std::string addrNameIn, bool fIn
|
|||
nSendSize = 0;
|
||||
nSendOffset = 0;
|
||||
hashContinue = 0;
|
||||
pindexLastGetBlocksBegin = 0;
|
||||
hashLastGetBlocksEnd = 0;
|
||||
nStartingHeight = -1;
|
||||
fStartSync = false;
|
||||
fGetAddr = false;
|
||||
fRelayTxes = false;
|
||||
setInventoryKnown.max_size(SendBufferSize() / 1000);
|
||||
src/net.h

@ -158,7 +158,6 @@ public:
|
|||
int nStartingHeight;
|
||||
uint64_t nSendBytes;
|
||||
uint64_t nRecvBytes;
|
||||
bool fSyncNode;
|
||||
bool fWhitelisted;
|
||||
double dPingTime;
|
||||
double dPingWait;
|
||||
|
@ -276,10 +275,7 @@ protected:
|
|||
|
||||
public:
|
||||
uint256 hashContinue;
|
||||
CBlockIndex* pindexLastGetBlocksBegin;
|
||||
uint256 hashLastGetBlocksEnd;
|
||||
int nStartingHeight;
|
||||
bool fStartSync;
|
||||
|
||||
// flood relay
|
||||
std::vector<CAddress> vAddrToSend;
|
||||
|
src/qt/forms/rpcconsole.ui

@ -836,29 +836,6 @@
|||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="4" column="0">
|
||||
<widget class="QLabel" name="label_25">
|
||||
<property name="text">
|
||||
<string>Sync Node</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="4" column="2">
|
||||
<widget class="QLabel" name="peerSyncNode">
|
||||
<property name="cursor">
|
||||
<cursorShape>IBeamCursor</cursorShape>
|
||||
</property>
|
||||
<property name="text">
|
||||
<string>N/A</string>
|
||||
</property>
|
||||
<property name="textFormat">
|
||||
<enum>Qt::PlainText</enum>
|
||||
</property>
|
||||
<property name="textInteractionFlags">
|
||||
<set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item row="5" column="0">
|
||||
<widget class="QLabel" name="label_29">
|
||||
<property name="text">
|
||||
|
src/qt/rpcconsole.cpp

@ -611,7 +611,6 @@ void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats)
|||
ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer));
|
||||
ui->peerDirection->setText(stats->nodeStats.fInbound ? tr("Inbound") : tr("Outbound"));
|
||||
ui->peerHeight->setText(QString("%1").arg(stats->nodeStats.nStartingHeight));
|
||||
ui->peerSyncNode->setText(stats->nodeStats.fSyncNode ? tr("Yes") : tr("No"));
|
||||
|
||||
// This check fails for example if the lock was busy and
|
||||
// nodeStateStats couldn't be fetched.
|
||||
|
src/rpcnet.cpp

@ -139,7 +139,6 @@ Value getpeerinfo(const Array& params, bool fHelp)
|||
obj.push_back(Pair("banscore", statestats.nMisbehavior));
|
||||
obj.push_back(Pair("syncheight", statestats.nSyncHeight));
|
||||
}
|
||||
obj.push_back(Pair("syncnode", stats.fSyncNode));
|
||||
obj.push_back(Pair("whitelisted", stats.fWhitelisted));
|
||||
|
||||
ret.push_back(obj);
|
||||
|
src/version.h

@ -33,8 +33,11 @@ static const int PROTOCOL_VERSION = 70002;
|||
// initial proto version, to be increased after version/verack negotiation
|
||||
static const int INIT_PROTO_VERSION = 209;
|
||||
|
||||
// In this version, 'getheaders' was introduced.
|
||||
static const int GETHEADERS_VERSION = 31800;
|
||||
|
||||
// disconnect from peers older than this proto version
|
||||
static const int MIN_PEER_PROTO_VERSION = 209;
|
||||
static const int MIN_PEER_PROTO_VERSION = GETHEADERS_VERSION;
|
||||
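A sketch of the gate the raised constant implies; the actual enforcement happens during the "version" handshake handling in main.cpp, which this hunk does not show:

```cpp
// Illustrative only.
static const int GETHEADERS_VERSION = 31800;
static const int MIN_PEER_PROTO_VERSION = GETHEADERS_VERSION;

bool AcceptPeerVersion(int nVersion) {
    // Peers predating 'getheaders' (pre-December-2010 releases) cannot take part in
    // headers-first sync, so they are disconnected at version negotiation.
    return nVersion >= MIN_PEER_PROTO_VERSION;
}
```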
|
||||
// nTime field added to CAddress, starting with this version;
|
||||
// if possible, avoid requesting addresses nodes older than this
|
||||
|
|