Merge pull request #6195

8273793 Eliminate compiler warning due to unused variable (Suhas Daftuar)

commit 921ea89bc3

1 changed file with 7 additions and 7 deletions

src/main.cpp | 14 +++++++-------
```diff
@@ -264,9 +264,9 @@ void UpdatePreferredDownload(CNode* node, CNodeState* state)
 }
 
 // Returns time at which to timeout block request (nTime in microseconds)
-int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore)
+int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consensus::Params &consensusParams)
 {
-    return nTime + 500000 * Params().GetConsensus().nPowTargetSpacing * (4 + nValidatedQueuedBefore);
+    return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
 }
 
 void InitializeNode(NodeId nodeid, const CNode *pnode) {
```
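For intuition about the formula this hunk touches: nTime is in microseconds, and 500000 µs times nPowTargetSpacing is half the target spacing per slot, so on mainnet (600 s spacing) the window is a 20-minute base plus 5 minutes per validated header already queued. A minimal standalone sketch of that arithmetic follows; the stub struct and function name are stand-ins for illustration, not the real Consensus::Params API:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for the one field of Consensus::Params that GetBlockTimeout reads.
struct ConsensusParamsStub {
    int64_t nPowTargetSpacing; // seconds between blocks; 600 on mainnet
};

// Same arithmetic as the patched GetBlockTimeout: nTime is in microseconds,
// and 500000 us * nPowTargetSpacing is half the target spacing per slot.
int64_t GetBlockTimeoutSketch(int64_t nTime, int nValidatedQueuedBefore,
                              const ConsensusParamsStub& consensusParams)
{
    return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
}

int main()
{
    ConsensusParamsStub mainnet{600};
    // No validated headers queued ahead: 4 slots of 5 minutes = 1200 s.
    std::printf("base window: %lld s\n",
                (long long)(GetBlockTimeoutSketch(0, 0, mainnet) / 1000000));
    // Two validated headers queued ahead: 6 slots = 1800 s.
    std::printf("with 2 queued: %lld s\n",
                (long long)(GetBlockTimeoutSketch(0, 2, mainnet) / 1000000));
    return 0;
}
```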
```diff
@@ -310,7 +310,7 @@ void MarkBlockAsReceived(const uint256& hash) {
 }
 
 // Requires cs_main.
-void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex *pindex = NULL) {
+void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
     CNodeState *state = State(nodeid);
     assert(state != NULL);
 
```
```diff
@@ -318,7 +318,7 @@ void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, CBlockIndex *pindex
     MarkBlockAsReceived(hash);
 
     int64_t nNow = GetTimeMicros();
-    QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders)};
+    QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)};
     nQueuedValidatedHeaders += newentry.fValidatedHeaders;
     list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
     state->nBlocksInFlight++;
```
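The pattern repeated across these hunks is plain dependency injection: the caller resolves Consensus::Params once and threads it through, rather than the callee reaching for the Params() global. One practical payoff, sketched here with the same illustrative stub as above (hypothetical values, not real chain parameters), is that the helper becomes testable in isolation:

```cpp
#include <cassert>
#include <cstdint>

// Same illustrative stub as in the previous sketch.
struct ConsensusParamsStub {
    int64_t nPowTargetSpacing;
};

int64_t GetBlockTimeoutSketch(int64_t nTime, int nValidatedQueuedBefore,
                              const ConsensusParamsStub& consensusParams)
{
    return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
}

int main()
{
    // With the parameters injected, the helper can be exercised against any
    // spacing without touching process-wide state such as Params().
    ConsensusParamsStub fastChain{60}; // hypothetical 1-minute chain, illustrative only
    assert(GetBlockTimeoutSketch(0, 0, fastChain) == 500000LL * 60 * 4);
    return 0;
}
```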
```diff
@@ -4183,7 +4183,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                     vToFetch.push_back(inv);
                     // Mark block as in flight already, even though the actual "getdata" message only goes out
                     // later (within the same cs_main lock, though).
-                    MarkBlockAsInFlight(pfrom->GetId(), inv.hash);
+                    MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
                 }
                 LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
             }
```
```diff
@@ -5036,7 +5036,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
         // more quickly than once every 5 minutes, then we'll shorten the download window for this block).
         if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
             QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
-            int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders);
+            int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams);
             if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
                 LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
                 queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
```
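The surrounding comment explains the intent of this hunk: if the validated headers queued ahead of a block have since arrived faster than one per 5 minutes, the deadline a fresh request would get now is earlier than the one stored at request time, so the stored deadline is tightened. A worked sketch under the same illustrative mainnet stub as above:

```cpp
#include <cstdint>
#include <cstdio>

struct ConsensusParamsStub { int64_t nPowTargetSpacing; };

int64_t GetBlockTimeoutSketch(int64_t nTime, int nValidatedQueuedBefore,
                              const ConsensusParamsStub& consensusParams)
{
    return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
}

int main()
{
    ConsensusParamsStub mainnet{600};
    // A block requested at t = 0 behind 3 other validated headers got a
    // 7-slot window: 7 * 300 s = 2100 s.
    int64_t nTimeDisconnect = GetBlockTimeoutSketch(0, 3, mainnet);
    // 10 minutes later those headers have all arrived; a request made now
    // would only get the base 4-slot window ending at 600 + 1200 = 1800 s.
    int64_t nNow = 10LL * 60 * 1000000;
    int64_t nTimeoutIfRequestedNow = GetBlockTimeoutSketch(nNow, 0, mainnet);
    // Mirrors the patched SendMessages check: keep whichever deadline is sooner.
    if (nTimeDisconnect > nTimeoutIfRequestedNow)
        nTimeDisconnect = nTimeoutIfRequestedNow;
    std::printf("effective deadline: %lld s\n", (long long)(nTimeDisconnect / 1000000));
    return 0;
}
```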
```diff
@@ -5057,7 +5057,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
             FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
             BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
                 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
-                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
+                MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
                 LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
                     pindex->nHeight, pto->id);
             }
```