net: switch all callers to connman for pushing messages

Drop all of the old stuff.
Cory Fields, 2016-09-12 20:07:44 -04:00 (committed by Pieter Wuille)
parent 3e32cd09f6
commit ea3326891d
3 changed files with 49 additions and 294 deletions
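The change at each call site in the diff below is mechanical: a message that a CNode used to serialize and queue for itself is now handed to the CConnman instance, with the target node passed as the first argument. A minimal compilable sketch of that calling convention follows; Node and ConnectionManager are hypothetical stand-ins for illustration, not the real CNode/CConnman classes.

// Sketch only: illustrates the call-site convention this commit adopts.
// The real classes are CNode/CConnman; these stand-ins are hypothetical.
#include <iostream>
#include <string>

struct Node {
    int id;
};

struct ConnectionManager {
    // New convention: the connection manager serializes and queues the
    // message on behalf of the node it manages.
    void PushMessage(Node* node, const std::string& command) {
        std::cout << "sending: " << command << " peer=" << node->id << "\n";
    }
};

int main() {
    ConnectionManager connman;
    Node peer{7};
    // Old style (removed by this commit):   peer.PushMessage("ping");
    // New style (used throughout the diff): connman.PushMessage(&peer, "ping");
    connman.PushMessage(&peer, "ping");
    return 0;
}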

src/main.cpp

@@ -501,15 +501,15 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pf
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
-            bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
-                pnodeStop->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+            bool found = connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
+                connman.PushMessage(pnodeStop, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
                 return true;
             });
             if(found)
                 lNodesAnnouncingHeaderAndIDs.pop_front();
         }
         fAnnounceUsingCMPCTBLOCK = true;
-        pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+        connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
     }
 }
@@ -4900,9 +4900,9 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                 if (!ReadBlockFromDisk(block, (*mi).second, consensusParams))
                     assert(!"cannot load block from disk");
                 if (inv.type == MSG_BLOCK)
-                    pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
+                    connman.PushMessageWithFlag(pfrom, SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
                 else if (inv.type == MSG_WITNESS_BLOCK)
-                    pfrom->PushMessage(NetMsgType::BLOCK, block);
+                    connman.PushMessage(pfrom, NetMsgType::BLOCK, block);
                 else if (inv.type == MSG_FILTERED_BLOCK)
                 {
                     bool sendMerkleBlock = false;
@@ -4915,7 +4915,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                         }
                     }
                     if (sendMerkleBlock) {
-                        pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock);
+                        connman.PushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock);
                         // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                         // This avoids hurting performance by pointlessly requiring a round-trip
                         // Note that there is currently no way for a node to request any single transactions we didn't send here -
@@ -4924,7 +4924,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                         // however we MUST always provide at least what the remote peer needs
                         typedef std::pair<unsigned int, uint256> PairType;
                         BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
-                            pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
+                            connman.PushMessageWithFlag(pfrom, SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, block.vtx[pair.first]);
                     }
                     // else
                         // no response
@@ -4938,9 +4938,9 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                     bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
                     if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                         CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
-                        pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
+                        connman.PushMessageWithFlag(pfrom, fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
                     } else
-                        pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
+                        connman.PushMessageWithFlag(pfrom, fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
                 }
                 // Trigger the peer node to send a getblocks request for the next batch of inventory
@@ -4951,7 +4951,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
                     // wait for other stuff first.
                     vector<CInv> vInv;
                     vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
-                    pfrom->PushMessage(NetMsgType::INV, vInv);
+                    connman.PushMessage(pfrom, NetMsgType::INV, vInv);
                     pfrom->hashContinue.SetNull();
                 }
             }
@@ -4962,14 +4962,14 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
             bool push = false;
             auto mi = mapRelay.find(inv.hash);
             if (mi != mapRelay.end()) {
-                pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
+                connman.PushMessageWithFlag(pfrom, inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *mi->second);
                 push = true;
             } else if (pfrom->timeLastMempoolReq) {
                 auto txinfo = mempool.info(inv.hash);
                 // To protect privacy, do not answer getdata using the mempool when
                 // that TX couldn't have been INVed in reply to a MEMPOOL request.
                 if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
-                    pfrom->PushMessageWithFlag(inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
+                    connman.PushMessageWithFlag(pfrom, inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0, NetMsgType::TX, *txinfo.tx);
                     push = true;
                 }
             }
@@ -4996,7 +4996,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
         // do that because they want to know about (and store and rebroadcast and
         // risk analyze) the dependencies of transactions relevant to them, without
         // having to download the entire memory pool.
-        pfrom->PushMessage(NetMsgType::NOTFOUND, vNotFound);
+        connman.PushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound);
     }
 }
@@ -5159,7 +5159,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         // Get recent addresses
         if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000)
         {
-            pfrom->PushMessage(NetMsgType::GETADDR);
+            connman.PushMessage(pfrom, NetMsgType::GETADDR);
             pfrom->fGetAddr = true;
         }
         connman.MarkAddressGood(pfrom->addr);
@@ -5206,7 +5206,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
            // We send this to non-NODE NETWORK peers as well, because even
            // non-NODE NETWORK peers can announce blocks (such as pruning
            // nodes)
-            pfrom->PushMessage(NetMsgType::SENDHEADERS);
+            connman.PushMessage(pfrom, NetMsgType::SENDHEADERS);
         }
         if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
             // Tell our peer we are willing to provide version 1 or 2 cmpctblocks
@@ -5217,9 +5217,9 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             bool fAnnounceUsingCMPCTBLOCK = false;
             uint64_t nCMPCTBLOCKVersion = 2;
             if (pfrom->GetLocalServices() & NODE_WITNESS)
-                pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+                connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
             nCMPCTBLOCKVersion = 1;
-            pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
+            connman.PushMessage(pfrom, NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
         }
     }
@@ -5347,7 +5347,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                 // time the block arrives, the header chain leading up to it is already validated. Not
                 // doing this will result in the received block being rejected as an orphan in case it is
                 // not a direct successor.
-                pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
+                connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash);
                 CNodeState *nodestate = State(pfrom->GetId());
                 if (CanDirectFetch(chainparams.GetConsensus()) &&
                     nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
@@ -5383,7 +5383,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         }
         if (!vToFetch.empty())
-            pfrom->PushMessage(NetMsgType::GETDATA, vToFetch);
+            connman.PushMessage(pfrom, NetMsgType::GETDATA, vToFetch);
     }
@@ -5483,7 +5483,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             }
             resp.txn[i] = block.vtx[req.indexes[i]];
         }
-        pfrom->PushMessageWithFlag(State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
+        connman.PushMessageWithFlag(pfrom, State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
     }
@@ -5532,7 +5532,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         // headers message). In both cases it's safe to update
         // pindexBestHeaderSent to be our tip.
         nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
-        pfrom->PushMessage(NetMsgType::HEADERS, vHeaders);
+        connman.PushMessage(pfrom, NetMsgType::HEADERS, vHeaders);
     }
@@ -5695,7 +5695,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                 pfrom->id,
                 FormatStateMessage(state));
             if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
-                pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+                connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                    state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
             if (nDoS > 0) {
                 Misbehaving(pfrom->GetId(), nDoS);
@@ -5714,7 +5714,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
             // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
             if (!IsInitialBlockDownload())
-                pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
+                connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
             return true;
         }
@@ -5747,7 +5747,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                 // so we just grab the block via normal getdata
                 std::vector<CInv> vInv(1);
                 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
-                pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+                connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
             }
             return true;
         }
@@ -5791,7 +5791,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                 // Duplicate txindexes, the block is now in-flight, so just request it
                 std::vector<CInv> vInv(1);
                 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
-                pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+                connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
                 return true;
             }
@@ -5815,7 +5815,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                     return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman);
                 } else {
                     req.blockhash = pindex->GetBlockHash();
-                    pfrom->PushMessage(NetMsgType::GETBLOCKTXN, req);
+                    connman.PushMessage(pfrom, NetMsgType::GETBLOCKTXN, req);
                 }
             }
         } else {
@@ -5824,7 +5824,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             // mempool will probably be useless - request the block normally
             std::vector<CInv> vInv(1);
             vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
-            pfrom->PushMessage(NetMsgType::GETDATA, vInv);
+            connman.PushMessage(pfrom, NetMsgType::GETDATA, vInv);
             return true;
         } else {
             // If this was an announce-cmpctblock, we want the same treatment as a header message
@@ -5866,7 +5866,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                 // Might have collided, fall back to getdata now :(
                 std::vector<CInv> invs;
                 invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
-                pfrom->PushMessage(NetMsgType::GETDATA, invs);
+                connman.PushMessage(pfrom, NetMsgType::GETDATA, invs);
             } else {
                 MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
                 fBlockRead = true;
@@ -5880,7 +5880,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             int nDoS;
             if (state.IsInvalid(nDoS)) {
                 assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
-                pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+                connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                    state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
                 if (nDoS > 0) {
                     LOCK(cs_main);
@@ -5928,7 +5928,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         // nUnconnectingHeaders gets reset back to 0.
         if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
             nodestate->nUnconnectingHeaders++;
-            pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
+            connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256());
             LogPrint("net", "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                     headers[0].GetHash().ToString(),
                     headers[0].hashPrevBlock.ToString(),
@@ -5975,7 +5975,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             // TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
             // from there instead.
             LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
-            pfrom->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
+            connman.PushMessage(pfrom, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256());
         }
         bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
@@ -6028,7 +6028,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                     // In any case, we want to download using a compact block, not a regular one
                     vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                 }
-                pfrom->PushMessage(NetMsgType::GETDATA, vGetData);
+                connman.PushMessage(pfrom, NetMsgType::GETDATA, vGetData);
             }
         }
     }
@@ -6060,7 +6060,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
         int nDoS;
         if (state.IsInvalid(nDoS)) {
             assert (state.GetRejectCode() < REJECT_INTERNAL); // Blocks are never rejected with internal reject codes
-            pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
+            connman.PushMessage(pfrom, NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
                                state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), block.GetHash());
             if (nDoS > 0) {
                 LOCK(cs_main);
@@ -6137,7 +6137,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             // it, if the remote node sends a ping once per second and this node takes 5
             // seconds to respond to each, the 5th ping the remote sends would appear to
             // return very quickly.
-            pfrom->PushMessage(NetMsgType::PONG, nonce);
+            connman.PushMessage(pfrom, NetMsgType::PONG, nonce);
         }
     }
@@ -6480,11 +6480,11 @@ bool SendMessages(CNode* pto, CConnman& connman)
             pto->nPingUsecStart = GetTimeMicros();
             if (pto->nVersion > BIP0031_VERSION) {
                 pto->nPingNonceSent = nonce;
-                pto->PushMessage(NetMsgType::PING, nonce);
+                connman.PushMessage(pto, NetMsgType::PING, nonce);
             } else {
                 // Peer is too old to support ping command with nonce, pong will never arrive.
                 pto->nPingNonceSent = 0;
-                pto->PushMessage(NetMsgType::PING);
+                connman.PushMessage(pto, NetMsgType::PING);
             }
         }
@@ -6515,14 +6515,14 @@ bool SendMessages(CNode* pto, CConnman& connman)
                     // receiver rejects addr messages larger than 1000
                     if (vAddr.size() >= 1000)
                     {
-                        pto->PushMessage(NetMsgType::ADDR, vAddr);
+                        connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
                         vAddr.clear();
                     }
                 }
             }
             pto->vAddrToSend.clear();
             if (!vAddr.empty())
-                pto->PushMessage(NetMsgType::ADDR, vAddr);
+                connman.PushMessage(pto, NetMsgType::ADDR, vAddr);
             // we only send the big addr message once
             if (pto->vAddrToSend.capacity() > 40)
                 pto->vAddrToSend.shrink_to_fit();
@@ -6545,7 +6545,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
         }
         BOOST_FOREACH(const CBlockReject& reject, state.rejects)
-            pto->PushMessage(NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
+            connman.PushMessage(pto, NetMsgType::REJECT, (string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
         state.rejects.clear();
         // Start block sync
@@ -6568,7 +6568,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
                 if (pindexStart->pprev)
                     pindexStart = pindexStart->pprev;
                 LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
-                pto->PushMessage(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
+                connman.PushMessage(pto, NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256());
             }
         }
@@ -6657,7 +6657,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
                     CBlock block;
                     assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
                     CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
-                    pto->PushMessageWithFlag(state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
+                    connman.PushMessageWithFlag(pto, state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
                     state.pindexBestHeaderSent = pBestIndex;
                 } else if (state.fPreferHeaders) {
                     if (vHeaders.size() > 1) {
@@ -6669,7 +6669,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
                         LogPrint("net", "%s: sending header %s to peer=%d\n", __func__,
                                 vHeaders.front().GetHash().ToString(), pto->id);
                     }
-                    pto->PushMessage(NetMsgType::HEADERS, vHeaders);
+                    connman.PushMessage(pto, NetMsgType::HEADERS, vHeaders);
                     state.pindexBestHeaderSent = pBestIndex;
                 } else
                     fRevertToInv = true;
@@ -6715,7 +6715,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
             BOOST_FOREACH(const uint256& hash, pto->vInventoryBlockToSend) {
                 vInv.push_back(CInv(MSG_BLOCK, hash));
                 if (vInv.size() == MAX_INV_SZ) {
-                    pto->PushMessage(NetMsgType::INV, vInv);
+                    connman.PushMessage(pto, NetMsgType::INV, vInv);
                     vInv.clear();
                 }
             }
@@ -6761,7 +6761,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
                     pto->filterInventoryKnown.insert(hash);
                     vInv.push_back(inv);
                     if (vInv.size() == MAX_INV_SZ) {
-                        pto->PushMessage(NetMsgType::INV, vInv);
+                        connman.PushMessage(pto, NetMsgType::INV, vInv);
                         vInv.clear();
                     }
                 }
@@ -6827,7 +6827,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
                         }
                     }
                     if (vInv.size() == MAX_INV_SZ) {
-                        pto->PushMessage(NetMsgType::INV, vInv);
+                        connman.PushMessage(pto, NetMsgType::INV, vInv);
                         vInv.clear();
                     }
                     pto->filterInventoryKnown.insert(hash);
@@ -6835,7 +6835,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
             }
         }
         if (!vInv.empty())
-            pto->PushMessage(NetMsgType::INV, vInv);
+            connman.PushMessage(pto, NetMsgType::INV, vInv);
         // Detect whether we're stalling
         nNow = GetTimeMicros();
@@ -6896,7 +6896,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
                 vGetData.push_back(inv);
                 if (vGetData.size() >= 1000)
                 {
-                    pto->PushMessage(NetMsgType::GETDATA, vGetData);
+                    connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
                     vGetData.clear();
                 }
             } else {
@@ -6906,7 +6906,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
             pto->mapAskFor.erase(pto->mapAskFor.begin());
         }
         if (!vGetData.empty())
-            pto->PushMessage(NetMsgType::GETDATA, vGetData);
+            connman.PushMessage(pto, NetMsgType::GETDATA, vGetData);
         //
         // Message: feefilter
@@ -6919,7 +6919,7 @@ bool SendMessages(CNode* pto, CConnman& connman)
             if (timeNow > pto->nextSendTimeFeeFilter) {
                 CAmount filterToSend = filterRounder.round(currentFilter);
                 if (filterToSend != pto->lastSentFeeFilter) {
-                    pto->PushMessage(NetMsgType::FEEFILTER, filterToSend);
+                    connman.PushMessage(pto, NetMsgType::FEEFILTER, filterToSend);
                     pto->lastSentFeeFilter = filterToSend;
                 }
                 pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);

src/net.cpp

@@ -2636,67 +2636,6 @@ void CNode::AskFor(const CInv& inv)
     mapAskFor.insert(std::make_pair(nRequestTime, inv));
 }
-void CNode::BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend)
-{
-    ENTER_CRITICAL_SECTION(cs_vSend);
-    assert(ssSend.size() == 0);
-    ssSend << CMessageHeader(Params().MessageStart(), pszCommand, 0);
-    LogPrint("net", "sending: %s ", SanitizeString(pszCommand));
-}
-void CNode::AbortMessage() UNLOCK_FUNCTION(cs_vSend)
-{
-    ssSend.clear();
-    LEAVE_CRITICAL_SECTION(cs_vSend);
-    LogPrint("net", "(aborted)\n");
-}
-void CNode::EndMessage(const char* pszCommand) UNLOCK_FUNCTION(cs_vSend)
-{
-    // The -*messagestest options are intentionally not documented in the help message,
-    // since they are only used during development to debug the networking code and are
-    // not intended for end-users.
-    if (mapArgs.count("-dropmessagestest") && GetRand(GetArg("-dropmessagestest", 2)) == 0)
-    {
-        LogPrint("net", "dropmessages DROPPING SEND MESSAGE\n");
-        AbortMessage();
-        return;
-    }
-    if (mapArgs.count("-fuzzmessagestest"))
-        Fuzz(GetArg("-fuzzmessagestest", 10));
-    if (ssSend.size() == 0)
-    {
-        LEAVE_CRITICAL_SECTION(cs_vSend);
-        return;
-    }
-    // Set the size
-    unsigned int nSize = ssSend.size() - CMessageHeader::HEADER_SIZE;
-    WriteLE32((uint8_t*)&ssSend[CMessageHeader::MESSAGE_SIZE_OFFSET], nSize);
-    //log total amount of bytes per command
-    mapSendBytesPerMsgCmd[std::string(pszCommand)] += nSize + CMessageHeader::HEADER_SIZE;
-    // Set the checksum
-    uint256 hash = Hash(ssSend.begin() + CMessageHeader::HEADER_SIZE, ssSend.end());
-    assert(ssSend.size () >= CMessageHeader::CHECKSUM_OFFSET + CMessageHeader::CHECKSUM_SIZE);
-    memcpy((char*)&ssSend[CMessageHeader::CHECKSUM_OFFSET], hash.begin(), CMessageHeader::CHECKSUM_SIZE);
-    LogPrint("net", "(%d bytes) peer=%d\n", nSize, id);
-    std::deque<CSerializeData>::iterator it = vSendMsg.insert(vSendMsg.end(), CSerializeData());
-    ssSend.GetAndClear(*it);
-    nSendSize += (*it).size();
-    // If write queue empty, attempt "optimistic write"
-    if (it == vSendMsg.begin())
-        nOptimisticBytesWritten += SocketSendData(this);
-    LEAVE_CRITICAL_SECTION(cs_vSend);
-}
 CDataStream CConnman::BeginMessage(CNode* pnode, int nVersion, int flags, const std::string& sCommand)
 {
     return {SER_NETWORK, (nVersion ? nVersion : pnode->GetSendVersion()) | flags, CMessageHeader(Params().MessageStart(), sCommand.c_str(), 0) };

src/net.h

@@ -833,190 +833,6 @@ public:
     void AskFor(const CInv& inv);
-    // TODO: Document the postcondition of this function. Is cs_vSend locked?
-    void BeginMessage(const char* pszCommand) EXCLUSIVE_LOCK_FUNCTION(cs_vSend);
-    // TODO: Document the precondition of this function. Is cs_vSend locked?
-    void AbortMessage() UNLOCK_FUNCTION(cs_vSend);
-    // TODO: Document the precondition of this function. Is cs_vSend locked?
-    void EndMessage(const char* pszCommand) UNLOCK_FUNCTION(cs_vSend);
-    void PushMessage(const char* pszCommand)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1>
-    void PushMessage(const char* pszCommand, const T1& a1)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    /** Send a message containing a1, serialized with flag flag. */
-    template<typename T1>
-    void PushMessageWithFlag(int flag, const char* pszCommand, const T1& a1)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            WithOrVersion(&ssSend, flag) << a1;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2, typename T3>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2 << a3;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2, typename T3, typename T4>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2 << a3 << a4;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2, typename T3, typename T4, typename T5>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2 << a3 << a4 << a5;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2 << a3 << a4 << a5 << a6;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7, const T8& a8)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7 << a8;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
-    template<typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
-    void PushMessage(const char* pszCommand, const T1& a1, const T2& a2, const T3& a3, const T4& a4, const T5& a5, const T6& a6, const T7& a7, const T8& a8, const T9& a9)
-    {
-        try
-        {
-            BeginMessage(pszCommand);
-            ssSend << a1 << a2 << a3 << a4 << a5 << a6 << a7 << a8 << a9;
-            EndMessage(pszCommand);
-        }
-        catch (...)
-        {
-            AbortMessage();
-            throw;
-        }
-    }
     void CloseSocketDisconnect();
     void copyStats(CNodeStats &stats);
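The block deleted from src/net.h above is a stack of fixed-arity PushMessage overloads (zero through nine serializable arguments) plus PushMessageWithFlag, each repeating the same BeginMessage / serialize / EndMessage / AbortMessage pattern by hand. A single variadic template can express the same idea; the sketch below is illustrative only, and MessageBuilder, SerializeMany, and Send are assumed names rather than the CConnman interface that actually replaces these overloads (the CConnman::BeginMessage context visible in the src/net.cpp hunk hints at the real shape).

// Sketch only (assumed names, not the actual CConnman interface): one variadic
// template replaces the deleted zero-to-nine argument overloads by expanding
// the parameter pack recursively, C++11 style.
#include <iostream>
#include <sstream>
#include <string>

class MessageBuilder {
public:
    template <typename... Args>
    void PushMessage(const std::string& command, const Args&... args)
    {
        std::ostringstream payload;        // stand-in for the CDataStream body
        SerializeMany(payload, args...);   // one expansion covers every arity
        Send(command, payload.str());
    }

private:
    // Base case: no arguments left to serialize.
    static void SerializeMany(std::ostringstream&) {}

    // Recursive case: serialize the first argument, then recurse on the rest.
    template <typename T, typename... Rest>
    static void SerializeMany(std::ostringstream& s, const T& first, const Rest&... rest)
    {
        s << first;
        SerializeMany(s, rest...);
    }

    void Send(const std::string& command, const std::string& payload)
    {
        std::cout << "sending: " << command << " (" << payload.size() << " payload bytes)\n";
    }
};

int main()
{
    MessageBuilder builder;
    builder.PushMessage("ping");                   // zero arguments
    builder.PushMessage("sendcmpct", false, 2ULL); // two arguments
    return 0;
}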