Split CNode::cs_vSend: message processing and message sending
cs_vSend is used for two purposes: to lock the data structures used to queue messages for placing on the wire, and to ensure SendMessages is only called once at a time per node. I believe SendMessages used to access some of the vSendMsg state, but it no longer does, so these two uses do not need to share a mutex; keeping them on the same mutex also makes deadlocks much more likely.
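The idea can be sketched in isolation. The following is a minimal, hypothetical illustration, not the actual CNode code (ExampleNode, send_queue_mutex, send_processing_mutex, HasQueuedSends and SendMessagesOnce are made-up names): one mutex guards only the queued wire data, while a second mutex is taken solely so that a single message-sending pass runs per node at a time, which lets the socket thread take the queue lock unconditionally.

// Minimal illustrative sketch, not Bitcoin Core code. The names here are
// hypothetical; in the real change the queue keeps cs_vSend and the new
// per-node processing lock is cs_sendProcessing.
#include <deque>
#include <mutex>
#include <vector>

struct ExampleNode {
    // Guards only the data queued for the wire (the vSendMsg analogue).
    std::mutex send_queue_mutex;
    std::deque<std::vector<unsigned char>> send_queue;

    // Taken only so that one message-sending pass runs at a time for this
    // node; it is never needed just to touch send_queue.
    std::mutex send_processing_mutex;
};

// Socket-handling thread: with the split, this can take the queue lock
// unconditionally (LOCK rather than TRY_LOCK), because the long-running
// per-node processing no longer holds it.
bool HasQueuedSends(ExampleNode& node)
{
    std::lock_guard<std::mutex> lock(node.send_queue_mutex);
    return !node.send_queue.empty();
}

// Message-handler thread: serialize the per-node send pass on its own
// mutex, and only briefly take the queue lock when actually enqueueing.
void SendMessagesOnce(ExampleNode& node)
{
    std::unique_lock<std::mutex> processing(node.send_processing_mutex, std::try_to_lock);
    if (!processing.owns_lock())
        return; // another thread is already doing the send pass for this node

    std::vector<unsigned char> message; // ... build the next message here ...
    std::lock_guard<std::mutex> lock(node.send_queue_mutex);
    node.send_queue.push_back(std::move(message));
}

In the actual change below, the queue keeps cs_vSend and the new per-node processing lock is the added cs_sendProcessing member.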
parent 8b66bf74e2
commit d7c58ad514
2 changed files with 11 additions and 13 deletions

src/net.cpp
@@ -1147,14 +1147,12 @@ void CConnman::ThreadSocketHandler()
             // * Hand off all complete messages to the processor, to be handled without
             //   blocking here.
             {
-                TRY_LOCK(pnode->cs_vSend, lockSend);
-                if (lockSend) {
+                LOCK(pnode->cs_vSend);
                 if (!pnode->vSendMsg.empty()) {
                     FD_SET(pnode->hSocket, &fdsetSend);
                     continue;
                 }
-                }
             }
             {
                 if (!pnode->fPauseRecv)
                     FD_SET(pnode->hSocket, &fdsetRecv);
@@ -1272,14 +1270,12 @@ void CConnman::ThreadSocketHandler()
                 continue;
             if (FD_ISSET(pnode->hSocket, &fdsetSend))
             {
-                TRY_LOCK(pnode->cs_vSend, lockSend);
-                if (lockSend) {
+                LOCK(pnode->cs_vSend);
                 size_t nBytes = SocketSendData(pnode);
                 if (nBytes) {
                     RecordBytesSent(nBytes);
                 }
-                }
             }

             //
             // Inactivity checking
@@ -1875,7 +1871,7 @@ void CConnman::ThreadMessageHandler()

             // Send messages
             {
-                TRY_LOCK(pnode->cs_vSend, lockSend);
+                TRY_LOCK(pnode->cs_sendProcessing, lockSend);
                 if (lockSend)
                     GetNodeSignals().SendMessages(pnode, *this, flagInterruptMsgProc);
             }
src/net.h

@@ -618,6 +618,8 @@ public:
     std::list<CNetMessage> vProcessMsg;
     size_t nProcessQueueSize;

+    CCriticalSection cs_sendProcessing;
+
     std::deque<CInv> vRecvGetData;
     uint64_t nRecvBytes;
     std::atomic<int> nRecvVersion;