lbrycrd/src/sync.h
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_SYNC_H
#define BITCOIN_SYNC_H
#include "threadsafety.h"
#include <boost/thread/condition_variable.hpp>
#include <boost/thread/mutex.hpp>
#include <condition_variable>
#include <thread>
#include <mutex>
/////////////////////////////////////////////////
//                                             //
// THE SIMPLE DEFINITION, EXCLUDING DEBUG CODE //
//                                             //
/////////////////////////////////////////////////
/*
CCriticalSection mutex;
    std::recursive_mutex mutex;

LOCK(mutex);
    std::unique_lock<std::recursive_mutex> criticalblock(mutex);

LOCK2(mutex1, mutex2);
    std::unique_lock<std::recursive_mutex> criticalblock1(mutex1);
    std::unique_lock<std::recursive_mutex> criticalblock2(mutex2);

TRY_LOCK(mutex, name);
    std::unique_lock<std::recursive_mutex> name(mutex, std::try_to_lock);

ENTER_CRITICAL_SECTION(mutex); // no RAII
    mutex.lock();

LEAVE_CRITICAL_SECTION(mutex); // no RAII
    mutex.unlock();
*/
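// Illustrative only (not part of the original header): a minimal sketch of how
// the macros above are typically used. The names cs_state/g_state are
// hypothetical; kept inside a comment so it does not affect compilation.
/*
static CCriticalSection cs_state;
static int g_state GUARDED_BY(cs_state) = 0;

void BumpState()
{
    LOCK(cs_state); // RAII: the lock is released when the scope ends
    g_state += 1;
}

bool TryBumpState()
{
    TRY_LOCK(cs_state, lockState); // non-blocking attempt
    if (!lockState)
        return false;              // lock was contended; nothing happened
    g_state += 1;
    return true;
}
*/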
///////////////////////////////
//                           //
// THE ACTUAL IMPLEMENTATION //
//                           //
///////////////////////////////

/**
 * Template mixin that adds -Wthread-safety locking
 * annotations to a subset of the mutex API.
 */
template <typename PARENT>
class LOCKABLE AnnotatedMixin : public PARENT
{
public:
    void lock() EXCLUSIVE_LOCK_FUNCTION()
    {
        PARENT::lock();
    }

    void unlock() UNLOCK_FUNCTION()
    {
        PARENT::unlock();
    }

    bool try_lock() EXCLUSIVE_TRYLOCK_FUNCTION(true)
    {
        return PARENT::try_lock();
    }
};
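// Illustrative only: with clang's -Wthread-safety analysis, the LOCKABLE and
// EXCLUSIVE_LOCK_FUNCTION annotations exposed by AnnotatedMixin let the compiler
// flag access to guarded data without the lock held. cs_example/nCounter are
// hypothetical names; the warning text below is approximate.
/*
AnnotatedMixin<std::recursive_mutex> cs_example;
int nCounter GUARDED_BY(cs_example) = 0;

void Increment()
{
    nCounter++; // warning: writing variable 'nCounter' requires holding mutex 'cs_example'
}
*/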
#ifdef DEBUG_LOCKORDER
void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false);
void LeaveCritical();
std::string LocksHeld();
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void DeleteLock(void* cs);
#else
void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
void static inline LeaveCritical() {}
void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
void static inline DeleteLock(void* cs) {}
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
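// Illustrative only: AssertLockHeld/AssertLockNotHeld are typically placed at the
// top of functions that document a locking requirement. Under DEBUG_LOCKORDER a
// violation aborts with a diagnostic; otherwise the calls compile away. cs_main
// stands in here for whichever CCriticalSection the caller is expected to hold.
/*
void UpdateStateLockedExample()
{
    AssertLockHeld(cs_main);    // caller must already hold cs_main
    // ... touch state guarded by cs_main ...
}

void WaitForQueueDrainExample()
{
    AssertLockNotHeld(cs_main); // caller must NOT hold cs_main, since we may block
    // ... wait for background callbacks to complete ...
}
*/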
/**
 * Wrapped mutex: supports recursive locking, but no waiting
 * TODO: We should move away from using the recursive lock by default.
 */
class CCriticalSection : public AnnotatedMixin<std::recursive_mutex>
{
public:
    ~CCriticalSection() {
        DeleteLock((void*)this);
    }
};
/** Wrapped mutex: supports waiting but not recursive locking */
typedef AnnotatedMixin<std::mutex> CWaitableCriticalSection;
/** Just a typedef for std::condition_variable, can be wrapped later if desired */
typedef std::condition_variable CConditionVariable;
/** Just a typedef for std::unique_lock, can be wrapped later if desired */
typedef std::unique_lock<std::mutex> WaitableLock;
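// Illustrative only: CWaitableCriticalSection pairs with CConditionVariable and
// WaitableLock for wait/notify patterns (the recursive CCriticalSection cannot be
// used with a condition variable). The names below are hypothetical.
/*
CWaitableCriticalSection csQueue;
CConditionVariable condQueue;
bool fQueueReady = false;

void Consumer()
{
    WaitableLock lock(csQueue);
    condQueue.wait(lock, [] { return fQueueReady; }); // releases the lock while waiting
    // ... consume ...
}

void Producer()
{
    {
        WaitableLock lock(csQueue);
        fQueueReady = true;
    }
    condQueue.notify_one();
}
*/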
#ifdef DEBUG_LOCKCONTENTION
void PrintLockContention(const char* pszName, const char* pszFile, int nLine);
#endif
/** Wrapper around std::unique_lock<CCriticalSection> */
class SCOPED_LOCKABLE CCriticalBlock
{
private:
    std::unique_lock<CCriticalSection> lock;

    void Enter(const char* pszName, const char* pszFile, int nLine)
    {
        EnterCritical(pszName, pszFile, nLine, (void*)(lock.mutex()));
#ifdef DEBUG_LOCKCONTENTION
        if (!lock.try_lock()) {
            PrintLockContention(pszName, pszFile, nLine);
#endif
            lock.lock();
#ifdef DEBUG_LOCKCONTENTION
        }
#endif
    }

    bool TryEnter(const char* pszName, const char* pszFile, int nLine)
    {
        EnterCritical(pszName, pszFile, nLine, (void*)(lock.mutex()), true);
        lock.try_lock();
        if (!lock.owns_lock())
            LeaveCritical();
        return lock.owns_lock();
    }

public:
    CCriticalBlock(CCriticalSection& mutexIn, const char* pszName, const char* pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(mutexIn) : lock(mutexIn, std::defer_lock)
    {
        if (fTry)
            TryEnter(pszName, pszFile, nLine);
        else
            Enter(pszName, pszFile, nLine);
    }

    CCriticalBlock(CCriticalSection* pmutexIn, const char* pszName, const char* pszFile, int nLine, bool fTry = false) EXCLUSIVE_LOCK_FUNCTION(pmutexIn)
    {
        if (!pmutexIn) return;

        lock = std::unique_lock<CCriticalSection>(*pmutexIn, std::defer_lock);
        if (fTry)
            TryEnter(pszName, pszFile, nLine);
        else
            Enter(pszName, pszFile, nLine);
    }

    ~CCriticalBlock() UNLOCK_FUNCTION()
    {
        if (lock.owns_lock())
            LeaveCritical();
    }

    operator bool()
    {
        return lock.owns_lock();
    }
};
#define PASTE(x, y) x ## y
#define PASTE2(x, y) PASTE(x, y)
#define LOCK(cs) CCriticalBlock PASTE2(criticalblock, __COUNTER__)(cs, #cs, __FILE__, __LINE__)
#define LOCK2(cs1, cs2) CCriticalBlock criticalblock1(cs1, #cs1, __FILE__, __LINE__), criticalblock2(cs2, #cs2, __FILE__, __LINE__)
#define TRY_LOCK(cs, name) CCriticalBlock name(cs, #cs, __FILE__, __LINE__, true)
#define ENTER_CRITICAL_SECTION(cs)                            \
    {                                                         \
        EnterCritical(#cs, __FILE__, __LINE__, (void*)(&cs)); \
        (cs).lock();                                          \
    }

#define LEAVE_CRITICAL_SECTION(cs) \
    {                              \
        (cs).unlock();             \
        LeaveCritical();           \
    }
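// Illustrative only: ENTER_CRITICAL_SECTION/LEAVE_CRITICAL_SECTION are the
// non-RAII escape hatch for locks whose lifetime cannot follow a single scope,
// e.g. acquired in one function and released in another. cs_example is hypothetical.
/*
CCriticalSection cs_example;

void BeginWork()
{
    ENTER_CRITICAL_SECTION(cs_example); // must be paired with LEAVE_CRITICAL_SECTION
    // ... start long-running work while holding the lock ...
}

void EndWork()
{
    // ... finish the work ...
    LEAVE_CRITICAL_SECTION(cs_example);
}
*/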
class CSemaphore
{
private:
    boost::condition_variable condition;
    boost::mutex mutex;
    int value;

public:
    explicit CSemaphore(int init) : value(init) {}

    void wait()
    {
        boost::unique_lock<boost::mutex> lock(mutex);
        while (value < 1) {
            condition.wait(lock);
        }
        value--;
    }

    bool try_wait()
    {
        boost::unique_lock<boost::mutex> lock(mutex);
        if (value < 1)
            return false;
        value--;
        return true;
    }

    void post()
    {
        {
            boost::unique_lock<boost::mutex> lock(mutex);
            value++;
        }
        condition.notify_one();
    }
};
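// Illustrative only: CSemaphore is a counting semaphore; wait() blocks until a
// slot is available, post() returns one. The slot count and names are hypothetical.
/*
CSemaphore semSlots(8); // at most 8 concurrent holders

void DoBoundedWork()
{
    semSlots.wait();   // blocks if all 8 slots are taken
    // ... work ...
    semSlots.post();   // release the slot for the next waiter
}
*/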
/** RAII-style semaphore lock */
class CSemaphoreGrant
{
private:
    CSemaphore* sem;
    bool fHaveGrant;

public:
    void Acquire()
    {
        if (fHaveGrant)
            return;
        sem->wait();
        fHaveGrant = true;
    }

    void Release()
    {
        if (!fHaveGrant)
            return;
        sem->post();
        fHaveGrant = false;
    }

    bool TryAcquire()
    {
        if (!fHaveGrant && sem->try_wait())
            fHaveGrant = true;
        return fHaveGrant;
    }

    void MoveTo(CSemaphoreGrant& grant)
    {
        grant.Release();
        grant.sem = sem;
        grant.fHaveGrant = fHaveGrant;
        fHaveGrant = false;
    }

    CSemaphoreGrant() : sem(nullptr), fHaveGrant(false) {}

    explicit CSemaphoreGrant(CSemaphore& sema, bool fTry = false) : sem(&sema), fHaveGrant(false)
    {
        if (fTry)
            TryAcquire();
        else
            Acquire();
    }

    ~CSemaphoreGrant()
    {
        Release();
    }

    operator bool() const
    {
        return fHaveGrant;
    }
};
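// Illustrative only: CSemaphoreGrant holds a semaphore slot for its lifetime and
// returns it in the destructor; the bool conversion reports whether a slot was
// obtained when constructed with fTry = true. semSlots is hypothetical.
/*
CSemaphore semSlots(8);

bool TryDoBoundedWork()
{
    CSemaphoreGrant grant(semSlots, true); // non-blocking attempt
    if (!grant)
        return false; // no slot available
    // ... work; the slot is released automatically when grant goes out of scope ...
    return true;
}
*/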
#endif // BITCOIN_SYNC_H