Expose more parallelism with relaxed atomics (suggested in #9938). Fix a test to check the exclusive or of two properties rather than just or.
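The test counters are only inspected at points where the queue or the thread group has already synchronized with the worker threads, so atomicity is all the tests need from these variables; the relaxed operations drop the sequentially consistent ordering that a plain ++ or += on a std::atomic implies. A minimal standalone sketch of that pattern, under that assumption (not taken from this diff; names are illustrative):

// Standalone sketch: a counter bumped from several threads with relaxed ordering.
// The final value is only read after join(), so atomicity is all that is needed.
#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

static std::atomic<size_t> n_calls{0};

int main()
{
    std::vector<std::thread> workers;
    for (int t = 0; t < 4; ++t) {
        workers.emplace_back([] {
            for (int i = 0; i < 1000; ++i) {
                // Equivalent to ++n_calls, but without the seq_cst ordering.
                n_calls.fetch_add(1, std::memory_order_relaxed);
            }
        });
    }
    for (auto& w : workers) w.join();
    // join() synchronizes-with the completion of each thread, so this read
    // is guaranteed to observe every increment.
    assert(n_calls.load(std::memory_order_relaxed) == 4 * 1000);
    return 0;
}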

Jeremy Rubin 2017-03-27 14:25:18 -04:00
parent 929fd7276c
commit 8c2f4b8882


@@ -38,7 +38,7 @@ struct FakeCheckCheckCompletion {
     static std::atomic<size_t> n_calls;
     bool operator()()
     {
-        ++n_calls;
+        n_calls.fetch_add(1, std::memory_order_relaxed);
         return true;
     }
     void swap(FakeCheckCheckCompletion& x){};
@@ -88,15 +88,15 @@ struct MemoryCheck {
         //
         // Really, copy constructor should be deletable, but CCheckQueue breaks
         // if it is deleted because of internal push_back.
-        fake_allocated_memory += b;
+        fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
     };
     MemoryCheck(bool b_) : b(b_)
     {
-        fake_allocated_memory += b;
+        fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
     };
-    ~MemoryCheck(){
-        fake_allocated_memory -= b;
+    ~MemoryCheck()
+    {
+        fake_allocated_memory.fetch_sub(b, std::memory_order_relaxed);
     };
     void swap(MemoryCheck& x) { std::swap(b, x.b); };
 };
@@ -117,9 +117,9 @@ struct FrozenCleanupCheck {
     {
         if (should_freeze) {
             std::unique_lock<std::mutex> l(m);
-            nFrozen = 1;
+            nFrozen.store(1, std::memory_order_relaxed);
             cv.notify_one();
-            cv.wait(l, []{ return nFrozen == 0;});
+            cv.wait(l, []{ return nFrozen.load(std::memory_order_relaxed) == 0;});
         }
     }
     void swap(FrozenCleanupCheck& x){std::swap(should_freeze, x.should_freeze);};
@@ -262,7 +262,7 @@ BOOST_AUTO_TEST_CASE(test_CheckQueue_Recovers_From_Failure)
                 control.Add(vChecks);
             }
             bool r =control.Wait();
-            BOOST_REQUIRE(r || end_fails);
+            BOOST_REQUIRE(r != end_fails);
         }
     }
     tg.interrupt_all();
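The assertion change above is the "exclusive or" fix from the commit message: r || end_fails still passes if Wait() wrongly reports success for a batch that was built to fail, while r != end_fails requires exactly one of the two properties to hold. A standalone sketch of the difference (hypothetical helper names, not part of this diff):

#include <cassert>

// r: result of Wait(); end_fails: the batch was constructed with a failing check.
static bool old_check(bool r, bool end_fails) { return r || end_fails; } // inclusive or
static bool new_check(bool r, bool end_fails) { return r != end_fails; } // exclusive or

int main()
{
    // A batch that should fail but reports success slips past the old check
    // and is caught by the new one.
    assert(old_check(true, true));
    assert(!new_check(true, true));
    // The two intended outcomes still satisfy the stricter check.
    assert(new_check(true, false));  // all checks pass, Wait() succeeds
    assert(new_check(false, true));  // a check fails, Wait() reports failure
    return 0;
}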
@@ -337,7 +337,7 @@ BOOST_AUTO_TEST_CASE(test_CheckQueue_Memory)
     tg.join_all();
 }
 // Test that a new verification cannot occur until all checks
 // have been destructed
 BOOST_AUTO_TEST_CASE(test_CheckQueue_FrozenCleanup)
 {
@@ -361,11 +361,14 @@ BOOST_AUTO_TEST_CASE(test_CheckQueue_FrozenCleanup)
         std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
         // Wait until the queue has finished all jobs and frozen
         FrozenCleanupCheck::cv.wait(l, [](){return FrozenCleanupCheck::nFrozen == 1;});
-        // Try to get control of the queue a bunch of times
-        for (auto x = 0; x < 100 && !fails; ++x) {
-            fails = queue->ControlMutex.try_lock();
-        }
-        // Unfreeze
+    }
+    // Try to get control of the queue a bunch of times
+    for (auto x = 0; x < 100 && !fails; ++x) {
+        fails = queue->ControlMutex.try_lock();
+    }
+    {
+        // Unfreeze (we need the lock in case of spurious wakeup)
+        std::unique_lock<std::mutex> l(FrozenCleanupCheck::m);
         FrozenCleanupCheck::nFrozen = 0;
     }
     // Awaken frozen destructor