Mirror of https://github.com/verilator/verilator.git

Internals: Fix unbalanced V3LockGuard locking (#4193)

commit 949be301de
parent be429a5800
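The change replaces "unlock the guard, do work, re-lock the guard" sequences with nested scopes, so every VerilatedLockGuard/V3LockGuard is acquired in its constructor and released exactly once by its destructor. A minimal sketch of the two shapes, using plain std::mutex and the standard guards rather than Verilator's classes (all names below are illustrative only):

// Illustrative sketch only; not Verilator code.
#include <mutex>
#include <vector>

std::mutex g_mutex;
std::vector<int> g_work;

// Before: the guard is manually unlocked and re-locked, so the lock/unlock
// pairing depends on control flow and is hard for clang's thread-safety
// analysis (and for readers) to verify.
void drainUnbalanced() {
    std::unique_lock<std::mutex> lock{g_mutex};
    std::vector<int> local;
    local.swap(g_work);
    lock.unlock();  // work on 'local' without holding the mutex
    for (int v : local) { (void)v; /* ... process ... */ }
    lock.lock();  // re-acquire before touching shared state again
    g_work.clear();
}

// After: each guard lives in exactly one scope and is released by its
// destructor, so acquisition and release are always balanced.
void drainBalanced() {
    std::vector<int> local;
    {
        std::lock_guard<std::mutex> lock{g_mutex};
        local.swap(g_work);
    }  // mutex released here
    for (int v : local) { (void)v; /* ... process ... */ }
    {
        std::lock_guard<std::mutex> lock{g_mutex};
        g_work.clear();
    }
}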
@@ -3228,15 +3228,18 @@ void VerilatedAssertOneThread::fatal_different() VL_MT_SAFE {
 //===========================================================================
 // VlDeleter:: Methods
 
-void VlDeleter::deleteAll() {
+void VlDeleter::deleteAll() VL_EXCLUDES(m_mutex) VL_EXCLUDES(m_deleteMutex) VL_MT_SAFE {
     while (true) {
-        VerilatedLockGuard lock{m_mutex};
-        if (m_newGarbage.empty()) break;
-        VerilatedLockGuard deleteLock{m_deleteMutex};
-        std::swap(m_newGarbage, m_toDelete);
-        lock.unlock();  // So destructors can enqueue new objects
+        {
+            VerilatedLockGuard lock{m_mutex};
+            if (m_newGarbage.empty()) break;
+            m_deleteMutex.lock();
+            std::swap(m_newGarbage, m_toDelete);
+            // m_mutex is unlocked here, so destructors can enqueue new objects
+        }
         for (VlDeletable* const objp : m_toDelete) delete objp;
         m_toDelete.clear();
+        m_deleteMutex.unlock();
     }
 }
 
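VlDeleter::deleteAll() above swaps the garbage list into a scratch list while briefly holding m_mutex, then deletes outside that lock so destructors can enqueue new garbage without deadlocking. A self-contained sketch of the same shape with standard containers; the class below is illustrative, not Verilator's VlDeleter:

// Minimal sketch of the swap-under-lock / delete-outside-lock shape used by
// VlDeleter::deleteAll(); types and names here are illustrative.
#include <mutex>
#include <utility>
#include <vector>

struct Deletable { virtual ~Deletable() = default; };

class Deleter {
    std::mutex m_mutex;        // protects m_newGarbage
    std::mutex m_deleteMutex;  // serializes the actual deletion
    std::vector<Deletable*> m_newGarbage;
    std::vector<Deletable*> m_toDelete;

public:
    void enqueue(Deletable* objp) {
        const std::lock_guard<std::mutex> lock{m_mutex};
        m_newGarbage.push_back(objp);
    }
    void deleteAll() {
        while (true) {
            {
                const std::lock_guard<std::mutex> lock{m_mutex};
                if (m_newGarbage.empty()) break;
                m_deleteMutex.lock();
                std::swap(m_newGarbage, m_toDelete);
                // m_mutex is released at the end of this scope, so destructors
                // running below may call enqueue() without deadlocking.
            }
            for (Deletable* const objp : m_toDelete) delete objp;
            m_toDelete.clear();
            m_deleteMutex.unlock();
        }
    }
};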
@@ -222,19 +222,6 @@ public:
     }
     /// Destruct and unlock the mutex
     ~VerilatedLockGuard() VL_RELEASE() { m_mutexr.unlock(); }
-    /// Lock the mutex
-    void lock() VL_ACQUIRE() VL_MT_SAFE { m_mutexr.lock(); }
-    /// Unlock the mutex
-    void unlock() VL_RELEASE() VL_MT_SAFE { m_mutexr.unlock(); }
-    /// Acquire/lock mutex and check for stop request.
-    /// It tries to lock the mutex and if it fails, it check if stop request was send.
-    /// It returns after locking mutex.
-    /// This function should be extracted to V3ThreadPool, but due to clang thread-safety
-    /// limitations it needs to be placed here.
-    void lockCheckStopRequest(std::function<void()> checkStopRequestFunction)
-        VL_ACQUIRE() VL_MT_SAFE {
-        m_mutexr.lockCheckStopRequest(checkStopRequestFunction);
-    }
 };
 
 // Internals: Remember the calling thread at construction time, and make
@@ -173,7 +173,7 @@ public:
         VerilatedLockGuard lock{m_mutex};
         while (m_ready.empty()) {
             m_waiting = true;
-            m_cv.wait(lock);
+            m_cv.wait(m_mutex);
         }
         m_waiting = false;
         // As noted above this is inefficient if our ready list is ever
@@ -75,7 +75,7 @@ public:
     // Get an element from the front of the queue. Blocks if none available
     T get() VL_MT_SAFE_EXCLUDES(m_mutex) {
         VerilatedLockGuard lock{m_mutex};
-        m_cv.wait(lock, [this]() VL_REQUIRES(m_mutex) { return !m_queue.empty(); });
+        m_cv.wait(m_mutex, [this]() VL_REQUIRES(m_mutex) { return !m_queue.empty(); });
         assert(!m_queue.empty());
         T value = m_queue.front();
         m_queue.pop_front();
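This hunk, like several around it, hands the mutex itself to the condition variable instead of the guard, since the guard no longer exposes lock()/unlock(). The sketch below assumes a std::condition_variable_any-style wait(), which accepts any BasicLockable and unlocks/relocks it around the sleep while the enclosing scoped guard still performs the single final release; it is an illustration, not Verilator's queue class or its condition-variable wrapper:

// Sketch of waiting on the mutex itself while a scoped guard owns it.
#include <cassert>
#include <condition_variable>
#include <deque>
#include <mutex>

template <typename T>
class BlockingQueue {
    std::mutex m_mutex;
    std::condition_variable_any m_cv;
    std::deque<T> m_queue;

public:
    void put(T value) {
        const std::lock_guard<std::mutex> lock{m_mutex};
        m_queue.push_back(std::move(value));
        m_cv.notify_one();
    }
    // Blocks if no element is available.
    T get() {
        const std::lock_guard<std::mutex> lock{m_mutex};
        // wait() unlocks m_mutex while sleeping and re-locks it before
        // returning; the guard above still performs the one final unlock.
        m_cv.wait(m_mutex, [this] { return !m_queue.empty(); });
        assert(!m_queue.empty());
        T value = std::move(m_queue.front());
        m_queue.pop_front();
        return value;
    }
};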
@@ -482,7 +482,7 @@ VL_ATTR_NOINLINE void VerilatedTrace<VL_SUB_T, VL_BUF_T>::ParallelWorkerData::wa
     // We have been spinning for a while, so yield the thread
     VerilatedLockGuard lock{m_mutex};
     m_waiting = true;
-    m_cv.wait(lock, [this] { return m_ready.load(std::memory_order_relaxed); });
+    m_cv.wait(m_mutex, [this] { return m_ready.load(std::memory_order_relaxed); });
    m_waiting = false;
 }
 
@@ -1138,7 +1138,7 @@ public:
     }
 
     // Deletes all queued garbage objects.
-    void deleteAll() VL_MT_SAFE;
+    void deleteAll() VL_EXCLUDES(m_mutex) VL_EXCLUDES(m_deleteMutex) VL_MT_SAFE;
 };
 
 //===================================================================
@@ -141,19 +141,6 @@ public:
     }
     /// Destruct and unlock the mutex
     ~V3LockGuardImp() VL_RELEASE() { m_mutexr.unlock(); }
-    /// Lock the mutex
-    void lock() VL_ACQUIRE() VL_MT_SAFE { m_mutexr.lock(); }
-    /// Unlock the mutex
-    void unlock() VL_RELEASE() VL_MT_SAFE { m_mutexr.unlock(); }
-    /// Acquire/lock mutex and check for stop request.
-    /// It tries to lock the mutex and if it fails, it check if stop request was send.
-    /// It returns after locking mutex.
-    /// This function should be extracted to V3ThreadPool, but due to clang thread-safety
-    /// limitations it needs to be placed here.
-    void lockCheckStopRequest(std::function<void()> checkStopRequestFunction)
-        VL_ACQUIRE() VL_MT_SAFE {
-        m_mutexr.lockCheckStopRequest(checkStopRequestFunction);
-    }
 };
 
 using V3Mutex = V3MutexImp<std::mutex>;
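Both guard classes now keep only their constructor (acquire) and destructor (release); lock(), unlock(), and lockCheckStopRequest() are gone, which keeps clang's thread-safety analysis of a scoped capability trivially balanced. A minimal sketch using the raw clang attributes directly; Verilator wraps these in its own annotation macros and mutex types, so everything below is illustrative only:

// Minimal scoped-capability guard sketch with raw clang thread-safety
// attributes; names and macros here are illustrative, not Verilator's.
#if defined(__clang__)
#define CAPABILITY(x) __attribute__((capability(x)))
#define SCOPED_CAPABILITY __attribute__((scoped_lockable))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define SCOPED_CAPABILITY
#define ACQUIRE(...)
#define RELEASE(...)
#endif

#include <mutex>

class CAPABILITY("mutex") Mutex {
    std::mutex m_mutex;

public:
    void lock() ACQUIRE() { m_mutex.lock(); }
    void unlock() RELEASE() { m_mutex.unlock(); }
};

// Acquires in the constructor, releases in the destructor, and exposes no
// other locking entry points, so the analysis (and the reader) always sees a
// balanced acquire/release per scope.
class SCOPED_CAPABILITY LockGuard {
    Mutex& m_mutexr;

public:
    explicit LockGuard(Mutex& mutexr) ACQUIRE(mutexr) : m_mutexr{mutexr} { m_mutexr.lock(); }
    ~LockGuard() RELEASE() { m_mutexr.unlock(); }
    LockGuard(const LockGuard&) = delete;
    LockGuard& operator=(const LockGuard&) = delete;
};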
@@ -23,29 +23,33 @@
 // c++11 requires definition of static constexpr as well as declaration
 constexpr unsigned int V3ThreadPool::FUTUREWAITFOR_MS;
 
-void V3ThreadPool::resize(unsigned n) VL_MT_UNSAFE {
+void V3ThreadPool::resize(unsigned n) VL_MT_UNSAFE VL_EXCLUDES(m_mutex)
+    VL_EXCLUDES(m_stoppedJobsMutex) {
     // This function is not thread-safe and can result in race between threads
     UASSERT(V3MutexConfig::s().lockConfig(),
             "Mutex config needs to be locked before starting ThreadPool");
-    V3LockGuard lock{m_mutex};
-    V3LockGuard stoppedJobsLock{m_stoppedJobsMutex};
-    UASSERT(m_queue.empty(), "Resizing busy thread pool");
-    // Shut down old threads
-    m_shutdown = true;
-    m_stoppedJobs = 0;
-    m_cv.notify_all();
-    m_stoppedJobsCV.notify_all();
-    stoppedJobsLock.unlock();
-    lock.unlock();
+    {
+        V3LockGuard lock{m_mutex};
+        V3LockGuard stoppedJobsLock{m_stoppedJobsMutex};
+
+        UASSERT(m_queue.empty(), "Resizing busy thread pool");
+        // Shut down old threads
+        m_shutdown = true;
+        m_stoppedJobs = 0;
+        m_cv.notify_all();
+        m_stoppedJobsCV.notify_all();
+    }
     while (!m_workers.empty()) {
         m_workers.front().join();
         m_workers.pop_front();
     }
-    lock.lock();
-    // Start new threads
-    m_shutdown = false;
-    for (unsigned int i = 1; i < n; ++i) {
-        m_workers.emplace_back(&V3ThreadPool::startWorker, this, i);
+    if (n > 1) {
+        V3LockGuard lock{m_mutex};
+        // Start new threads
+        m_shutdown = false;
+        for (unsigned int i = 1; i < n; ++i) {
+            m_workers.emplace_back(&V3ThreadPool::startWorker, this, i);
+        }
     }
 }
 
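The resize() rewrite above keeps the original ordering but expresses it with scopes: shutdown is signalled inside a short-lived scope, the joins run with no lock held (a worker must re-acquire m_mutex to observe m_shutdown, so joining under the lock could deadlock), and the restart happens under a fresh guard. A condensed sketch of that shape with std::thread; the pool below is illustrative, not V3ThreadPool:

// Condensed sketch of "notify under lock, join outside the lock, restart
// under a fresh lock"; class and member names are illustrative.
#include <condition_variable>
#include <list>
#include <mutex>
#include <thread>

class MiniPool {
    std::mutex m_mutex;
    std::condition_variable_any m_cv;
    std::list<std::thread> m_workers;
    bool m_shutdown = false;

    void workerLoop() {
        const std::lock_guard<std::mutex> lock{m_mutex};
        m_cv.wait(m_mutex, [this] { return m_shutdown; });
    }

public:
    void resize(unsigned n) {
        {   // Scope 1: flip the flag and wake the workers, then drop the lock.
            const std::lock_guard<std::mutex> lock{m_mutex};
            m_shutdown = true;
            m_cv.notify_all();
        }
        // Join with no lock held: each worker needs m_mutex to see m_shutdown.
        while (!m_workers.empty()) {
            m_workers.front().join();
            m_workers.pop_front();
        }
        if (n > 1) {  // Scope 2: restart under a fresh guard.
            const std::lock_guard<std::mutex> lock{m_mutex};
            m_shutdown = false;
            for (unsigned i = 1; i < n; ++i) {
                m_workers.emplace_back(&MiniPool::workerLoop, this);
            }
        }
    }
    ~MiniPool() { resize(0); }
};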
@@ -60,7 +64,7 @@ void V3ThreadPool::workerJobLoop(int id) VL_MT_SAFE {
         job_t job;
         {
             V3LockGuard lock(m_mutex);
-            m_cv.wait(lock, [&]() VL_REQUIRES(m_mutex) {
+            m_cv.wait(m_mutex, [&]() VL_REQUIRES(m_mutex) {
                 return !m_queue.empty() || m_shutdown || m_stopRequested;
             });
             if (m_shutdown) return;  // Terminate if requested
@@ -100,9 +104,9 @@ void V3ThreadPool::requestExclusiveAccess(const V3ThreadPool::job_t&& exclusiveA
         V3LockGuard stoppedJobLock{m_stoppedJobsMutex};
         // if some other job already requested exclusive access
         // wait until it stops
-        if (stopRequested()) { waitStopRequested(stoppedJobLock); }
+        if (stopRequested()) { waitStopRequested(); }
         m_stopRequested = true;
-        waitOtherThreads(stoppedJobLock);
+        waitOtherThreads();
         m_exclusiveAccess = true;
         exclusiveAccessJob();
         m_exclusiveAccess = false;
@@ -114,25 +118,26 @@ void V3ThreadPool::requestExclusiveAccess(const V3ThreadPool::job_t&& exclusiveA
 bool V3ThreadPool::waitIfStopRequested() VL_MT_SAFE {
     V3LockGuard stoppedJobLock(m_stoppedJobsMutex);
     if (!stopRequested()) return false;
-    waitStopRequested(stoppedJobLock);
+    waitStopRequested();
     return true;
 }
 
-void V3ThreadPool::waitStopRequested(V3LockGuard& stoppedJobLock) VL_REQUIRES(m_stoppedJobsMutex) {
+void V3ThreadPool::waitStopRequested() VL_REQUIRES(m_stoppedJobsMutex) {
     ++m_stoppedJobs;
     m_stoppedJobsCV.notify_all();
-    m_stoppedJobsCV.wait(
-        stoppedJobLock, [&]() VL_REQUIRES(m_stoppedJobsMutex) { return !m_stopRequested.load(); });
+    m_stoppedJobsCV.wait(m_stoppedJobsMutex, [&]() VL_REQUIRES(m_stoppedJobsMutex) {
+        return !m_stopRequested.load();
+    });
     --m_stoppedJobs;
     m_stoppedJobsCV.notify_all();
 }
 
-void V3ThreadPool::waitOtherThreads(V3LockGuard& stoppedJobLock) VL_MT_SAFE_EXCLUDES(m_mutex)
+void V3ThreadPool::waitOtherThreads() VL_MT_SAFE_EXCLUDES(m_mutex)
     VL_REQUIRES(m_stoppedJobsMutex) {
     ++m_stoppedJobs;
     m_stoppedJobsCV.notify_all();
     m_cv.notify_all();
-    m_stoppedJobsCV.wait(stoppedJobLock, [&]() VL_REQUIRES(m_stoppedJobsMutex) {
+    m_stoppedJobsCV.wait(m_stoppedJobsMutex, [&]() VL_REQUIRES(m_stoppedJobsMutex) {
         // count also the main thread
         return m_stoppedJobs == (m_workers.size() + 1);
     });
@@ -152,19 +157,20 @@ void V3ThreadPool::selfTest() {
         });
     };
     auto secondJob = [&](int sleep) -> void {
-        V3LockGuard lock{commonMutex};
-        lock.unlock();
+        commonMutex.lock();
+        commonMutex.unlock();
         s().waitIfStopRequested();
-        lock.lock();
+        V3LockGuard lock{commonMutex};
         std::this_thread::sleep_for(std::chrono::milliseconds{sleep});
         commonValue = 1000;
     };
     auto thirdJob = [&](int sleep) -> void {
-        V3LockGuard lock{commonMutex};
-        std::this_thread::sleep_for(std::chrono::milliseconds{sleep});
-        lock.unlock();
+        {
+            V3LockGuard lock{commonMutex};
+            std::this_thread::sleep_for(std::chrono::milliseconds{sleep});
+        }
         s().requestExclusiveAccess([&]() { firstJob(sleep); });
-        lock.lock();
+        V3LockGuard lock{commonMutex};
         commonValue = 1000;
     };
     std::list<std::future<void>> futures;
@@ -67,9 +67,10 @@ class V3ThreadPool final {
     // CONSTRUCTORS
    V3ThreadPool() = default;
     ~V3ThreadPool() {
-        V3LockGuard lock{m_mutex};
-        m_queue = {};  // make sure queue is empty
-        lock.unlock();
+        {
+            V3LockGuard lock{m_mutex};
+            m_queue = {};  // make sure queue is empty
+        }
         resize(0);
     }
 
@@ -82,7 +83,7 @@ public:
     }
 
     // Resize thread pool to n workers (queue must be empty)
-    void resize(unsigned n) VL_MT_UNSAFE;
+    void resize(unsigned n) VL_MT_UNSAFE VL_EXCLUDES(m_mutex) VL_EXCLUDES(m_stoppedJobsMutex);
 
     // Enqueue a job for asynchronous execution
     // Due to missing support for lambda annotations in c++11,
@@ -165,11 +166,10 @@ private:
     }
 
     // Waits until exclusive access job completes its job
-    void waitStopRequested(V3LockGuard& stoppedJobLock) VL_REQUIRES(m_stoppedJobsMutex);
+    void waitStopRequested() VL_REQUIRES(m_stoppedJobsMutex);
 
     // Waits until all other jobs are stopped
-    void waitOtherThreads(V3LockGuard& stoppedJobLock) VL_MT_SAFE_EXCLUDES(m_mutex)
-        VL_REQUIRES(m_stoppedJobsMutex);
+    void waitOtherThreads() VL_MT_SAFE_EXCLUDES(m_mutex) VL_REQUIRES(m_stoppedJobsMutex);
 
     template <typename T>
     void pushJob(std::shared_ptr<std::promise<T>>& prom, std::function<T()>&& f) VL_MT_SAFE;