// -*- mode: C++; c-file-style: "cc-mode" -*-
//=============================================================================
//
// Copyright 2012-2020 by Wilson Snyder. This program is free software; you can
// redistribute it and/or modify it under the terms of either the GNU
// Lesser General Public License Version 3 or the Perl Artistic License
// Version 2.0.
// SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0
//
//=============================================================================
///
/// \file
/// \brief Thread pool for verilated modules
///
//=============================================================================

#include "verilatedos.h"

#include "verilated_threads.h"

#include <cstdio>
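// Statistic: cumulative yield count, reported by profileDump() as "VLPROF stat yields".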
std::atomic<vluint64_t> VlMTaskVertex::s_yields;
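// Per-thread profiling buffer; allocated by setupProfilingClientThread() and
// freed by tearDownProfilingClientThread() below.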
VL_THREAD_LOCAL VlThreadPool::ProfileTrace* VlThreadPool::t_profilep = nullptr;
//=============================================================================
// VlMTaskVertex

VlMTaskVertex::VlMTaskVertex(vluint32_t upstreamDepCount)
    : m_upstreamDepsDone{0}
    , m_upstreamDepCount{upstreamDepCount} {
    assert(atomic_is_lock_free(&m_upstreamDepsDone));
}

//=============================================================================
// VlWorkerThread
VlWorkerThread::VlWorkerThread(VlThreadPool* poolp, bool profiling)
    : m_waiting{false}
    , m_poolp{poolp}
    , m_profiling{profiling}
    , m_exiting{false}
    // Must init this last -- after setting up fields that it might read:
    , m_cthread{startWorker, this} {}
VlWorkerThread::~VlWorkerThread() {
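    // Ask the worker loop to exit, then wake the thread in case it is waiting for work.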
    m_exiting.store(true, std::memory_order_release);
    wakeUp();
    // The thread should exit; join it.
    m_cthread.join();
}

void VlWorkerThread::workerLoop() {
    if (VL_UNLIKELY(m_profiling)) m_poolp->setupProfilingClientThread();
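    // Work record reused across iterations; m_fnp == nullptr means no work is pending.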
    ExecRec work;
    work.m_fnp = nullptr;

    while (true) {
        if (VL_LIKELY(!work.m_fnp)) dequeWork(&work);

        // Do this here, not above, to avoid a race with the destructor.
        if (VL_UNLIKELY(m_exiting.load(std::memory_order_acquire))) break;

        if (VL_LIKELY(work.m_fnp)) {
            work.m_fnp(work.m_evenCycle, work.m_sym);
            work.m_fnp = nullptr;
        }
    }

    if (VL_UNLIKELY(m_profiling)) m_poolp->tearDownProfilingClientThread();
}
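// Thread entry point: runs the worker loop until the destructor requests exit.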
void VlWorkerThread::startWorker(VlWorkerThread* workerp) { workerp->workerLoop(); }

//=============================================================================
// VlThreadPool

VlThreadPool::VlThreadPool(int nThreads, bool profiling)
    : m_profiling{profiling} {
    // --threads N passes nThreads=N-1, as the "main" thread counts as 1
    unsigned cpus = std::thread::hardware_concurrency();
    if (cpus < nThreads + 1) {
        static int warnedOnce = 0;
        if (!warnedOnce++) {
            VL_PRINTF_MT("%%Warning: System has %u CPUs but model Verilated with"
                         " --threads %d; may run slow.\n",
                         cpus, nThreads + 1);
        }
    }
    // Create'em
    for (int i = 0; i < nThreads; ++i) {
        m_workers.push_back(new VlWorkerThread(this, profiling));
    }
    // Set up a profile buffer for the current thread too -- on the
    // assumption that it's the same thread that calls eval and may be
    // donated to run mtasks during the eval.
    if (VL_UNLIKELY(m_profiling)) setupProfilingClientThread();
}

VlThreadPool::~VlThreadPool() {
    for (int i = 0; i < m_workers.size(); ++i) {
        // Each ~WorkerThread will wait for its thread to exit.
        delete m_workers[i];
    }
    if (VL_UNLIKELY(m_profiling)) tearDownProfilingClientThread();
}
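// Free the calling thread's profile trace created by setupProfilingClientThread().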
void VlThreadPool::tearDownProfilingClientThread() {
    assert(t_profilep);
    delete t_profilep;
    t_profilep = nullptr;
}
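// Allocate the calling thread's profile trace and register it with the pool.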
void VlThreadPool::setupProfilingClientThread() {
    assert(!t_profilep);
    t_profilep = new ProfileTrace;
    // Reserve some space in the thread-local profiling buffer;
    // try not to malloc while collecting profiling.
    t_profilep->reserve(4096);
    {
        const VerilatedLockGuard lk(m_mutex);
        m_allProfiles.insert(t_profilep);
    }
}

void VlThreadPool::profileAppendAll(const VlProfileRec& rec) {
    const VerilatedLockGuard lk(m_mutex);
    for (const auto& profilep : m_allProfiles) {
        // Every thread's profile trace gets a copy of rec.
        profilep->emplace_back(rec);
    }
}
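// Write all collected per-thread profile records to filenamep in the textual VLPROF format.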
void VlThreadPool::profileDump(const char* filenamep, vluint64_t ticksElapsed) {
    const VerilatedLockGuard lk(m_mutex);
    VL_DEBUG_IF(VL_DBG_MSGF("+prof+threads writing to '%s'\n", filenamep););

    FILE* fp = fopen(filenamep, "w");
    if (VL_UNLIKELY(!fp)) {
        VL_FATAL_MT(filenamep, 0, "", "+prof+threads+file file not writable");
        // cppcheck-suppress resourceLeak // bug, doesn't realize fp is nullptr
        return;  // LCOV_EXCL_LINE
    }

    // TODO Perhaps merge with verilated_coverage output format, so can
    // have a common merging and reporting tool, etc.
    fprintf(fp, "VLPROFTHREAD 1.0 # Verilator thread profile dump version 1.0\n");
    fprintf(fp, "VLPROF arg --threads %" VL_PRI64 "u\n", vluint64_t(m_workers.size() + 1));
    fprintf(fp, "VLPROF arg +verilator+prof+threads+start+%" VL_PRI64 "u\n",
            Verilated::profThreadsStart());
    fprintf(fp, "VLPROF arg +verilator+prof+threads+window+%u\n", Verilated::profThreadsWindow());
    fprintf(fp, "VLPROF stat yields %" VL_PRI64 "u\n", VlMTaskVertex::yields());
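    // Thread IDs in the dump are 1-based; incremented before each thread's records are emitted.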
    vluint32_t thread_id = 0;
    for (const auto& pi : m_allProfiles) {
        ++thread_id;

        bool printing = false;  // False while in warmup phase
        for (const auto& ei : *pi) {
            switch (ei.m_type) {
            case VlProfileRec::TYPE_BARRIER:  //
                printing = true;
                break;
            case VlProfileRec::TYPE_MTASK_RUN:
                if (!printing) break;
                fprintf(fp,
                        "VLPROF mtask %d"
                        " start %" VL_PRI64 "u end %" VL_PRI64 "u elapsed %" VL_PRI64 "u"
                        " predict_time %u cpu %u on thread %u\n",
                        ei.m_mtaskId, ei.m_startTime, ei.m_endTime,
                        (ei.m_endTime - ei.m_startTime), ei.m_predictTime, ei.m_cpu, thread_id);
                break;
            default: assert(false); break;  // LCOV_EXCL_LINE
            }
        }
    }
    fprintf(fp, "VLPROF stat ticks %" VL_PRI64 "u\n", ticksElapsed);

    fclose(fp);
}