Fix --prof-exec predicted time values (#4988)

Wrapping the functions in #4933 broke the --prof-exec report, as the
predicted MTask times are computed during thread packing but are
emitted in the wrapping functions.
This commit is contained in:
Geza Lore 2024-03-16 12:17:24 +00:00 committed by GitHub
parent bf8a88a0ad
commit df519ff16e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 14 additions and 12 deletions

View File

@ -203,7 +203,6 @@ class PackThreads final {
const uint32_t m_sandbagNumerator; // Numerator padding for est runtime const uint32_t m_sandbagNumerator; // Numerator padding for est runtime
const uint32_t m_sandbagDenom; // Denominator padding for est runtime const uint32_t m_sandbagDenom; // Denominator padding for est runtime
public:
// CONSTRUCTORS // CONSTRUCTORS
explicit PackThreads(uint32_t nThreads = v3Global.opt.threads(), explicit PackThreads(uint32_t nThreads = v3Global.opt.threads(),
unsigned sandbagNumerator = 30, unsigned sandbagDenom = 100) unsigned sandbagNumerator = 30, unsigned sandbagDenom = 100)
@ -211,8 +210,8 @@ public:
, m_sandbagNumerator{sandbagNumerator} , m_sandbagNumerator{sandbagNumerator}
, m_sandbagDenom{sandbagDenom} {} , m_sandbagDenom{sandbagDenom} {}
~PackThreads() = default; ~PackThreads() = default;
VL_UNCOPYABLE(PackThreads);
private:
// METHODS // METHODS
uint32_t completionTime(const ThreadSchedule& schedule, const ExecMTask* mtaskp, uint32_t completionTime(const ThreadSchedule& schedule, const ExecMTask* mtaskp,
uint32_t threadId) { uint32_t threadId) {
@ -256,9 +255,8 @@ private:
return true; return true;
} }
public:
// Pack an MTasks from given graph into m_nThreads threads, return the schedule. // Pack an MTasks from given graph into m_nThreads threads, return the schedule.
const ThreadSchedule pack(const V3Graph& mtaskGraph) { ThreadSchedule pack(const V3Graph& mtaskGraph) {
// The result // The result
ThreadSchedule schedule{m_nThreads}; ThreadSchedule schedule{m_nThreads};
@ -350,6 +348,7 @@ public:
return schedule; return schedule;
} }
public:
// SELF TEST // SELF TEST
static void selfTest() { static void selfTest() {
V3Graph graph; V3Graph graph;
@ -402,8 +401,9 @@ public:
UASSERT_SELFTEST(uint32_t, packer.completionTime(schedule, t2, 1), 1199); UASSERT_SELFTEST(uint32_t, packer.completionTime(schedule, t2, 1), 1199);
} }
private: static const ThreadSchedule apply(const V3Graph& mtaskGraph) {
VL_UNCOPYABLE(PackThreads); return PackThreads{}.pack(mtaskGraph);
}
}; };
using EstimateAndProfiled = std::pair<uint64_t, uint64_t>; // cost est, cost profiled using EstimateAndProfiled = std::pair<uint64_t, uint64_t>; // cost est, cost profiled
@ -780,14 +780,10 @@ void wrapMTaskBodies(AstExecGraph* const execGraphp) {
} }
} }
void implementExecGraph(AstExecGraph* const execGraphp) { void implementExecGraph(AstExecGraph* const execGraphp, const ThreadSchedule& schedule) {
// Nothing to be done if there are no MTasks in the graph at all. // Nothing to be done if there are no MTasks in the graph at all.
if (execGraphp->depGraphp()->empty()) return; if (execGraphp->depGraphp()->empty()) return;
// Schedule the mtasks: statically associate each mtask with a thread,
// and determine the order in which each thread will run its mtasks. const ThreadSchedule& schedule = PackThreads{}.pack(*execGraphp->depGraphp());
const ThreadSchedule& schedule = PackThreads{}.pack(*execGraphp->depGraphp());
// Create a function to be run by each thread. Note this moves all AstMTaskBody nodes from the // Create a function to be run by each thread. Note this moves all AstMTaskBody nodes from the
// AstExecGraph into the AstCFunc created // AstExecGraph into the AstCFunc created
const std::vector<AstCFunc*>& funcps = createThreadFunctions(schedule, execGraphp->name()); const std::vector<AstCFunc*>& funcps = createThreadFunctions(schedule, execGraphp->name());
@ -808,11 +804,15 @@ void implement(AstNetlist* netlistp) {
fillinCosts(execGraphp->depGraphp()); fillinCosts(execGraphp->depGraphp());
finalizeCosts(execGraphp->depGraphp()); finalizeCosts(execGraphp->depGraphp());
// Schedule the mtasks: statically associate each mtask with a thread, // Schedule the mtasks: statically associate each mtask with a thread,
// and determine the order in which each thread will run its mtasks. // and determine the order in which each thread will run its mtasks.
const ThreadSchedule& schedule = PackThreads::apply(*execGraphp->depGraphp());
// Wrap each MTask body into a CFunc for better profiling/debugging // Wrap each MTask body into a CFunc for better profiling/debugging
wrapMTaskBodies(execGraphp); wrapMTaskBodies(execGraphp);
// Replace the graph body with its multi-threaded implementation. // Replace the graph body with its multi-threaded implementation.
implementExecGraph(execGraphp); implementExecGraph(execGraphp, schedule);
}); });
} }

View File

@ -45,6 +45,8 @@ run(cmd => ["$ENV{VERILATOR_ROOT}/bin/verilator_gantt",
if ($Self->{vltmt}) { if ($Self->{vltmt}) {
file_grep("$Self->{obj_dir}/gantt.log", qr/Total threads += 2/i); file_grep("$Self->{obj_dir}/gantt.log", qr/Total threads += 2/i);
file_grep("$Self->{obj_dir}/gantt.log", qr/Total mtasks += 7/i); file_grep("$Self->{obj_dir}/gantt.log", qr/Total mtasks += 7/i);
# Predicted thread utilization should be less than 100%
file_grep_not("$Self->{obj_dir}/gantt.log", qr/Thread utilization =\s*\d\d\d+\.\d+%/i);
} else { } else {
file_grep("$Self->{obj_dir}/gantt.log", qr/Total threads += 1/i); file_grep("$Self->{obj_dir}/gantt.log", qr/Total threads += 1/i);
file_grep("$Self->{obj_dir}/gantt.log", qr/Total mtasks += 0/i); file_grep("$Self->{obj_dir}/gantt.log", qr/Total mtasks += 0/i);