// -*- mode: C++; c-file-style: "cc-mode" -*-
//=============================================================================
//
// Code available from: https://verilator.org
//
// Copyright 2001-2024 by Wilson Snyder. This program is free software; you
// can redistribute it and/or modify it under the terms of either the GNU
// Lesser General Public License Version 3 or the Perl Artistic License
// Version 2.0.
// SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0
//
//=============================================================================
//
// Verilated tracing implementation code template common to all formats.
// This file is included by the format-specific implementations and
// should not be used otherwise.
//
//=============================================================================

// clang-format off

#ifndef VL_CPPCHECK
#if !defined(VL_SUB_T) || !defined(VL_BUF_T)
# error "This file should only be included in trace format implementations"
#endif

#include "verilated_intrinsics.h"
#include "verilated_trace.h"
#include "verilated_threads.h"

#include <cassert>
#include <cstring>
#include <deque>
#include <list>
#include <memory>
#include <thread>

#if 0
# include <iostream>
# define VL_TRACE_OFFLOAD_DEBUG(msg) std::cout << "TRACE OFFLOAD THREAD: " << msg << "\n"
#else
# define VL_TRACE_OFFLOAD_DEBUG(msg)
#endif

// clang-format on

//=============================================================================
// Static utility functions

static double timescaleToDouble(const char* unitp) VL_PURE {
    char* endp = nullptr;
    double value = std::strtod(unitp, &endp);
    // On parse error, default the numeric part to 1, so that just "ns" returns 1e-9.
    if (value == 0.0 && endp == unitp) value = 1;
    unitp = endp;
    for (; *unitp && std::isspace(*unitp); unitp++) {}
    switch (*unitp) {
    case 's': value *= 1e0; break;
    case 'm': value *= 1e-3; break;
    case 'u': value *= 1e-6; break;
    case 'n': value *= 1e-9; break;
    case 'p': value *= 1e-12; break;
    case 'f': value *= 1e-15; break;
    case 'a': value *= 1e-18; break;
    }
    return value;
}
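
// For example (illustrative values only):
//   timescaleToDouble("1ps")    == 1e-12
//   timescaleToDouble("100 us") == 1e-4
//   timescaleToDouble("ns")     == 1e-9   (bare unit defaults the multiplier to 1)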

//=========================================================================
// Buffer management

template <>
uint32_t* VerilatedTrace<VL_SUB_T, VL_BUF_T>::getOffloadBuffer() {
    uint32_t* bufferp;
    // Some jitter is expected, so a number of alternative offload buffers are
    // required, but don't allocate more than 8 buffers.
    if (m_numOffloadBuffers < 8) {
        // Allocate a new buffer if none is available
        if (!m_offloadBuffersFromWorker.tryGet(bufferp)) {
            ++m_numOffloadBuffers;
            // Note: over-allocate a bit so pointer comparison is well defined
            // if we overflow only by a small amount
            bufferp = new uint32_t[m_offloadBufferSize + 16];
        }
    } else {
        // Block until a buffer becomes available
        bufferp = m_offloadBuffersFromWorker.get();
    }
    return bufferp;
}
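
// Design note: capping the pool at 8 buffers bounds tracing memory at roughly
// 8 * (m_offloadBufferSize + 16) words per trace file, while still letting the
// simulation run ahead of the trace encoder when dump times jitter.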

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::waitForOffloadBuffer(const uint32_t* buffp) {
    // Slow path code only called on flush/shutdown, so use a simple algorithm.
    // Collect buffers from worker and stash them until we get the one we want.
    std::deque<uint32_t*> stash;
    do { stash.push_back(m_offloadBuffersFromWorker.get()); } while (stash.back() != buffp);
    // Now put them back in the queue, in the original order.
    while (!stash.empty()) {
        m_offloadBuffersFromWorker.put_front(stash.back());
        stash.pop_back();
    }
}

//=========================================================================
// Worker thread
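
// The decoding below implies the following offload record layout (a sketch
// derived from this file, not a normative format description):
//   word 0:  bits [3:0]  = command (VerilatedTraceOffloadCommand)
//            bits [31:4] = signal width in bits ('top' below)
//   word 1:  offset of the signal's entry in m_sigs_oldvalp (the 'code');
//            TIME_CHANGE and TRACE_BUFFER carry payload here instead
//   word 2+: command-specific payload (new value, time stamp, or Buffer*)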

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::offloadWorkerThreadMain() {
    bool shutdown = false;

    do {
        uint32_t* const bufferp = m_offloadBuffersToWorker.get();

        VL_TRACE_OFFLOAD_DEBUG("");
        VL_TRACE_OFFLOAD_DEBUG("Got buffer: " << bufferp);

        const uint32_t* readp = bufferp;

        std::unique_ptr<Buffer> traceBufp;  // We own the passed trace buffer

        while (true) {
            const uint32_t cmd = readp[0];
            const uint32_t top = cmd >> 4;
            // Always set this up, as it is almost always needed
            uint32_t* const oldp = m_sigs_oldvalp + readp[1];
            // Note this increment needs to be undone on commands which do not
            // actually contain a code, but those are the rare cases.
            readp += 2;

            switch (cmd & 0xF) {
            //===
            // CHG_* commands
            case VerilatedTraceOffloadCommand::CHG_BIT_0:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_BIT_0 " << top);
                traceBufp->chgBit(oldp, 0);
                continue;
            case VerilatedTraceOffloadCommand::CHG_BIT_1:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_BIT_1 " << top);
                traceBufp->chgBit(oldp, 1);
                continue;
            case VerilatedTraceOffloadCommand::CHG_CDATA:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_CDATA " << top);
                // Signal width ('top') is stored in the upper bits of the command word
                traceBufp->chgCData(oldp, *readp, top);
                readp += 1;
                continue;
            case VerilatedTraceOffloadCommand::CHG_SDATA:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_SDATA " << top);
                // Signal width ('top') is stored in the upper bits of the command word
                traceBufp->chgSData(oldp, *readp, top);
                readp += 1;
                continue;
            case VerilatedTraceOffloadCommand::CHG_IDATA:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_IDATA " << top);
                // Signal width ('top') is stored in the upper bits of the command word
                traceBufp->chgIData(oldp, *readp, top);
                readp += 1;
                continue;
            case VerilatedTraceOffloadCommand::CHG_QDATA:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_QDATA " << top);
                // Signal width ('top') is stored in the upper bits of the command word
                traceBufp->chgQData(oldp, *reinterpret_cast<const QData*>(readp), top);
                readp += 2;
                continue;
            case VerilatedTraceOffloadCommand::CHG_WDATA:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_WDATA " << top);
                traceBufp->chgWData(oldp, readp, top);
                readp += VL_WORDS_I(top);
                continue;
            case VerilatedTraceOffloadCommand::CHG_DOUBLE:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_DOUBLE " << top);
                traceBufp->chgDouble(oldp, *reinterpret_cast<const double*>(readp));
                readp += 2;
                continue;
            case VerilatedTraceOffloadCommand::CHG_EVENT:
                VL_TRACE_OFFLOAD_DEBUG("Command CHG_EVENT " << top);
                traceBufp->chgEventTriggered(oldp);
                continue;

            //===
            // Rare commands
            case VerilatedTraceOffloadCommand::TIME_CHANGE: {
                VL_TRACE_OFFLOAD_DEBUG("Command TIME_CHANGE " << top);
                readp -= 1;  // No code in this command, undo increment
                const uint64_t timeui
                    = static_cast<uint64_t>(*reinterpret_cast<const uint32_t*>(readp)) << 32ULL
                      | static_cast<uint64_t>(*reinterpret_cast<const uint32_t*>(readp + 1));
                emitTimeChange(timeui);
                readp += 2;
                continue;
            }
            case VerilatedTraceOffloadCommand::TRACE_BUFFER:
                VL_TRACE_OFFLOAD_DEBUG("Command TRACE_BUFFER " << top);
                readp -= 1;  // No code in this command, undo increment
                traceBufp.reset(*reinterpret_cast<Buffer* const*>(readp));
                readp += 2;
                continue;

            //===
            // Commands ending this buffer
            case VerilatedTraceOffloadCommand::END:  //
                VL_TRACE_OFFLOAD_DEBUG("Command END");
                break;
            case VerilatedTraceOffloadCommand::SHUTDOWN:
                VL_TRACE_OFFLOAD_DEBUG("Command SHUTDOWN");
                shutdown = true;
                break;

            //===
            // Unknown command
            default: {  // LCOV_EXCL_START
                VL_TRACE_OFFLOAD_DEBUG("Command UNKNOWN " << cmd);
                VL_FATAL_MT(__FILE__, __LINE__, "", "Unknown trace command");
                break;
            }  // LCOV_EXCL_STOP
            }

            // The above switch will execute 'continue' when necessary,
            // so if we ever reach here, we are done with the buffer.
            break;
        }

        VL_TRACE_OFFLOAD_DEBUG("Returning buffer");

        // Return buffer
        m_offloadBuffersFromWorker.put(bufferp);
    } while (VL_LIKELY(!shutdown));
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::shutdownOffloadWorker() {
    // If the worker thread is not running, we are done.
    if (!m_workerThread) return;

    // Hand a buffer with a shutdown command to the worker thread
    uint32_t* const bufferp = getOffloadBuffer();
    bufferp[0] = VerilatedTraceOffloadCommand::SHUTDOWN;
    m_offloadBuffersToWorker.put(bufferp);
    // Wait for it to return
    waitForOffloadBuffer(bufferp);
    // Join the thread and delete it
    m_workerThread->join();
    m_workerThread.reset(nullptr);
}

//=============================================================================
// Life cycle

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::closeBase() {
    if (offload()) {
        shutdownOffloadWorker();
        while (m_numOffloadBuffers) {
            delete[] m_offloadBuffersFromWorker.get();
            --m_numOffloadBuffers;
        }
    }
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::flushBase() {
    if (offload()) {
        // Hand an empty buffer to the worker thread
        uint32_t* const bufferp = getOffloadBuffer();
        *bufferp = VerilatedTraceOffloadCommand::END;
        m_offloadBuffersToWorker.put(bufferp);
        // Wait for it to be returned. As the processing is in-order,
        // this ensures all previous buffers have been processed.
        waitForOffloadBuffer(bufferp);
    }
}

//=============================================================================
// Callbacks to run on global events

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::onFlush(void* selfp) {
    // This calls 'flush' on the derived class (which must then get any mutex)
    reinterpret_cast<VL_SUB_T*>(selfp)->flush();
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::onExit(void* selfp) {
    // This calls 'close' on the derived class (which must then get any mutex)
    reinterpret_cast<VL_SUB_T*>(selfp)->close();
}

//=============================================================================
// VerilatedTrace

template <>
VerilatedTrace<VL_SUB_T, VL_BUF_T>::VerilatedTrace() {
    set_time_unit(Verilated::threadContextp()->timeunitString());
    set_time_resolution(Verilated::threadContextp()->timeprecisionString());
}

template <>
VerilatedTrace<VL_SUB_T, VL_BUF_T>::~VerilatedTrace() {
    if (m_sigs_oldvalp) VL_DO_CLEAR(delete[] m_sigs_oldvalp, m_sigs_oldvalp = nullptr);
    if (m_sigs_enabledp) VL_DO_CLEAR(delete[] m_sigs_enabledp, m_sigs_enabledp = nullptr);
    Verilated::removeFlushCb(VerilatedTrace<VL_SUB_T, VL_BUF_T>::onFlush, this);
    Verilated::removeExitCb(VerilatedTrace<VL_SUB_T, VL_BUF_T>::onExit, this);
    if (offload()) closeBase();
}

//=========================================================================
// Internals available to format-specific implementations

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::traceInit() VL_MT_UNSAFE {
    // Note: It is possible to re-open a trace file (VCD in particular),
    // so we must reset the next code here, but it must have the same number
    // of codes on re-open
    const uint32_t expectedCodes = nextCode();
    m_nextCode = 1;
    m_numSignals = 0;
    m_maxBits = 0;
    m_sigs_enabledVec.clear();

    // Call all initialize callbacks, which will:
    // - Call decl* for each signal (these eventually call ::declCode)
    // - Store the base code
    for (const CallbackRecord& cbr : m_initCbs) cbr.m_initCb(cbr.m_userp, self(), nextCode());

    if (expectedCodes && nextCode() != expectedCodes) {
        VL_FATAL_MT(__FILE__, __LINE__, "",
                    "Reopening trace file with different number of signals");
    }

    // Now that we know the number of codes, allocate space for the buffer
    // holding previous signal values.
    if (!m_sigs_oldvalp) m_sigs_oldvalp = new uint32_t[nextCode()];

    // Apply enables
    if (m_sigs_enabledp) VL_DO_CLEAR(delete[] m_sigs_enabledp, m_sigs_enabledp = nullptr);
    if (!m_sigs_enabledVec.empty()) {
        // If it were empty, m_sigs_enabledp would stay nullptr to short-circuit
        // the tests below. But it isn't, so allocate one bit for each code to
        // indicate enablement. We don't keep using m_sigs_enabledVec, as
        // std::vector<bool> is not guaranteed to be fast.
        m_sigs_enabledp = new uint32_t[1 + VL_WORDS_I(nextCode())]{0};
        m_sigs_enabledVec.resize(nextCode());
        for (size_t code = 0; code < nextCode(); ++code) {
            if (m_sigs_enabledVec[code]) {
                m_sigs_enabledp[VL_BITWORD_I(code)] |= 1U << VL_BITBIT_I(code);
            }
        }
        m_sigs_enabledVec.clear();
    }

    // Set callback so flush/abort will flush this file
    Verilated::addFlushCb(VerilatedTrace<VL_SUB_T, VL_BUF_T>::onFlush, this);
    Verilated::addExitCb(VerilatedTrace<VL_SUB_T, VL_BUF_T>::onExit, this);

    if (offload()) {
        // Compute offload buffer size. We need to be able to store a new value
        // for each signal, which is 'nextCode()' entries after the init
        // callbacks above have been run, plus up to 2 more words of metadata
        // per signal, plus fixed overhead of 1 for a termination flag and 3
        // for a time stamp update.
        m_offloadBufferSize = nextCode() + numSignals() * 2 + 4;
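        // For example (illustrative numbers only): a model with 100 signals
        // and nextCode() == 300 after init would get 300 + 100 * 2 + 4 = 504
        // 32-bit words of offload buffer.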

        // Start the worker thread
        m_workerThread.reset(
            new std::thread{&VerilatedTrace<VL_SUB_T, VL_BUF_T>::offloadWorkerThreadMain, this});
    }
}

template <>
bool VerilatedTrace<VL_SUB_T, VL_BUF_T>::declCode(uint32_t code, const std::string& declName,
                                                  uint32_t bits) {
    if (VL_UNCOVERABLE(!code)) {
        VL_FATAL_MT(__FILE__, __LINE__, "", "Internal: internal trace problem, code 0 is illegal");
    }
    // To keep it simple, this is O(enables * signals), but we expect few enables
    bool enabled = false;
    if (m_dumpvars.empty()) enabled = true;
    for (const auto& item : m_dumpvars) {
        const int dumpvarsLevel = item.first;
        const char* dvp = item.second.c_str();
        const char* np = declName.c_str();
        while (*dvp && *dvp == *np) {
            ++dvp;
            ++np;
        }
        if (*dvp) continue;  // Didn't match dumpvar item
        if (*np && *np != ' ') continue;  // e.g. "t" isn't a match for "top"
        int levels = 0;
        while (*np) {
            if (*np++ == ' ') ++levels;
        }
        if (levels > dumpvarsLevel) continue;  // Too deep
        // Even for a multi-code signal, we only need to set the first code
        // word, as that's all we'll check for later
        if (m_sigs_enabledVec.size() <= code) m_sigs_enabledVec.resize((code + 1024) * 2);
        m_sigs_enabledVec[code] = true;
        enabled = true;
        break;
    }

    const int codesNeeded = VL_WORDS_I(bits);
    m_nextCode = std::max(m_nextCode, code + codesNeeded);
    ++m_numSignals;
    m_maxBits = std::max(m_maxBits, bits);
    return enabled;
}
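
// For example (illustrative): after dumpvars(1, "top.mid"), m_dumpvars holds
// {1, "top mid"}, so a declName of "top mid sig" matches with levels == 1 and
// is enabled, while "top mid sub sig" (levels == 2) is filtered out as too deep.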

//=========================================================================
// Internals available to format-specific implementations

template <>
std::string VerilatedTrace<VL_SUB_T, VL_BUF_T>::timeResStr() const {
    return vl_timescaled_double(m_timeRes);
}

//=========================================================================
// External interface to client code

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::set_time_unit(const char* unitp) VL_MT_SAFE {
    m_timeUnit = timescaleToDouble(unitp);
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::set_time_unit(const std::string& unit) VL_MT_SAFE {
    set_time_unit(unit.c_str());
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::set_time_resolution(const char* unitp) VL_MT_SAFE {
    m_timeRes = timescaleToDouble(unitp);
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::set_time_resolution(const std::string& unit) VL_MT_SAFE {
    set_time_resolution(unit.c_str());
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::dumpvars(int level, const std::string& hier) VL_MT_SAFE {
    if (level == 0) {
        m_dumpvars.clear();  // empty = everything on
    } else {
        // Convert Verilog . separators to trace space separators
        std::string hierSpaced = hier;
        for (auto& i : hierSpaced) {
            if (i == '.') i = ' ';
        }
        m_dumpvars.emplace_back(level, hierSpaced);
    }
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::parallelWorkerTask(void* datap, bool) {
    ParallelWorkerData* const wdp = reinterpret_cast<ParallelWorkerData*>(datap);
    // Run the task
    wdp->m_cb(wdp->m_userp, wdp->m_bufp);
    // Mark buffer as ready
    const VerilatedLockGuard lock{wdp->m_mutex};
    wdp->m_ready.store(true);
    if (wdp->m_waiting) wdp->m_cv.notify_one();
}

template <>
VL_ATTR_NOINLINE void VerilatedTrace<VL_SUB_T, VL_BUF_T>::ParallelWorkerData::wait() {
    // Spin for a while, waiting for the buffer to become ready
    for (int i = 0; i < VL_LOCK_SPINS; ++i) {
        if (VL_LIKELY(m_ready.load(std::memory_order_relaxed))) return;
        VL_CPU_RELAX();
    }
    // We have been spinning for a while, so yield the thread
    VerilatedLockGuard lock{m_mutex};
    m_waiting = true;
    m_cv.wait(m_mutex, [this] { return m_ready.load(std::memory_order_relaxed); });
    m_waiting = false;
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::runCallbacks(const std::vector<CallbackRecord>& cbVec) {
    if (parallel()) {
        // If tracing in parallel, dispatch to the thread pool
        VlThreadPool* threadPoolp = static_cast<VlThreadPool*>(m_contextp->threadPoolp());
        // List of work items for thread (std::list, as ParallelWorkerData is not movable)
        std::list<ParallelWorkerData> workerData;
        // We use the whole pool + the main thread
        const unsigned threads = threadPoolp->numThreads() + 1;
        // Main thread executes all jobs with index % threads == 0
        std::vector<ParallelWorkerData*> mainThreadWorkerData;
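        // Scheduling sketch (derived from the dispatch below): with, say, 3
        // pool workers plus the main thread, threads == 4, so callbacks with
        // m_fidx % 4 == 0 run on the main thread, and those with
        // m_fidx % 4 == r (r != 0) run on pool worker r - 1.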
        // Enqueue all the jobs
        for (const CallbackRecord& cbr : cbVec) {
            // Always get the trace buffer on the main thread
            Buffer* const bufp = getTraceBuffer(cbr.m_fidx);
            // Create new work item
            workerData.emplace_back(cbr.m_dumpCb, cbr.m_userp, bufp);
            // Grab the new work item
            ParallelWorkerData* const itemp = &workerData.back();
            // Enqueue task to thread pool, or main thread
            if (unsigned rem = cbr.m_fidx % threads) {
                threadPoolp->workerp(rem - 1)->addTask(parallelWorkerTask, itemp);
            } else {
                mainThreadWorkerData.push_back(itemp);
            }
        }
        // Execute main thread jobs
        for (ParallelWorkerData* const itemp : mainThreadWorkerData) {
            parallelWorkerTask(itemp, false);
        }
        // Commit all trace buffers in order
        for (ParallelWorkerData& item : workerData) {
            // Wait until ready
            item.wait();
            // Commit the buffer
            commitTraceBuffer(item.m_bufp);
        }

        // Done
        return;
    }
    // Fall back on sequential execution
    for (const CallbackRecord& cbr : cbVec) {
        Buffer* const traceBufferp = getTraceBuffer(cbr.m_fidx);
        cbr.m_dumpCb(cbr.m_userp, traceBufferp);
        commitTraceBuffer(traceBufferp);
    }
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::runOffloadedCallbacks(
    const std::vector<CallbackRecord>& cbVec) {
    // Fall back on sequential execution
    for (const CallbackRecord& cbr : cbVec) {
        Buffer* traceBufferp = getTraceBuffer(cbr.m_fidx);
        cbr.m_dumpOffloadCb(cbr.m_userp, static_cast<OffloadBuffer*>(traceBufferp));
        commitTraceBuffer(traceBufferp);
    }
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::dump(uint64_t timeui) VL_MT_SAFE_EXCLUDES(m_mutex) {
    // Not really VL_MT_SAFE, but rather VL_MT_UNSAFE_ONE.
    // This does get the mutex, but if multiple threads are trying to dump,
    // chances are the data being dumped will have other problems anyway.
    const VerilatedLockGuard lock{m_mutex};
    if (VL_UNCOVERABLE(m_didSomeDump && timeui <= m_timeLastDump)) {  // LCOV_EXCL_START
        VL_PRINTF_MT("%%Warning: previous dump at t=%" PRIu64 ", requesting t=%" PRIu64
                     ", dump call ignored\n",
                     m_timeLastDump, timeui);
        return;
    }  // LCOV_EXCL_STOP
    m_timeLastDump = timeui;
    m_didSomeDump = true;

    Verilated::quiesce();

    // Call hook for format-specific behaviour
    if (VL_UNLIKELY(m_fullDump)) {
        if (!preFullDump()) return;
    } else {
        if (!preChangeDump()) return;
    }

    uint32_t* bufferp = nullptr;
    if (offload()) {
        // Currently only incremental dumps run on the worker thread
        if (VL_LIKELY(!m_fullDump)) {
            // Get the offload buffer we are about to fill
            bufferp = getOffloadBuffer();
            m_offloadBufferWritep = bufferp;
            m_offloadBufferEndp = bufferp + m_offloadBufferSize;

            // Tell worker to update time point
            m_offloadBufferWritep[0] = VerilatedTraceOffloadCommand::TIME_CHANGE;
            *reinterpret_cast<uint32_t*>(m_offloadBufferWritep + 1)
                = static_cast<uint32_t>(timeui >> 32ULL);
            *reinterpret_cast<uint32_t*>(m_offloadBufferWritep + 2)
                = static_cast<uint32_t>(timeui);
            m_offloadBufferWritep += 3;
        } else {
            // Update time point
            flushBase();
            emitTimeChange(timeui);
        }
    } else {
        // Update time point
        emitTimeChange(timeui);
    }

    // Run the callbacks
    if (VL_UNLIKELY(m_fullDump)) {
        m_fullDump = false;  // No more need for next dump to be full
        if (offload()) {
            runOffloadedCallbacks(m_fullOffloadCbs);
        } else {
            runCallbacks(m_fullCbs);
        }
    } else {
        if (offload()) {
            runOffloadedCallbacks(m_chgOffloadCbs);
        } else {
            runCallbacks(m_chgCbs);
        }
    }

    if (VL_UNLIKELY(m_constDump)) {
        m_constDump = false;
        if (offload()) {
            runOffloadedCallbacks(m_constOffloadCbs);
        } else {
            runCallbacks(m_constCbs);
        }
    }

    for (const CallbackRecord& cbr : m_cleanupCbs) cbr.m_cleanupCb(cbr.m_userp, self());

    if (offload() && VL_LIKELY(bufferp)) {
        // Mark end of the offload buffer we just filled
        *m_offloadBufferWritep++ = VerilatedTraceOffloadCommand::END;

        // Assert no buffer overflow
        assert(static_cast<size_t>(m_offloadBufferWritep - bufferp) <= m_offloadBufferSize);

        // Reset our pointers as we are giving up the buffer
        m_offloadBufferWritep = nullptr;
        m_offloadBufferEndp = nullptr;

        // Pass it to the worker thread
        m_offloadBuffersToWorker.put(bufferp);
    }
}

//=============================================================================
// Non-hot path internal interface to Verilator generated code

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addModel(VerilatedModel* modelp)
    VL_MT_SAFE_EXCLUDES(m_mutex) {
    const VerilatedLockGuard lock{m_mutex};

    const bool firstModel = m_models.empty();
    const bool newModel = m_models.insert(modelp).second;
    VerilatedContext* const contextp = modelp->contextp();

    // Validate
    if (!newModel) {  // LCOV_EXCL_START
        VL_FATAL_MT(
            __FILE__, __LINE__, "",
            "The same model has already been added to this trace file or VerilatedContext");
    }
    if (VL_UNCOVERABLE(m_contextp && contextp != m_contextp)) {
        VL_FATAL_MT(__FILE__, __LINE__, "",
                    "A trace file instance can only handle models from the same VerilatedContext");
    }
    if (VL_UNCOVERABLE(m_didSomeDump)) {
        VL_FATAL_MT(__FILE__, __LINE__, "",
                    "Cannot add models to a trace file if 'dump' has already been called");
    }  // LCOV_EXCL_STOP

    // Keep hold of the context
    m_contextp = contextp;

    // Get the desired trace config from the model
    const std::unique_ptr<VerilatedTraceConfig> configp = modelp->traceConfig();

    // Configure trace base class
    if (!firstModel) {
        if (m_offload != configp->m_useOffloading) {
            VL_FATAL_MT(__FILE__, __LINE__, "",
                        "Either all or no models using the same trace file must use offloading");
        }
    }
    m_offload = configp->m_useOffloading;
    // If at least one model requests parallel tracing, then use it
    m_parallel |= configp->m_useParallel;

    if (VL_UNCOVERABLE(m_parallel && m_offload)) {  // LCOV_EXCL_START
        VL_FATAL_MT(__FILE__, __LINE__, "", "Cannot use parallel tracing with offloading");
    }  // LCOV_EXCL_STOP

    // Configure format-specific sub class
    configure(*(configp.get()));
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addCallbackRecord(std::vector<CallbackRecord>& cbVec,
                                                           CallbackRecord&& cbRec)
    VL_MT_SAFE_EXCLUDES(m_mutex) {
    const VerilatedLockGuard lock{m_mutex};
    cbVec.push_back(std::move(cbRec));
}

template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addInitCb(initCb_t cb, void* userp) VL_MT_SAFE {
    addCallbackRecord(m_initCbs, CallbackRecord{cb, userp});
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addConstCb(dumpCb_t cb, uint32_t fidx,
                                                    void* userp) VL_MT_SAFE {
    addCallbackRecord(m_constCbs, CallbackRecord{cb, fidx, userp});
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addConstCb(dumpOffloadCb_t cb, uint32_t fidx,
                                                    void* userp) VL_MT_SAFE {
    addCallbackRecord(m_constOffloadCbs, CallbackRecord{cb, fidx, userp});
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addFullCb(dumpCb_t cb, uint32_t fidx,
                                                   void* userp) VL_MT_SAFE {
    addCallbackRecord(m_fullCbs, CallbackRecord{cb, fidx, userp});
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addFullCb(dumpOffloadCb_t cb, uint32_t fidx,
                                                   void* userp) VL_MT_SAFE {
    addCallbackRecord(m_fullOffloadCbs, CallbackRecord{cb, fidx, userp});
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addChgCb(dumpCb_t cb, uint32_t fidx,
                                                  void* userp) VL_MT_SAFE {
    addCallbackRecord(m_chgCbs, CallbackRecord{cb, fidx, userp});
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addChgCb(dumpOffloadCb_t cb, uint32_t fidx,
                                                  void* userp) VL_MT_SAFE {
    addCallbackRecord(m_chgOffloadCbs, CallbackRecord{cb, fidx, userp});
}
template <>
void VerilatedTrace<VL_SUB_T, VL_BUF_T>::addCleanupCb(cleanupCb_t cb, void* userp) VL_MT_SAFE {
    addCallbackRecord(m_cleanupCbs, CallbackRecord{cb, userp});
}

//=========================================================================
// Primitives converting binary values to strings...

// All of these take a destination pointer where the string will be emitted,
// and a value to convert. There are a couple of variants for efficiency.

static inline void cvtCDataToStr(char* dstp, CData value) {
#ifdef VL_HAVE_SSE2
    // Similar to cvtSDataToStr but only the bottom 8 byte lanes are used
    const __m128i a = _mm_cvtsi32_si128(value);
    const __m128i b = _mm_unpacklo_epi8(a, a);
    const __m128i c = _mm_shufflelo_epi16(b, 0);
    const __m128i m = _mm_set1_epi64x(0x0102040810204080);
    const __m128i d = _mm_cmpeq_epi8(_mm_and_si128(c, m), m);
    const __m128i result = _mm_sub_epi8(_mm_set1_epi8('0'), d);
    _mm_storel_epi64(reinterpret_cast<__m128i*>(dstp), result);
#else
    dstp[0] = '0' | static_cast<char>((value >> 7) & 1);
    dstp[1] = '0' | static_cast<char>((value >> 6) & 1);
    dstp[2] = '0' | static_cast<char>((value >> 5) & 1);
    dstp[3] = '0' | static_cast<char>((value >> 4) & 1);
    dstp[4] = '0' | static_cast<char>((value >> 3) & 1);
    dstp[5] = '0' | static_cast<char>((value >> 2) & 1);
    dstp[6] = '0' | static_cast<char>((value >> 1) & 1);
    dstp[7] = '0' | static_cast<char>(value & 1);
#endif
}
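
// For example (illustrative): cvtCDataToStr(dstp, 0xA5) writes the 8
// characters "10100101" (MSB first, no terminating NUL) to dstp.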

static inline void cvtSDataToStr(char* dstp, SData value) {
#ifdef VL_HAVE_SSE2
    // We want each bit in the 16-bit input value to end up in a byte lane
    // within the 128-bit XMM register. Note that x86 is little-endian and we
    // want the MSB of the input at the low address, so we will bit-reverse
    // at the same time.

    // Put value in bottom of 128-bit register a[15:0] = value
    const __m128i a = _mm_cvtsi32_si128(value);
    // Interleave bytes with themselves
    // b[15: 0] = {2{a[ 7:0]}} == {2{value[ 7:0]}}
    // b[31:16] = {2{a[15:8]}} == {2{value[15:8]}}
    const __m128i b = _mm_unpacklo_epi8(a, a);
    // Shuffle bottom 64 bits, note swapping high bytes with low bytes
    // c[31: 0] = {2{b[31:16]}} == {4{value[15:8]}}
    // c[63:32] = {2{b[15: 0]}} == {4{value[ 7:0]}}
    const __m128i c = _mm_shufflelo_epi16(b, 0x05);
    // Shuffle whole register
    // d[ 63: 0] = {2{c[31: 0]}} == {8{value[15:8]}}
    // d[127:64] = {2{c[63:32]}} == {8{value[ 7:0]}}
    const __m128i d = _mm_shuffle_epi32(c, 0x50);
    // Test each bit within the bytes, this sets each byte lane to 0
    // if the bit for that lane is 0 and to 0xff if the bit is 1.
    const __m128i m = _mm_set1_epi64x(0x0102040810204080);
    const __m128i e = _mm_cmpeq_epi8(_mm_and_si128(d, m), m);
    // Convert to ASCII by subtracting the masks from ASCII '0':
    // '0' - 0 is '0', '0' - -1 is '1'
    const __m128i result = _mm_sub_epi8(_mm_set1_epi8('0'), e);
    // Store the 16 characters to the un-aligned buffer
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dstp), result);
#else
    cvtCDataToStr(dstp, value >> 8);
    cvtCDataToStr(dstp + 8, value);
#endif
}

static inline void cvtIDataToStr(char* dstp, IData value) {
#ifdef VL_HAVE_AVX2
    // Similar to cvtSDataToStr but the bottom 16 bits are processed in the
    // top half of the YMM register
    const __m256i a = _mm256_insert_epi32(_mm256_undefined_si256(), value, 0);
    const __m256i b = _mm256_permute4x64_epi64(a, 0);
    const __m256i s = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
                                      2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3);
    const __m256i c = _mm256_shuffle_epi8(b, s);
    const __m256i m = _mm256_set1_epi64x(0x0102040810204080);
    const __m256i d = _mm256_cmpeq_epi8(_mm256_and_si256(c, m), m);
    const __m256i result = _mm256_sub_epi8(_mm256_set1_epi8('0'), d);
    _mm256_storeu_si256(reinterpret_cast<__m256i*>(dstp), result);
#else
    cvtSDataToStr(dstp, value >> 16);
    cvtSDataToStr(dstp + 16, value);
#endif
}

static inline void cvtQDataToStr(char* dstp, QData value) {
    cvtIDataToStr(dstp, value >> 32);
    cvtIDataToStr(dstp + 32, value);
}

#define cvtEDataToStr cvtIDataToStr

//=========================================================================
// VerilatedTraceBuffer

template <>
VerilatedTraceBuffer<VL_BUF_T>::VerilatedTraceBuffer(Trace& owner)
    : VL_BUF_T{owner}
    , m_sigs_oldvalp{owner.m_sigs_oldvalp}
    , m_sigs_enabledp{owner.m_sigs_enabledp} {}

// These functions must write the new value back into the old value store,
// and subsequently call the format-specific emit* implementations. Note
// that this file must be included in the format-specific implementation, so
// the emit* functions can be inlined for performance.

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullBit(uint32_t* oldp, CData newval) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    *oldp = newval;  // Still copy even if not tracing so chg doesn't call full
    if (VL_UNLIKELY(m_sigs_enabledp && !(VL_BITISSET_W(m_sigs_enabledp, code)))) return;
    emitBit(code, newval);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullEvent(uint32_t* oldp, const VlEventBase* newvalp) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    // No need to update *oldp
    if (newvalp->isTriggered()) emitEvent(code);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullEventTriggered(uint32_t* oldp) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    // No need to update *oldp
    emitEvent(code);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullCData(uint32_t* oldp, CData newval, int bits) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    *oldp = newval;  // Still copy even if not tracing so chg doesn't call full
    if (VL_UNLIKELY(m_sigs_enabledp && !(VL_BITISSET_W(m_sigs_enabledp, code)))) return;
    emitCData(code, newval, bits);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullSData(uint32_t* oldp, SData newval, int bits) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    *oldp = newval;  // Still copy even if not tracing so chg doesn't call full
    if (VL_UNLIKELY(m_sigs_enabledp && !(VL_BITISSET_W(m_sigs_enabledp, code)))) return;
    emitSData(code, newval, bits);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullIData(uint32_t* oldp, IData newval, int bits) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    *oldp = newval;  // Still copy even if not tracing so chg doesn't call full
    if (VL_UNLIKELY(m_sigs_enabledp && !(VL_BITISSET_W(m_sigs_enabledp, code)))) return;
    emitIData(code, newval, bits);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullQData(uint32_t* oldp, QData newval, int bits) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    std::memcpy(oldp, &newval, sizeof(newval));
    if (VL_UNLIKELY(m_sigs_enabledp && !(VL_BITISSET_W(m_sigs_enabledp, code)))) return;
    emitQData(code, newval, bits);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullWData(uint32_t* oldp, const WData* newvalp, int bits) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    for (int i = 0; i < VL_WORDS_I(bits); ++i) oldp[i] = newvalp[i];
    if (VL_UNLIKELY(m_sigs_enabledp && !(VL_BITISSET_W(m_sigs_enabledp, code)))) return;
    emitWData(code, newvalp, bits);
}

template <>
void VerilatedTraceBuffer<VL_BUF_T>::fullDouble(uint32_t* oldp, double newval) {
    const uint32_t code = oldp - m_sigs_oldvalp;
    std::memcpy(oldp, &newval, sizeof(newval));
    if (VL_UNLIKELY(m_sigs_enabledp && !(VL_BITISSET_W(m_sigs_enabledp, code)))) return;
    // cppcheck-suppress invalidPointerCast
    emitDouble(code, newval);
}

//=========================================================================
// VerilatedTraceOffloadBuffer

template <>
VerilatedTraceOffloadBuffer<VL_BUF_T>::VerilatedTraceOffloadBuffer(VL_SUB_T& owner)
    : VerilatedTraceBuffer<VL_BUF_T>{owner}
    , m_offloadBufferWritep{owner.m_offloadBufferWritep}
    , m_offloadBufferEndp{owner.m_offloadBufferEndp} {
    if (m_offloadBufferWritep) {
        using This = VerilatedTraceBuffer<VL_BUF_T>*;
        // Tack on the buffer address
        static_assert(2 * sizeof(uint32_t) >= sizeof(This),
                      "This should be enough on all platforms");
        *m_offloadBufferWritep++ = VerilatedTraceOffloadCommand::TRACE_BUFFER;
        *reinterpret_cast<This*>(m_offloadBufferWritep) = static_cast<This>(this);
        m_offloadBufferWritep += 2;
    }
}
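
// Design note (derived from this file): the two payload words written above
// smuggle a VerilatedTraceBuffer pointer into the offload stream; the worker
// thread recovers it in the TRACE_BUFFER case of offloadWorkerThreadMain()
// and takes ownership via traceBufp.reset().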

#endif  // VL_CPPCHECK