diff --git a/include/verilated_funcs.h b/include/verilated_funcs.h index eea56ac26..8aa30e0db 100644 --- a/include/verilated_funcs.h +++ b/include/verilated_funcs.h @@ -1604,26 +1604,26 @@ static inline IData VL_PACK_II(int obits, int lbits, const VlQueue& q) { return ret; } -template -static inline IData VL_PACK_II(int obits, int lbits, const VlUnpacked& q) { +template +static inline IData VL_PACK_II(int obits, int lbits, const VlUnpacked& q) { IData ret = 0; - for (size_t i = 0; i < T_Depth; ++i) - ret |= static_cast(q[T_Depth - 1 - i]) << (i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + ret |= static_cast(q[N_Depth - 1 - i]) << (i * lbits); return ret; } -template -static inline IData VL_PACK_II(int obits, int lbits, const VlUnpacked& q) { +template +static inline IData VL_PACK_II(int obits, int lbits, const VlUnpacked& q) { IData ret = 0; - for (size_t i = 0; i < T_Depth; ++i) - ret |= static_cast(q[T_Depth - 1 - i]) << (i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + ret |= static_cast(q[N_Depth - 1 - i]) << (i * lbits); return ret; } -template -static inline IData VL_PACK_II(int obits, int lbits, const VlUnpacked& q) { +template +static inline IData VL_PACK_II(int obits, int lbits, const VlUnpacked& q) { IData ret = 0; - for (size_t i = 0; i < T_Depth; ++i) ret |= q[T_Depth - 1 - i] << (i * lbits); + for (size_t i = 0; i < N_Depth; ++i) ret |= q[N_Depth - 1 - i] << (i * lbits); return ret; } @@ -1645,27 +1645,27 @@ static inline QData VL_PACK_QI(int obits, int lbits, const VlQueue& q) { return ret; } -template -static inline QData VL_PACK_QI(int obits, int lbits, const VlUnpacked& q) { +template +static inline QData VL_PACK_QI(int obits, int lbits, const VlUnpacked& q) { QData ret = 0; - for (size_t i = 0; i < T_Depth; ++i) - ret |= static_cast(q[T_Depth - 1 - i]) << (i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + ret |= static_cast(q[N_Depth - 1 - i]) << (i * lbits); return ret; } -template -static inline QData VL_PACK_QI(int obits, int lbits, const VlUnpacked& q) { +template +static inline QData VL_PACK_QI(int obits, int lbits, const VlUnpacked& q) { QData ret = 0; - for (size_t i = 0; i < T_Depth; ++i) - ret |= static_cast(q[T_Depth - 1 - i]) << (i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + ret |= static_cast(q[N_Depth - 1 - i]) << (i * lbits); return ret; } -template -static inline QData VL_PACK_QI(int obits, int lbits, const VlUnpacked& q) { +template +static inline QData VL_PACK_QI(int obits, int lbits, const VlUnpacked& q) { QData ret = 0; - for (size_t i = 0; i < T_Depth; ++i) - ret |= static_cast(q[T_Depth - 1 - i]) << (i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + ret |= static_cast(q[N_Depth - 1 - i]) << (i * lbits); return ret; } @@ -1675,10 +1675,10 @@ static inline QData VL_PACK_QQ(int obits, int lbits, const VlQueue& q) { return ret; } -template -static inline QData VL_PACK_QQ(int obits, int lbits, const VlUnpacked& q) { +template +static inline QData VL_PACK_QQ(int obits, int lbits, const VlUnpacked& q) { QData ret = 0; - for (size_t i = 0; i < T_Depth; ++i) ret |= q[T_Depth - 1 - i] << (i * lbits); + for (size_t i = 0; i < N_Depth; ++i) ret |= q[N_Depth - 1 - i] << (i * lbits); return ret; } @@ -1703,30 +1703,30 @@ static inline WDataOutP VL_PACK_WI(int obits, int lbits, WDataOutP owp, const Vl return owp; } -template +template static inline WDataOutP VL_PACK_WI(int obits, int lbits, WDataOutP owp, - const VlUnpacked& q) { + const VlUnpacked& q) { VL_MEMSET_ZERO_W(owp + 1, VL_WORDS_I(obits) - 1); - for (size_t i = 0; i < T_Depth; ++i) - 
_vl_insert_WI(owp, q[T_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + _vl_insert_WI(owp, q[N_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); return owp; } -template +template static inline WDataOutP VL_PACK_WI(int obits, int lbits, WDataOutP owp, - const VlUnpacked& q) { + const VlUnpacked& q) { VL_MEMSET_ZERO_W(owp + 1, VL_WORDS_I(obits) - 1); - for (size_t i = 0; i < T_Depth; ++i) - _vl_insert_WI(owp, q[T_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + _vl_insert_WI(owp, q[N_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); return owp; } -template +template static inline WDataOutP VL_PACK_WI(int obits, int lbits, WDataOutP owp, - const VlUnpacked& q) { + const VlUnpacked& q) { VL_MEMSET_ZERO_W(owp + 1, VL_WORDS_I(obits) - 1); - for (size_t i = 0; i < T_Depth; ++i) - _vl_insert_WI(owp, q[T_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + _vl_insert_WI(owp, q[N_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); return owp; } @@ -1737,30 +1737,30 @@ static inline WDataOutP VL_PACK_WQ(int obits, int lbits, WDataOutP owp, const Vl return owp; } -template +template static inline WDataOutP VL_PACK_WQ(int obits, int lbits, WDataOutP owp, - const VlUnpacked& q) { + const VlUnpacked& q) { VL_MEMSET_ZERO_W(owp + 1, VL_WORDS_I(obits) - 1); - for (size_t i = 0; i < T_Depth; ++i) - _vl_insert_WQ(owp, q[T_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + _vl_insert_WQ(owp, q[N_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); return owp; } -template +template static inline WDataOutP VL_PACK_WW(int obits, int lbits, WDataOutP owp, - const VlQueue>& q) { + const VlQueue>& q) { VL_MEMSET_ZERO_W(owp + 1, VL_WORDS_I(obits) - 1); for (size_t i = 0; i < q.size(); ++i) _vl_insert_WW(owp, q.at(i), i * lbits + lbits - 1, i * lbits); return owp; } -template +template static inline WDataOutP VL_PACK_WW(int obits, int lbits, WDataOutP owp, - const VlUnpacked, T_Depth>& q) { + const VlUnpacked, N_Depth>& q) { VL_MEMSET_ZERO_W(owp + 1, VL_WORDS_I(obits) - 1); - for (size_t i = 0; i < T_Depth; ++i) - _vl_insert_WW(owp, q[T_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); + for (size_t i = 0; i < N_Depth; ++i) + _vl_insert_WW(owp, q[N_Depth - 1 - i], i * lbits + lbits - 1, i * lbits); return owp; } @@ -2288,8 +2288,8 @@ static inline void VL_UNPACK_QW(int lbits, int rbits, VlQueue& q, WDataIn } } -template -static inline void VL_UNPACK_WW(int lbits, int rbits, VlQueue>& q, WDataInP rwp) { +template +static inline void VL_UNPACK_WW(int lbits, int rbits, VlQueue>& q, WDataInP rwp) { const int size = (rbits + lbits - 1) / lbits; q.renew(size); for (size_t i = 0; i < size; ++i) { @@ -2297,85 +2297,85 @@ static inline void VL_UNPACK_WW(int lbits, int rbits, VlQueue>& q, WDa } } -template -static inline void VL_UNPACK_II(int lbits, int rbits, VlUnpacked& q, IData from) { +template +static inline void VL_UNPACK_II(int lbits, int rbits, VlUnpacked& q, IData from) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) q[i] = (from >> ((T_Depth - 1 - i) * lbits)) & mask; + for (size_t i = 0; i < N_Depth; ++i) q[i] = (from >> ((N_Depth - 1 - i) * lbits)) & mask; } -template -static inline void VL_UNPACK_II(int lbits, int rbits, VlUnpacked& q, IData from) { +template +static inline void VL_UNPACK_II(int lbits, int rbits, VlUnpacked& q, IData from) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) q[i] = 
(from >> ((T_Depth - 1 - i) * lbits)) & mask; + for (size_t i = 0; i < N_Depth; ++i) q[i] = (from >> ((N_Depth - 1 - i) * lbits)) & mask; } -template -static inline void VL_UNPACK_II(int lbits, int rbits, VlUnpacked& q, IData from) { +template +static inline void VL_UNPACK_II(int lbits, int rbits, VlUnpacked& q, IData from) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) q[i] = (from >> ((T_Depth - 1 - i) * lbits)) & mask; + for (size_t i = 0; i < N_Depth; ++i) q[i] = (from >> ((N_Depth - 1 - i) * lbits)) & mask; } -template -static inline void VL_UNPACK_IQ(int lbits, int rbits, VlUnpacked& q, QData from) { +template +static inline void VL_UNPACK_IQ(int lbits, int rbits, VlUnpacked& q, QData from) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) q[i] = (from >> ((T_Depth - 1 - i) * lbits)) & mask; + for (size_t i = 0; i < N_Depth; ++i) q[i] = (from >> ((N_Depth - 1 - i) * lbits)) & mask; } -template -static inline void VL_UNPACK_IQ(int lbits, int rbits, VlUnpacked& q, QData from) { +template +static inline void VL_UNPACK_IQ(int lbits, int rbits, VlUnpacked& q, QData from) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) q[i] = (from >> ((T_Depth - 1 - i) * lbits)) & mask; + for (size_t i = 0; i < N_Depth; ++i) q[i] = (from >> ((N_Depth - 1 - i) * lbits)) & mask; } -template -static inline void VL_UNPACK_IQ(int lbits, int rbits, VlUnpacked& q, QData from) { +template +static inline void VL_UNPACK_IQ(int lbits, int rbits, VlUnpacked& q, QData from) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) q[i] = (from >> ((T_Depth - 1 - i) * lbits)) & mask; + for (size_t i = 0; i < N_Depth; ++i) q[i] = (from >> ((N_Depth - 1 - i) * lbits)) & mask; } -template -static inline void VL_UNPACK_QQ(int lbits, int rbits, VlUnpacked& q, QData from) { +template +static inline void VL_UNPACK_QQ(int lbits, int rbits, VlUnpacked& q, QData from) { const QData mask = VL_MASK_Q(lbits); - for (size_t i = 0; i < T_Depth; ++i) q[i] = (from >> ((T_Depth - 1 - i) * lbits)) & mask; + for (size_t i = 0; i < N_Depth; ++i) q[i] = (from >> ((N_Depth - 1 - i) * lbits)) & mask; } -template -static inline void VL_UNPACK_IW(int lbits, int rbits, VlUnpacked& q, +template +static inline void VL_UNPACK_IW(int lbits, int rbits, VlUnpacked& q, WDataInP rwp) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) - q[i] = VL_SEL_IWII(rbits, rwp, (T_Depth - 1 - i) * lbits, lbits) & mask; + for (size_t i = 0; i < N_Depth; ++i) + q[i] = VL_SEL_IWII(rbits, rwp, (N_Depth - 1 - i) * lbits, lbits) & mask; } -template -static inline void VL_UNPACK_IW(int lbits, int rbits, VlUnpacked& q, +template +static inline void VL_UNPACK_IW(int lbits, int rbits, VlUnpacked& q, WDataInP rwp) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) - q[i] = VL_SEL_IWII(rbits, rwp, (T_Depth - 1 - i) * lbits, lbits) & mask; + for (size_t i = 0; i < N_Depth; ++i) + q[i] = VL_SEL_IWII(rbits, rwp, (N_Depth - 1 - i) * lbits, lbits) & mask; } -template -static inline void VL_UNPACK_IW(int lbits, int rbits, VlUnpacked& q, +template +static inline void VL_UNPACK_IW(int lbits, int rbits, VlUnpacked& q, WDataInP rwp) { const IData mask = VL_MASK_I(lbits); - for (size_t i = 0; i < T_Depth; ++i) - q[i] = VL_SEL_IWII(rbits, rwp, (T_Depth - 1 - i) * lbits, lbits) & mask; + for (size_t i = 0; i < N_Depth; ++i) + q[i] = VL_SEL_IWII(rbits, rwp, (N_Depth - 1 - i) * lbits, lbits) & mask; } -template -static inline void 
VL_UNPACK_QW(int lbits, int rbits, VlUnpacked& q, +template +static inline void VL_UNPACK_QW(int lbits, int rbits, VlUnpacked& q, WDataInP rwp) { const QData mask = VL_MASK_Q(lbits); - for (size_t i = 0; i < T_Depth; ++i) - q[i] = VL_SEL_QWII(rbits, rwp, (T_Depth - 1 - i) * lbits, lbits) & mask; + for (size_t i = 0; i < N_Depth; ++i) + q[i] = VL_SEL_QWII(rbits, rwp, (N_Depth - 1 - i) * lbits, lbits) & mask; } -template -static inline void VL_UNPACK_WW(int lbits, int rbits, VlUnpacked, T_Depth>& q, +template +static inline void VL_UNPACK_WW(int lbits, int rbits, VlUnpacked, N_Depth>& q, WDataInP rwp) { - for (size_t i = 0; i < T_Depth; ++i) - VL_SEL_WWII(lbits, rbits, q[i], rwp, (T_Depth - 1 - i) * lbits, lbits); + for (size_t i = 0; i < N_Depth; ++i) + VL_SEL_WWII(lbits, rbits, q[i], rwp, (N_Depth - 1 - i) * lbits, lbits); } // Return QData from double (numeric) diff --git a/include/verilated_profiler.h b/include/verilated_profiler.h index 6938c338d..08210df56 100644 --- a/include/verilated_profiler.h +++ b/include/verilated_profiler.h @@ -198,7 +198,7 @@ public: //============================================================================= // VlPgoProfiler is for collecting profiling data for PGO -template +template class VlPgoProfiler final { // TYPES struct Record final { @@ -207,7 +207,7 @@ class VlPgoProfiler final { }; // Counters are stored packed, all together to reduce cache effects - std::array m_counters; // Time spent on this record + std::array m_counters; // Time spent on this record std::vector m_records; // Record information public: @@ -216,7 +216,7 @@ public: ~VlPgoProfiler() = default; void write(const char* modelp, const std::string& filename) VL_MT_SAFE; void addCounter(size_t counter, const std::string& name) { - VL_DEBUG_IF(assert(counter < T_Entries);); + VL_DEBUG_IF(assert(counter < N_Entries);); m_records.emplace_back(Record{name, counter}); } void startCounter(size_t counter) { @@ -227,8 +227,8 @@ public: void stopCounter(size_t counter) { m_counters[counter] += VL_CPU_TICK(); } }; -template -void VlPgoProfiler::write(const char* modelp, const std::string& filename) VL_MT_SAFE { +template +void VlPgoProfiler::write(const char* modelp, const std::string& filename) VL_MT_SAFE { static VerilatedMutex s_mutex; const VerilatedLockGuard lock{s_mutex}; diff --git a/include/verilated_random.h b/include/verilated_random.h index d679f3e0e..4dd840149 100644 --- a/include/verilated_random.h +++ b/include/verilated_random.h @@ -250,11 +250,11 @@ public: record_arr_table(var, name, dimension, {}); } } - template - void write_var(VlUnpacked& var, int width, const char* name, int dimension, + template + void write_var(VlUnpacked& var, int width, const char* name, int dimension, std::uint32_t randmodeIdx = std::numeric_limits::max()) { if (m_vars.find(name) != m_vars.end()) return; - m_vars[name] = std::make_shared>>( + m_vars[name] = std::make_shared>>( name, width, &var, dimension, randmodeIdx); if (dimension > 0) { idx = 0; @@ -295,11 +295,11 @@ public: ++idx; } } - template - void record_arr_table(VlUnpacked& var, const std::string name, int dimension, + template + void record_arr_table(VlUnpacked& var, const std::string name, int dimension, std::vector indices) { - if ((dimension > 0) && (N != 0)) { - for (size_t i = 0; i < N; ++i) { + if ((dimension > 0) && (N_Depth != 0)) { + for (size_t i = 0; i < N_Depth; ++i) { const std::string indexed_name = name + "[" + std::to_string(i) + "]"; indices.push_back(i); record_arr_table(var.operator[](i), indexed_name, dimension - 1, 
indices); diff --git a/include/verilated_threads.h b/include/verilated_threads.h index 7ba0a4f33..48a927789 100644 --- a/include/verilated_threads.h +++ b/include/verilated_threads.h @@ -161,10 +161,10 @@ public: ~VlWorkerThread(); // METHODS - template + template void dequeWork(ExecRec* workp) VL_MT_SAFE_EXCLUDES(m_mutex) { // Spin for a while, waiting for new data - if VL_CONSTEXPR_CXX17 (SpinWait) { + if VL_CONSTEXPR_CXX17 (N_SpinWait) { for (unsigned i = 0; i < VL_LOCK_SPINS; ++i) { if (VL_LIKELY(m_ready_size.load(std::memory_order_relaxed))) break; VL_CPU_RELAX(); diff --git a/include/verilated_types.h b/include/verilated_types.h index fe6db1774..a7a72d5e1 100644 --- a/include/verilated_types.h +++ b/include/verilated_types.h @@ -164,12 +164,12 @@ inline std::string VL_TO_STRING(const VlProcessRef& p) { return std::string("pro //=================================================================== // Activity trigger vector -template // +template // class VlTriggerVec final { - // TODO: static assert T_size > 0, and don't generate when empty + // TODO: static assert N_Size > 0, and don't generate when empty // MEMBERS - alignas(16) std::array(T_size) / 64> m_flags; // The flags + alignas(16) std::array(N_Size) / 64> m_flags; // The flags public: // CONSTRUCTOR @@ -200,12 +200,12 @@ public: } // Set all elements true in 'this' that are set in 'other' - void thisOr(const VlTriggerVec& other) { + void thisOr(const VlTriggerVec& other) { for (size_t i = 0; i < m_flags.size(); ++i) m_flags[i] |= other.m_flags[i]; } // Set elements of 'this' to 'a & !b' element-wise - void andNot(const VlTriggerVec& a, const VlTriggerVec& b) { + void andNot(const VlTriggerVec& a, const VlTriggerVec& b) { for (size_t i = 0; i < m_flags.size(); ++i) m_flags[i] = a.m_flags[i] & ~b.m_flags[i]; } }; @@ -309,7 +309,7 @@ public: size_t operator()() { return VL_MASK_I(31) & vl_rand64(); } }; -template +template class VlRandC final { T_Value m_remaining = 0; // Number of values to pull before re-randomize T_Value m_lfsr = 1; // LFSR state @@ -317,8 +317,8 @@ class VlRandC final { public: // CONSTRUCTORS VlRandC() { - static_assert(T_numValues >= 1, ""); - static_assert(sizeof(T_Value) == 8 || (T_numValues < (1ULL << (8 * sizeof(T_Value)))), ""); + static_assert(N_NumValues >= 1, ""); + static_assert(sizeof(T_Value) == 8 || (N_NumValues < (1ULL << (8 * sizeof(T_Value)))), ""); } // METHODS T_Value randomize(VlRNG& rngr) { @@ -337,23 +337,23 @@ public: 0x80000057ULL, // 32 0x100000029ULL // 33 }; - constexpr uint32_t clogWidth = VL_CLOG2_CE_Q(T_numValues) + 1; + constexpr uint32_t clogWidth = VL_CLOG2_CE_Q(N_NumValues) + 1; constexpr uint32_t lfsrWidth = (clogWidth < 2) ? 2 : clogWidth; constexpr T_Value polynomial = static_cast(s_polynomials[lfsrWidth]); - // printf(" numV=%ld w=%d poly=%x\n", T_numValues, lfsrWidth, polynomial); + // printf(" numV=%ld w=%d poly=%x\n", N_NumValues, lfsrWidth, polynomial); // Loop until get reasonable value. Because we picked a LFSR of at most one // extra bit in width, this will only require at most on average 1.5 loops do { m_lfsr = (m_lfsr & 1ULL) ? ((m_lfsr >> 1ULL) ^ polynomial) : (m_lfsr >> 1ULL); - } while (m_lfsr > T_numValues); // Note if == then output value 0 + } while (m_lfsr > N_NumValues); // Note if == then output value 0 --m_remaining; - T_Value result = (m_lfsr == T_numValues) ? 0 : m_lfsr; - // printf(" result=%x (numv=%ld, rem=%d)\n", result, T_numValues, m_remaining); + T_Value result = (m_lfsr == N_NumValues) ? 
0 : m_lfsr; + // printf(" result=%x (numv=%ld, rem=%d)\n", result, N_NumValues, m_remaining); return result; } void reseed(VlRNG& rngr) { - constexpr uint32_t lfsrWidth = VL_CLOG2_CE_Q(T_numValues) + 1; - m_remaining = T_numValues; + constexpr uint32_t lfsrWidth = VL_CLOG2_CE_Q(N_NumValues) + 1; + m_remaining = N_NumValues; do { m_lfsr = rngr.rand64() & VL_MASK_Q(lfsrWidth); // printf(" lfsr.reseed=%x\n", m_lfsr); @@ -414,23 +414,23 @@ public: static int _vl_cmp_w(int words, WDataInP const lwp, WDataInP const rwp) VL_PURE; -template +template struct VlWide; // Type trait to check if a type is VlWide template struct VlIsVlWide : public std::false_type {}; -template -struct VlIsVlWide> : public std::true_type {}; +template +struct VlIsVlWide> : public std::true_type {}; -template +template struct VlWide final { - static constexpr size_t Words = T_Words; + static constexpr size_t Words = N_Words; // MEMBERS // This should be the only data member, otherwise generated static initializers need updating - EData m_storage[T_Words]; // Contents of the packed array + EData m_storage[N_Words]; // Contents of the packed array // CONSTRUCTORS // Default constructors and destructor are used. Note however that C++20 requires that @@ -441,8 +441,8 @@ struct VlWide final { // Default copy assignment operators are used. operator WDataOutP() VL_PURE { return &m_storage[0]; } // This also allows [] operator WDataInP() const VL_PURE { return &m_storage[0]; } // This also allows [] - bool operator!=(const VlWide& that) const VL_PURE { - for (size_t i = 0; i < T_Words; ++i) { + bool operator!=(const VlWide& that) const VL_PURE { + for (size_t i = 0; i < N_Words; ++i) { if (m_storage[i] != that.m_storage[i]) return true; } return false; @@ -453,21 +453,21 @@ struct VlWide final { EData& at(size_t index) { return m_storage[index]; } WData* data() { return &m_storage[0]; } const WData* data() const { return &m_storage[0]; } - bool operator<(const VlWide& rhs) const { - return _vl_cmp_w(T_Words, data(), rhs.data()) < 0; + bool operator<(const VlWide& rhs) const { + return _vl_cmp_w(N_Words, data(), rhs.data()) < 0; } }; // Convert a C array to std::array reference by pointer magic, without copy. // Data type (second argument) is so the function template can automatically generate. -template -VlWide& VL_CVT_W_A(const WDataInP inp, const VlWide&) { - return *((VlWide*)inp); +template +VlWide& VL_CVT_W_A(const WDataInP inp, const VlWide&) { + return *((VlWide*)inp); } -template -std::string VL_TO_STRING(const VlWide& obj) { - return VL_TO_STRING_W(T_Words, obj.data()); +template +std::string VL_TO_STRING(const VlWide& obj) { + return VL_TO_STRING_W(N_Words, obj.data()); } //=================================================================== @@ -477,7 +477,7 @@ std::string VL_TO_STRING(const VlWide& obj) { // // Bound here is the maximum size() allowed, e.g. 1 + SystemVerilog bound // For dynamic arrays it is always zero -template +template class VlQueue final { private: // TYPES @@ -485,8 +485,8 @@ private: public: using const_iterator = typename Deque::const_iterator; - template - using WithFuncReturnType = decltype(std::declval()(0, std::declval())); + template + using WithFuncReturnType = decltype(std::declval()(0, std::declval())); private: // MEMBERS @@ -506,11 +506,11 @@ public: bool operator!=(const VlQueue& rhs) const { return m_deque != rhs.m_deque; } // Standard copy constructor works. 
Verilog: assoca = assocb - // Also must allow conversion from a different T_MaxSize queue - template - VlQueue operator=(const VlQueue& rhs) { + // Also must allow conversion from a different N_MaxSize queue + template + VlQueue operator=(const VlQueue& rhs) { m_deque = rhs.privateDeque(); - if (VL_UNLIKELY(T_MaxSize && T_MaxSize < m_deque.size())) m_deque.resize(T_MaxSize - 1); + if (VL_UNLIKELY(N_MaxSize && N_MaxSize < m_deque.size())) m_deque.resize(N_MaxSize - 1); return *this; } @@ -562,7 +562,7 @@ public: m_deque.resize(size, atDefault()); } // Dynamic array new[]() becomes a renew_copy() - void renew_copy(size_t size, const VlQueue& rhs) { + void renew_copy(size_t size, const VlQueue& rhs) { if (size == 0) { clear(); } else { @@ -575,11 +575,11 @@ public: // function void q.push_front(value) void push_front(const T_Value& value) { m_deque.push_front(value); - if (VL_UNLIKELY(T_MaxSize != 0 && m_deque.size() > T_MaxSize)) m_deque.pop_back(); + if (VL_UNLIKELY(N_MaxSize != 0 && m_deque.size() > N_MaxSize)) m_deque.pop_back(); } // function void q.push_back(value) void push_back(const T_Value& value) { - if (VL_LIKELY(T_MaxSize == 0 || m_deque.size() < T_MaxSize)) m_deque.push_back(value); + if (VL_LIKELY(N_MaxSize == 0 || m_deque.size() < N_MaxSize)) m_deque.push_back(value); } // function value_t q.pop_front(); T_Value pop_front() { @@ -600,7 +600,7 @@ public: T_Value& atWrite(int32_t index) { // cppcheck-suppress variableScope static thread_local T_Value t_throwAway; - // Needs to work for dynamic arrays, so does not use T_MaxSize + // Needs to work for dynamic arrays, so does not use N_MaxSize if (VL_UNLIKELY(index < 0 || index >= m_deque.size())) { t_throwAway = atDefault(); return t_throwAway; @@ -621,7 +621,7 @@ public: } // Accessing. Verilog: v = assoc[index] const T_Value& at(int32_t index) const { - // Needs to work for dynamic arrays, so does not use T_MaxSize + // Needs to work for dynamic arrays, so does not use N_MaxSize if (VL_UNLIKELY(index < 0 || index >= m_deque.size())) { return atDefault(); } else { @@ -665,8 +665,8 @@ public: // Methods void sort() { std::sort(m_deque.begin(), m_deque.end()); } - template - void sort(Func with_func) { + template + void sort(T_Func with_func) { // with_func returns arbitrary type to use for the sort comparison std::sort(m_deque.begin(), m_deque.end(), [=](const T_Value& a, const T_Value& b) { // index number is meaningless with sort, as it changes @@ -674,8 +674,8 @@ public: }); } void rsort() { std::sort(m_deque.rbegin(), m_deque.rend()); } - template - void rsort(Func with_func) { + template + void rsort(T_Func with_func) { // with_func returns arbitrary type to use for the sort comparison std::sort(m_deque.rbegin(), m_deque.rend(), [=](const T_Value& a, const T_Value& b) { // index number is meaningless with sort, as it changes @@ -696,8 +696,8 @@ public: } return out; } - template - VlQueue unique(Func with_func) const { + template + VlQueue unique(T_Func with_func) const { VlQueue out; std::set saw; for (const auto& i : m_deque) { @@ -724,8 +724,8 @@ public: } return out; } - template - VlQueue unique_index(Func with_func) const { + template + VlQueue unique_index(T_Func with_func) const { VlQueue out; IData index = 0; std::set saw; @@ -740,8 +740,8 @@ public: } return out; } - template - VlQueue find(Func with_func) const { + template + VlQueue find(T_Func with_func) const { VlQueue out; IData index = 0; for (const auto& i : m_deque) { @@ -750,8 +750,8 @@ public: } return out; } - template - VlQueue find_index(Func with_func) 
const { + template + VlQueue find_index(T_Func with_func) const { VlQueue out; IData index = 0; for (const auto& i : m_deque) { @@ -760,8 +760,8 @@ public: } return out; } - template - VlQueue find_first(Func with_func) const { + template + VlQueue find_first(T_Func with_func) const { // Can't use std::find_if as need index number IData index = 0; for (const auto& i : m_deque) { @@ -770,8 +770,8 @@ public: } return VlQueue{}; } - template - VlQueue find_first_index(Func with_func) const { + template + VlQueue find_first_index(T_Func with_func) const { IData index = 0; for (const auto& i : m_deque) { if (with_func(index, i)) return VlQueue::consV(index); @@ -779,8 +779,8 @@ public: } return VlQueue{}; } - template - VlQueue find_last(Func with_func) const { + template + VlQueue find_last(T_Func with_func) const { IData index = m_deque.size() - 1; for (auto& item : vlstd::reverse_view(m_deque)) { if (with_func(index, item)) return VlQueue::consV(item); @@ -788,8 +788,8 @@ public: } return VlQueue{}; } - template - VlQueue find_last_index(Func with_func) const { + template + VlQueue find_last_index(T_Func with_func) const { IData index = m_deque.size() - 1; for (auto& item : vlstd::reverse_view(m_deque)) { if (with_func(index, item)) return VlQueue::consV(index); @@ -804,8 +804,8 @@ public: const auto it = std::min_element(m_deque.cbegin(), m_deque.cend()); return VlQueue::consV(*it); } - template - VlQueue min(Func with_func) const { + template + VlQueue min(T_Func with_func) const { if (m_deque.empty()) return VlQueue{}; const auto it = std::min_element(m_deque.cbegin(), m_deque.cend(), [&with_func](const IData& a, const IData& b) { @@ -818,8 +818,8 @@ public: const auto it = std::max_element(m_deque.cbegin(), m_deque.cend()); return VlQueue::consV(*it); } - template - VlQueue max(Func with_func) const { + template + VlQueue max(T_Func with_func) const { if (m_deque.empty()) return VlQueue{}; const auto it = std::max_element(m_deque.cbegin(), m_deque.cend(), [&with_func](const IData& a, const IData& b) { @@ -833,9 +833,9 @@ public: for (const auto& i : m_deque) out += i; return out; } - template - WithFuncReturnType r_sum(Func with_func) const { - WithFuncReturnType out = WithFuncReturnType(0); + template + WithFuncReturnType r_sum(T_Func with_func) const { + WithFuncReturnType out = WithFuncReturnType(0); IData index = 0; for (const auto& i : m_deque) out += with_func(index++, i); return out; @@ -846,10 +846,10 @@ public: for (const auto& i : m_deque) out *= i; return out; } - template - WithFuncReturnType r_product(Func with_func) const { - if (m_deque.empty()) return WithFuncReturnType(0); // The big three do it this way - WithFuncReturnType out = WithFuncReturnType(1); + template + WithFuncReturnType r_product(T_Func with_func) const { + if (m_deque.empty()) return WithFuncReturnType(0); // The big three do it this way + WithFuncReturnType out = WithFuncReturnType(1); IData index = 0; for (const auto& i : m_deque) out *= with_func(index++, i); return out; @@ -860,11 +860,11 @@ public: for (const auto& i : m_deque) out &= i; return out; } - template - WithFuncReturnType r_and(Func with_func) const { - if (m_deque.empty()) return WithFuncReturnType(0); // The big three do it this way + template + WithFuncReturnType r_and(T_Func with_func) const { + if (m_deque.empty()) return WithFuncReturnType(0); // The big three do it this way IData index = 0; - WithFuncReturnType out = ~WithFuncReturnType(0); + WithFuncReturnType out = ~WithFuncReturnType(0); for (const auto& i : m_deque) out &= 
with_func(index++, i); return out; } @@ -873,9 +873,9 @@ public: for (const auto& i : m_deque) out |= i; return out; } - template - WithFuncReturnType r_or(Func with_func) const { - WithFuncReturnType out = WithFuncReturnType(0); + template + WithFuncReturnType r_or(T_Func with_func) const { + WithFuncReturnType out = WithFuncReturnType(0); IData index = 0; for (const auto& i : m_deque) out |= with_func(index++, i); return out; @@ -888,9 +888,9 @@ public: for (const auto& i : m_deque) out ^= i; return out; } - template - WithFuncReturnType r_xor(Func with_func) const { - WithFuncReturnType out = WithFuncReturnType(0); + template + WithFuncReturnType r_xor(T_Func with_func) const { + WithFuncReturnType out = WithFuncReturnType(0); IData index = 0; for (const auto& i : m_deque) out ^= with_func(index++, i); return out; @@ -909,8 +909,8 @@ public: } }; -template -std::string VL_TO_STRING(const VlQueue& obj) { +template +std::string VL_TO_STRING(const VlQueue& obj) { return obj.to_string(); } @@ -927,9 +927,9 @@ private: public: using const_iterator = typename Map::const_iterator; - template + template using WithFuncReturnType - = decltype(std::declval()(std::declval(), std::declval())); + = decltype(std::declval()(std::declval(), std::declval())); private: // MEMBERS @@ -1038,8 +1038,8 @@ public: } return out; } - template - VlQueue unique(Func with_func) const { + template + VlQueue unique(T_Func with_func) const { VlQueue out; T_Key default_key; using WithType = decltype(with_func(m_map.begin()->first, m_map.begin()->second)); @@ -1066,8 +1066,8 @@ public: } return out; } - template - VlQueue unique_index(Func with_func) const { + template + VlQueue unique_index(T_Func with_func) const { VlQueue out; using WithType = decltype(with_func(m_map.begin()->first, m_map.begin()->second)); std::set saw; @@ -1081,22 +1081,22 @@ public: } return out; } - template - VlQueue find(Func with_func) const { + template + VlQueue find(T_Func with_func) const { VlQueue out; for (const auto& i : m_map) if (with_func(i.first, i.second)) out.push_back(i.second); return out; } - template - VlQueue find_index(Func with_func) const { + template + VlQueue find_index(T_Func with_func) const { VlQueue out; for (const auto& i : m_map) if (with_func(i.first, i.second)) out.push_back(i.first); return out; } - template - VlQueue find_first(Func with_func) const { + template + VlQueue find_first(T_Func with_func) const { const auto it = std::find_if(m_map.cbegin(), m_map.cend(), [=](const std::pair& i) { return with_func(i.first, i.second); @@ -1104,8 +1104,8 @@ public: if (it == m_map.end()) return VlQueue{}; return VlQueue::consV(it->second); } - template - VlQueue find_first_index(Func with_func) const { + template + VlQueue find_first_index(T_Func with_func) const { const auto it = std::find_if(m_map.cbegin(), m_map.cend(), [=](const std::pair& i) { return with_func(i.first, i.second); @@ -1113,16 +1113,16 @@ public: if (it == m_map.end()) return VlQueue{}; return VlQueue::consV(it->first); } - template - VlQueue find_last(Func with_func) const { + template + VlQueue find_last(T_Func with_func) const { const auto it = std::find_if( m_map.crbegin(), m_map.crend(), [=](const std::pair& i) { return with_func(i.first, i.second); }); if (it == m_map.rend()) return VlQueue{}; return VlQueue::consV(it->second); } - template - VlQueue find_last_index(Func with_func) const { + template + VlQueue find_last_index(T_Func with_func) const { const auto it = std::find_if( m_map.crbegin(), m_map.crend(), [=](const std::pair& i) { 
return with_func(i.first, i.second); }); @@ -1140,8 +1140,8 @@ public: }); return VlQueue::consV(it->second); } - template - VlQueue min(Func with_func) const { + template + VlQueue min(T_Func with_func) const { if (m_map.empty()) return VlQueue(); const auto it = std::min_element( m_map.cbegin(), m_map.cend(), @@ -1159,8 +1159,8 @@ public: }); return VlQueue::consV(it->second); } - template - VlQueue max(Func with_func) const { + template + VlQueue max(T_Func with_func) const { if (m_map.empty()) return VlQueue(); const auto it = std::max_element( m_map.cbegin(), m_map.cend(), @@ -1175,9 +1175,9 @@ public: for (const auto& i : m_map) out += i.second; return out; } - template - WithFuncReturnType r_sum(Func with_func) const { - WithFuncReturnType out = WithFuncReturnType(0); + template + WithFuncReturnType r_sum(T_Func with_func) const { + WithFuncReturnType out = WithFuncReturnType(0); for (const auto& i : m_map) out += with_func(i.first, i.second); return out; } @@ -1187,10 +1187,10 @@ public: for (const auto& i : m_map) out *= i.second; return out; } - template - WithFuncReturnType r_product(Func with_func) const { - if (m_map.empty()) return WithFuncReturnType(0); // The big three do it this way - WithFuncReturnType out = WithFuncReturnType(1); + template + WithFuncReturnType r_product(T_Func with_func) const { + if (m_map.empty()) return WithFuncReturnType(0); // The big three do it this way + WithFuncReturnType out = WithFuncReturnType(1); for (const auto& i : m_map) out *= with_func(i.first, i.second); return out; } @@ -1200,10 +1200,10 @@ public: for (const auto& i : m_map) out &= i.second; return out; } - template - WithFuncReturnType r_and(Func with_func) const { - if (m_map.empty()) return WithFuncReturnType(0); // The big three do it this way - WithFuncReturnType out = ~WithFuncReturnType(0); + template + WithFuncReturnType r_and(T_Func with_func) const { + if (m_map.empty()) return WithFuncReturnType(0); // The big three do it this way + WithFuncReturnType out = ~WithFuncReturnType(0); for (const auto& i : m_map) out &= with_func(i.first, i.second); return out; } @@ -1212,8 +1212,8 @@ public: for (const auto& i : m_map) out |= i.second; return out; } - template - T_Value r_or(Func with_func) const { + template + T_Value r_or(T_Func with_func) const { T_Value out = T_Value(0); for (const auto& i : m_map) out |= with_func(i.first, i.second); return out; @@ -1223,9 +1223,9 @@ public: for (const auto& i : m_map) out ^= i.second; return out; } - template - WithFuncReturnType r_xor(Func with_func) const { - WithFuncReturnType out = WithFuncReturnType(0); + template + WithFuncReturnType r_xor(T_Func with_func) const { + WithFuncReturnType out = WithFuncReturnType(0); for (const auto& i : m_map) out ^= with_func(i.first, i.second); return out; } @@ -1287,11 +1287,11 @@ void VL_WRITEMEM_N(bool hex, int bits, const std::string& filename, /// This class may get exposed to a Verilated Model's top I/O, if the top /// IO has an unpacked array. 
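// A hedged usage sketch of the T_Func-taking reduction/search methods of VlQueue/VlAssocArray
// changed above, showing how a SystemVerilog 'with' clause becomes a lambda taking
// (index, element).  The queue contents and the example expressions are illustrative
// assumptions, not taken from this patch.
//   SystemVerilog:  int q[$] = '{1, 2, 3};
//                   int s = q.sum() with (item * item);              // 14
//                   int i[$] = q.find_first_index() with (item > 1);
#include "verilated.h"  // VlQueue, IData

static void withClauseSketch() {
    VlQueue<IData> q;
    q.push_back(1);
    q.push_back(2);
    q.push_back(3);
    // sum() with (...) -> r_sum(lambda); the lambda's first argument is the element index
    const IData s = q.r_sum([](IData /*index*/, IData item) { return item * item; });  // 14
    // find_first_index() with (...): empty result queue if no match, else the single index
    const VlQueue<IData> idx = q.find_first_index([](IData, IData item) { return item > 1; });
    (void)s;
    (void)idx;
}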
-template +template class VlUnpacked final { // TYPES using T_Key = IData; // Index type, for uniformity with other containers - using Unpacked = T_Value[T_Depth]; + using Unpacked = T_Value[N_Depth]; public: // MEMBERS @@ -1312,41 +1312,41 @@ public: WData* data() { return &m_storage[0]; } const WData* data() const { return &m_storage[0]; } - std::size_t size() const { return T_Depth; } + std::size_t size() const { return N_Depth; } // To fit C++14 - template + template int find_length(int dimension, std::false_type) const { return size(); } - template + template int find_length(int dimension, std::true_type) const { - if (dimension == CurrentDimension) { + if (dimension == N_CurrentDimension) { return size(); } else { - return m_storage[0].template find_length(dimension); + return m_storage[0].template find_length(dimension); } } - template + template int find_length(int dimension) const { - return find_length(dimension, std::is_class{}); + return find_length(dimension, std::is_class{}); } - template + template auto& find_element(const std::vector& indices, std::false_type) { - return m_storage[indices[CurrentDimension]]; + return m_storage[indices[N_CurrentDimension]]; } - template + template auto& find_element(const std::vector& indices, std::true_type) { - return m_storage[indices[CurrentDimension]].template find_element( - indices); + return m_storage[indices[N_CurrentDimension]] + .template find_element(indices); } - template + template auto& find_element(const std::vector& indices) { - return find_element(indices, std::is_class{}); + return find_element(indices, std::is_class{}); } T_Value& operator[](size_t index) { return m_storage[index]; } @@ -1354,15 +1354,15 @@ public: // *this != that, which might be used for change detection/trigger computation, but avoid // operator overloading in VlUnpacked for safety in other contexts. 
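// A hedged sketch of nesting the fixed-size VlUnpacked container above to model a
// multi-dimensional unpacked array; the SystemVerilog declaration and the element widths are
// illustrative assumptions, not taken from this patch.
//   SystemVerilog:  int mtx [3][4];   // roughly maps to VlUnpacked<VlUnpacked<IData, 4>, 3>
#include "verilated.h"  // VlUnpacked, IData

static void fillDiagonal(VlUnpacked<VlUnpacked<IData, 4>, 3>& mtx) {
    for (std::size_t r = 0; r < mtx.size(); ++r) {           // outer depth == 3
        for (std::size_t c = 0; c < mtx[r].size(); ++c) {    // inner depth == 4
            mtx[r][c] = (r == c) ? 1 : 0;                    // element access composes operator[]
        }
    }
    // The find_length()/find_element() helpers above walk this same nesting recursively,
    // which is what the randomization table code in verilated_random.h relies on.
}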
- bool neq(const VlUnpacked& that) const { return neq(*this, that); } + bool neq(const VlUnpacked& that) const { return neq(*this, that); } // Similar to 'neq' above, *this = that used for change detection - void assign(const VlUnpacked& that) { *this = that; } - bool operator==(const VlUnpacked& that) const { return !neq(that); } - bool operator!=(const VlUnpacked& that) const { return neq(that); } + void assign(const VlUnpacked& that) { *this = that; } + bool operator==(const VlUnpacked& that) const { return !neq(that); } + bool operator!=(const VlUnpacked& that) const { return neq(that); } // interface to C style arrays (used in ports), see issue #5125 - bool neq(const T_Value that[T_Depth]) const { return neq(*this, that); } - void assign(const T_Value that[T_Depth]) { std::copy_n(that, T_Depth, m_storage); } - void operator=(const T_Value that[T_Depth]) { assign(that); } + bool neq(const T_Value that[N_Depth]) const { return neq(*this, that); } + void assign(const T_Value that[N_Depth]) { std::copy_n(that, N_Depth, m_storage); } + void operator=(const T_Value that[N_Depth]) { assign(that); } // inside (set membership operator) bool inside(const T_Value& value) const { @@ -1370,8 +1370,8 @@ public: } void sort() { std::sort(std::begin(m_storage), std::end(m_storage)); } - template - void sort(Func with_func) { + template + void sort(T_Func with_func) { // with_func returns arbitrary type to use for the sort comparison std::sort(std::begin(m_storage), std::end(m_storage), [=](const T_Value& a, const T_Value& b) { @@ -1383,8 +1383,8 @@ public: void rsort() { std::sort(std::begin(m_storage), std::end(m_storage), std::greater()); } - template - void rsort(Func with_func) { + template + void rsort(T_Func with_func) { // with_func returns arbitrary type to use for the sort comparison // std::rbegin/std::rend not available until C++14, so using > below std::sort(std::begin(m_storage), std::end(m_storage), @@ -1407,8 +1407,8 @@ public: } return out; } - template - VlQueue unique(Func with_func) const { + template + VlQueue unique(T_Func with_func) const { VlQueue out; std::set saw; for (const auto& i : m_storage) { @@ -1435,8 +1435,8 @@ public: } return out; } - template - VlQueue unique_index(Func with_func) const { + template + VlQueue unique_index(T_Func with_func) const { VlQueue out; IData index = 0; std::set saw; @@ -1451,8 +1451,8 @@ public: } return out; } - template - VlQueue find(Func with_func) const { + template + VlQueue find(T_Func with_func) const { VlQueue out; IData index = 0; for (const auto& i : m_storage) { @@ -1461,8 +1461,8 @@ public: } return out; } - template - VlQueue find_index(Func with_func) const { + template + VlQueue find_index(T_Func with_func) const { VlQueue out; IData index = 0; for (const auto& i : m_storage) { @@ -1471,8 +1471,8 @@ public: } return out; } - template - VlQueue find_first(Func with_func) const { + template + VlQueue find_first(T_Func with_func) const { // Can't use std::find_if as need index number IData index = 0; for (const auto& i : m_storage) { @@ -1481,8 +1481,8 @@ public: } return VlQueue{}; } - template - VlQueue find_first_index(Func with_func) const { + template + VlQueue find_first_index(T_Func with_func) const { IData index = 0; for (const auto& i : m_storage) { if (with_func(index, i)) return VlQueue::consV(index); @@ -1490,16 +1490,16 @@ public: } return VlQueue{}; } - template - VlQueue find_last(Func with_func) const { - for (int i = T_Depth - 1; i >= 0; i--) { + template + VlQueue find_last(T_Func with_func) const { + for (int 
i = N_Depth - 1; i >= 0; i--) { if (with_func(i, m_storage[i])) return VlQueue::consV(m_storage[i]); } return VlQueue{}; } - template - VlQueue find_last_index(Func with_func) const { - for (int i = T_Depth - 1; i >= 0; i--) { + template + VlQueue find_last_index(T_Func with_func) const { + for (int i = N_Depth - 1; i >= 0; i--) { if (with_func(i, m_storage[i])) return VlQueue::consV(i); } return VlQueue{}; @@ -1510,8 +1510,8 @@ public: const auto it = std::min_element(std::begin(m_storage), std::end(m_storage)); return VlQueue::consV(*it); } - template - VlQueue min(Func with_func) const { + template + VlQueue min(T_Func with_func) const { const auto it = std::min_element(std::begin(m_storage), std::end(m_storage), [&with_func](const IData& a, const IData& b) { return with_func(0, a) < with_func(0, b); @@ -1522,8 +1522,8 @@ public: const auto it = std::max_element(std::begin(m_storage), std::end(m_storage)); return VlQueue::consV(*it); } - template - VlQueue max(Func with_func) const { + template + VlQueue max(T_Func with_func) const { const auto it = std::max_element(std::begin(m_storage), std::end(m_storage), [&with_func](const IData& a, const IData& b) { return with_func(0, a) < with_func(0, b); @@ -1535,7 +1535,7 @@ public: std::string to_string() const { std::string out = "'{"; std::string comma; - for (int i = 0; i < T_Depth; ++i) { + for (int i = 0; i < N_Depth; ++i) { out += comma + VL_TO_STRING(m_storage[i]); comma = ", "; } @@ -1543,18 +1543,18 @@ public: } private: - template - static bool neq(const VlUnpacked& a, const VlUnpacked& b) { - for (size_t i = 0; i < T_Dep; ++i) { + template + static bool neq(const VlUnpacked& a, const VlUnpacked& b) { + for (size_t i = 0; i < N_Dep; ++i) { // Recursive 'neq', in case T_Val is also a VlUnpacked<_, _> if (neq(a.m_storage[i], b.m_storage[i])) return true; } return false; } - template - static bool neq(const VlUnpacked& a, const T_Val b[T_Dep]) { - for (size_t i = 0; i < T_Dep; ++i) { + template + static bool neq(const VlUnpacked& a, const T_Val b[N_Dep]) { + for (size_t i = 0; i < N_Dep; ++i) { // Recursive 'neq', in case T_Val is also a VlUnpacked<_, _> if (neq(a.m_storage[i], b[i])) return true; } @@ -1568,25 +1568,25 @@ private: } }; -template -std::string VL_TO_STRING(const VlUnpacked& obj) { +template +std::string VL_TO_STRING(const VlUnpacked& obj) { return obj.to_string(); } //=================================================================== // Helper to apply the given indices to a target expression -template +template struct VlApplyIndices final { VL_ATTR_ALWINLINE static auto& apply(T_Target& target, const size_t* indicesp) { - return VlApplyIndices::apply( - target[indicesp[Curr]], indicesp); + return VlApplyIndices::apply( + target[indicesp[N_Curr]], indicesp); } }; -template -struct VlApplyIndices final { +template +struct VlApplyIndices final { VL_ATTR_ALWINLINE static T_Target& apply(T_Target& target, const size_t*) { return target; } }; @@ -1621,17 +1621,17 @@ template class VlNBACommitQueue; // Specialization for whole element updates only -template -class VlNBACommitQueue final { +template +class VlNBACommitQueue final { // TYPES struct Entry final { T_Element value; - size_t indices[T_Rank]; + size_t indices[N_Rank]; }; // STATE @@ -1643,8 +1643,8 @@ public: VL_UNCOPYABLE(VlNBACommitQueue); // METHODS - template - void enqueue(const T_Element& value, Args... indices) { + template + void enqueue(const T_Element& value, T_Args... 
indices) { m_pending.emplace_back(Entry{value, {indices...}}); } @@ -1654,20 +1654,20 @@ public: void commit(T_Commit& target) { if (m_pending.empty()) return; for (const Entry& entry : m_pending) { - VlApplyIndices<0, T_Rank, T_Commit>::apply(target, entry.indices) = entry.value; + VlApplyIndices<0, N_Rank, T_Commit>::apply(target, entry.indices) = entry.value; } m_pending.clear(); } }; // With partial element updates -template -class VlNBACommitQueue final { +template +class VlNBACommitQueue final { // TYPES struct Entry final { T_Element value; T_Element mask; - size_t indices[T_Rank]; + size_t indices[N_Rank]; }; // STATE @@ -1728,8 +1728,8 @@ public: VL_UNCOPYABLE(VlNBACommitQueue); // METHODS - template - void enqueue(const T_Element& value, const T_Element& mask, Args... indices) { + template + void enqueue(const T_Element& value, const T_Element& mask, T_Args... indices) { m_pending.emplace_back(Entry{value, mask, {indices...}}); } @@ -1739,7 +1739,7 @@ public: void commit(T_Commit& target) { if (m_pending.empty()) return; for (const Entry& entry : m_pending) { // - auto& ref = VlApplyIndices<0, T_Rank, T_Commit>::apply(target, entry.indices); + auto& ref = VlApplyIndices<0, N_Rank, T_Commit>::apply(target, entry.indices); // Maybe inefficient, but it works for now ... const auto oldValue = ref; ref = bOr(bAnd(entry.value, entry.mask), bAnd(oldValue, bNot(entry.mask))); @@ -1961,13 +1961,13 @@ public: }; }; -template -static inline bool VL_CAST_DYNAMIC(VlClassRef in, VlClassRef& outr) { +template +static inline bool VL_CAST_DYNAMIC(VlClassRef in, VlClassRef& outr) { if (!in) { outr = VlNull{}; return true; } - VlClassRef casted = in.template dynamicCast(); + VlClassRef casted = in.template dynamicCast(); if (VL_LIKELY(casted)) { outr = casted; return true; @@ -1976,8 +1976,8 @@ static inline bool VL_CAST_DYNAMIC(VlClassRef in, VlClassRef& outr) { } } -template -static inline bool VL_CAST_DYNAMIC(VlNull in, VlClassRef& outr) { +template +static inline bool VL_CAST_DYNAMIC(VlNull in, VlClassRef& outr) { outr = VlNull{}; return true; } diff --git a/src/V3Active.cpp b/src/V3Active.cpp index 6c6070574..290f872cf 100644 --- a/src/V3Active.cpp +++ b/src/V3Active.cpp @@ -230,7 +230,7 @@ class ActiveNamer final : public VNVisitor { void visit(AstNode* nodep) override { iterateChildren(nodep); } // Specialized below for the special sensitivity classes - template + template AstActive*& getSpecialActive(); public: @@ -246,17 +246,17 @@ public: } // Make a new AstActive sensitive to the given special sensitivity class and return it - template + template AstActive* makeSpecialActive(FileLine* const fl) { - AstSenTree* const senTreep = new AstSenTree{fl, new AstSenItem{fl, SenItemKind{}}}; + AstSenTree* const senTreep = new AstSenTree{fl, new AstSenItem{fl, T_SenItemKind{}}}; return makeActive(fl, senTreep); } // Return an AstActive sensitive to the given special sensitivity class (possibly pre-created) - template + template AstActive* getSpecialActive(FileLine* fl) { - AstActive*& cachep = getSpecialActive(); - if (!cachep) cachep = makeSpecialActive(fl); + AstActive*& cachep = getSpecialActive(); + if (!cachep) cachep = makeSpecialActive(fl); return cachep; } diff --git a/src/V3Ast.h b/src/V3Ast.h index c298829fc..cbee6890a 100644 --- a/src/V3Ast.h +++ b/src/V3Ast.h @@ -2553,21 +2553,21 @@ protected: inline static bool privateTypeTest(const AstNode* nodep); // For internal use only. 
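// The mechanical change applied throughout this patch is a naming convention: type template
// parameters carry a "T_" prefix (T_Value, T_Node, T_Func, T_Callable), while non-type
// (integral) parameters are renamed to an "N_" prefix (N_Depth, N_Words, N_Entries, N_UserN).
// A self-contained toy that follows the same rule -- ToyUnpacked is illustrative only and is
// not part of the patch:
#include <cstddef>

template <typename T_Value, std::size_t N_Depth>  // type parameter => T_, element count => N_
struct ToyUnpacked final {
    T_Value m_storage[N_Depth];
    template <typename T_Func>  // callable parameter => T_Func
    void forEach(T_Func f) const {
        for (std::size_t i = 0; i < N_Depth; ++i) f(i, m_storage[i]);
    }
};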
- template + template constexpr static bool uselessCast() VL_PURE { - using NonRef = typename std::remove_reference::type; + using NonRef = typename std::remove_reference::type; using NonPtr = typename std::remove_pointer::type; using NonCV = typename std::remove_cv::type; - return std::is_base_of::value; + return std::is_base_of::value; } // For internal use only. - template + template constexpr static bool impossibleCast() VL_PURE { - using NonRef = typename std::remove_reference::type; + using NonRef = typename std::remove_reference::type; using NonPtr = typename std::remove_pointer::type; using NonCV = typename std::remove_cv::type; - return !std::is_base_of::value; + return !std::is_base_of::value; } public: @@ -2655,12 +2655,12 @@ private: using ConstCorrectAstNode = typename std::conditional::value, const AstNode, AstNode>::type; - template - inline static void foreachImpl(ConstCorrectAstNode* nodep, const Callable& f, + template + inline static void foreachImpl(ConstCorrectAstNode* nodep, const T_Callable& f, bool visitNext); - template - inline static bool predicateImpl(ConstCorrectAstNode* nodep, const Callable& p); + template + inline static bool predicateImpl(ConstCorrectAstNode* nodep, const T_Callable& p); public: // Given a callable 'f' that takes a single argument of some AstNode subtype 'T_Node', traverse @@ -2670,46 +2670,48 @@ public: // handle a single (or a few) node types, as it's easier to write, but more importantly, the // dispatch to the callable in 'foreach' should be completely predictable by branch target // caches in modern CPUs, while it is basically unpredictable for VNVisitor. - template - void foreach(Callable&& f) { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable::value + template + void foreach(T_Callable&& f) { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert(vlstd::is_invocable::value && std::is_base_of::value, - "Callable 'f' must have a signature compatible with 'void(T_Node*)', " + "T_Callable 'f' must have a signature compatible with 'void(T_Node*)', " "with 'T_Node' being a subtype of 'AstNode'"); foreachImpl(this, f, /* visitNext: */ false); } // Same as above, but for 'const' nodes - template - void foreach(Callable&& f) const { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable::value - && std::is_base_of::value, - "Callable 'f' must have a signature compatible with 'void(const T_Node*)', " - "with 'T_Node' being a subtype of 'AstNode'"); + template + void foreach(T_Callable&& f) const { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert( + vlstd::is_invocable::value + && std::is_base_of::value, + "T_Callable 'f' must have a signature compatible with 'void(const T_Node*)', " + "with 'T_Node' being a subtype of 'AstNode'"); foreachImpl(this, f, /* visitNext: */ false); } // Same as 'foreach' but also traverses 'this->nextp()' transitively - template - void foreachAndNext(Callable&& f) { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable::value + template + void foreachAndNext(T_Callable&& f) { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert(vlstd::is_invocable::value && std::is_base_of::value, - "Callable 'f' must have a signature compatible with 'void(T_Node*)', " + "T_Callable 'f' must have a signature compatible with 'void(T_Node*)', " "with 'T_Node' being a subtype of 'AstNode'"); foreachImpl(this, f, /* visitNext: */ true); } // Same as 
above, but for 'const' nodes - template - void foreachAndNext(Callable&& f) const { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable::value - && std::is_base_of::value, - "Callable 'f' must have a signature compatible with 'void(const T_Node*)', " - "with 'T_Node' being a subtype of 'AstNode'"); + template + void foreachAndNext(T_Callable&& f) const { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert( + vlstd::is_invocable::value + && std::is_base_of::value, + "T_Callable 'f' must have a signature compatible with 'void(const T_Node*)', " + "with 'T_Node' being a subtype of 'AstNode'"); foreachImpl(this, f, /* visitNext: */ true); } @@ -2718,50 +2720,50 @@ public: // that satisfies the predicate 'p'. Returns false if no node of type 'T_Node' is present. // Traversal is performed in some arbitrary order and is terminated as soon as the result can // be determined. - template - bool exists(Callable&& p) { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable_r::value + template + bool exists(T_Callable&& p) { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert(vlstd::is_invocable_r::value && std::is_base_of::value, "Predicate 'p' must have a signature compatible with 'bool(T_Node*)', " "with 'T_Node' being a subtype of 'AstNode'"); - return predicateImpl(this, p); + return predicateImpl(this, p); } // Same as above, but for 'const' nodes - template - bool exists(Callable&& p) const { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable_r::value + template + bool exists(T_Callable&& p) const { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert(vlstd::is_invocable_r::value && std::is_base_of::value, "Predicate 'p' must have a signature compatible with 'bool(const T_Node*)', " "with 'T_Node' being a subtype of 'AstNode'"); - return predicateImpl(this, p); + return predicateImpl(this, p); } // Given a predicate 'p' that takes a single argument of some AstNode subtype 'T_Node', return // true if and only if all nodes of type 'T_Node' in the tree rooted at this node satisfy the // predicate 'p'. Returns true if no node of type 'T_Node' is present. Traversal is performed // in some arbitrary order and is terminated as soon as the result can be determined. 
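// A hedged usage sketch of the foreach()/exists() traversal helpers above, as a pass might
// call them; AstNodeModule and AstVarRef are existing node types, while the counting logic
// itself is illustrative only.
static int countVarRefs(AstNodeModule* modp) {
    int count = 0;
    modp->foreach([&count](AstVarRef*) { ++count; });  // visits every AstVarRef under modp
    return count;
}
static bool hasAnyVarRef(const AstNodeModule* modp) {
    // The const overload requires a 'const T_Node*' parameter; traversal stops at the first match
    return modp->exists([](const AstVarRef*) { return true; });
}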
- template - bool forall(Callable&& p) { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable_r::value + template + bool forall(T_Callable&& p) { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert(vlstd::is_invocable_r::value && std::is_base_of::value, "Predicate 'p' must have a signature compatible with 'bool(T_Node*)', " "with 'T_Node' being a subtype of 'AstNode'"); - return predicateImpl(this, p); + return predicateImpl(this, p); } // Same as above, but for 'const' nodes - template - bool forall(Callable&& p) const { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable_r::value + template + bool forall(T_Callable&& p) const { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert(vlstd::is_invocable_r::value && std::is_base_of::value, "Predicate 'p' must have a signature compatible with 'bool(const T_Node*)', " "with 'T_Node' being a subtype of 'AstNode'"); - return predicateImpl(this, p); + return predicateImpl(this, p); } int nodeCount() const { @@ -2834,8 +2836,8 @@ constexpr bool AstNode::isLeaf() { } // foreach implementation -template -void AstNode::foreachImpl(ConstCorrectAstNode* nodep, const Callable& f, bool visitNext) { +template +void AstNode::foreachImpl(ConstCorrectAstNode* nodep, const T_Callable& f, bool visitNext) { // Pre-order traversal implemented directly (without recursion) for speed reasons. The very // first iteration (the one that operates on the input nodep) is special, as we might or // might not need to enqueue nodep->nextp() depending on VisitNext, while in all other @@ -2915,8 +2917,8 @@ void AstNode::foreachImpl(ConstCorrectAstNode* nodep, const Callable& f, } // predicate implementation -template -bool AstNode::predicateImpl(ConstCorrectAstNode* nodep, const Callable& p) { +template +bool AstNode::predicateImpl(ConstCorrectAstNode* nodep, const T_Callable& p) { // Implementation similar to foreach, but abort traversal as soon as result is determined using T_Arg_NonConst = typename std::remove_const::type; using Node = ConstCorrectAstNode; @@ -2951,7 +2953,7 @@ bool AstNode::predicateImpl(ConstCorrectAstNode* nodep, const Callable& p // Type test this node if (AstNode::privateTypeTest(currp)) { // Call the client function - if (p(static_cast(currp)) != Default) return true; + if (p(static_cast(currp)) != N_Default) return true; // Short circuit if iterating leaf nodes if VL_CONSTEXPR_CXX17 (isLeaf()) return false; } @@ -2968,7 +2970,7 @@ bool AstNode::predicateImpl(ConstCorrectAstNode* nodep, const Callable& p }; // Visit the root node - if (visit(nodep)) return !Default; + if (visit(nodep)) return !N_Default; // Visit the rest of the tree while (VL_LIKELY(topp > basep)) { @@ -2985,10 +2987,10 @@ bool AstNode::predicateImpl(ConstCorrectAstNode* nodep, const Callable& p if (headp->nextp()) *topp++ = headp->nextp(); // Visit the head node - if (visit(headp)) return !Default; + if (visit(headp)) return !N_Default; } - return Default; + return N_Default; } inline std::ostream& operator<<(std::ostream& os, const AstNode* rhs) { diff --git a/src/V3AstNodeOther.h b/src/V3AstNodeOther.h index c171f5098..155237d80 100644 --- a/src/V3AstNodeOther.h +++ b/src/V3AstNodeOther.h @@ -2401,13 +2401,13 @@ public: // Iterates top level members of the class, taking into account inheritance (starting from the // root superclass). Note: after V3Scope, several children are moved under an AstScope and will // not be found by this. 
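// A hedged usage sketch of the AstClass::foreachMember() helper documented just above; the
// callable receives the class currently being walked (possibly a superclass) plus the member
// node.  AstVar and the name-collecting logic are assumptions for illustration, not part of
// the patch.
#include <string>
#include <vector>

static std::vector<std::string> classMemberNames(AstClass* classp) {
    std::vector<std::string> names;
    classp->foreachMember([&names](AstClass* /*inClassp*/, AstVar* memberp) {
        names.push_back(memberp->name());  // superclass members come first (root-first walk)
    });
    return names;
}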
- template - void foreachMember(const Callable& f) { - using T_Node = typename FunctionArgNoPointerNoCV::type; + template + void foreachMember(const T_Callable& f) { + using T_Node = typename FunctionArgNoPointerNoCV::type; static_assert( - vlstd::is_invocable::value + vlstd::is_invocable::value && std::is_base_of::value, - "Callable 'f' must have a signature compatible with 'void(AstClass*, T_Node*)', " + "T_Callable 'f' must have a signature compatible with 'void(AstClass*, T_Node*)', " "with 'T_Node' being a subtype of 'AstNode'"); if (AstClassExtends* const cextendsp = this->extendsp()) { cextendsp->classp()->foreachMember(f); @@ -2417,13 +2417,14 @@ public: } } // Same as above, but stops after first match - template - bool existsMember(const Callable& p) const { - using T_Node = typename FunctionArgNoPointerNoCV::type; - static_assert(vlstd::is_invocable_r::value - && std::is_base_of::value, - "Predicate 'p' must have a signature compatible with 'bool(const AstClass*, " - "const T_Node*)', with 'T_Node' being a subtype of 'AstNode'"); + template + bool existsMember(const T_Callable& p) const { + using T_Node = typename FunctionArgNoPointerNoCV::type; + static_assert( + vlstd::is_invocable_r::value + && std::is_base_of::value, + "Predicate 'p' must have a signature compatible with 'bool(const AstClass*, " + "const T_Node*)', with 'T_Node' being a subtype of 'AstNode'"); if (AstClassExtends* const cextendsp = this->extendsp()) { if (cextendsp->classp()->existsMember(p)) return true; } diff --git a/src/V3AstUserAllocator.h b/src/V3AstUserAllocator.h index 974be6b0b..fdef7e36c 100644 --- a/src/V3AstUserAllocator.h +++ b/src/V3AstUserAllocator.h @@ -27,22 +27,22 @@ #include #include -template +template class AstUserAllocatorBase VL_NOT_FINAL { - static_assert(1 <= T_UserN && T_UserN <= 4, "Wrong user pointer number"); + static_assert(1 <= N_UserN && N_UserN <= 4, "Wrong user pointer number"); static_assert(std::is_base_of::value, "T_Node must be an AstNode type"); private: std::deque m_allocated; T_Data* getUserp(const T_Node* nodep) const { - if VL_CONSTEXPR_CXX17 (T_UserN == 1) { + if VL_CONSTEXPR_CXX17 (N_UserN == 1) { const VNUser user = nodep->user1u(); return user.to(); - } else if VL_CONSTEXPR_CXX17 (T_UserN == 2) { + } else if VL_CONSTEXPR_CXX17 (N_UserN == 2) { const VNUser user = nodep->user2u(); return user.to(); - } else if VL_CONSTEXPR_CXX17 (T_UserN == 3) { + } else if VL_CONSTEXPR_CXX17 (N_UserN == 3) { const VNUser user = nodep->user3u(); return user.to(); } else { @@ -52,11 +52,11 @@ private: } void setUserp(T_Node* nodep, T_Data* userp) const { - if VL_CONSTEXPR_CXX17 (T_UserN == 1) { + if VL_CONSTEXPR_CXX17 (N_UserN == 1) { nodep->user1u(VNUser{userp}); - } else if VL_CONSTEXPR_CXX17 (T_UserN == 2) { + } else if VL_CONSTEXPR_CXX17 (N_UserN == 2) { nodep->user2u(VNUser{userp}); - } else if VL_CONSTEXPR_CXX17 (T_UserN == 3) { + } else if VL_CONSTEXPR_CXX17 (N_UserN == 3) { nodep->user3u(VNUser{userp}); } else { nodep->user4u(VNUser{userp}); @@ -65,11 +65,11 @@ private: protected: AstUserAllocatorBase() { - if VL_CONSTEXPR_CXX17 (T_UserN == 1) { + if VL_CONSTEXPR_CXX17 (N_UserN == 1) { VNUser1InUse::check(); - } else if VL_CONSTEXPR_CXX17 (T_UserN == 2) { + } else if VL_CONSTEXPR_CXX17 (N_UserN == 2) { VNUser2InUse::check(); - } else if VL_CONSTEXPR_CXX17 (T_UserN == 3) { + } else if VL_CONSTEXPR_CXX17 (N_UserN == 3) { VNUser3InUse::check(); } else { VNUser4InUse::check(); diff --git a/src/V3Delayed.cpp b/src/V3Delayed.cpp index 31f21e5b2..82b0b156d 100644 --- 
diff --git a/src/V3Delayed.cpp b/src/V3Delayed.cpp
index 31f21e5b2..82b0b156d 100644
--- a/src/V3Delayed.cpp
+++ b/src/V3Delayed.cpp
@@ -534,17 +534,17 @@ class DelayedVisitor final : public VNVisitor {
     }
     // Scheme::ValueQueuePartial/Scheme::ValueQueueWhole
-    template
+    template
     void prepareSchemeValueQueue(AstVarScope* vscp, VarScopeInfo& vscpInfo) {
-        UASSERT_OBJ(Partial ? vscpInfo.m_scheme == Scheme::ValueQueuePartial
-                            : vscpInfo.m_scheme == Scheme::ValueQueueWhole,
+        UASSERT_OBJ(N_Partial ? vscpInfo.m_scheme == Scheme::ValueQueuePartial
+                              : vscpInfo.m_scheme == Scheme::ValueQueueWhole,
                     vscp, "Inconsistent scheme");
         FileLine* const flp = vscp->fileline();
         AstScope* const scopep = vscp->scopep();

         // Create the commit queue variable
         auto* const cqDTypep
-            = new AstNBACommitQueueDType{flp, vscp->dtypep()->skipRefp(), Partial};
+            = new AstNBACommitQueueDType{flp, vscp->dtypep()->skipRefp(), N_Partial};
         v3Global.rootp()->typeTablep()->addTypesp(cqDTypep);
         const std::string name = "__VdlyCommitQueue" + vscp->varp()->shortName();
         AstVarScope* const queueVscp = createTemp(flp, scopep, name, cqDTypep);
diff --git a/src/V3Dfg.h b/src/V3Dfg.h
index 5fab278ee..880517370 100644
--- a/src/V3Dfg.h
+++ b/src/V3Dfg.h
@@ -427,52 +427,52 @@ public:
 // Implementation of dataflow graph vertices with a fixed number of sources
 //------------------------------------------------------------------------------

-template
+class DfgVertexWithArity VL_NOT_FINAL : public DfgVertex {
-    static_assert(1 <= Arity && Arity <= 4, "Arity must be between 1 and 4 inclusive");
+    static_assert(1 <= N_Arity && N_Arity <= 4, "N_Arity must be between 1 and 4 inclusive");

-    std::array m_srcs;  // Source edges
+    std::array m_srcs;  // Source edges

 protected:
     DfgVertexWithArity(DfgGraph& dfg, VDfgType type, FileLine* flp, AstNodeDType* dtypep)
         : DfgVertex{dfg, type, flp, dtypep} {
         // Initialize source edges
-        for (size_t i = 0; i < Arity; ++i) m_srcs[i].init(this);
+        for (size_t i = 0; i < N_Arity; ++i) m_srcs[i].init(this);
     }

     ~DfgVertexWithArity() override = default;

 public:
     std::pair sourceEdges() final override {  //
-        return {m_srcs.data(), Arity};
+        return {m_srcs.data(), N_Arity};
     }
     std::pair sourceEdges() const final override {
-        return {m_srcs.data(), Arity};
+        return {m_srcs.data(), N_Arity};
     }

-    template
+    template
     DfgEdge* sourceEdge() {
-        static_assert(Index < Arity, "Source index out of range");
-        return &m_srcs[Index];
+        static_assert(N_Index < N_Arity, "Source index out of range");
+        return &m_srcs[N_Index];
     }
-    template
+    template
     const DfgEdge* sourceEdge() const {
-        static_assert(Index < Arity, "Source index out of range");
-        return &m_srcs[Index];
+        static_assert(N_Index < N_Arity, "Source index out of range");
+        return &m_srcs[N_Index];
     }
-    template
+    template
     DfgVertex* source() const {
-        static_assert(Index < Arity, "Source index out of range");
-        return m_srcs[Index].sourcep();
+        static_assert(N_Index < N_Arity, "Source index out of range");
+        return m_srcs[N_Index].sourcep();
     }
-    template
+    template
     void relinkSource(DfgVertex* newSourcep) {
-        static_assert(Index < Arity, "Source index out of range");
-        UASSERT_OBJ(m_srcs[Index].sinkp() == this, this, "Inconsistent");
-        m_srcs[Index].relinkSource(newSourcep);
+        static_assert(N_Index < N_Arity, "Source index out of range");
+        UASSERT_OBJ(m_srcs[N_Index].sinkp() == this, this, "Inconsistent");
+        m_srcs[N_Index].relinkSource(newSourcep);
     }
 };
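DfgVertexWithArity shows why the N_ prefix carries information: both the arity and the source index are compile-time integers, so out-of-range accesses are rejected by static_assert rather than at run time. A self-contained sketch of the same pattern, with invented Edge and VertexWithArity types standing in for the Dfg classes:

#include <array>
#include <cstddef>

struct Edge {
    int id = 0;
};

template <std::size_t N_Arity>
class VertexWithArity {
    static_assert(1 <= N_Arity && N_Arity <= 4, "N_Arity must be between 1 and 4 inclusive");
    std::array<Edge, N_Arity> m_srcs{};  // source edges, sized at compile time

public:
    template <std::size_t N_Index>
    Edge& sourceEdge() {
        static_assert(N_Index < N_Arity, "Source index out of range");
        return m_srcs[N_Index];
    }
};

int main() {
    VertexWithArity<2> binary;
    binary.sourceEdge<0>().id = 1;
    binary.sourceEdge<1>().id = 2;
    // binary.sourceEdge<2>();  // would not compile: index out of range
    return 0;
}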
diff --git a/src/V3DfgAstToDfg.cpp b/src/V3DfgAstToDfg.cpp
index f61380f77..2f941b9e5 100644
--- a/src/V3DfgAstToDfg.cpp
+++ b/src/V3DfgAstToDfg.cpp
@@ -37,9 +37,9 @@ namespace {

 // Create a DfgVertex out of a AstNodeExpr. For most AstNodeExpr subtypes, this can be done
 // automatically. For the few special cases, we provide specializations below
-template
-Vertex* makeVertex(const Node* nodep, DfgGraph& dfg) {
-    return new Vertex{dfg, nodep->fileline(), DfgVertex::dtypeFor(nodep)};
+template
+T_Vertex* makeVertex(const T_Node* nodep, DfgGraph& dfg) {
+    return new T_Vertex{dfg, nodep->fileline(), DfgVertex::dtypeFor(nodep)};
 }

 //======================================================================
diff --git a/src/V3DfgCache.h b/src/V3DfgCache.h
index 09df39f6c..ca43a6d3c 100644
--- a/src/V3DfgCache.h
+++ b/src/V3DfgCache.h
@@ -157,8 +157,8 @@ public:
     };
 };

-template
-using Cache = std::unordered_map;
+template
+using Cache = std::unordered_map;

 using CacheSel = Cache;
 using CacheUnary = Cache;
@@ -246,10 +246,10 @@ inline void setOperands(DfgVertexTernary* vtxp, DfgVertex* src0p, DfgVertex* src
 }

 // Get or create (and insert) vertex with given operands
-template
-inline Vertex* getOrCreate(DfgGraph& dfg, FileLine* flp, AstNodeDType* dtypep, Cache& cache,
+template
+inline Vertex* getOrCreate(DfgGraph& dfg, FileLine* flp, AstNodeDType* dtypep, T_Cache& cache,
                            Operands... operands) {
-    typename Cache::mapped_type& entrypr = getEntry(cache, dtypep, operands...);
+    typename T_Cache::mapped_type& entrypr = getEntry(cache, dtypep, operands...);
     if (!entrypr) {
         Vertex* const newp = new Vertex{dfg, flp, dtypep};
         setOperands(newp, operands...);
diff --git a/src/V3DfgDfgToAst.cpp b/src/V3DfgDfgToAst.cpp
index ee7b0c078..423ed6600 100644
--- a/src/V3DfgDfgToAst.cpp
+++ b/src/V3DfgDfgToAst.cpp
@@ -40,9 +40,9 @@ namespace {

 // Create an AstNodeExpr out of a DfgVertex. For most AstNodeExpr subtypes, this can be done
 // automatically. For the few special cases, we provide specializations below
-template
-Node* makeNode(const Vertex* vtxp, Ops... ops) {
-    Node* const nodep = new Node{vtxp->fileline(), ops...};
+template
+T_Node* makeNode(const T_Vertex* vtxp, Ops... ops) {
+    T_Node* const nodep = new T_Node{vtxp->fileline(), ops...};
     UASSERT_OBJ(nodep->width() == static_cast(vtxp->width()), vtxp,
                 "Incorrect width in AstNode created from DfgVertex "
                     << vtxp->typeName() << ": " << nodep->width() << " vs " << vtxp->width());
diff --git a/src/V3EmitCBase.h b/src/V3EmitCBase.h
index 28bdf76a8..f82323f63 100644
--- a/src/V3EmitCBase.h
+++ b/src/V3EmitCBase.h
@@ -146,8 +146,8 @@ public:
     void emitCFuncDecl(const AstCFunc* funcp, const AstNodeModule* modp, bool cLinkage = false);
     void emitVarDecl(const AstVar* nodep, bool asRef = false);
     void emitVarAccessors(const AstVar* nodep);
-    template
-    static void forModCUse(const AstNodeModule* modp, VUseType useType, F action) {
+    template
+    static void forModCUse(const AstNodeModule* modp, VUseType useType, T_Callable action) {
         for (AstNode* itemp = modp->stmtsp(); itemp; itemp = itemp->nextp()) {
             if (AstCUse* const usep = VN_CAST(itemp, CUse)) {
                 if (usep->useType().containsAny(useType)) {
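The getOrCreate() helper in the V3DfgCache.h hunk above relies on the get-or-create idiom: the cache hands back a reference to the mapped slot, and a new vertex is constructed only when that slot is still empty, so lookup and insertion share a single probe. A self-contained sketch of the idiom with invented types (the real code goes through getEntry() and a variadic operand pack):

#include <cassert>
#include <string>
#include <unordered_map>

struct Vertex {
    std::string name;
};

template <typename T_Key>
using Cache = std::unordered_map<T_Key, Vertex*>;

template <typename T_Key>
Vertex* getOrCreate(Cache<T_Key>& cache, const T_Key& key) {
    Vertex*& entrypr = cache[key];  // reference to the mapped slot, default nullptr
    if (!entrypr) entrypr = new Vertex{"vertex"};  // construct only on a miss
    return entrypr;
}

int main() {
    Cache<int> cache;
    Vertex* const ap = getOrCreate(cache, 1);
    Vertex* const bp = getOrCreate(cache, 1);  // hit: the same object is returned
    assert(ap == bp);
    delete ap;
    return 0;
}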
diff --git a/src/V3EmitCMake.cpp b/src/V3EmitCMake.cpp
index 0bd768a94..b35fc2886 100644
--- a/src/V3EmitCMake.cpp
+++ b/src/V3EmitCMake.cpp
@@ -36,8 +36,8 @@ class CMakeEmitter final {
     // STATIC FUNCTIONS

     // Concatenate all strings in 'strs' with ' ' between them.
-    template
-    static string cmake_list(const List& strs) {
+    template
+    static string cmake_list(const T_List& strs) {
         string s;
         for (auto it = strs.begin(); it != strs.end(); ++it) {
             s += '"';
diff --git a/src/V3FunctionTraits.h b/src/V3FunctionTraits.h
index 9287048b8..c5e1a4c16 100644
--- a/src/V3FunctionTraits.h
+++ b/src/V3FunctionTraits.h
@@ -32,25 +32,25 @@ struct FunctionTraits final
     : public FunctionTraits::type::operator())> {};

 // Specialization for pointers to member function
-template
-struct FunctionTraits VL_NOT_FINAL {
+template
+struct FunctionTraits VL_NOT_FINAL {
     // Number of arguments
     static constexpr size_t arity = sizeof...(Args);
     // Type of result
-    using result_type = ReturnType;
+    using result_type = T_ReturnType;
     // Type of arguments
-    template
+    template
     struct arg final {
-        using type = typename std::tuple_element>::type;
+        using type = typename std::tuple_element>::type;
     };
 };

-template
+template
 struct FunctionArgNoPointerNoCV final {
     using Traits = FunctionTraits;
-    using T_Arg = typename Traits::template arg::type;
+    using T_Arg = typename Traits::template arg::type;
     using T_ArgNoPtr = typename std::remove_pointer::type;
     using type = typename std::remove_cv::type;
 };
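The V3FunctionTraits.h specialization above is what lets forall/foreach deduce T_Node from a lambda: the traits are specialized on the type of the lambda's operator(), and the first argument type is then stripped of its pointer and cv-qualifiers. A self-contained sketch of that technique (simplified; the real header also handles references and non-zero argument indices):

#include <cstddef>
#include <tuple>
#include <type_traits>

template <typename T_Callable>
struct FunctionTraits : FunctionTraits<decltype(&T_Callable::operator())> {};

// Specialization for pointers to const member functions, which is what a
// non-mutable lambda's operator() is.
template <typename T_Class, typename T_ReturnType, typename... Args>
struct FunctionTraits<T_ReturnType (T_Class::*)(Args...) const> {
    static constexpr std::size_t arity = sizeof...(Args);
    using result_type = T_ReturnType;
    template <std::size_t N_Index>
    struct arg {
        using type = typename std::tuple_element<N_Index, std::tuple<Args...>>::type;
    };
};

template <typename T_Callable>
struct FirstArgNoPointerNoCV {
    using T_Arg = typename FunctionTraits<T_Callable>::template arg<0>::type;
    using type = typename std::remove_cv<typename std::remove_pointer<T_Arg>::type>::type;
};

int main() {
    auto lambda = [](const int*) { return true; };
    using T_Node = FirstArgNoPointerNoCV<decltype(lambda)>::type;
    static_assert(std::is_same<T_Node, int>::value, "first argument decays to 'int'");
    return 0;
}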
diff --git a/src/V3Graph.cpp b/src/V3Graph.cpp
index 968e4c261..b4a3adf08 100644
--- a/src/V3Graph.cpp
+++ b/src/V3Graph.cpp
@@ -79,12 +79,12 @@ void V3GraphVertex::rerouteEdges(V3Graph* graphp) {
     unlinkEdges(graphp);
 }

-template
+template
 V3GraphEdge* V3GraphVertex::findConnectingEdgep(V3GraphVertex* waywardp) {
     // O(edges) linear search. Searches search both nodes' edge lists in
     // parallel. The lists probably aren't _both_ huge, so this is
     // unlikely to blow up even on fairly nasty graphs.
-    constexpr GraphWay way{T_Way};
+    constexpr GraphWay way{N_Way};
     constexpr GraphWay inv = way.invert();
     auto& aEdges = this->edges();
     auto aIt = aEdges.begin();
diff --git a/src/V3Graph.h b/src/V3Graph.h
index 911c909b3..765907947 100644
--- a/src/V3Graph.h
+++ b/src/V3Graph.h
@@ -187,9 +187,9 @@ public:
     uint64_t user() const { return m_user; }
     V3GraphVertex* fromp() const { return m_fromp; }
     V3GraphVertex* top() const { return m_top; }
-    template
+    template
     V3GraphVertex* furtherp() const {
-        return T_Way == GraphWay::FORWARD ? top() : fromp();
+        return N_Way == GraphWay::FORWARD ? top() : fromp();
     }
     // STATIC ACCESSORS
     static bool followNotCutable(const V3GraphEdge* edgep) { return !edgep->m_cutable; }
@@ -301,9 +301,9 @@ public:
     void* userp() const VL_MT_STABLE { return m_userp; }
     V3GraphEdge::IList& inEdges() { return m_ins; }
     const V3GraphEdge::IList& inEdges() const { return m_ins; }
-    template
+    template
     inline auto& edges();
-    template
+    template
     inline const auto& edges() const;
     bool inEmpty() const { return m_ins.empty(); }
     bool inSize1() const { return m_ins.hasSingleElement(); }
@@ -320,7 +320,7 @@ public:
     void rerouteEdges(V3Graph* graphp) VL_MT_DISABLED;
     // Find the edge connecting this vertex to the given vertex.
     // If edge is not found returns nullptr. O(edges) performance.
-    template
+    template
     V3GraphEdge* findConnectingEdgep(V3GraphVertex* otherp) VL_MT_DISABLED;
 };
diff --git a/src/V3GraphPathChecker.cpp b/src/V3GraphPathChecker.cpp
index f7f72cdfb..14930d378 100644
--- a/src/V3GraphPathChecker.cpp
+++ b/src/V3GraphPathChecker.cpp
@@ -53,9 +53,9 @@ struct GraphPCNode final {

 //######################################################################
 // GraphPathChecker implementation

-template
+template
 void GraphPathChecker::initHalfCriticalPaths(bool checkOnly) {
-    constexpr GraphWay way{T_Way};
+    constexpr GraphWay way{N_Way};
     constexpr GraphWay rev = way.invert();
     GraphStreamUnordered order(m_graphp, way);
     while (const V3GraphVertex* const vertexp = order.nextp()) {
diff --git a/src/V3GraphPathChecker.h b/src/V3GraphPathChecker.h
index 495d8cca8..761d45ff7 100644
--- a/src/V3GraphPathChecker.h
+++ b/src/V3GraphPathChecker.h
@@ -53,7 +53,7 @@ public:
 private:
     bool pathExistsInternal(const V3GraphVertex* ap, const V3GraphVertex* bp,
                             unsigned* costp = nullptr) VL_MT_DISABLED;
-    template
+    template
     void initHalfCriticalPaths(bool checkOnly) VL_MT_DISABLED;
     void incGeneration() { ++m_generation; }
diff --git a/src/V3GraphStream.h b/src/V3GraphStream.h
index 948c762d4..5ddab8087 100644
--- a/src/V3GraphStream.h
+++ b/src/V3GraphStream.h
@@ -264,9 +264,9 @@ public:
     }

 private:
-    template  //
+    template  //
    VL_ATTR_NOINLINE void init(V3Graph* graphp) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         // Assign every vertex without an incoming edge to ready, others to waiting
         for (V3GraphVertex& vertex : graphp->vertices()) {
             const uint32_t nDeps = vertex.edges().size();
@@ -275,9 +275,9 @@ private:
         }
     }

-    template  //
+    template  //
    VL_ATTR_NOINLINE const V3GraphVertex* unblock(const V3GraphVertex* resultp) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         for (const V3GraphEdge& edge : resultp->edges()) {
             V3GraphVertex* const vertexp = edge.furtherp();
#if VL_DEBUG
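The GraphWay-indexed helpers above (V3Graph, V3GraphPathChecker, V3GraphStream) all follow one pattern: the traversal direction is passed as an integral non-type template parameter, hence the new N_ prefix, and the function immediately rebuilds a constexpr GraphWay from it, so a single template body serves both directions. Presumably the integral parameter is used because class-type non-type template parameters require C++20. A self-contained sketch with invented Way and Node types:

#include <cassert>

class Way {  // stand-in for GraphWay
    int m_e;
public:
    enum en : int { FORWARD = 0, REVERSE = 1 };
    constexpr Way(int e) : m_e{e} {}
    constexpr Way invert() const { return Way{m_e ^ 1}; }
    constexpr bool forward() const { return m_e == FORWARD; }
};

struct Node {
    Node* nextp = nullptr;
    Node* prevp = nullptr;
};

// The direction is a compile-time integer; the constexpr Way is rebuilt inside.
template <int N_Way>
Node* furtherp(Node* nodep) {
    constexpr Way way{N_Way};
    return way.forward() ? nodep->nextp : nodep->prevp;
}

int main() {
    Node a, b;
    a.nextp = &b;
    b.prevp = &a;
    assert(furtherp<Way::FORWARD>(&a) == &b);
    assert(furtherp<Way::REVERSE>(&b) == &a);
    return 0;
}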
- template + template class SimpleItertatorImpl final { static_assert(std::is_same::value || std::is_same::value, @@ -99,7 +99,7 @@ class V3List final { template & (B::*)(), typename> friend class V3List; - using IteratorType = SimpleItertatorImpl; + using IteratorType = SimpleItertatorImpl; T_Base* m_currp; // Currently iterated element, or 'nullptr' for 'end()' iterator @@ -109,7 +109,7 @@ class V3List final { VL_ATTR_ALWINLINE static T_Base* step(T_Base* currp) { - if VL_CONSTEXPR_CXX17 (T_Reverse) { + if VL_CONSTEXPR_CXX17 (N_Reverse) { return toLinks(currp).m_prevp; } else { return toLinks(currp).m_nextp; @@ -145,8 +145,8 @@ class V3List final { bool operator!=(const IteratorType& other) const { return m_currp != other.m_currp; } // Convert to const iterator VL_ATTR_ALWINLINE - operator SimpleItertatorImpl() const { - return SimpleItertatorImpl{m_currp}; + operator SimpleItertatorImpl() const { + return SimpleItertatorImpl{m_currp}; } }; @@ -221,10 +221,10 @@ class V3List final { }; public: - using iterator = SimpleItertatorImpl; - using const_iterator = SimpleItertatorImpl; - using reverse_iterator = SimpleItertatorImpl; - using const_reverse_iterator = SimpleItertatorImpl; + using iterator = SimpleItertatorImpl; + using const_iterator = SimpleItertatorImpl; + using reverse_iterator = SimpleItertatorImpl; + using const_reverse_iterator = SimpleItertatorImpl; // CONSTRUCTOR V3List() = default; diff --git a/src/V3OptionParser.cpp b/src/V3OptionParser.cpp index 49e27fe30..917cd4e9d 100644 --- a/src/V3OptionParser.cpp +++ b/src/V3OptionParser.cpp @@ -37,14 +37,14 @@ struct V3OptionParser::Impl final { VALUE // "-opt val" }; // Base class of actual action classes - template + template class ActionBase VL_NOT_FINAL : public ActionIfs { bool m_undocumented = false; // This option is not documented public: - bool isValueNeeded() const override final { return MODE == en::VALUE; } - bool isFOnOffAllowed() const override final { return MODE == en::FONOFF; } - bool isOnOffAllowed() const override final { return MODE == en::ONOFF; } - bool isPartialMatchAllowed() const override final { return ALLOW_PARTIAL_MATCH; } + bool isValueNeeded() const override final { return N_Mode == en::VALUE; } + bool isFOnOffAllowed() const override final { return N_Mode == en::FONOFF; } + bool isOnOffAllowed() const override final { return N_Mode == en::ONOFF; } + bool isPartialMatchAllowed() const override final { return N_Allow_Partial_Match; } bool isUndocumented() const override { return m_undocumented; } void undocumented() override { m_undocumented = true; } }; @@ -52,9 +52,9 @@ struct V3OptionParser::Impl final { // Actual action classes template class ActionSet; // "-opt" for bool-ish, "-opt val" for int and string - template + template class ActionFOnOff; // "-fopt" and "-fno-opt" for bool-ish - template + template class ActionOnOff; // "-opt" and "-no-opt" for bool-ish class ActionCbCall; // Callback without argument for "-opt" class ActionCbFOnOff; // Callback for "-fopt" and "-fno-opt" @@ -171,10 +171,10 @@ V3OptionParser::ActionIfs* V3OptionParser::find(const char* optp) { return nullptr; } -template -V3OptionParser::ActionIfs& V3OptionParser::add(const std::string& opt, ARG arg) { +template +V3OptionParser::ActionIfs& V3OptionParser::add(const std::string& opt, T_Arg arg) { UASSERT(!m_pimpl->m_isFinalized, "Cannot add after finalize() is called"); - std::unique_ptr act{new ACT{std::move(arg)}}; + std::unique_ptr act{new T_Act{std::move(arg)}}; UASSERT(opt.size() >= 2, opt << " is too short"); 
diff --git a/src/V3OptionParser.cpp b/src/V3OptionParser.cpp
index 49e27fe30..917cd4e9d 100644
--- a/src/V3OptionParser.cpp
+++ b/src/V3OptionParser.cpp
@@ -37,14 +37,14 @@ struct V3OptionParser::Impl final {
         VALUE  // "-opt val"
     };
     // Base class of actual action classes
-    template
+    template
     class ActionBase VL_NOT_FINAL : public ActionIfs {
         bool m_undocumented = false;  // This option is not documented
    public:
-        bool isValueNeeded() const override final { return MODE == en::VALUE; }
-        bool isFOnOffAllowed() const override final { return MODE == en::FONOFF; }
-        bool isOnOffAllowed() const override final { return MODE == en::ONOFF; }
-        bool isPartialMatchAllowed() const override final { return ALLOW_PARTIAL_MATCH; }
+        bool isValueNeeded() const override final { return N_Mode == en::VALUE; }
+        bool isFOnOffAllowed() const override final { return N_Mode == en::FONOFF; }
+        bool isOnOffAllowed() const override final { return N_Mode == en::ONOFF; }
+        bool isPartialMatchAllowed() const override final { return N_Allow_Partial_Match; }
         bool isUndocumented() const override { return m_undocumented; }
         void undocumented() override { m_undocumented = true; }
     };
@@ -52,9 +52,9 @@ struct V3OptionParser::Impl final {
     // Actual action classes
     template
     class ActionSet;  // "-opt" for bool-ish, "-opt val" for int and string
-    template
+    template
     class ActionFOnOff;  // "-fopt" and "-fno-opt" for bool-ish
-    template
+    template
     class ActionOnOff;  // "-opt" and "-no-opt" for bool-ish
     class ActionCbCall;  // Callback without argument for "-opt"
     class ActionCbFOnOff;  // Callback for "-fopt" and "-fno-opt"
@@ -171,10 +171,10 @@ V3OptionParser::ActionIfs* V3OptionParser::find(const char* optp) {
     return nullptr;
 }

-template
-V3OptionParser::ActionIfs& V3OptionParser::add(const std::string& opt, ARG arg) {
+template
+V3OptionParser::ActionIfs& V3OptionParser::add(const std::string& opt, T_Arg arg) {
     UASSERT(!m_pimpl->m_isFinalized, "Cannot add after finalize() is called");
-    std::unique_ptr act{new ACT{std::move(arg)}};
+    std::unique_ptr act{new T_Act{std::move(arg)}};
     UASSERT(opt.size() >= 2, opt << " is too short");
     UASSERT(opt[0] == '-' || opt[0] == '+', opt << " does not start with either '-' or '+'");
     UASSERT(!(opt[0] == '-' && opt[1] == '-'), "Option must have single '-', but " << opt);
diff --git a/src/V3OptionParser.h b/src/V3OptionParser.h
index 8b4870168..d6160081b 100644
--- a/src/V3OptionParser.h
+++ b/src/V3OptionParser.h
@@ -65,8 +65,8 @@ private:
     // METHODS
     ActionIfs* find(const char* optp) VL_MT_DISABLED;
-    template
-    ActionIfs& add(const string& opt, ARG arg) VL_MT_DISABLED;
+    template
+    ActionIfs& add(const string& opt, T_Arg arg) VL_MT_DISABLED;
     // Returns true if strp starts with "-fno"
     static bool hasPrefixFNo(const char* strp) VL_MT_DISABLED;
     // Returns true if strp starts with "-no"
diff --git a/src/V3OrderParallel.cpp b/src/V3OrderParallel.cpp
index cc614b211..ffb9ff1da 100644
--- a/src/V3OrderParallel.cpp
+++ b/src/V3OrderParallel.cpp
@@ -268,7 +268,7 @@ static_assert(!std::is_polymorphic::value, "Should not have a vtable"
 class MTaskEdge final : public V3GraphEdge, public MergeCandidate {
     VL_RTTI_IMPL(MTaskEdge, V3GraphEdge)
     friend class LogicMTask;
-    template
+    template
     friend class PropagateCp;

     // MEMBERS
@@ -280,7 +280,7 @@ public:
     // CONSTRUCTORS
     MTaskEdge(V3Graph* graphp, LogicMTask* fromp, LogicMTask* top, int weight);
     // METHODS
-    template
+    template
     inline LogicMTask* furtherMTaskp() const;
     inline LogicMTask* fromMTaskp() const;
     inline LogicMTask* toMTaskp() const;
@@ -307,7 +307,7 @@ private:
 class LogicMTask final : public V3GraphVertex {
     VL_RTTI_IMPL(LogicMTask, V3GraphVertex)
-    template
+    template
     friend class PropagateCp;

 public:
@@ -419,28 +419,28 @@ public:
#endif
     }

-    template
+    template
     void addRelativeEdge(MTaskEdge* edgep) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         constexpr GraphWay inv = way.invert();
         // Add to the edge heap
-        LogicMTask* const relativep = edgep->furtherMTaskp();
+        LogicMTask* const relativep = edgep->furtherMTaskp();
         // Value is !way cp to this edge
         const uint32_t cp = relativep->stepCost() + relativep->critPathCost(inv);  //
         m_edgeHeap[way].insert(&edgep->m_edgeHeapNode[way], {relativep->id(), cp});
     }
-    template
+    template
     void stealRelativeEdge(MTaskEdge* edgep) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         // Make heap node insertable, ruining the heap it is currently in.
        edgep->m_edgeHeapNode[way].yank();
         // Add the edge as new
-        addRelativeEdge(edgep);
+        addRelativeEdge(edgep);
     }
-    template
+    template
     void removeRelativeEdge(MTaskEdge* edgep) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         // Remove from the edge heap
         m_edgeHeap[way].remove(&edgep->m_edgeHeapNode[way]);
     }
@@ -456,12 +456,12 @@ public:
     }
     bool hasRelativeMTask(LogicMTask* relativep) const { return m_edgeSet.count(relativep); }

-    template
+    template
     void checkRelativesCp() const {
-        constexpr GraphWay way{T_Way};
-        for (const V3GraphEdge& edge : edges()) {
+        constexpr GraphWay way{N_Way};
+        for (const V3GraphEdge& edge : edges()) {
             const LogicMTask* const relativep
-                = static_cast(edge.furtherp());
+                = static_cast(edge.furtherp());
             const uint32_t cachedCp = static_cast(edge).cachedCp(way);
             const uint32_t cp = relativep->critPathCost(way.invert()) + relativep->stepCost();
             partCheckCachedScoreVsActual(cachedCp, cp);
@@ -479,14 +479,14 @@ public:
     void setCritPathCost(GraphWay way, uint32_t cost) { m_critPathCost[way] = cost; }
     uint32_t critPathCost(GraphWay way) const { return m_critPathCost[way]; }

-    template
+    template
     uint32_t critPathCostWithout(const V3GraphEdge* withoutp) const {
-        const GraphWay way{T_Way};
+        const GraphWay way{N_Way};
         const GraphWay inv = way.invert();
         // Compute the critical path cost wayward to this node, without considering edge
         // 'withoutp'. We need to look at two edges at most, the critical path if that is not via
         // 'withoutp', or the second-worst path, if the critical path is via 'withoutp'.
-        UDEBUGONLY(UASSERT(withoutp->furtherp() == this,
+        UDEBUGONLY(UASSERT(withoutp->furtherp() == this,
                            "In critPathCostWithout(), edge 'withoutp' must further to 'this'"););
         const EdgeHeap& edgeHeap = m_edgeHeap[inv];
         const EdgeHeap::Node* const maxp = edgeHeap.max();
@@ -690,9 +690,9 @@ MTaskEdge::MTaskEdge(V3Graph* graphp, LogicMTask* fromp, LogicMTask* top, int we
     top->addRelativeEdge(this);
 }

-template
+template
 LogicMTask* MTaskEdge::furtherMTaskp() const {
-    return static_cast(this->furtherp());
+    return static_cast(this->furtherp());
 }
 LogicMTask* MTaskEdge::fromMTaskp() const { return static_cast(fromp()); }
 LogicMTask* MTaskEdge::toMTaskp() const { return static_cast(top()); }
@@ -716,9 +716,9 @@ void MTaskEdge::resetCriticalPaths() {

 // Look at vertex costs (in one way) to form critical paths for each
 // vertex.
-template
+template
 static void partInitHalfCriticalPaths(V3Graph& mTaskGraph, bool checkOnly) {
-    constexpr GraphWay way{T_Way};
+    constexpr GraphWay way{N_Way};
     constexpr GraphWay rev = way.invert();
     GraphStreamUnordered order{&mTaskGraph, way};
     for (const V3GraphVertex* vertexp; (vertexp = order.nextp());) {
@@ -776,7 +776,7 @@ static void partCheckCriticalPaths(V3Graph& mTaskGraph) {

 // ######################################################################
 // PropagateCp

-template
+class PropagateCp final {
     // Propagate increasing critical path (CP) costs through a graph.
    //
@@ -862,7 +862,7 @@ private:

 public:
     void cpHasIncreased(V3GraphVertex* vxp, uint32_t newInclusiveCp) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         constexpr GraphWay inv{way.invert()};

         // For *vxp, whose CP-inclusive has just increased to
@@ -871,7 +871,7 @@ public:
         for (V3GraphEdge& graphEdge : vxp->edges()) {
             MTaskEdge& edge = static_cast(graphEdge);
-            LogicMTask* const relativep = edge.furtherMTaskp();
+            LogicMTask* const relativep = edge.furtherMTaskp();
             EdgeHeap::Node& edgeHeapNode = edge.m_edgeHeapNode[inv];
             if (newInclusiveCp > edgeHeapNode.key().m_score) {
                 relativep->m_edgeHeap[inv].increaseKey(&edgeHeapNode, newInclusiveCp);
@@ -899,7 +899,7 @@ public:
     }

     void go() {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         constexpr GraphWay inv{way.invert()};

         // m_pending maps each pending vertex to the amount that it wayward
@@ -982,7 +982,7 @@ public:

         partInitCriticalPaths(graph);

-        PropagateCp prop{true};
+        PropagateCp prop{true};

         // Seed the propagator with every input node;
         // This should result in the complete graph getting all CP's assigned.
@@ -1316,9 +1316,9 @@ public:
     }

 private:
-    template
+    template
     NewCp newCp(LogicMTask* mtaskp, LogicMTask* otherp, MTaskEdge* mergeEdgep) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         // Return new wayward-CP for mtaskp reflecting its upcoming merge
         // with otherp. Set 'result.propagate' if mtaskp's wayward
         // relatives will see a new wayward CP from this merge.
@@ -1528,9 +1528,9 @@ private:
         }
     }

-    template
+    template
     void siblingPairFromRelatives(V3GraphVertex* mtaskp) {
-        constexpr GraphWay way{T_Way};
+        constexpr GraphWay way{N_Way};
         // Need at least 2 edges
         auto& edges = mtaskp->edges();
         if (!edges.hasMultipleElements()) return;
@@ -1575,7 +1575,7 @@ private:
         // Just make a few pairs.
        constexpr size_t MAX_NONEXHAUSTIVE_PAIRS = 3;
-        if (Exhaustive || n <= 2 * MAX_NONEXHAUSTIVE_PAIRS) {
+        if (N_Exhaustive || n <= 2 * MAX_NONEXHAUSTIVE_PAIRS) {
             const size_t end = n & ~static_cast(1);  // Round down to even, (we want pairs)
             std::sort(sortRecs.begin(), sortRecs.begin() + n);
             for (size_t i = 0; i < end; i += 2) {
diff --git a/src/V3Randomize.cpp b/src/V3Randomize.cpp
index c920cc1cb..b6507c960 100644
--- a/src/V3Randomize.cpp
+++ b/src/V3Randomize.cpp
@@ -934,14 +934,14 @@ class CaptureVisitor final : public VNVisitor {
         return false;
     }

-    template
-    void fixupClassOrPackage(AstNode* memberp, NodeT refp) {
+    template
+    void fixupClassOrPackage(AstNode* memberp, T_Node refp) {
         AstNodeModule* const declClassp = VN_AS(memberp->user2p(), NodeModule);
         if (declClassp != m_targetp) refp->classOrPackagep(declClassp);
     }

-    template
-    bool isReferenceToInnerMember(NodeT nodep) {
+    template
+    bool isReferenceToInnerMember(T_Node nodep) {
         return VN_IS(nodep->fromp(), LambdaArgRef);
     }
diff --git a/src/V3Timing.cpp b/src/V3Timing.cpp
index ff0514d40..d8cc6bee9 100644
--- a/src/V3Timing.cpp
+++ b/src/V3Timing.cpp
@@ -233,8 +233,8 @@ class TimingSuspendableVisitor final : public VNVisitor {
             if (passFlag(parentp, depp, flag)) propagateFlags(depVxp, flag);
         }
     }
-    template
-    void propagateFlagsIf(DepVtx* const vxp, NodeFlag flag, Predicate p) {
+    template
+    void propagateFlagsIf(DepVtx* const vxp, NodeFlag flag, T_Predicate p) {
         auto* const parentp = vxp->nodep();
         for (V3GraphEdge& edge : vxp->outEdges()) {
             auto* const depVxp = static_cast(edge.top());
@@ -242,8 +242,8 @@
             if (p(&edge) && passFlag(parentp, depp, flag)) propagateFlagsIf(depVxp, flag, p);
         }
     }
-    template
-    void propagateFlagsReversedIf(DepVtx* const vxp, NodeFlag flag, Predicate p) {
+    template
+    void propagateFlagsReversedIf(DepVtx* const vxp, NodeFlag flag, T_Predicate p) {
         auto* const parentp = vxp->nodep();
         for (V3GraphEdge& edge : vxp->inEdges()) {
             auto* const depVxp = static_cast(edge.fromp());