Internals: Reformat with new settings (last commit). No functional change.

Wilson Snyder 2024-08-26 21:43:34 -04:00
parent bde4097df2
commit ae35be9102
23 changed files with 433 additions and 816 deletions
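
The hunks below all follow one pattern: code that the old settings wrapped at roughly 79 columns is repacked to a roughly 100-column limit, collapsing multi-line argument lists where they fit. As a rough illustration only, a yapf invocation like the following reproduces that kind of change; the exact style configuration Verilator uses is an assumption here and is not shown in this diff:

    # Sketch only: assumes yapf is installed (pip install yapf); the style
    # options are inferred from the visible line widths, not taken from
    # Verilator's actual configuration.
    from yapf.yapflib.yapf_api import FormatCode

    OLD = ("parser.add_argument('--debug',\n"
           "                    action='store_const',\n"
           "                    const=9,\n"
           "                    help='enable debug')\n")

    # With column_limit raised to 100, the call packs onto a single line,
    # matching the "-"/"+" pairs in the hunks below.
    new_src, changed = FormatCode(OLD, style_config='{based_on_style: pep8, column_limit: 100}')
    print(new_src, end="")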

View File

@@ -16,8 +16,7 @@ parser = argparse.ArgumentParser(
 For documentation see
 https://verilator.org/guide/latest/exe_verilator_ccache_report.html""",
-    epilog=
-    """Copyright 2002-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.
@@ -67,30 +66,23 @@ else:
     wnames = max(len(_) for _ in results) + 1
     wresults = max(len(_) for _ in results.values()) + 1

     for k in sorted(results.keys()):
-        args.o.write("{:{wnames}} : {:{wresults}} : {}s\n".format(
-            k,
-            results[k],
-            elapsed[k].total_seconds(),
-            wnames=wnames,
-            wresults=wresults))
+        args.o.write("{:{wnames}} : {:{wresults}} : {}s\n".format(k,
+                                                                  results[k],
+                                                                  elapsed[k].total_seconds(),
+                                                                  wnames=wnames,
+                                                                  wresults=wresults))

     args.o.write("\nSummary:\n")
     counts = collections.Counter(_ for _ in results.values())
     total = sum(counts.values())
     for k in sorted(counts.keys()):
         c = counts[k]
-        args.o.write("{:{width}}| {} ({:.2%})\n".format(k,
-                                                        c,
-                                                        c / total,
-                                                        width=wresults))
+        args.o.write("{:{width}}| {} ({:.2%})\n".format(k, c, c / total, width=wresults))

     args.o.write("\nLongest:\n")
-    longest = sorted(list(elapsed.items()),
-                     key=lambda kv: -kv[1].total_seconds())
+    longest = sorted(list(elapsed.items()), key=lambda kv: -kv[1].total_seconds())
     for i, (k, v) in enumerate(longest):
-        args.o.write("{:{width}}| {}s\n".format(k,
-                                                v.total_seconds(),
-                                                width=wnames))
+        args.o.write("{:{width}}| {}s\n".format(k, v.total_seconds(), width=wnames))
         if i > 4:
             break

View File

@@ -47,8 +47,7 @@ def diff_dir(a, b):
             diff_file(a, b)
             anyfile = True
     if not anyfile:
-        sys.stderr.write(
-            "%Warning: No .tree files found that have similar base names\n")
+        sys.stderr.write("%Warning: No .tree files found that have similar base names\n")


 def diff_file(a, b):
@@ -109,18 +108,14 @@ parser = argparse.ArgumentParser(
 Verilator_difftree is used for debugging Verilator tree output files.
 It performs a diff between two files, or all files common between two
 directories, ignoring irrelevant pointer differences.""",
-    epilog=
-    """Copyright 2005-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2005-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")

-parser.add_argument('--debug',
-                    action='store_const',
-                    const=9,
-                    help='enable debug')
+parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
 parser.add_argument('--no-lineno',
                     action='store_false',
                     help='do not show differences in line numbering')

View File

@@ -15,11 +15,7 @@ LongestVcdStrValueLength = 0
 Threads = collections.defaultdict(lambda: [])  # List of records per thread id
 Mtasks = collections.defaultdict(lambda: {'elapsed': 0, 'end': 0})
 Cpus = collections.defaultdict(lambda: {'mtask_time': 0})
-Global = {
-    'args': {},
-    'cpuinfo': collections.defaultdict(lambda: {}),
-    'stats': {}
-}
+Global = {'args': {}, 'cpuinfo': collections.defaultdict(lambda: {}), 'stats': {}}
 ElapsedTime = None  # total elapsed time
 ExecGraphTime = 0  # total elapsed time excuting an exec graph
 ExecGraphIntervals = []  # list of (start, end) pairs
@@ -31,8 +27,7 @@ def read_data(filename):
     with open(filename, "r", encoding="utf8") as fh:
         re_thread = re.compile(r'^VLPROFTHREAD (\d+)$')
         re_record = re.compile(r'^VLPROFEXEC (\S+) (\d+)(.*)$')
-        re_payload_mtaskBegin = re.compile(
-            r'id (\d+) predictStart (\d+) cpu (\d+)')
+        re_payload_mtaskBegin = re.compile(r'id (\d+) predictStart (\d+) cpu (\d+)')
         re_payload_mtaskEnd = re.compile(r'id (\d+) predictCost (\d+)')
         re_arg1 = re.compile(r'VLPROF arg\s+(\S+)\+([0-9.]*)\s*')
@@ -57,8 +52,7 @@ def read_data(filename):
                 tick = int(tick)
                 payload = payload.strip()
                 if kind == "SECTION_PUSH":
-                    LongestVcdStrValueLength = max(LongestVcdStrValueLength,
-                                                   len(payload))
+                    LongestVcdStrValueLength = max(LongestVcdStrValueLength, len(payload))
                     SectionStack.append(payload)
                     Sections.append((tick, tuple(SectionStack)))
                 elif kind == "SECTION_POP":
@@ -66,15 +60,13 @@ def read_data(filename):
                     SectionStack.pop()
                     Sections.append((tick, tuple(SectionStack)))
                 elif kind == "MTASK_BEGIN":
-                    mtask, predict_start, ecpu = re_payload_mtaskBegin.match(
-                        payload).groups()
+                    mtask, predict_start, ecpu = re_payload_mtaskBegin.match(payload).groups()
                     mtask = int(mtask)
                     predict_start = int(predict_start)
                     ecpu = int(ecpu)
                     mTaskThread[mtask] = thread
                     records = Threads[thread]
-                    assert not records or records[-1]['start'] <= records[-1][
-                        'end'] <= tick
+                    assert not records or records[-1]['start'] <= records[-1]['end'] <= tick
                     records.append({
                         'start': tick,
                         'mtask': mtask,
@@ -85,8 +77,7 @@ def read_data(filename):
                     Mtasks[mtask]['thread'] = thread
                     Mtasks[mtask]['predict_start'] = predict_start
                 elif kind == "MTASK_END":
-                    mtask, predict_cost = re_payload_mtaskEnd.match(
-                        payload).groups()
+                    mtask, predict_cost = re_payload_mtaskEnd.match(payload).groups()
                     mtask = int(mtask)
                     predict_cost = int(predict_cost)
                     begin = Mtasks[mtask]['begin']
@@ -163,8 +154,7 @@ def report():
     print("\nSummary:")
     print("  Total elapsed time = {} rdtsc ticks".format(ElapsedTime))
-    print("  Parallelized code  = {:.2%} of elapsed time".format(
-        ExecGraphTime / ElapsedTime))
+    print("  Parallelized code  = {:.2%} of elapsed time".format(ExecGraphTime / ElapsedTime))
     print("  Total threads      = %d" % nthreads)
     print("  Total CPUs used    = %d" % ncpus)
     print("  Total mtasks       = %d" % len(Mtasks))
@@ -176,15 +166,12 @@ def report():
     if nthreads > ncpus:
         print()
-        print("%%Warning: There were fewer CPUs (%d) than threads (%d)." %
-              (ncpus, nthreads))
+        print("%%Warning: There were fewer CPUs (%d) than threads (%d)." % (ncpus, nthreads))
         print("         : See docs on use of numactl.")
     else:
         if 'cpu_socket_cores_warning' in Global:
             print()
-            print(
-                "%Warning: Multiple threads scheduled on same hyperthreaded core."
-            )
+            print("%Warning: Multiple threads scheduled on same hyperthreaded core.")
             print("         : See docs on use of numactl.")
         if 'cpu_sockets_warning' in Global:
             print()
@@ -228,8 +215,7 @@ def report_mtasks():
     serialTime = ElapsedTime - ExecGraphTime

     def subReport(elapsed, work):
-        print("  Thread utilization = {:7.2%}".format(work /
-                                                      (elapsed * nthreads)))
+        print("  Thread utilization = {:7.2%}".format(work / (elapsed * nthreads)))
         print("  Speedup            = {:6.3}x".format(work / elapsed))

     print("\nParallelized code, measured:")
@@ -256,8 +242,7 @@
         if Mtasks[mtask]['elapsed'] > 0:
             if Mtasks[mtask]['predict_cost'] == 0:
                 Mtasks[mtask]['predict_cost'] = 1  # don't log(0) below
-            p2e_ratio = math.log(Mtasks[mtask]['predict_cost'] /
-                                 Mtasks[mtask]['elapsed'])
+            p2e_ratio = math.log(Mtasks[mtask]['predict_cost'] / Mtasks[mtask]['elapsed'])
             p2e_ratios.append(p2e_ratio)

             if p2e_ratio > max_p2e:
@@ -269,18 +254,14 @@

     print("\nMTask statistics:")
     print("  Longest mtask id = {}".format(long_mtask))
-    print("  Longest mtask time = {:.2%} of time elapsed in parallelized code".
-          format(long_mtask_time / ExecGraphTime))
+    print("  Longest mtask time = {:.2%} of time elapsed in parallelized code".format(
+        long_mtask_time / ExecGraphTime))
     print("  min log(p2e) = %0.3f" % min_p2e, end="")
-    print(" from mtask %d (predict %d," %
-          (min_mtask, Mtasks[min_mtask]['predict_cost']),
-          end="")
+    print(" from mtask %d (predict %d," % (min_mtask, Mtasks[min_mtask]['predict_cost']), end="")
     print(" elapsed %d)" % Mtasks[min_mtask]['elapsed'])
     print("  max log(p2e) = %0.3f" % max_p2e, end="")
-    print(" from mtask %d (predict %d," %
-          (max_mtask, Mtasks[max_mtask]['predict_cost']),
-          end="")
+    print(" from mtask %d (predict %d," % (max_mtask, Mtasks[max_mtask]['predict_cost']), end="")
     print(" elapsed %d)" % Mtasks[max_mtask]['elapsed'])

     stddev = statistics.pstdev(p2e_ratios)
@@ -315,8 +296,8 @@ def report_cpus():
         model = cpuinfo['model_name']

         print("  {:3d} | {:7.2%} / {:16d} | {:>6s} | {:>4s} | {}".format(
-            cpu, Cpus[cpu]['mtask_time'] / ElapsedTime,
-            Cpus[cpu]['mtask_time'], socket, core, model))
+            cpu, Cpus[cpu]['mtask_time'] / ElapsedTime, Cpus[cpu]['mtask_time'], socket, core,
+            model))

     if len(Global['cpu_sockets']) > 1:
         Global['cpu_sockets_warning'] = True
@@ -366,8 +347,8 @@ def report_sections():
     def printTree(prefix, name, entries, tree):
         print("  {:7.2%} | {:7.2%} | {:8} | {:10.2f} | {}".format(
-            treeSum(tree) / ElapsedTime, tree[0] / ElapsedTime, tree[2],
-            tree[2] / entries, prefix + name))
+            treeSum(tree) / ElapsedTime, tree[0] / ElapsedTime, tree[2], tree[2] / entries,
+            prefix + name))
         for k in sorted(tree[1], key=lambda _: -treeSum(tree[1][_])):
             printTree(prefix + "  ", k, tree[2], tree[1][k])
@@ -438,10 +419,8 @@ def write_vcd(filename):
             addValue(code, start, mtask)
             addValue(code, end, None)

-    tStart = sorted(_['start'] for records in Threads.values()
-                    for _ in records)
-    tEnd = sorted(_['end'] for records in Threads.values()
-                  for _ in records)
+    tStart = sorted(_['start'] for records in Threads.values() for _ in records)
+    tEnd = sorted(_['end'] for records in Threads.values() for _ in records)

     # Predicted graph
     for start, end in ExecGraphIntervals:
@@ -455,11 +434,10 @@
         # Predict mtasks that fill the time the execution occupied
         for mtask in Mtasks:
             thread = Mtasks[mtask]['thread']
-            pred_scaled_start = start + int(
-                Mtasks[mtask]['predict_start'] * measured_scaling)
+            pred_scaled_start = start + int(Mtasks[mtask]['predict_start'] * measured_scaling)
             pred_scaled_end = start + int(
-                (Mtasks[mtask]['predict_start'] +
-                 Mtasks[mtask]['predict_cost']) * measured_scaling)
+                (Mtasks[mtask]['predict_start'] + Mtasks[mtask]['predict_cost']) *
+                measured_scaling)
             if pred_scaled_start == pred_scaled_end:
                 continue
@@ -545,8 +523,7 @@ Verilator_gantt creates a visual representation to help analyze Verilator

 For documentation see
 https://verilator.org/guide/latest/exe_verilator_gantt.html""",
-    epilog=
-    """Copyright 2018-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2018-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.
@@ -554,12 +531,8 @@ Version 2.0.

 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
 parser.add_argument('--debug', action='store_true', help='enable debug')
-parser.add_argument('--no-vcd',
-                    help='disable creating vcd',
-                    action='store_true')
-parser.add_argument('--vcd',
-                    help='filename for vcd outpue',
-                    default='profile_exec.vcd')
+parser.add_argument('--no-vcd', help='disable creating vcd', action='store_true')
+parser.add_argument('--vcd', help='filename for vcd outpue', default='profile_exec.vcd')
 parser.add_argument('filename',
                     help='input profile_exec.dat filename to process',
                     default='profile_exec.dat')

View File

@@ -34,9 +34,8 @@ def profcfunc(filename):

         # Older gprofs have no call column for single-call functions
         #  %time  cumesec  selfsec  {stuff}  name
-        match = re.match(
-            r'^\s*([0-9.]+)\s+[0-9.]+\s+([0-9.]+)\s+[^a-zA-Z_]*([a-zA-Z_].*)$',
-            line)
+        match = re.match(r'^\s*([0-9.]+)\s+[0-9.]+\s+([0-9.]+)\s+[^a-zA-Z_]*([a-zA-Z_].*)$',
+                         line)
         if match:
             pct = float(match.group(1))
             sec = float(match.group(2))
@@ -143,12 +142,9 @@
     print("  These are split into three categories:")
     print("    C++:     Time in non-Verilated C++ code")
     print("    Prof:    Time in profile overhead")
-    print("    VBlock:  Time attributable to a block in a" +
-          " Verilog file and line")
-    print("    VCommon: Time in a Verilated module," +
-          " due to all parts of the design")
-    print("    VLib:    Time in Verilated common libraries," +
-          " called by the Verilated code")
+    print("    VBlock:  Time attributable to a block in a" + " Verilog file and line")
+    print("    VCommon: Time in a Verilated module," + " due to all parts of the design")
+    print("    VLib:    Time in Verilated common libraries," + " called by the Verilated code")
     print()

     print("  %   cumulative   self ")
@@ -156,13 +152,11 @@
           "s type filename and line number") % "design")

     cume = 0
-    for func in sorted(vfuncs.keys(),
-                       key=lambda f: vfuncs[f]['sec'],
-                       reverse=True):
+    for func in sorted(vfuncs.keys(), key=lambda f: vfuncs[f]['sec'], reverse=True):
         cume += vfuncs[func]['sec']
         print(("%6.2f %9.2f %8.2f %10d %-" + str(design_width) + "s %s") %
-              (vfuncs[func]['pct'], cume, vfuncs[func]['sec'],
-               vfuncs[func]['calls'], vfuncs[func]['design'], func))
+              (vfuncs[func]['pct'], cume, vfuncs[func]['sec'], vfuncs[func]['calls'],
+               vfuncs[func]['design'], func))


 ######################################################################
@@ -180,18 +174,14 @@ in each Verilog block.

 For documentation see
 https://verilator.org/guide/latest/exe_verilator_profcfunc.html""",
-    epilog=
-    """Copyright 2002-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")

-parser.add_argument('--debug',
-                    action='store_const',
-                    const=9,
-                    help='enable debug')
+parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
 parser.add_argument('filename', help='input gprof output to process')

 Args = parser.parse_args()

View File

@@ -23,9 +23,7 @@ class VlSphinxExtract:
                     outname = match.group(1)
                     print("Writing %s" % outname)
                     fhw = open(outname, "w", encoding="utf8")  # pylint: disable=consider-using-with
-                    fhw.write(
-                        ".. comment: generated by vl_sphinx_extract from " +
-                        filename + "\n")
+                    fhw.write(".. comment: generated by vl_sphinx_extract from " + filename + "\n")
                     fhw.write(".. code-block::\n")
                 elif re.match(r'^[=a-zA-Z0-9_]', line):
                     fhw = None
@@ -39,18 +37,14 @@ parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
     description="""Read a file and extract documentation data.""",
-    epilog=
-    """ Copyright 2021-2024 by Wilson Snyder. This package is free software;
+    epilog=""" Copyright 2021-2024 by Wilson Snyder. This package is free software;
 you can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")

-parser.add_argument('--debug',
-                    action='store_const',
-                    const=9,
-                    help='enable debug')
+parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
 parser.add_argument('path', help='path to extract from')

 Args = parser.parse_args()

View File

@@ -18,8 +18,7 @@ class VlSphinxFix:
         if os.path.isdir(path):
             for basefile in os.listdir(path):
                 file = os.path.join(path, basefile)
-                if ((basefile != ".") and (basefile != "..")
-                        and basefile not in self.SkipBasenames
-                        and not os.path.islink(file)):
+                if ((basefile != ".") and (basefile != "..") and basefile not in self.SkipBasenames
+                        and not os.path.islink(file)):
                     self.process(file)
         elif re.search(r'\.(html|tex)$', path):
@@ -54,18 +53,14 @@ parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
     description="""Post-process Sphinx HTML.""",
-    epilog=
-    """ Copyright 2021-2024 by Wilson Snyder. This package is free software;
+    epilog=""" Copyright 2021-2024 by Wilson Snyder. This package is free software;
 you can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")

-parser.add_argument('--debug',
-                    action='store_const',
-                    const=9,
-                    help='enable debug')
+parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
 parser.add_argument('path', help='path to edit')

 Args = parser.parse_args()

View File

@@ -23,8 +23,7 @@ def get_vlt_version():
     filename = "../../Makefile"
     with open(filename, "r", encoding="utf8") as fh:
         for line in fh:
-            match = re.search(r"PACKAGE_VERSION *= *([a-z0-9.]+) +([-0-9]+)",
-                              line)
+            match = re.search(r"PACKAGE_VERSION *= *([a-z0-9.]+) +([-0-9]+)", line)
             if match:
                 return match.group(1), match.group(2)
             match = re.search(r"PACKAGE_VERSION *= *([a-z0-9.]+) +devel", line)
@@ -75,8 +74,7 @@ extensions = []
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
 exclude_patterns = [
-    '_build', 'Thumbs.db', '.DS_Store', 'internals.rst', 'xml.rst', 'gen/ex_*',
-    'CONTRIBUTING.rst'
+    '_build', 'Thumbs.db', '.DS_Store', 'internals.rst', 'xml.rst', 'gen/ex_*', 'CONTRIBUTING.rst'
 ]

 # Warn about refs

View File

@@ -25,8 +25,7 @@ class VlFileCopy:
         self.debug = debug

-        with NamedTemporaryFile() as tree_temp, NamedTemporaryFile(
-        ) as meta_temp:
+        with NamedTemporaryFile() as tree_temp, NamedTemporaryFile() as meta_temp:
             vargs = [
                 '--json-only-output',
                 tree_temp.name,
@@ -61,8 +60,7 @@
             print("\t%s " % command)
         status = subprocess.call(command, shell=True)
         if status != 0:
-            raise RuntimeError("Command failed running Verilator with '" +
-                               command + "', stopped")
+            raise RuntimeError("Command failed running Verilator with '" + command + "', stopped")


 #######################################################################
@@ -71,8 +69,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(
         allow_abbrev=False,
         formatter_class=argparse.RawTextHelpFormatter,
-        description=
-        """Example of using Verilator JSON output to copy a list of files to an
+        description="""Example of using Verilator JSON output to copy a list of files to an
 output directory (-odir, defaults to 'copied'), e.g. to easily create a
 tarball of the design to pass to others.
@@ -95,11 +92,7 @@ This file ONLY is placed under the Creative Commons Public Domain, for
 any use, without warranty, 2019 by Wilson Snyder.
 SPDX-License-Identifier: CC0-1.0
 """)
-    parser.add_argument('-debug',
-                        '--debug',
-                        action='store_const',
-                        const=9,
-                        help='enable debug')
+    parser.add_argument('-debug', '--debug', action='store_const', const=9, help='enable debug')
     parser.add_argument('-odir',
                         '--odir',
                         action='store',
@@ -108,9 +101,7 @@ SPDX-License-Identifier: CC0-1.0
                         help='target output directory')
     (args, rem) = parser.parse_known_args()

-    print(
-        "NOTE: vl_file_copy is only an example starting point for writing your own tool."
-    )
+    print("NOTE: vl_file_copy is only an example starting point for writing your own tool.")
     # That is:
     # 1. We will accept basic patches
     # 2. We are not expecting to make this globally useful.  (e.g. we don't cleanup obj_dir)

View File

@@ -24,8 +24,7 @@ class VlHierGraph:
         self.next_vertex_number = 0
         self.addr_to_number = {}

-        with NamedTemporaryFile() as tree_temp, NamedTemporaryFile(
-        ) as meta_temp:
+        with NamedTemporaryFile() as tree_temp, NamedTemporaryFile() as meta_temp:
             vargs = [
                 '--json-only-output',
                 tree_temp.name,
@@ -45,9 +44,7 @@
             fh.write("digraph {\n")
             fh.write("  dpi=300;\n")
             fh.write("  order=LR;\n")
-            fh.write(
-                "  node [fontsize=8 shape=\"box\" margin=0.01 width=0 height=0]"
-            )
+            fh.write("  node [fontsize=8 shape=\"box\" margin=0.01 width=0 height=0]")
             fh.write("  edge [fontsize=6]")
             # Find cells
             modules = self.flatten(self.tree, lambda n: n['type'] == "MODULE")
@@ -101,8 +98,7 @@
             print("\t%s " % command)
         status = subprocess.call(command, shell=True)
         if status != 0:
-            raise RuntimeError("Command failed running Verilator with '" +
-                               command + "', stopped")
+            raise RuntimeError("Command failed running Verilator with '" + command + "', stopped")


 #######################################################################
@@ -111,8 +107,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(
         allow_abbrev=False,
         formatter_class=argparse.RawTextHelpFormatter,
-        description=
-        """Example of using Verilator JSON output to create a .dot file showing the
+        description="""Example of using Verilator JSON output to create a .dot file showing the
 design module hierarchy.

 Example usage:
@@ -134,11 +129,7 @@ This file ONLY is placed under the Creative Commons Public Domain, for
 any use, without warranty, 2019 by Wilson Snyder.
 SPDX-License-Identifier: CC0-1.0
 """)
-    parser.add_argument('-debug',
-                        '--debug',
-                        action='store_const',
-                        const=9,
-                        help='enable debug')
+    parser.add_argument('-debug', '--debug', action='store_const', const=9, help='enable debug')
     parser.add_argument('-o',
                         '--o',
                         action='store',
@@ -147,18 +138,14 @@ SPDX-License-Identifier: CC0-1.0
                         help='output filename')
     (args, rem) = parser.parse_known_args()

-    print(
-        "NOTE: vl_hier_graph is only an example starting point for writing your own tool."
-    )
+    print("NOTE: vl_hier_graph is only an example starting point for writing your own tool.")
     # That is:
     # 1. We will accept basic patches
     # 2. We are not expecting to make this globally useful.  (e.g. we don't cleanup obj_dir)
     # 3. "make install" will not install this.
     # 4. This has not had production-worthy validation.
-    fc = VlHierGraph(output_filename=args.o,
-                     debug=args.debug,
-                     verilator_args=rem)
+    fc = VlHierGraph(output_filename=args.o, debug=args.debug, verilator_args=rem)


 ######################################################################
 # Local Variables:

View File

@@ -106,9 +106,8 @@ class VlAnnotations:

     def is_mt_safe_call(self):
         return (not self.is_mt_unsafe_call()
-                and (self.mt_safe or self.mt_safe_postinit or self.pure
-                     or self.requires or self.excludes or self.acquire
-                     or self.release))
+                and (self.mt_safe or self.mt_safe_postinit or self.pure or self.requires
+                     or self.excludes or self.acquire or self.release))

     def is_pure_call(self):
         return self.pure
@@ -203,9 +202,7 @@ class FunctionInfo:
     annotations: VlAnnotations
     ftype: FunctionType
-    _hash: Optional[int] = dataclasses.field(default=None,
-                                             init=False,
-                                             repr=False)
+    _hash: Optional[int] = dataclasses.field(default=None, init=False, repr=False)

     @property
     def name(self):
@@ -220,15 +217,13 @@
         return self._hash

     def __eq__(self, other):
-        return (self.usr == other.usr and self.file == other.file
-                and self.line == other.line)
+        return (self.usr == other.usr and self.file == other.file and self.line == other.line)

     def copy(self, /, **changes):
         return dataclasses.replace(self, **changes)

     @staticmethod
-    def from_decl_file_line_and_refd_node(file: str, line: int,
-                                          refd: clang.cindex.Cursor,
+    def from_decl_file_line_and_refd_node(file: str, line: int, refd: clang.cindex.Cursor,
                                           annotations: VlAnnotations):
         file = os.path.abspath(file)
         refd = refd.canonical
@@ -277,14 +272,11 @@ class Diagnostic:
     source_ctx: FunctionInfo
     kind: DiagnosticKind
-    _hash: Optional[int] = dataclasses.field(default=None,
-                                             init=False,
-                                             repr=False)
+    _hash: Optional[int] = dataclasses.field(default=None, init=False, repr=False)

     def __hash__(self):
         if not self._hash:
-            self._hash = hash(
-                hash(self.target) ^ hash(self.source_ctx) ^ hash(self.kind))
+            self._hash = hash(hash(self.target) ^ hash(self.source_ctx) ^ hash(self.kind))
         return self._hash
@@ -292,9 +284,9 @@ class CallAnnotationsValidator:
     def __init__(self, diagnostic_cb: Callable[[Diagnostic], None],
                  is_ignored_top_level: Callable[[clang.cindex.Cursor], bool],
-                 is_ignored_def: Callable[
-                     [clang.cindex.Cursor, clang.cindex.Cursor], bool],
-                 is_ignored_call: Callable[[clang.cindex.Cursor], bool]):
+                 is_ignored_def: Callable[[clang.cindex.Cursor, clang.cindex.Cursor],
+                                          bool], is_ignored_call: Callable[[clang.cindex.Cursor],
+                                                                           bool]):
         self._diagnostic_cb = diagnostic_cb
         self._is_ignored_top_level = is_ignored_top_level
         self._is_ignored_call = is_ignored_call
@@ -329,8 +321,7 @@
         with open(source_file, "r", encoding="utf-8") as file:
             for line in file:
                 line = line.strip()
-                match = re.fullmatch(
-                    r"^#\s*(define\s+(\w+)(?:\s+(.*))?|include\s+.*)$", line)
+                match = re.fullmatch(r"^#\s*(define\s+(\w+)(?:\s+(.*))?|include\s+.*)$", line)
                 if match:
                     if match.group(1).startswith("define"):
                         key = match.group(2)
@@ -341,16 +332,14 @@
         return defs

     @staticmethod
-    def filter_out_unsupported_compiler_args(
-            args: list[str]) -> tuple[list[str], dict[str, str]]:
+    def filter_out_unsupported_compiler_args(args: list[str]) -> tuple[list[str], dict[str, str]]:
         filtered_args = []
         defines = {}
         args_iter = iter(args)
         try:
             while arg := next(args_iter):
                 # Skip positional arguments (input file name).
-                if not arg.startswith("-") and (arg.endswith(".cpp")
-                                                or arg.endswith(".c")
+                if not arg.startswith("-") and (arg.endswith(".cpp") or arg.endswith(".c")
                                                 or arg.endswith(".h")):
                     continue
@@ -367,8 +356,7 @@
                 # Preserved options with separate value argument.
                 if arg in [
                         "-x"
-                        "-Xclang", "-I", "-isystem", "-iquote", "-include",
-                        "-include-pch"
+                        "-Xclang", "-I", "-isystem", "-iquote", "-include", "-include-pch"
                 ]:
                     filtered_args += [arg, next(args_iter)]
                     continue
@@ -406,14 +394,12 @@
         return (filtered_args, defines)

-    def compile_and_analyze_file(self, source_file: str,
-                                 compiler_args: list[str],
+    def compile_and_analyze_file(self, source_file: str, compiler_args: list[str],
                                  build_dir: Optional[str]):
         filename = os.path.abspath(source_file)
         initial_cwd = "."
-        filtered_args, defines = self.filter_out_unsupported_compiler_args(
-            compiler_args)
+        filtered_args, defines = self.filter_out_unsupported_compiler_args(compiler_args)
         defines.update(self.parse_initial_defines(source_file))

         if build_dir:
@@ -451,8 +437,7 @@
             self._diagnostic_cb(Diagnostic(target, source, source_ctx, kind))
         else:
             self._diagnostic_cb(
-                Diagnostic(FunctionInfo.from_node(target), source, source_ctx,
-                           kind))
+                Diagnostic(FunctionInfo.from_node(target), source, source_ctx, kind))

     def iterate_children(self, children: Iterable[clang.cindex.Cursor],
                          handler: Callable[[clang.cindex.Cursor], None]):
@@ -465,8 +450,7 @@
     @staticmethod
     def get_referenced_node_info(
         node: clang.cindex.Cursor
-    ) -> tuple[bool, Optional[clang.cindex.Cursor], VlAnnotations,
-               Iterable[clang.cindex.Cursor]]:
+    ) -> tuple[bool, Optional[clang.cindex.Cursor], VlAnnotations, Iterable[clang.cindex.Cursor]]:
         if not node.spelling and not node.displayname:
             return (False, None, VlAnnotations(), [])
@@ -480,8 +464,7 @@
         annotations = VlAnnotations.from_nodes_list(children)
         return (True, refd, annotations, children)

-    def check_mt_safe_call(self, node: clang.cindex.Cursor,
-                           refd: clang.cindex.Cursor,
+    def check_mt_safe_call(self, node: clang.cindex.Cursor, refd: clang.cindex.Cursor,
                            annotations: VlAnnotations):
         is_mt_safe = False
@@ -513,8 +496,7 @@
             # we are calling local method. It is MT safe
             # only if this method is also only calling local methods or
             # MT-safe methods
-            self.iterate_children(refd.get_children(),
-                                  self.dispatch_node_inside_definition)
+            self.iterate_children(refd.get_children(), self.dispatch_node_inside_definition)
             is_mt_safe = True
         # class/struct member
         elif refn and refn.kind == CursorKind.MEMBER_REF_EXPR and refn.referenced:
@@ -525,18 +507,15 @@
             if self.is_constructor_context() and refn.semantic_parent:
                 # we are in constructor, so calling local members is MT_SAFE,
                 # make sure object that we are calling is local to the constructor
-                constructor_class = self._constructor_context[
-                    -1].semantic_parent
+                constructor_class = self._constructor_context[-1].semantic_parent
                 if refn.semantic_parent.spelling == constructor_class.spelling:
                     if check_class_member_exists(constructor_class, refn):
                         is_mt_safe = True
                     else:
                         # check if this class inherits from some base class
-                        base_class = get_base_class(constructor_class,
-                                                    refn.semantic_parent)
+                        base_class = get_base_class(constructor_class, refn.semantic_parent)
                         if base_class:
-                            if check_class_member_exists(
-                                    base_class.get_declaration(), refn):
+                            if check_class_member_exists(base_class.get_declaration(), refn):
                                 is_mt_safe = True
         # variable
         elif refn and refn.kind == CursorKind.DECL_REF_EXPR and refn.referenced:
@@ -567,8 +546,7 @@

     # Call handling
-    def process_method_call(self, node: clang.cindex.Cursor,
-                            refd: clang.cindex.Cursor,
+    def process_method_call(self, node: clang.cindex.Cursor, refd: clang.cindex.Cursor,
                             annotations: VlAnnotations):
         assert self._call_location
         ctx = self._call_location.annotations
@@ -576,58 +554,48 @@
         # MT-safe context
         if ctx.is_mt_safe_context():
             if not self.check_mt_safe_call(node, refd, annotations):
-                self.emit_diagnostic(
-                    FunctionInfo.from_node(refd, refd, annotations),
-                    DiagnosticKind.NON_MT_SAFE_CALL_IN_MT_SAFE_CTX)
+                self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                     DiagnosticKind.NON_MT_SAFE_CALL_IN_MT_SAFE_CTX)

         # stable tree context
         if ctx.is_stabe_tree_context():
             if annotations.is_mt_unsafe_call() or not (
-                    annotations.is_stabe_tree_call()
-                    or annotations.is_pure_call()
+                    annotations.is_stabe_tree_call() or annotations.is_pure_call()
                     or self.check_mt_safe_call(node, refd, annotations)):
-                self.emit_diagnostic(
-                    FunctionInfo.from_node(refd, refd, annotations),
-                    DiagnosticKind.NON_STABLE_TREE_CALL_IN_STABLE_TREE_CTX)
+                self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                     DiagnosticKind.NON_STABLE_TREE_CALL_IN_STABLE_TREE_CTX)

         # pure context
         if ctx.is_pure_context():
             if not annotations.is_pure_call():
-                self.emit_diagnostic(
-                    FunctionInfo.from_node(refd, refd, annotations),
-                    DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)
+                self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                     DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)

-    def process_function_call(self, refd: clang.cindex.Cursor,
-                              annotations: VlAnnotations):
+    def process_function_call(self, refd: clang.cindex.Cursor, annotations: VlAnnotations):
         assert self._call_location
         ctx = self._call_location.annotations

         # MT-safe context
         if ctx.is_mt_safe_context():
             if not annotations.is_mt_safe_call():
-                self.emit_diagnostic(
-                    FunctionInfo.from_node(refd, refd, annotations),
-                    DiagnosticKind.NON_MT_SAFE_CALL_IN_MT_SAFE_CTX)
+                self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                     DiagnosticKind.NON_MT_SAFE_CALL_IN_MT_SAFE_CTX)

         # stable tree context
         if ctx.is_stabe_tree_context():
-            if annotations.is_mt_unsafe_call() or not (
-                    annotations.is_pure_call()
-                    or annotations.is_mt_safe_call()
-                    or annotations.is_stabe_tree_call()):
-                self.emit_diagnostic(
-                    FunctionInfo.from_node(refd, refd, annotations),
-                    DiagnosticKind.NON_STABLE_TREE_CALL_IN_STABLE_TREE_CTX)
+            if annotations.is_mt_unsafe_call() or not (annotations.is_pure_call()
+                                                       or annotations.is_mt_safe_call()
+                                                       or annotations.is_stabe_tree_call()):
+                self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                     DiagnosticKind.NON_STABLE_TREE_CALL_IN_STABLE_TREE_CTX)

         # pure context
         if ctx.is_pure_context():
             if not annotations.is_pure_call():
-                self.emit_diagnostic(
-                    FunctionInfo.from_node(refd, refd, annotations),
-                    DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)
+                self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                     DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)

-    def process_constructor_call(self, refd: clang.cindex.Cursor,
-                                 annotations: VlAnnotations):
+    def process_constructor_call(self, refd: clang.cindex.Cursor, annotations: VlAnnotations):
         assert self._call_location
         ctx = self._call_location.annotations
@@ -635,31 +603,26 @@
         # only if they call local methods or MT-safe functions.
         if ctx.is_mt_safe_context() or self.is_constructor_context():
             self._constructor_context.append(refd)
-            self.iterate_children(refd.get_children(),
-                                  self.dispatch_node_inside_definition)
+            self.iterate_children(refd.get_children(), self.dispatch_node_inside_definition)
             self._constructor_context.pop()

         # stable tree context
         if ctx.is_stabe_tree_context():
             self._constructor_context.append(refd)
-            self.iterate_children(refd.get_children(),
-                                  self.dispatch_node_inside_definition)
+            self.iterate_children(refd.get_children(), self.dispatch_node_inside_definition)
             self._constructor_context.pop()

         # pure context
         if ctx.is_pure_context():
-            if not annotations.is_pure_call(
-            ) and not refd.is_default_constructor():
-                self.emit_diagnostic(
-                    FunctionInfo.from_node(refd, refd, annotations),
-                    DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)
+            if not annotations.is_pure_call() and not refd.is_default_constructor():
+                self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                     DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)

     def dispatch_call_node(self, node: clang.cindex.Cursor):
         [supported, refd, annotations, _] = self.get_referenced_node_info(node)
         if not supported:
-            self.iterate_children(node.get_children(),
-                                  self.dispatch_node_inside_definition)
+            self.iterate_children(node.get_children(), self.dispatch_node_inside_definition)
             return True

         assert refd is not None
@@ -676,19 +639,14 @@
         assert self._call_location is not None
         node_file = os.path.abspath(node.location.file.name)
-        self._call_location = self._call_location.copy(file=node_file,
-                                                       line=node.location.line)
+        self._call_location = self._call_location.copy(file=node_file, line=node.location.line)

         # Standalone functions and static class methods
         if (refd.kind == CursorKind.FUNCTION_DECL
-                or refd.kind == CursorKind.CXX_METHOD
-                and refd.is_static_method()):
+                or refd.kind == CursorKind.CXX_METHOD and refd.is_static_method()):
             self.process_function_call(refd, annotations)
         # Function pointer
-        elif refd.kind in [
-                CursorKind.VAR_DECL, CursorKind.FIELD_DECL,
-                CursorKind.PARM_DECL
-        ]:
+        elif refd.kind in [CursorKind.VAR_DECL, CursorKind.FIELD_DECL, CursorKind.PARM_DECL]:
             self.process_function_call(refd, annotations)
         # Non-static class methods
         elif refd.kind == CursorKind.CXX_METHOD:
@@ -726,14 +684,13 @@
             if self.dispatch_call_node(node) is False:
                 return None
         elif node.is_definition() and node.kind in [
-                CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL,
-                CursorKind.CONSTRUCTOR, CursorKind.CONVERSION_FUNCTION
+                CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL, CursorKind.CONSTRUCTOR,
+                CursorKind.CONVERSION_FUNCTION
         ]:
             self.process_function_definition(node)
             return None

-        return self.iterate_children(node.get_children(),
-                                     self.dispatch_node_inside_definition)
+        return self.iterate_children(node.get_children(), self.dispatch_node_inside_definition)

     def process_function_definition(self, node: clang.cindex.Cursor):
         [supported, refd, annotations, _] = self.get_referenced_node_info(node)
@@ -768,14 +725,12 @@
             self._caller = FunctionInfo.from_node(node, refd, def_annotations)
             self._call_location = self._caller

-            self.emit_diagnostic(
-                FunctionInfo.from_node(refd, refd, annotations),
-                DiagnosticKind.ANNOTATIONS_DEF_DECL_MISMATCH)
+            self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
+                                 DiagnosticKind.ANNOTATIONS_DEF_DECL_MISMATCH)

         # Use concatenation of definition and declaration annotations
         # for calls validation.
-        self._caller = FunctionInfo.from_node(node, refd,
-                                              def_annotations | annotations)
+        self._caller = FunctionInfo.from_node(node, refd, def_annotations | annotations)

         prev_call_location = self._call_location
         self._call_location = self._caller
@@ -793,8 +748,7 @@
         if declarations:
             del self._external_decls[usr]

-        self.iterate_children(node_children,
-                              self.dispatch_node_inside_definition)
+        self.iterate_children(node_children, self.dispatch_node_inside_definition)

         self._call_location = prev_call_location
         self._caller = prev_call_location
@@ -805,8 +759,8 @@

     def dispatch_node(self, node: clang.cindex.Cursor):
         if node.kind in [
-                CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL,
-                CursorKind.CONSTRUCTOR, CursorKind.CONVERSION_FUNCTION
+                CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL, CursorKind.CONSTRUCTOR,
+                CursorKind.CONVERSION_FUNCTION
         ]:
             if node.is_definition():
                 return self.process_function_definition(node)
@@ -815,14 +769,12 @@
         return self.iterate_children(node.get_children(), self.dispatch_node)

-    def process_translation_unit(
-            self, translation_unit: clang.cindex.TranslationUnit):
+    def process_translation_unit(self, translation_unit: clang.cindex.TranslationUnit):
         self._level += 1
         kv_defines = sorted([f"{k}={v}" for k, v in self._defines.items()])
         concat_defines = '\n'.join(kv_defines)
         # List of headers already processed in a TU with specified set of defines.
-        tu_processed_headers = self._processed_headers.setdefault(
-            concat_defines, set())
+        tu_processed_headers = self._processed_headers.setdefault(concat_defines, set())

         for child in translation_unit.cursor.get_children():
             if self._is_ignored_top_level(child):
                 continue
@@ -833,10 +785,8 @@
             self.dispatch_node(child)
         self._level -= 1

-        tu_processed_headers.update([
-            os.path.abspath(str(hdr.source))
-            for hdr in translation_unit.get_includes()
-        ])
+        tu_processed_headers.update(
+            [os.path.abspath(str(hdr.source)) for hdr in translation_unit.get_includes()])


 @dataclass
@@ -857,8 +807,7 @@ def get_filter_funcs(verilator_root: str):
         filename = os.path.abspath(node.location.file.name)
         return not filename.startswith(verilator_root)

-    def is_ignored_def(node: clang.cindex.Cursor,
-                       refd: clang.cindex.Cursor) -> bool:
+    def is_ignored_def(node: clang.cindex.Cursor, refd: clang.cindex.Cursor) -> bool:
         # __*
         if str(refd.spelling).startswith("__"):
             return True
@@ -901,8 +850,7 @@ def precompile_header(compile_command: CompileCommand, tmp_dir: str) -> str:
         os.chdir(compile_command.directory)
         index = Index.create()
-        translation_unit = index.parse(compile_command.filename,
-                                       compile_command.args)
+        translation_unit = index.parse(compile_command.filename, compile_command.args)

         for diag in translation_unit.diagnostics:
             if diag.severity >= clang.cindex.Diagnostic.Error:
                 errors.append(str(diag))
@@ -910,23 +858,20 @@
         if len(errors) == 0:
             pch_file = os.path.join(
                 tmp_dir,
-                f"{compile_command.refid:02}_{os.path.basename(compile_command.filename)}.pch"
-            )
+                f"{compile_command.refid:02}_{os.path.basename(compile_command.filename)}.pch")
             translation_unit.save(pch_file)

         if pch_file:
             return pch_file
-    except (TranslationUnitSaveError, TranslationUnitLoadError,
-            OSError) as exception:
+    except (TranslationUnitSaveError, TranslationUnitLoadError, OSError) as exception:
         print(f"%Warning: {exception}", file=sys.stderr)
     finally:
         os.chdir(initial_cwd)

-    print(
-        f"%Warning: Precompilation failed, skipping: {compile_command.filename}",
-        file=sys.stderr)
+    print(f"%Warning: Precompilation failed, skipping: {compile_command.filename}",
+          file=sys.stderr)
     for error in errors:
         print(f"    {error}", file=sys.stderr)

     return ""
@@ -934,10 +879,8 @@

 # Compile and analyze inputs in a single process.
 def run_analysis(ccl: Iterable[CompileCommand], pccl: Iterable[CompileCommand],
-                 diagnostic_cb: Callable[[Diagnostic],
-                                         None], verilator_root: str):
-    (is_ignored_top_level, is_ignored_def,
-     is_ignored_call) = get_filter_funcs(verilator_root)
+                 diagnostic_cb: Callable[[Diagnostic], None], verilator_root: str):
+    (is_ignored_top_level, is_ignored_def, is_ignored_call) = get_filter_funcs(verilator_root)

     prefix = "verilator_clang_check_attributes_"
     with tempfile.TemporaryDirectory(prefix=prefix) as tmp_dir:
@@ -947,8 +890,8 @@ def run_analysis(ccl: Iterable[CompileCommand], pccl: Iterable[CompileCommand],
             if pch_file:
                 extra_args += ["-include-pch", pch_file]

-        cav = CallAnnotationsValidator(diagnostic_cb, is_ignored_top_level,
-                                       is_ignored_def, is_ignored_call)
+        cav = CallAnnotationsValidator(diagnostic_cb, is_ignored_top_level, is_ignored_def,
+                                       is_ignored_call)
         for compile_command in ccl:
             cav.compile_and_analyze_file(compile_command.filename,
                                          extra_args + compile_command.args,
@@ -963,12 +906,11 @@ class ParallelAnalysisProcess:

     @staticmethod
     def init_data(verilator_root: str, tmp_dir: str):
-        (is_ignored_top_level, is_ignored_def,
-         is_ignored_call) = get_filter_funcs(verilator_root)
+        (is_ignored_top_level, is_ignored_def, is_ignored_call) = get_filter_funcs(verilator_root)

         ParallelAnalysisProcess.cav = CallAnnotationsValidator(
-            ParallelAnalysisProcess._diagnostic_handler, is_ignored_top_level,
-            is_ignored_def, is_ignored_call)
+            ParallelAnalysisProcess._diagnostic_handler, is_ignored_top_level, is_ignored_def,
+            is_ignored_call)
         ParallelAnalysisProcess.tmp_dir = tmp_dir

     @staticmethod
@@ -979,31 +921,27 @@
     def analyze_cpp_file(compile_command: CompileCommand) -> set[Diagnostic]:
         ParallelAnalysisProcess.diags = set()
         assert ParallelAnalysisProcess.cav is not None
-        ParallelAnalysisProcess.cav.compile_and_analyze_file(
-            compile_command.filename, compile_command.args,
-            compile_command.directory)
+        ParallelAnalysisProcess.cav.compile_and_analyze_file(compile_command.filename,
+                                                             compile_command.args,
+                                                             compile_command.directory)
         return ParallelAnalysisProcess.diags

     @staticmethod
     def precompile_header(compile_command: CompileCommand) -> str:
-        return precompile_header(compile_command,
-                                 ParallelAnalysisProcess.tmp_dir)
+        return precompile_header(compile_command, ParallelAnalysisProcess.tmp_dir)


 # Compile and analyze inputs in multiple processes.
-def run_parallel_analysis(ccl: Iterable[CompileCommand],
-                          pccl: Iterable[CompileCommand],
-                          diagnostic_cb: Callable[[Diagnostic], None],
-                          jobs_count: int, verilator_root: str):
+def run_parallel_analysis(ccl: Iterable[CompileCommand], pccl: Iterable[CompileCommand],
+                          diagnostic_cb: Callable[[Diagnostic],
+                                                  None], jobs_count: int, verilator_root: str):
     prefix = "verilator_clang_check_attributes_"
     with tempfile.TemporaryDirectory(prefix=prefix) as tmp_dir:
-        with multiprocessing.Pool(
-                processes=jobs_count,
-                initializer=ParallelAnalysisProcess.init_data,
-                initargs=[verilator_root, tmp_dir]) as pool:
+        with multiprocessing.Pool(processes=jobs_count,
+                                  initializer=ParallelAnalysisProcess.init_data,
+                                  initargs=[verilator_root, tmp_dir]) as pool:
             extra_args = []
-            for pch_file in pool.imap_unordered(
-                    ParallelAnalysisProcess.precompile_header, pccl):
+            for pch_file in pool.imap_unordered(ParallelAnalysisProcess.precompile_header, pccl):
                 if pch_file:
                     extra_args += ["-include-pch", pch_file]
@ -1011,8 +949,7 @@ def run_parallel_analysis(ccl: Iterable[CompileCommand],
for compile_command in ccl: for compile_command in ccl:
compile_command.args = compile_command.args + extra_args compile_command.args = compile_command.args + extra_args
for diags in pool.imap_unordered( for diags in pool.imap_unordered(ParallelAnalysisProcess.analyze_cpp_file, ccl, 1):
ParallelAnalysisProcess.analyze_cpp_file, ccl, 1):
for diag in diags: for diag in diags:
diagnostic_cb(diag) diagnostic_cb(diag)
@ -1057,8 +994,7 @@ class TopDownSummaryPrinter():
row_groups: dict[str, list[list[str]]] = {} row_groups: dict[str, list[list[str]]] = {}
column_widths = [0, 0] column_widths = [0, 0]
for func in sorted(self._funcs.values(), for func in sorted(self._funcs.values(),
key=lambda func: key=lambda func: (func.info.file, func.info.line, func.info.usr)):
(func.info.file, func.info.line, func.info.usr)):
func_info = func.info func_info = func.info
relfile = os.path.relpath(func_info.file, root_dir) relfile = os.path.relpath(func_info.file, root_dir)
@ -1082,31 +1018,23 @@ class TopDownSummaryPrinter():
if func.mismatch: if func.mismatch:
mrelfile = os.path.relpath(func.mismatch.file, root_dir) mrelfile = os.path.relpath(func.mismatch.file, root_dir)
row_group.append([ row_group.append([
f"{mrelfile}:{func.mismatch.line}:", f"{mrelfile}:{func.mismatch.line}:", f"[{func.mismatch.annotations}]",
f"[{func.mismatch.annotations}]",
func.mismatch.name + " [declaration]" func.mismatch.name + " [declaration]"
]) ])
row_group.append([ row_group.append(
f"{relfile}:{func_info.line}:", f"[{func_info.annotations}]", [f"{relfile}:{func_info.line}:", f"[{func_info.annotations}]", func_info.name])
func_info.name
])
for callee in sorted(func.calees, for callee in sorted(func.calees, key=lambda func: (func.file, func.line, func.usr)):
key=lambda func:
(func.file, func.line, func.usr)):
crelfile = os.path.relpath(callee.file, root_dir) crelfile = os.path.relpath(callee.file, root_dir)
row_group.append([ row_group.append(
f"{crelfile}:{callee.line}:", f"[{callee.annotations}]", [f"{crelfile}:{callee.line}:", f"[{callee.annotations}]", " " + callee.name])
" " + callee.name
])
row_groups[name] = row_group row_groups[name] = row_group
for row in row_group: for row in row_group:
for row_id, value in enumerate(row[0:-1]): for row_id, value in enumerate(row[0:-1]):
column_widths[row_id] = max(column_widths[row_id], column_widths[row_id] = max(column_widths[row_id], len(value))
len(value))
for label, rows in sorted(row_groups.items(), key=lambda kv: kv[0]): for label, rows in sorted(row_groups.items(), key=lambda kv: kv[0]):
self.begin_group(label) self.begin_group(label)
@ -1114,21 +1042,17 @@ class TopDownSummaryPrinter():
print(f"{row[0]:<{column_widths[0]}} " print(f"{row[0]:<{column_widths[0]}} "
f"{row[1]:<{column_widths[1]}} " f"{row[1]:<{column_widths[1]}} "
f"{row[2]}") f"{row[2]}")
print( print(f"Number of functions reported unsafe: {len(self._unsafe_in_safe)}")
f"Number of functions reported unsafe: {len(self._unsafe_in_safe)}"
)
def main(): def main():
default_verilator_root = os.path.abspath( default_verilator_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
os.path.join(os.path.dirname(__file__), ".."))
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
allow_abbrev=False, allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter, formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Check function annotations for correctness""", description="""Check function annotations for correctness""",
epilog= epilog="""Copyright 2022-2024 by Wilson Snyder. Verilator is free software;
"""Copyright 2022-2024 by Wilson Snyder. Verilator is free software;
you can redistribute it and/or modify it under the terms of either the GNU you can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Apache License 2.0. Lesser General Public License Version 3 or the Apache License 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Apache-2.0""") SPDX-License-Identifier: LGPL-3.0-only OR Apache-2.0""")
@ -1142,29 +1066,23 @@ def main():
type=int, type=int,
default=0, default=0,
help="Number of parallel jobs to use.") help="Number of parallel jobs to use.")
parser.add_argument( parser.add_argument("--compile-commands-dir",
"--compile-commands-dir", type=str,
type=str, default=None,
default=None, help="Path to directory containing compile_commands.json.")
help="Path to directory containing compile_commands.json.")
parser.add_argument("--cxxflags", parser.add_argument("--cxxflags",
type=str, type=str,
default=None, default=None,
help="Extra flags passed to clang++.") help="Extra flags passed to clang++.")
parser.add_argument( parser.add_argument("--compilation-root",
"--compilation-root",
type=str,
default=os.getcwd(),
help="Directory used as CWD when compiling source files.")
parser.add_argument(
"-c",
"--precompile",
action="append",
help="Header file to be precompiled and cached at the start.")
parser.add_argument("file",
type=str, type=str,
nargs="+", default=os.getcwd(),
help="Source file to analyze.") help="Directory used as CWD when compiling source files.")
parser.add_argument("-c",
"--precompile",
action="append",
help="Header file to be precompiled and cached at the start.")
parser.add_argument("file", type=str, nargs="+", help="Source file to analyze.")
cmdline = parser.parse_args() cmdline = parser.parse_args()
@ -1179,8 +1097,7 @@ def main():
compdb: Optional[CompilationDatabase] = None compdb: Optional[CompilationDatabase] = None
if cmdline.compile_commands_dir: if cmdline.compile_commands_dir:
compdb = CompilationDatabase.fromDirectory( compdb = CompilationDatabase.fromDirectory(cmdline.compile_commands_dir)
cmdline.compile_commands_dir)
if cmdline.cxxflags is not None: if cmdline.cxxflags is not None:
common_cxxflags = shlex.split(cmdline.cxxflags) common_cxxflags = shlex.split(cmdline.cxxflags)
@ -1230,8 +1147,7 @@ def main():
summary_printer.handle_diagnostic, verilator_root) summary_printer.handle_diagnostic, verilator_root)
else: else:
run_parallel_analysis(compile_commands_list, precompile_commands_list, run_parallel_analysis(compile_commands_list, precompile_commands_list,
summary_printer.handle_diagnostic, cmdline.jobs, summary_printer.handle_diagnostic, cmdline.jobs, verilator_root)
verilator_root)
summary_printer.print_summary(verilator_root) summary_printer.print_summary(verilator_root)
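Every hunk above is pure line re-wrapping; no statement changes. A minimal sketch of reproducing this kind of reflow with yapf, assuming (the style file is not part of this diff) a pep8-based configuration with a wider column limit of about 100:

    # Hedged sketch: the actual formatter settings used by this commit are assumptions here.
    from yapf.yapflib.yapf_api import FormatCode

    SRC = ("run_parallel_analysis(compile_commands_list, precompile_commands_list,\n"
           "                      summary_printer.handle_diagnostic, cmdline.jobs,\n"
           "                      verilator_root)\n")

    # style_config chosen to mimic the result seen in the hunks above
    formatted, changed = FormatCode(SRC, style_config={'based_on_style': 'pep8',
                                                       'column_limit': 100})
    print(formatted)  # wrapped arguments rejoin onto fewer, longer lines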

View File

@@ -58,15 +58,12 @@ def test():
         if not Args.scenarios or re.match('dist', Args.scenarios):
             run("make examples VERILATOR_NO_OPT_BUILD=1")
             run("make test_regress VERILATOR_NO_OPT_BUILD=1" +
-                (" SCENARIOS='" + Args.scenarios +
-                 "'" if Args.scenarios else "") +
-                (" DRIVER_HASHSET='--hashset=" + Args.hashset +
-                 "'" if Args.hashset else "") +
+                (" SCENARIOS='" + Args.scenarios + "'" if Args.scenarios else "") +
+                (" DRIVER_HASHSET='--hashset=" + Args.hashset + "'" if Args.hashset else "") +
                 ('' if Args.stop else ' || true'))
         else:
             for test in Args.tests:
-                if not os.path.exists(test) and os.path.exists(
-                        "test_regress/t/" + test):
+                if not os.path.exists(test) and os.path.exists("test_regress/t/" + test):
                     test = "test_regress/t/" + test
                 run(test)
         ci_fold_end()
@@ -78,8 +75,7 @@ def test():
         os.makedirs(cc_dir, exist_ok=True)
         os.makedirs(cc_dir + "/info", exist_ok=True)

-        with subprocess.Popen("find . -print | grep .gcda",
-                              shell=True,
+        with subprocess.Popen("find . -print | grep .gcda", shell=True,
                               stdout=subprocess.PIPE) as sp:
             datout = sp.stdout.read()
@@ -98,8 +94,7 @@ def test():
                     del dats[dat]
                     break

-        with subprocess.Popen("find . -print | grep .gcno",
-                              shell=True,
+        with subprocess.Popen("find . -print | grep .gcno", shell=True,
                               stdout=subprocess.PIPE) as sp:
             datout = sp.stdout.read()
@@ -116,8 +111,7 @@ def test():
                 if gbase in gcnos:
                     os.symlink(gcnos[gbase], gcno)
                 else:
-                    print("MISSING .gcno for a .gcda: " + gcno,
-                          file=sys.stderr)
+                    print("MISSING .gcno for a .gcda: " + gcno, file=sys.stderr)
         ci_fold_end()

     if Args.stage_enabled[5]:
@@ -142,8 +136,7 @@ def test():
     if Args.stage_enabled[11]:
         ci_fold_start("dirs")
         print("Stage 11: Cleanup paths")
-        cleanup_abs_paths_info(cc_dir, cc_dir + "/app_total.info",
-                               cc_dir + "/app_total.info")
+        cleanup_abs_paths_info(cc_dir, cc_dir + "/app_total.info", cc_dir + "/app_total.info")
         ci_fold_end()

     if Args.stage_enabled[12]:
@@ -164,17 +157,15 @@ def test():
             inc = "--include " + inc
         if exc != '':
             exc = "--exclude " + exc
-        run("cd " + cc_dir + " ; " + RealPath +
-            "/fastcov.py -C app_total.info " + inc + " " + exc +
-            " -x --lcov -o app_total_f.info")
+        run("cd " + cc_dir + " ; " + RealPath + "/fastcov.py -C app_total.info " + inc + " " +
+            exc + " -x --lcov -o app_total_f.info")
         ci_fold_end()

     if Args.stage_enabled[17]:
         ci_fold_start("report")
         print("Stage 17: Create HTML")
         run("cd " + cc_dir + " ; genhtml app_total_f.info --demangle-cpp" +
-            " --rc lcov_branch_coverage=1 --rc genhtml_hi_limit=100 --output-directory html"
-            )
+            " --rc lcov_branch_coverage=1 --rc genhtml_hi_limit=100 --output-directory html")
         ci_fold_end()

     if Args.stage_enabled[18]:
@@ -186,8 +177,7 @@ def test():
         # So, remove gcno files before calling codecov
         upload_dir = "nodist/obj_dir/upload"
         os.makedirs(upload_dir, exist_ok=True)
-        cmd = ("ci/codecov -v upload-process -Z" + " -f " + cc_dir +
-               "/app_total.info )")
+        cmd = ("ci/codecov -v upload-process -Z" + " -f " + cc_dir + "/app_total.info )")
         print("print: Not running:")
         print(" export CODECOV_TOKEN=<hidden>")
         print(" find . -name '*.gcno' -exec rm {} \\;")
@@ -198,9 +188,7 @@ def test():
     print("*-* All Finished *-*")
     print("")
     print("* See report in " + cc_dir + "/html/index.html")
-    print(
-        "* Remember to make distclean && ./configure before working on non-coverage"
-    )
+    print("* Remember to make distclean && ./configure before working on non-coverage")


 def clone_sources(cc_dir):
@@ -209,9 +197,8 @@ def clone_sources(cc_dir):
     for globf in Source_Globs:
         for infile in glob.glob(globf):
             if re.match(r'^/', infile):
-                sys.exit(
-                    "%Error: source globs should be relative not absolute filenames, "
-                    + infile)
+                sys.exit("%Error: source globs should be relative not absolute filenames, " +
+                         infile)
             outfile = cc_dir + "/" + infile
             outpath = re.sub(r'/[^/]*$', '', outfile, count=1)
             os.makedirs(outpath, exist_ok=True)
@@ -252,10 +239,8 @@ def clone_sources(cc_dir):
                     done = True
                 ofh.write(line + "\n")

-    print("Number of source lines automatically LCOV_EXCL_LINE'ed: %d" %
-          excluded_lines)
-    print("Number of source lines automatically LCOV_EXCL_BR_LINE'ed: %d" %
-          excluded_br_lines)
+    print("Number of source lines automatically LCOV_EXCL_LINE'ed: %d" % excluded_lines)
+    print("Number of source lines automatically LCOV_EXCL_BR_LINE'ed: %d" % excluded_br_lines)


 def cleanup_abs_paths_info(cc_dir, infile, outfile):
@@ -263,20 +248,11 @@ def cleanup_abs_paths_info(cc_dir, infile, outfile):
     with open(infile, "r", encoding="utf8") as fh:
         for line in fh:
             if re.search(r'^SF:', line) and not re.search(r'^SF:/usr/', line):
-                line = re.sub(os.environ['VERILATOR_ROOT'] + '/',
-                              '',
-                              line,
-                              count=1)
+                line = re.sub(os.environ['VERILATOR_ROOT'] + '/', '', line, count=1)
                 line = re.sub(cc_dir + '/', '', line, count=1)
-                line = re.sub(r'^SF:.*?/include/',
-                              'SF:include/',
-                              line,
-                              count=1)
+                line = re.sub(r'^SF:.*?/include/', 'SF:include/', line, count=1)
                 line = re.sub(r'^SF:.*?/src/', 'SF:src/', line, count=1)
-                line = re.sub(r'^SF:.*?/test_regress/',
-                              'SF:test_regress/',
-                              line,
-                              count=1)
+                line = re.sub(r'^SF:.*?/test_regress/', 'SF:test_regress/', line, count=1)
                 line = re.sub(r'obj_dbg/verilog.y$', 'verilog.y', line)
                 # print("Remaining SF: "+line)
             lines.append(line)
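The cleanup_abs_paths_info hunk keeps the same rewrite chain, only re-wrapped. A small worked example of the SF: rewrites above (the path is invented for illustration):

    import re

    line = "SF:/home/user/verilator/src/V3Ast.cpp\n"
    # Strip $VERILATOR_ROOT/ once; the later SF: patterns act as fallbacks
    line = re.sub("/home/user/verilator" + '/', '', line, count=1)
    assert line == "SF:src/V3Ast.cpp\n"
    line = re.sub(r'^SF:.*?/src/', 'SF:src/', line, count=1)  # no-op: already relative
    print(line, end="")  # SF:src/V3Ast.cpp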
@@ -358,15 +334,13 @@ def ci_fold_end():
 parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
-    description=
-    """code_coverage builds Verilator with C++ coverage support and runs
+    description="""code_coverage builds Verilator with C++ coverage support and runs
 tests with coverage enabled. This will rebuild the current object
 files. Run as:
    cd $VERILATOR_ROOT
    nodist/code_coverage""",
-    epilog=
-    """Copyright 2019-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2019-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.
@@ -380,19 +354,16 @@ parser.add_argument('--hashset',
 parser.add_argument('--scenarios',
                     action='store',
                     help='pass test scenarios onto driver.pl test harness')
-parser.add_argument(
-    '--stages',
-    '--stage',
-    action='store',
-    help='runs a specific stage or range of stages (see the script)')
+parser.add_argument('--stages',
+                    '--stage',
+                    action='store',
+                    help='runs a specific stage or range of stages (see the script)')
 parser.add_argument(
     '--tests',
     '--test',
     action='append',
     default=[],
-    help=
-    'Instead of normal regressions, run the specified test(s), may be used multiple times'
-)
+    help='Instead of normal regressions, run the specified test(s), may be used multiple times')
 parser.add_argument('--no-stop',
                     dest='stop',
                     action='store_false',

View File

@@ -20,8 +20,7 @@ def dotread(filename):
     vnum = 0

     vertex_re = re.compile(r'^\t([a-zA-Z0-9_]+)\t(.*)$')
-    edge_re = re.compile(
-        r'^\t([a-zA-Z0-9_]+)\s+->\s+([a-zA-Z0-9_]+)\s*(.*)$')
+    edge_re = re.compile(r'^\t([a-zA-Z0-9_]+)\s+->\s+([a-zA-Z0-9_]+)\s*(.*)$')

     for line in fh:
         vertex_match = re.search(vertex_re, line)
@@ -29,11 +28,7 @@ def dotread(filename):
         if vertex_match:
             if vertex_match.group(1) != 'nTITLE':
                 header = False
-                Vertexes.append({
-                    'num': vnum,
-                    'line': line,
-                    'name': vertex_match.group(1)
-                })
+                Vertexes.append({'num': vnum, 'line': line, 'name': vertex_match.group(1)})
                 vnum += 1
         elif edge_match:
             fromv = edge_match.group(1)
@@ -65,14 +60,13 @@ def cwrite(filename):
     fh.write("void V3GraphTestImport::dotImport() {\n")
     fh.write("    auto* gp = &m_graph;\n")
     for ver in sorted(Vertexes, key=lambda ver: ver['num']):
-        fh.write(
-            "    auto* %s = new V3GraphTestVertex{gp, \"%s\"}; if (%s) {}\n"
-            % (ver['name'], ver['name'], ver['name']))
+        fh.write("    auto* %s = new V3GraphTestVertex{gp, \"%s\"}; if (%s) {}\n" %
+                 (ver['name'], ver['name'], ver['name']))
     fh.write("\n")
     for edge in Edges:
-        fh.write("    new V3GraphEdge{gp, %s, %s, %s, %s};\n" %
-                 (edge['from'], edge['to'], edge['weight'],
-                  "true" if edge['cutable'] else "false"))
+        fh.write(
+            "    new V3GraphEdge{gp, %s, %s, %s, %s};\n" %
+            (edge['from'], edge['to'], edge['weight'], "true" if edge['cutable'] else "false"))
     fh.write("}\n")
@@ -82,22 +76,17 @@
 parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
-    description=
-    """dot_importer takes a graphvis .dot file and converts into .cpp file.
+    description="""dot_importer takes a graphvis .dot file and converts into .cpp file.
 This x.cpp file is then manually included in V3GraphTest.cpp to verify
 various xsub-algorithms.""",
-    epilog=
-    """Copyright 2005-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2005-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")

-parser.add_argument('--debug',
-                    action='store_const',
-                    const=9,
-                    help='enable debug')
+parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
 parser.add_argument('filename', help='input .dot filename to process')

 Args = parser.parse_args()
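The two dotread regexes are unchanged by the reflow; a quick check on invented .dot lines of the form Verilator writes (tab-led vertices, "a -> b" edges):

    import re

    vertex_re = re.compile(r'^\t([a-zA-Z0-9_]+)\t(.*)$')
    edge_re = re.compile(r'^\t([a-zA-Z0-9_]+)\s+->\s+([a-zA-Z0-9_]+)\s*(.*)$')

    print(re.search(vertex_re, "\tv0\t[label=\"x\"]").group(1))  # v0
    m = re.search(edge_re, "\tv0 -> v1 [weight=2]")
    print(m.group(1), m.group(2))  # v0 v1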

View File

@@ -19,9 +19,12 @@ from argparse import ArgumentParser
 def interesting(s):
-    if 'assert' in s: return 1
-    if 'Assert' in s: return 1
-    if 'Aborted' in s: return 1
+    if 'assert' in s:
+        return 1
+    if 'Assert' in s:
+        return 1
+    if 'Aborted' in s:
+        return 1
     if 'terminate' in s:
         if 'unterminated' in s:
             return 0
@@ -41,8 +44,7 @@ def main():
     for infile in glob(args.dir + '/*'):
         # Input filenames are known not to contain spaces or other unusual
         # characters, therefore this works.
-        status, output = getstatusoutput('../../bin/verilator_bin --cc ' +
-                                         infile)
+        status, output = getstatusoutput('../../bin/verilator_bin --cc ' + infile)
         if interesting(output):
             print(infile)
             print(status)
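The interesting() hunk only expands one-line ifs to the one-statement-per-line form; the predicate is unchanged. A quick check on invented simulator outputs, using only the logic visible above:

    print(interesting("Aborted (core dumped)"))        # 1: matches 'Aborted'
    print(interesting("%Error: unterminated string"))  # 0: 'terminate' hit suppressed by 'unterminated'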

View File

@@ -51,9 +51,11 @@ def write_file(filename, contents):
 def parse_line(s):
     # str->maybe str
-    if len(s) == 0: return None
+    if len(s) == 0:
+        return None
     part = skip_while(lambda x: x != '"', s)
-    if len(part) == 0 or part[0] != '"': return None
+    if len(part) == 0 or part[0] != '"':
+        return None
     literal_part = take_while(lambda x: x != '"', part[1:])
     return ''.join(filter(lambda x: x != '\\', literal_part))
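parse_line depends on skip_while and take_while defined elsewhere in the same script (not shown in this diff). A self-contained sketch, with assumed definitions of those helpers, showing the quoted-literal extraction:

    # skip_while/take_while are assumptions; the real ones live elsewhere in this file.
    from itertools import dropwhile, takewhile

    def skip_while(pred, seq):
        return list(dropwhile(pred, seq))

    def take_while(pred, seq):
        return list(takewhile(pred, seq))

    def parse_line(s):
        # str->maybe str
        if len(s) == 0:
            return None
        part = skip_while(lambda x: x != '"', s)
        if len(part) == 0 or part[0] != '"':
            return None
        literal_part = take_while(lambda x: x != '"', part[1:])
        return ''.join(filter(lambda x: x != '\\', literal_part))

    print(parse_line('foo("hello world")'))  # hello world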

View File

@@ -41,8 +41,7 @@ def test():
     run("/bin/mkdir -p " + prefix)
     run("cd " + blddir + " && make install")
     run("test -e " + prefix + "/share/man/man1/verilator.1")
-    run("test -e " + prefix +
-        "/share/verilator/examples/make_tracing_c/Makefile")
+    run("test -e " + prefix + "/share/verilator/examples/make_tracing_c/Makefile")
     run("test -e " + prefix + "/share/verilator/include/verilated.h")
     run("test -e " + prefix + "/bin/verilator")
     run("test -e " + prefix + "/bin/verilator_bin")
@@ -58,10 +57,8 @@ def test():
     run("/bin/mkdir -p " + odir)
     path = prefix + "/bin" + ":" + prefix + "/share/bin"
     write_verilog(odir)
-    run("cd " + odir + " && PATH=" + path +
-        ":$PATH verilator --cc top.v --exe sim_main.cpp")
-    run("cd " + odir + "/obj_dir && PATH=" + path +
-        ":$PATH make -f Vtop.mk")
+    run("cd " + odir + " && PATH=" + path + ":$PATH verilator --cc top.v --exe sim_main.cpp")
+    run("cd " + odir + "/obj_dir && PATH=" + path + ":$PATH make -f Vtop.mk")
     run("cd " + odir + " && PATH=" + path + ":$PATH obj_dir/Vtop")

     # run a test using exact path to binary
@@ -72,8 +69,7 @@ def test():
     run("/bin/mkdir -p " + odir)
     write_verilog(odir)
     bin1 = prefix + "/bin"
-    run("cd " + odir + " && " + bin1 +
-        "/verilator --cc top.v --exe sim_main.cpp")
+    run("cd " + odir + " && " + bin1 + "/verilator --cc top.v --exe sim_main.cpp")
     run("cd " + odir + "/obj_dir && make -f Vtop.mk")
     run("cd " + odir + "/obj_dir && ./Vtop")
@@ -88,8 +84,7 @@ def write_verilog(odir):

 def cleanenv():
     for var in os.environ:
-        if var in ('VERILATOR_ROOT', 'VERILATOR_INCLUDE',
-                   'VERILATOR_NO_OPT_BUILD'):
+        if var in ('VERILATOR_ROOT', 'VERILATOR_INCLUDE', 'VERILATOR_NO_OPT_BUILD'):
             print("unset %s # Was '%s'" % (var, os.environ[var]))
             del os.environ[var]
@@ -113,21 +108,16 @@ def run(command):
 parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
-    description=
-    """install_test performs several make-and-install iterations to verify the
+    description="""install_test performs several make-and-install iterations to verify the
 Verilator kit. It isn't part of the normal "make test" due to the number
 of builds required.""",
-    epilog=
-    """Copyright 2009-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2009-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")

-parser.add_argument('--debug',
-                    action='store_const',
-                    const=9,
-                    help='enable debug')
+parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
 parser.add_argument('--stage',
                     type=int,
                     default=0,
View File

@@ -101,8 +101,7 @@ parser = argparse.ArgumentParser(
     allow_abbrev=False,
     prog="log_changes",
     description="Create example entries for 'Changes' from parsing 'git log'",
-    epilog=
-    """Copyright 2019-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2019-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

View File

@@ -47,9 +47,12 @@ class AstseeCmd(gdb.Command):
     def _null_check(self, old, new):
         err = ""
-        if old == "<nullptr>\n": err += "old == <nullptr>\n"
-        if new == "<nullptr>\n": err += "new == <nullptr>"
-        if err: raise gdb.GdbError(err.strip("\n"))
+        if old == "<nullptr>\n":
+            err += "old == <nullptr>\n"
+        if new == "<nullptr>\n":
+            err += "new == <nullptr>"
+        if err:
+            raise gdb.GdbError(err.strip("\n"))

     def invoke(self, arg_str, from_tty):
         from astsee import verilator_cli as astsee  # pylint: disable=import-error,import-outside-toplevel
@@ -58,8 +61,8 @@ class AstseeCmd(gdb.Command):
         # We hack `astsee_verilator`'s arg parser to find arguments with nodes
         # After finding them, we replace them with proper files
         astsee_args = astsee.parser.parse_args(gdb.string_to_argv(arg_str))
-        with _vltgdb_tmpfile() as oldfile, _vltgdb_tmpfile(
-        ) as newfile, _vltgdb_tmpfile() as metafile:
+        with _vltgdb_tmpfile() as oldfile, _vltgdb_tmpfile() as newfile, _vltgdb_tmpfile(
+        ) as metafile:
             if astsee_args.file:
                 _vltgdb_fwrite(oldfile, _vltgdb_get_dump(astsee_args.file))
                 astsee_args.file = oldfile.name
@@ -68,8 +71,7 @@ class AstseeCmd(gdb.Command):
                 astsee_args.newfile = newfile.name
             if astsee_args.meta is None:
                 # pass
-                gdb.execute(
-                    f'call AstNode::dumpJsonMetaFileGdb("{metafile.name}")')
+                gdb.execute(f'call AstNode::dumpJsonMetaFileGdb("{metafile.name}")')
                 astsee_args.meta = metafile.name
             try:
                 astsee.main(astsee_args)

View File

@@ -89,8 +89,7 @@ class Node:
         assert not self.isCompleted
         # Sort sub-classes and convert to tuple, which marks completion
         self._subClasses = tuple(
-            sorted(self._subClasses,
-                   key=lambda _: (bool(_._subClasses), _.name)))  # pylint: disable=protected-access
+            sorted(self._subClasses, key=lambda _: (bool(_._subClasses), _.name)))  # pylint: disable=protected-access
         self._ordIdx = ordIdx
         ordIdx = ordIdx + 1
@@ -128,8 +127,7 @@ class Node:
         if self.superClass is None:
             self._allSuperClasses = ()
         else:
-            self._allSuperClasses = self.superClass.allSuperClasses + (
-                self.superClass, )
+            self._allSuperClasses = self.superClass.allSuperClasses + (self.superClass, )
         return self._allSuperClasses

     @property
@@ -139,9 +137,8 @@ class Node:
         if self.isLeaf:
             self._allSubClasses = ()
         else:
-            self._allSubClasses = self.subClasses + tuple(
-                _ for subClass in self.subClasses
-                for _ in subClass.allSubClasses)
+            self._allSubClasses = self.subClasses + tuple(_ for subClass in self.subClasses
+                                                          for _ in subClass.allSubClasses)
         return self._allSubClasses

     @property
@@ -210,8 +207,7 @@ class Cpt:
         self._exec_syms = {}

     def error(self, txt):
-        sys.exit("%%Error: %s:%d: %s" %
-                 (self.in_filename, self.in_linenum, txt))
+        sys.exit("%%Error: %s:%d: %s" % (self.in_filename, self.in_linenum, txt))

     def print(self, txt):
         self.out_lines.append(txt)
@@ -220,8 +216,7 @@ class Cpt:
         self.out_lines.append(func)

     def _output_line(self):
-        self.print("#line " + str(self.out_linenum + 2) + " \"" +
-                   self.out_filename + "\"\n")
+        self.print("#line " + str(self.out_linenum + 2) + " \"" + self.out_filename + "\"\n")

     def process(self, in_filename, out_filename):
         self.in_filename = in_filename
@@ -234,8 +229,7 @@ class Cpt:
         for line in fhi:
             ln += 1
             if not didln:
-                self.print("#line " + str(ln) + " \"" + self.in_filename +
-                           "\"\n")
+                self.print("#line " + str(ln) + " \"" + self.in_filename + "\"\n")
                 didln = True
             match = re.match(r'^\s+(TREE.*)$', line)
             if match:
@@ -245,8 +239,8 @@ class Cpt:
                 self.output_func(lambda self: self._output_line())
                 self.tree_line(func)
                 didln = False
-            elif not re.match(r'^\s*(#define|/[/\*])\s*TREE',
-                              line) and re.search(r'\s+TREE', line):
+            elif not re.match(r'^\s*(#define|/[/\*])\s*TREE', line) and re.search(
+                    r'\s+TREE', line):
                 self.error("Unknown astgen line: " + line)
             else:
                 self.print(line)
@@ -275,8 +269,7 @@ class Cpt:
             #       1    2       3          4
             r'TREEOP(1?)([ACSV]?)\s*\(\s*\"([^\"]*)\"\s*,\s*\"([^\"]*)\"\s*\)',
             func)
-        match_skip = re.search(r'TREE_SKIP_VISIT\s*\(\s*\"([^\"]*)\"\s*\)',
-                               func)
+        match_skip = re.search(r'TREE_SKIP_VISIT\s*\(\s*\"([^\"]*)\"\s*\)', func)

         if match:
             order = match.group(1)
@@ -314,20 +307,18 @@ class Cpt:
             if re.match(r'^\$([a-zA-Z0-9]+)$', subnode):
                 continue  # "$lhs" is just a comment that this op has a lhs
             subnodeif = subnode
-            subnodeif = re.sub(
-                r'\$([a-zA-Z0-9]+)\.cast([A-Z][A-Za-z0-9]+)$',
-                r'VN_IS(nodep->\1(),\2)', subnodeif)
-            subnodeif = re.sub(r'\$([a-zA-Z0-9]+)\.([a-zA-Z0-9]+)$',
-                               r'nodep->\1()->\2()', subnodeif)
+            subnodeif = re.sub(r'\$([a-zA-Z0-9]+)\.cast([A-Z][A-Za-z0-9]+)$',
+                               r'VN_IS(nodep->\1(),\2)', subnodeif)
+            subnodeif = re.sub(r'\$([a-zA-Z0-9]+)\.([a-zA-Z0-9]+)$', r'nodep->\1()->\2()',
+                               subnodeif)
             subnodeif = self.add_nodep(subnodeif)
             if mif != "" and subnodeif != "":
                 mif += " && "
             mif += subnodeif

         exec_func = self.treeop_exec_func(to)
-        exec_func = re.sub(
-            r'([-()a-zA-Z0-9_>]+)->cast([A-Z][A-Za-z0-9]+)\(\)',
-            r'VN_CAST(\1,\2)', exec_func)
+        exec_func = re.sub(r'([-()a-zA-Z0-9_>]+)->cast([A-Z][A-Za-z0-9]+)\(\)',
+                           r'VN_CAST(\1,\2)', exec_func)

         if typen not in self.treeop:
             self.treeop[typen] = []
@@ -431,14 +422,12 @@ class Cpt:
             self._exec_nsyms = 0
             self._exec_syms_recurse(aref)

-            for sym in sorted(self._exec_syms.keys(),
-                              key=lambda val: self._exec_syms[val]):
+            for sym in sorted(self._exec_syms.keys(), key=lambda val: self._exec_syms[val]):
                 argnp = self._exec_syms[sym]
                 arg = self.add_nodep(sym)
                 out += "AstNodeExpr* " + argnp + " = " + arg + "->unlinkFrBack();\n"

-            out += "AstNodeExpr* newp = " + self._exec_new_recurse(
-                aref) + ";\n"
+            out += "AstNodeExpr* newp = " + self._exec_new_recurse(aref) + ";\n"
             out += "nodep->replaceWith(newp);"
             out += "VL_DO_DANGLING(nodep->deleteTree(), nodep);"
         elif func == "NEVER":
@@ -454,19 +443,15 @@ class Cpt:
         self.tree_base()

     def tree_match(self):
-        self.print(
-            "    // TREEOP functions, each return true if they matched & transformed\n"
-        )
+        self.print("    // TREEOP functions, each return true if they matched & transformed\n")
         for base in sorted(self.treeop.keys()):
             for typefunc in self.treeop[base]:
                 self.print("    // Generated by astgen\n")
-                self.print("    bool " + typefunc['match_func'] + "(Ast" +
-                           base + "* nodep) {\n")
+                self.print("    bool " + typefunc['match_func'] + "(Ast" + base + "* nodep) {\n")
                 self.print("\t// " + typefunc['comment'] + "\n")
                 self.print("\tif (" + typefunc['match_if'] + ") {\n")
-                self.print("\t    UINFO(" + str(typefunc['uinfo_level']) +
-                           ", cvtToHex(nodep)" + " << \" " +
-                           typefunc['uinfo'] + "\\n\");\n")
+                self.print("\t    UINFO(" + str(typefunc['uinfo_level']) + ", cvtToHex(nodep)" +
+                           " << \" " + typefunc['uinfo'] + "\\n\");\n")
                 self.print("\t    " + typefunc['exec_func'] + "\n")
                 self.print("\t    return true;\n")
                 self.print("\t}\n")
@@ -475,9 +460,7 @@ class Cpt:
     def tree_base(self):
         self.print("    // TREEOP visitors, call each base type's match\n")
-        self.print(
-            "    // Bottom class up, as more simple transforms are generally better\n"
-        )
+        self.print("    // Bottom class up, as more simple transforms are generally better\n")
         for node in AstNodeList:
             out_for_type_sc = []
             out_for_type = []
@@ -488,15 +471,11 @@ class Cpt:
             if base not in self.treeop:
                 continue
             for typefunc in self.treeop[base]:
-                lines = [
-                    "        if (" + typefunc['match_func'] +
-                    "(nodep)) return;\n"
-                ]
+                lines = ["        if (" + typefunc['match_func'] + "(nodep)) return;\n"]
                 if typefunc['short_circuit']:  # short-circuit match fn
                     out_for_type_sc.extend(lines)
                 else:  # Standard match fn
-                    if typefunc[
-                            'order']:  # TREEOP1's go in front of others
+                    if typefunc['order']:  # TREEOP1's go in front of others
                         out_for_type = lines + out_for_type
                     else:
                         out_for_type.extend(lines)
@@ -509,32 +488,26 @@ class Cpt:
             # For types without short-circuits, we just use iterateChildren, which
             # saves one comparison.
             if len(out_for_type_sc) > 0:  # Short-circuited types
-                self.print(
-                    "    // Generated by astgen with short-circuiting\n" +
-                    "    void visit(Ast" + node.name +
-                    "* nodep) override {\n" +
-                    "        iterateAndNextNull(nodep->{op1}());\n".format(
-                        op1=node.getOp(1)[0]) + "".join(out_for_type_sc))
+                self.print("    // Generated by astgen with short-circuiting\n" +
+                           "    void visit(Ast" + node.name + "* nodep) override {\n" +
+                           "        iterateAndNextNull(nodep->{op1}());\n".format(
+                               op1=node.getOp(1)[0]) + "".join(out_for_type_sc))
                 if out_for_type[0]:
                     self.print(
-                        "        iterateAndNextNull(nodep->{op2}());\n".format(
-                            op2=node.getOp(2)[0]))
+                        "        iterateAndNextNull(nodep->{op2}());\n".format(op2=node.getOp(2)[0]))
                     if node.isSubClassOf(AstNodes["NodeTriop"]):
-                        self.print(
-                            "        iterateAndNextNull(nodep->{op3}());\n".
-                            format(op3=node.getOp(3)[0]))
+                        self.print("        iterateAndNextNull(nodep->{op3}());\n".format(
+                            op3=node.getOp(3)[0]))
                 self.print("".join(out_for_type) + "    }\n")
             elif len(out_for_type) > 0:  # Other types with something to print
                 skip = node.name in self.tree_skip_visit
                 gen = "Gen" if skip else ""
                 virtual = "virtual " if skip else ""
                 override = "" if skip else " override"
-                self.print(
-                    "    // Generated by astgen\n" + "    " + virtual +
-                    "void visit" + gen + "(Ast" + node.name + "* nodep)" +
-                    override + " {\n" +
-                    ("" if skip else "        iterateChildren(nodep);\n") +
-                    ''.join(out_for_type) + "    }\n")
+                self.print("    // Generated by astgen\n" + "    " + virtual + "void visit" + gen +
+                           "(Ast" + node.name + "* nodep)" + override + " {\n" +
+                           ("" if skip else "        iterateChildren(nodep);\n") +
+                           ''.join(out_for_type) + "    }\n")


 ######################################################################
@@ -565,8 +538,7 @@ def read_types(filename, Nodes, prefix):
     def error(lineno, message):
         nonlocal hasErrors
-        print(filename + ":" + str(lineno) + ": %Error: " + message,
-              file=sys.stderr)
+        print(filename + ":" + str(lineno) + ": %Error: " + message, file=sys.stderr)
         hasErrors = True

     node = None
@@ -579,8 +551,7 @@ def read_types(filename, Nodes, prefix):
         if not hasAstgenMembers:
             error(
                 node.lineno,
-                "'{p}{n}' does not contain 'ASTGEN_MEMBERS_{p}{n};'".format(
-                    p=prefix, n=node.name))
+                "'{p}{n}' does not contain 'ASTGEN_MEMBERS_{p}{n};'".format(p=prefix, n=node.name))
         hasAstgenMembers = False

     with open(filename, "r", encoding="utf8") as fh:
@@ -598,8 +569,7 @@ def read_types(filename, Nodes, prefix):
                 classn = re.sub(r'^' + prefix, '', classn)
                 supern = re.sub(r'^' + prefix, '', supern)
                 if not supern:
-                    sys.exit("%Error: '{p}{c}' has no super-class".format(
-                        p=prefix, c=classn))
+                    sys.exit("%Error: '{p}{c}' has no super-class".format(p=prefix, c=classn))
                 checkFinishedNode(node)
                 superClass = Nodes[supern]
                 node = Node(classn, superClass, filename, lineno)
@@ -608,8 +578,7 @@ def read_types(filename, Nodes, prefix):
             if not node:
                 continue

-            if re.match(r'^\s*ASTGEN_MEMBERS_' + prefix + node.name + ';',
-                        line):
+            if re.match(r'^\s*ASTGEN_MEMBERS_' + prefix + node.name + ';', line):
                 hasAstgenMembers = True

             if prefix != "Ast":
@@ -631,36 +600,29 @@ def read_types(filename, Nodes, prefix):
                     ident = ident.strip()
                     if not sep or not re.match(r'^\w+$', ident):
                         error(
-                            lineno, "Malformed '@astgen " + what +
-                            "' directive (expecting '" + what +
-                            " := <identifier> : <type>': " + decl)
+                            lineno, "Malformed '@astgen " + what + "' directive (expecting '" +
+                            what + " := <identifier> : <type>': " + decl)
                     else:
                         kind = parseOpType(kind)
                         if not kind:
                             error(
                                 lineno, "Bad type for '@astgen " + what +
-                                "' (expecting Ast*, Optional[Ast*], or List[Ast*]):"
-                                + decl)
+                                "' (expecting Ast*, Optional[Ast*], or List[Ast*]):" + decl)
                         elif node.getOp(n) is not None:
-                            error(
-                                lineno, "Already defined " + what + " for " +
-                                node.name)
+                            error(lineno, "Already defined " + what + " for " + node.name)
                         else:
                             node.addOp(n, ident, *kind)
-                elif what in ("alias op1", "alias op2", "alias op3",
-                              "alias op4"):
+                elif what in ("alias op1", "alias op2", "alias op3", "alias op4"):
                     n = int(what[-1])
                     ident = rest.strip()
                     if not re.match(r'^\w+$', ident):
                         error(
-                            lineno, "Malformed '@astgen " + what +
-                            "' directive (expecting '" + what +
-                            " := <identifier>': " + decl)
+                            lineno, "Malformed '@astgen " + what + "' directive (expecting '" +
+                            what + " := <identifier>': " + decl)
                     else:
                         op = node.getOp(n)
                         if op is None:
-                            error(lineno,
-                                  "Aliased op" + str(n) + " is not defined")
+                            error(lineno, "Aliased op" + str(n) + " is not defined")
                         else:
                             node.addOp(n, ident, *op[1:])
                 elif what == "ptr":
@@ -670,8 +632,7 @@ def read_types(filename, Nodes, prefix):
                     if not kind:
                         error(
                             lineno, "Bad type for '@astgen " + what +
-                            "' (expecting Ast*, Optional[Ast*], or List[Ast*]):"
-                            + decl)
+                            "' (expecting Ast*, Optional[Ast*], or List[Ast*]):" + decl)
                     if not re.match(r'^m_(\w+)$', ident):
                         error(
                             lineno, "Malformed '@astgen ptr'"
@@ -680,20 +641,15 @@ def read_types(filename, Nodes, prefix):
                         node.addPtr(ident, *kind)
                 else:
                     error(
-                        lineno,
-                        "Malformed @astgen what (expecting 'op1'..'op4'," +
+                        lineno, "Malformed @astgen what (expecting 'op1'..'op4'," +
                         " 'alias op1'.., 'ptr'): " + what)
             else:
                 line = re.sub(r'//.*$', '', line)
                 if re.match(r'.*[Oo]p[1-9].*', line):
-                    error(lineno,
-                          "Use generated accessors to access op<N> operands")
-                if re.match(
-                        r'^\s*Ast[A-Z][A-Za-z0-9_]+\s*\*(\s*const)?\s+m_[A-Za-z0-9_]+\s*;',
-                        line):
-                    error(lineno,
-                          "Use '@astgen ptr' for Ast pointer members: " + line)
+                    error(lineno, "Use generated accessors to access op<N> operands")
+                if re.match(r'^\s*Ast[A-Z][A-Za-z0-9_]+\s*\*(\s*const)?\s+m_[A-Za-z0-9_]+\s*;', line):
+                    error(lineno, "Use '@astgen ptr' for Ast pointer members: " + line)

     checkFinishedNode(node)
     if hasErrors:
@@ -707,33 +663,24 @@ def check_types(sortedTypes, prefix, abstractPrefix):
     for node in sortedTypes:
         if re.match(r'^' + abstractPrefix, node.name):
             if node.isLeaf:
-                sys.exit(
-                    "%Error: Final {b} subclasses must not be named {b}*: {p}{n}"
-                    .format(b=baseClass, p=prefix, n=node.name))
+                sys.exit("%Error: Final {b} subclasses must not be named {b}*: {p}{n}".format(
+                    b=baseClass, p=prefix, n=node.name))
         else:
             if not node.isLeaf:
-                sys.exit(
-                    "%Error: Non-final {b} subclasses must be named {b}*: {p}{n}"
-                    .format(b=baseClass, p=prefix, n=node.name))
+                sys.exit("%Error: Non-final {b} subclasses must be named {b}*: {p}{n}".format(
+                    b=baseClass, p=prefix, n=node.name))

     # Check ordering of node definitions
     hasOrderingError = False
-    files = tuple(
-        sorted(set(_.file for _ in sortedTypes if _.file is not None)))
+    files = tuple(sorted(set(_.file for _ in sortedTypes if _.file is not None)))
     for file in files:
         nodes = tuple(filter(lambda _, f=file: _.file == f, sortedTypes))
         expectOrder = tuple(sorted(nodes, key=lambda _: (_.isLeaf, _.ordIdx)))
         actualOrder = tuple(sorted(nodes, key=lambda _: _.lineno))
-        expect = {
-            node: pred
-            for pred, node in zip((None, ) + expectOrder[:-1], expectOrder)
-        }
-        actual = {
-            node: pred
-            for pred, node in zip((None, ) + actualOrder[:-1], actualOrder)
-        }
+        expect = {node: pred for pred, node in zip((None, ) + expectOrder[:-1], expectOrder)}
+        actual = {node: pred for pred, node in zip((None, ) + actualOrder[:-1], actualOrder)}
         for node in nodes:
             if expect[node] != actual[node]:
                 hasOrderingError = True
@@ -749,8 +696,7 @@ def check_types(sortedTypes, prefix, abstractPrefix):
                       file=sys.stderr)

     if hasOrderingError:
-        sys.exit(
-            "%Error: Stopping due to out of order definitions listed above")
+        sys.exit("%Error: Stopping due to out of order definitions listed above")

 def read_stages(filename):
@@ -783,8 +729,7 @@ def read_refs(filename):
             if ref not in ClassRefs:
                 ClassRefs[ref] = {'newed': {}, 'used': {}}
             ClassRefs[ref]['used'][basename] = 1
-        for match in re.finditer(
-                r'(VN_IS|VN_AS|VN_CAST)\([^.]+, ([A-Za-z0-9_]+)', line):
+        for match in re.finditer(r'(VN_IS|VN_AS|VN_CAST)\([^.]+, ([A-Za-z0-9_]+)', line):
             ref = "Ast" + match.group(2)
             if ref not in ClassRefs:
                 ClassRefs[ref] = {'newed': {}, 'used': {}}
@@ -796,9 +741,7 @@ def open_file(filename):
         if re.search(r'\.txt$', filename):
             fh.write("// Generated by astgen\n")
         else:
-            fh.write(
-                '// Generated by astgen // -*- mode: C++; c-file-style: "cc-mode" -*-'
-                + "\n")
+            fh.write('// Generated by astgen // -*- mode: C++; c-file-style: "cc-mode" -*-' + "\n")
         return fh
@@ -808,9 +751,7 @@ def open_file(filename):
 def write_report(filename):
     with open_file(filename) as fh:

-        fh.write(
-            "Processing stages (approximate, based on order in Verilator.cpp):\n"
-        )
+        fh.write("Processing stages (approximate, based on order in Verilator.cpp):\n")
         for classn in sorted(Stages.keys(), key=lambda val: Stages[val]):
             fh.write("  " + classn + "\n")
@@ -831,14 +772,12 @@ def write_report(filename):
                 refs = ClassRefs["Ast" + node.name]
                 fh.write("    newed: ")
                 for stage in sorted(refs['newed'].keys(),
-                                    key=lambda val: Stages[val]
-                                    if (val in Stages) else -1):
+                                    key=lambda val: Stages[val] if (val in Stages) else -1):
                     fh.write(stage + "  ")
                 fh.write("\n")
                 fh.write("    used: ")
                 for stage in sorted(refs['used'].keys(),
-                                    key=lambda val: Stages[val]
-                                    if (val in Stages) else -1):
+                                    key=lambda val: Stages[val] if (val in Stages) else -1):
                     fh.write(stage + "  ")
                 fh.write("\n")
             fh.write("\n")
@@ -852,8 +791,7 @@ def write_report(filename):
 def write_forward_class_decls(prefix, nodeList):
     with open_file("V3{p}__gen_forward_class_decls.h".format(p=prefix)) as fh:
         for node in nodeList:
-            fh.write("class {p}{n:<17} // ".format(p=prefix,
-                                                   n=node.name + ";"))
+            fh.write("class {p}{n:<17} // ".format(p=prefix, n=node.name + ";"))
             for superClass in node.allSuperClasses:
                 fh.write("{p}{n:<12} ".format(p=prefix, n=superClass.name))
             fh.write("\n")
@@ -863,8 +801,7 @@ def write_visitor_decls(prefix, nodeList):
     with open_file("V3{p}__gen_visitor_decls.h".format(p=prefix)) as fh:
         for node in nodeList:
             if not node.isRoot:
-                fh.write("virtual void visit({p}{n}*);\n".format(p=prefix,
-                                                                 n=node.name))
+                fh.write("virtual void visit({p}{n}*);\n".format(p=prefix, n=node.name))


 def write_visitor_defns(prefix, nodeList, visitor):
@@ -873,13 +810,8 @@ def write_visitor_defns(prefix, nodeList, visitor):
         for node in nodeList:
             base = node.superClass
             if base is not None:
-                fh.write(
-                    "void {c}::visit({p}{n}* {v}) {{ visit(static_cast<{p}{b}*>({v})); }}\n"
-                    .format(c=visitor,
-                            p=prefix,
-                            n=node.name,
-                            b=base.name,
-                            v=variable))
+                fh.write("void {c}::visit({p}{n}* {v}) {{ visit(static_cast<{p}{b}*>({v})); }}\n".
+                         format(c=visitor, p=prefix, n=node.name, b=base.name, v=variable))


 def write_type_enum(prefix, nodeList):
@@ -887,27 +819,21 @@ def write_type_enum(prefix, nodeList):
     with open_file("V3{p}__gen_type_enum.h".format(p=prefix)) as fh:
         fh.write("    enum en : uint16_t {\n")
-        for node in sorted(filter(lambda _: _.isLeaf, nodeList),
-                           key=lambda _: _.typeId):
-            fh.write("        at{t} = {n},\n".format(t=node.name,
-                                                     n=node.typeId))
+        for node in sorted(filter(lambda _: _.isLeaf, nodeList), key=lambda _: _.typeId):
+            fh.write("        at{t} = {n},\n".format(t=node.name, n=node.typeId))
         fh.write("        _ENUM_END = {n}\n".format(n=root.typeIdMax + 1))
         fh.write("    };\n")
         fh.write("    enum bounds : uint16_t {\n")
-        for node in sorted(filter(lambda _: not _.isLeaf, nodeList),
-                           key=lambda _: _.typeIdMin):
-            fh.write("        first{t} = {n},\n".format(t=node.name,
-                                                        n=node.typeIdMin))
-            fh.write("        last{t} = {n},\n".format(t=node.name,
-                                                       n=node.typeIdMax))
+        for node in sorted(filter(lambda _: not _.isLeaf, nodeList), key=lambda _: _.typeIdMin):
+            fh.write("        first{t} = {n},\n".format(t=node.name, n=node.typeIdMin))
+            fh.write("        last{t} = {n},\n".format(t=node.name, n=node.typeIdMax))
         fh.write("        _BOUNDS_END\n")
         fh.write("    };\n")
         fh.write("    const char* ascii() const VL_MT_SAFE {\n")
         fh.write("        static const char* const names[_ENUM_END + 1] = {\n")
-        for node in sorted(filter(lambda _: _.isLeaf, nodeList),
-                           key=lambda _: _.typeId):
+        for node in sorted(filter(lambda _: _.isLeaf, nodeList), key=lambda _: _.typeId):
             fh.write('            "{T}",\n'.format(T=node.name.upper()))
         fh.write("            \"_ENUM_END\"\n")
         fh.write("        };\n")
@@ -928,8 +854,8 @@ def write_type_tests(prefix, nodeList):
             enum = "VDfgType"
         for node in nodeList:
             fh.write(
-                "template<> inline bool {b}::privateTypeTest<{p}{n}>(const {b}* {v}) {{ "
-                .format(b=base, p=prefix, n=node.name, v=variable))
+                "template<> inline bool {b}::privateTypeTest<{p}{n}>(const {b}* {v}) {{ ".format(
+                    b=base, p=prefix, n=node.name, v=variable))
             if node.isRoot:
                 fh.write("return true;")
             elif not node.isLeaf:
@@ -937,8 +863,9 @@ def write_type_tests(prefix, nodeList):
                     "return static_cast<int>({v}->type()) >= static_cast<int>({e}::first{t}) && static_cast<int>({v}->type()) <= static_cast<int>({e}::last{t});"
                     .format(v=variable, e=enum, t=node.name))
             else:
-                fh.write("return {v}->type() == {e}::at{t};".format(
-                    v=variable, e=enum, t=node.name))
+                fh.write("return {v}->type() == {e}::at{t};".format(v=variable,
+                                                                    e=enum,
+                                                                    t=node.name))
             fh.write(" }\n")
@ -949,8 +876,7 @@ def write_type_tests(prefix, nodeList):
def write_ast_type_info(filename): def write_ast_type_info(filename):
with open_file(filename) as fh: with open_file(filename) as fh:
for node in sorted(filter(lambda _: _.isLeaf, AstNodeList), for node in sorted(filter(lambda _: _.isLeaf, AstNodeList), key=lambda _: _.typeId):
key=lambda _: _.typeId):
opTypeList = [] opTypeList = []
opNameList = [] opNameList = []
for n in range(1, 5): for n in range(1, 5):
@ -968,12 +894,11 @@ def write_ast_type_info(filename):
opTypeList.append('OP_LIST') opTypeList.append('OP_LIST')
opNameList.append(name) opNameList.append(name)
# opTypeStr = ', '.join(opTypeList) # opTypeStr = ', '.join(opTypeList)
opTypeStr = ', '.join( opTypeStr = ', '.join(['VNTypeInfo::{0}'.format(s) for s in opTypeList])
['VNTypeInfo::{0}'.format(s) for s in opTypeList])
opNameStr = ', '.join(['"{0}"'.format(s) for s in opNameList]) opNameStr = ', '.join(['"{0}"'.format(s) for s in opNameList])
fh.write( fh.write(
' {{ "Ast{name}", {{{opTypeStr}}}, {{{opNameStr}}}, sizeof(Ast{name}) }},\n' ' {{ "Ast{name}", {{{opTypeStr}}}, {{{opNameStr}}}, sizeof(Ast{name}) }},\n'.
.format( format(
name=node.name, name=node.name,
opTypeStr=opTypeStr, opTypeStr=opTypeStr,
opNameStr=opNameStr, opNameStr=opNameStr,
@ -984,23 +909,19 @@ def write_ast_impl(filename):
with open_file(filename) as fh: with open_file(filename) as fh:
def emitBlock(pattern, **fmt): def emitBlock(pattern, **fmt):
fh.write( fh.write(textwrap.indent(textwrap.dedent(pattern), " ").format(**fmt))
textwrap.indent(textwrap.dedent(pattern),
" ").format(**fmt))
for node in AstNodeList: for node in AstNodeList:
if node.name == "Node": if node.name == "Node":
continue continue
emitBlock("const char* Ast{t}::brokenGen() const {{\n", emitBlock("const char* Ast{t}::brokenGen() const {{\n", t=node.name)
t=node.name)
if node.superClass.name != 'Node': if node.superClass.name != 'Node':
emitBlock(" BROKEN_BASE_RTN(Ast{base}::brokenGen());\n", emitBlock(" BROKEN_BASE_RTN(Ast{base}::brokenGen());\n",
base=node.superClass.name) base=node.superClass.name)
for ptr in node.ptrs: for ptr in node.ptrs:
if ptr['monad'] == 'Optional': if ptr['monad'] == 'Optional':
emitBlock( emitBlock(" BROKEN_RTN(m_{name} && !m_{name}->brokeExists());\n",
" BROKEN_RTN(m_{name} && !m_{name}->brokeExists());\n", name=ptr['name'])
name=ptr['name'])
else: else:
emitBlock(" BROKEN_RTN(!m_{name});\n" + emitBlock(" BROKEN_RTN(!m_{name});\n" +
" BROKEN_RTN(!m_{name}->brokeExists());\n", " BROKEN_RTN(!m_{name}->brokeExists());\n",
@ -1010,8 +931,7 @@ def write_ast_impl(filename)
             emitBlock("void Ast{t}::cloneRelinkGen() {{\n", t=node.name)
             if node.superClass.name != 'Node':
-                emitBlock("    Ast{base}::cloneRelinkGen();\n",
-                          base=node.superClass.name)
+                emitBlock("    Ast{base}::cloneRelinkGen();\n", base=node.superClass.name)
             for ptr in node.ptrs:
                 emitBlock(
                     "    if (m_{name} && m_{name}->clonep()) m_{name} = m_{name}->clonep();\n",
@ -1020,14 +940,11 @@ def write_ast_impl(filename)
             emitBlock("}}\n")
-            emitBlock("void Ast{t}::dumpJsonGen(std::ostream& str) const {{\n",
-                      t=node.name)
+            emitBlock("void Ast{t}::dumpJsonGen(std::ostream& str) const {{\n", t=node.name)
             if node.superClass.name != 'Node':
-                emitBlock("    Ast{base}::dumpJson(str);\n",
-                          base=node.superClass.name)
+                emitBlock("    Ast{base}::dumpJson(str);\n", base=node.superClass.name)
             for ptr in node.ptrs:
-                emitBlock("    dumpJsonPtr(str, \"{name}\", m_{name});\n",
-                          name=ptr['name'])
+                emitBlock("    dumpJsonPtr(str, \"{name}\", m_{name});\n", name=ptr['name'])
             emitBlock("}}\n")
             emitBlock(
@ -1038,9 +955,7 @@ def write_ast_impl(filename)
                 if op is None:
                     continue
                 name, _, _ = op
-                emitBlock(
-                    "    dumpNodeListJson(str, {name}(), \"{name}\", indent);\n",
-                    name=name)
+                emitBlock("    dumpNodeListJson(str, {name}(), \"{name}\", indent);\n", name=name)
             emitBlock("}}\n")
@ -1059,9 +974,7 @@ def write_ast_macros(filename)
             if not any_ptr:
                 fh.write("private: \\\n")
                 any_ptr = True
-            emitBlock("Ast{kind}* m_{name} = nullptr;",
-                      name=ptr['name'],
-                      kind=ptr['kind'])
+            emitBlock("Ast{kind}* m_{name} = nullptr;", name=ptr['name'], kind=ptr['kind'])
         if any_ptr:
             fh.write("public: \\\n")
         # TODO pointer accessors
@ -1108,8 +1021,8 @@ def write_ast_macros(filename)
             if not op:
                 continue
             name, monad, kind = op
-            retrieve = ("VN_DBG_AS(op{n}p(), {kind})" if kind != "Node"
-                        else "op{n}p()").format(n=n, kind=kind)
+            retrieve = ("VN_DBG_AS(op{n}p(), {kind})"
+                        if kind != "Node" else "op{n}p()").format(n=n, kind=kind)
             superOp = node.superClass.getOp(n)
             superName = None
             if superOp:
@ -1126,8 +1039,7 @@ def write_ast_macros(filename)
                           n=n,
                           retrieve=retrieve)
                 if superOp:
-                    hiddenMethods.append("add" + superName[0].upper() +
-                                         superName[1:])
+                    hiddenMethods.append("add" + superName[0].upper() + superName[1:])
             elif monad == "Optional":
                 emitBlock('''\
                 Ast{kind}* {name}() const VL_MT_STABLE {{ return {retrieve}; }}
@ -1150,27 +1062,24 @@ def write_ast_macros(filename)
         if hiddenMethods:
             fh.write("private: \\\n")
             for method in hiddenMethods:
-                fh.write("    using Ast{sup}::{method}; \\\n".format(
-                    sup=node.superClass.name, method=method))
+                fh.write("    using Ast{sup}::{method}; \\\n".format(sup=node.superClass.name,
+                                                                     method=method))
             fh.write("public: \\\n")
-        fh.write(
-            "    static_assert(true, \"\")\n")  # Swallowing the semicolon
+        fh.write("    static_assert(true, \"\")\n")  # Swallowing the semicolon
         # Only care about leaf classes for the rest
         if node.isLeaf:
             fh.write(
-                "#define ASTGEN_SUPER_{t}(...) Ast{b}(VNType::at{t}, __VA_ARGS__)\n"
-                .format(t=node.name, b=node.superClass.name))
+                "#define ASTGEN_SUPER_{t}(...) Ast{b}(VNType::at{t}, __VA_ARGS__)\n".format(
                    t=node.name, b=node.superClass.name))
         fh.write("\n")
 def write_ast_yystype(filename):
     with open_file(filename) as fh:
         for node in AstNodeList:
-            fh.write("Ast{t}* {m}p;\n".format(t=node.name,
-                                              m=node.name[0].lower() +
-                                              node.name[1:]))
+            fh.write("Ast{t}* {m}p;\n".format(t=node.name, m=node.name[0].lower() + node.name[1:]))
 ################################################################################
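
write_ast_yystype, shown above, gives the bison YYSTYPE union one pointer member per AST class, naming the member by lower-casing the first character of the class name. A tiny illustration of the naming rule (the class names are samples):

    # Tiny illustration of the YYSTYPE member-naming rule (sample class names).
    for name in ("Always", "NodeExpr", "Var"):
        member = name[0].lower() + name[1:] + "p"
        print("Ast{t}* {m};".format(t=name, m=member))
    # Prints e.g.: AstAlways* alwaysp;
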
@ -1207,8 +1116,7 @@ def write_dfg_macros(filename)
                   name=name,
                   n=n - 1)
-        operandNames = tuple(
-            node.getOp(n)[0] for n in range(1, node.arity + 1))
+        operandNames = tuple(node.getOp(n)[0] for n in range(1, node.arity + 1))
         if operandNames:
             emitBlock('''\
             const std::string srcName(size_t idx) const override {{
@ -1217,10 +1125,8 @@ def write_dfg_macros(filename)
             }}
             ''',
                       a=node.arity,
-                      ns=", ".join(
-                          map(lambda _: '"' + _ + '"', operandNames)))
-        fh.write(
-            "    static_assert(true, \"\")\n")  # Swallowing the semicolon
+                      ns=", ".join(map(lambda _: '"' + _ + '"', operandNames)))
+        fh.write("    static_assert(true, \"\")\n")  # Swallowing the semicolon
 def write_dfg_auto_classes(filename):
@ -1254,11 +1160,8 @@ def write_dfg_ast_to_dfg(filename)
             if (node.file is not None) or (not node.isLeaf):
                 continue
-            fh.write(
-                "void visit(Ast{t}* nodep) override {{\n".format(t=node.name))
-            fh.write(
-                '    UASSERT_OBJ(!nodep->user1p(), nodep, "Already has Dfg vertex");\n\n'
-            )
+            fh.write("void visit(Ast{t}* nodep) override {{\n".format(t=node.name))
+            fh.write('    UASSERT_OBJ(!nodep->user1p(), nodep, "Already has Dfg vertex");\n\n')
             fh.write("    if (unhandled(nodep)) return;\n\n")
             for i in range(node.arity):
                 fh.write("    iterate(nodep->op{j}p());\n".format(j=i + 1))
@ -1267,9 +1170,8 @@ def write_dfg_ast_to_dfg(filename)
                     '    UASSERT_OBJ(nodep->op{j}p()->user1p(), nodep, "Child {j} missing Dfg vertex");\n'
                     .format(j=i + 1))
             fh.write("\n")
-            fh.write(
-                "    Dfg{t}* const vtxp = makeVertex<Dfg{t}>(nodep, *m_dfgp);\n"
-                .format(t=node.name))
+            fh.write("    Dfg{t}* const vtxp = makeVertex<Dfg{t}>(nodep, *m_dfgp);\n".format(
+                t=node.name))
             fh.write("    if (!vtxp) {\n")
             fh.write("        m_foundUnhandled = true;\n")
             fh.write("        ++m_ctx.m_nonRepNode;\n")
@ -1277,8 +1179,8 @@ def write_dfg_ast_to_dfg(filename)
             fh.write("    }\n\n")
             for i in range(node.arity):
                 fh.write(
-                    "    vtxp->relinkSource<{i}>(nodep->op{j}p()->user1u().to<DfgVertex*>());\n"
-                    .format(i=i, j=i + 1))
+                    "    vtxp->relinkSource<{i}>(nodep->op{j}p()->user1u().to<DfgVertex*>());\n".
+                    format(i=i, j=i + 1))
             fh.write("\n")
             fh.write("    m_uncommittedVertices.push_back(vtxp);\n")
             fh.write("    nodep->user1p(vtxp);\n")
@ -1292,14 +1194,12 @@ def write_dfg_dfg_to_ast(filename)
             if (node.file is not None) or (not node.isLeaf):
                 continue
-            fh.write(
-                "void visit(Dfg{t}* vtxp) override {{\n".format(t=node.name))
+            fh.write("void visit(Dfg{t}* vtxp) override {{\n".format(t=node.name))
             for i in range(node.arity):
                 fh.write(
                     '    AstNodeExpr* const op{j}p = convertDfgVertexToAstNodeExpr(vtxp->source<{i}>());\n'
                     .format(i=i, j=i + 1))
-            fh.write(
-                "    m_resultp = makeNode<Ast{t}>(vtxp".format(t=node.name))
+            fh.write("    m_resultp = makeNode<Ast{t}>(vtxp".format(t=node.name))
             for i in range(node.arity):
                 fh.write(", op{j}p".format(j=i + 1))
             fh.write(");\n")
@ -1313,8 +1213,7 @@ parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
     description="""Generate V3Ast headers to reduce C++ code duplication.""",
-    epilog=
-    """Copyright 2002-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.
@ -1322,15 +1221,9 @@ Version 2.0.
 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
 parser.add_argument('-I', action='store', help='source code include directory')
-parser.add_argument('--astdef',
-                    action='append',
-                    help='add AST definition file (relative to -I)')
-parser.add_argument('--dfgdef',
-                    action='append',
-                    help='add DFG definition file (relative to -I)')
-parser.add_argument('--classes',
-                    action='store_true',
-                    help='makes class declaration files')
+parser.add_argument('--astdef', action='append', help='add AST definition file (relative to -I)')
+parser.add_argument('--dfgdef', action='append', help='add DFG definition file (relative to -I)')
+parser.add_argument('--classes', action='store_true', help='makes class declaration files')
 parser.add_argument('--debug', action='store_true', help='enable debug')
 parser.add_argument('infiles', nargs='*', help='list of input .cpp filenames')
@ -1409,8 +1302,7 @@ for node in AstNodeList:
 # Compute derived properties over the whole DfgVertex hierarchy
 DfgVertices["Vertex"].complete()
-DfgVertexList = tuple(map(lambda _: DfgVertices[_],
-                          sorted(DfgVertices.keys())))
+DfgVertexList = tuple(map(lambda _: DfgVertices[_], sorted(DfgVertices.keys())))
 check_types(DfgVertexList, "Dfg", "Vertex")
@ -1457,8 +1349,7 @@ for cpt in Args.infiles:
     if not re.search(r'.cpp$', cpt):
         sys.exit("%Error: Expected argument to be .cpp file: " + cpt)
     cpt = re.sub(r'.cpp$', '', cpt)
-    Cpt().process(in_filename=Args.I + "/" + cpt + ".cpp",
-                  out_filename=cpt + "__gen.cpp")
+    Cpt().process(in_filename=Args.I + "/" + cpt + ".cpp", out_filename=cpt + "__gen.cpp")
 ######################################################################
 # Local Variables:
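
Every hunk in this file follows the same mechanical pattern: argument lists that the old settings wrapped at a narrow column are rejoined up to a wider limit. A minimal sketch of driving such a reformat from Python, assuming yapf is the formatter and a 100-column limit; neither the tool choice nor the exact style settings are stated in this diff:

    # A sketch only: assumes yapf and a 100-column limit, neither is stated in this diff.
    from yapf.yapflib.yapf_api import FormatCode

    source = ("parser.add_argument('--debug',\n"
              "                    action='store_true',\n"
              "                    help='enable debug')\n")
    formatted, changed = FormatCode(source,
                                    style_config='{based_on_style: pep8, column_limit: 100}')
    print(formatted, end="")
    # -> parser.add_argument('--debug', action='store_true', help='enable debug')
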

View File

@ -28,15 +28,13 @@ def process():
         + (" -d" if Args.definitions else "")  #
         + (" -k" if Args.token_table else "")  #
         + (" -v" if Args.verbose else "")  #
-        + (" --report=itemset --report=lookahead" if
-           (Args.verbose and supports_report) else "")
+        + (" --report=itemset --report=lookahead" if (Args.verbose and supports_report) else "")
         # Useful but slow:
         # (" -Wcounterexamples" if
         #  (Args.verbose and supports_counter_examples) else "")
         #
         # -p required for GLR parsers; they write to -p basename, not -o
-        + ((" -p " + Args.name_prefix) if Args.name_prefix else "") + " -b " +
-        tmp_prefix()  #
+        + ((" -p " + Args.name_prefix) if Args.name_prefix else "") + " -b " + tmp_prefix()  #
         + " -o " + tmp_prefix() + ".c"  #
         + " " + tmp_prefix() + ".y")
@ -44,11 +42,10 @@ def process():
     status = subprocess.call(command, shell=True)
     if status != 0:
         unlink_outputs()
-        sys.exit("bisonpre: %Error: " + Args.yacc + " version " +
-                 str(Bison_Version) + " run failed due to errors\n")
+        sys.exit("bisonpre: %Error: " + Args.yacc + " version " + str(Bison_Version) +
+                 " run failed due to errors\n")
-    clean_output(tmp_prefix() + ".output",
-                 output_prefix() + ".output", True, False)
+    clean_output(tmp_prefix() + ".output", output_prefix() + ".output", True, False)
     warning_check(output_prefix() + ".output")
     clean_output(tmp_prefix() + ".c", output_prefix() + ".c", False, True)
@ -88,9 +85,7 @@ def unlink_outputs():
 def bison_version_check():
-    with subprocess.Popen(Args.yacc + " --version",
-                          shell=True,
-                          stdout=subprocess.PIPE) as sp:
+    with subprocess.Popen(Args.yacc + " --version", shell=True, stdout=subprocess.PIPE) as sp:
         out = str(sp.stdout.read())
         match = re.search(r'([0-9]+\.[0-9]+)', out)
         if match:
@ -102,8 +97,7 @@ def bison_version_check():
             Bison_Version = v
             return
-    sys.exit("bisonpre: %Error: '" + Args.yacc +
-             "' is not installed, or not working\n")
+    sys.exit("bisonpre: %Error: '" + Args.yacc + "' is not installed, or not working\n")
 def clean_output(filename, outname, is_output, is_c):
@ -141,8 +135,7 @@ def clean_output(filename, outname, is_output, is_c):
     if is_c:
         token_values = {}
         for line in lines:
-            if re.search(r'enum\s+yytokentype',
-                         line) and not re.search(r';', line):
+            if re.search(r'enum\s+yytokentype', line) and not re.search(r';', line):
                 match = re.search(r'\b(\S+) = (\d+)', line)
                 if match:
                     token_values[match.group(2)] = match.group(1)
@ -151,8 +144,7 @@ def clean_output(filename, outname, is_output, is_c):
             if _enaline(line) and re.search(r'BISONPRE_TOKEN_NAMES', line):
                 out.append(line)
                 for tv in sorted(token_values.keys()):
-                    out.append("\tcase %d: return \"%s\";\n" %
-                               (tv, token_values[tv]))
+                    out.append("\tcase %d: return \"%s\";\n" % (tv, token_values[tv]))
                 continue
             out.append(line)
         lines = out
@ -165,8 +157,7 @@ def clean_output(filename, outname, is_output, is_c):
             # Fix bison 2.3 and GCC 4.2.1
             line = re.sub(r'\(YY_\("', '(YY_((char*)"', line)
             # Fix bison 2.3 glr-parser warning about yyerrorloc.YYTYPE::yydummy uninit
-            line = re.sub(r'(YYLTYPE yyerrloc;)',
-                          r'\1 yyerrloc.yydummy=0;/*bisonpre*/', line)
+            line = re.sub(r'(YYLTYPE yyerrloc;)', r'\1 yyerrloc.yydummy=0;/*bisonpre*/', line)
             # Fix bison 3.6.1 unexpected nested-comment
             line = re.sub(r'/\* "/\*.*\*/" \*/', '', line)
             fh.write(line)
@ -177,11 +168,8 @@ def warning_check(filename):
         linenum = 0
         for line in fh:
             linenum += 1
-            if re.search(r'(conflicts|warning:|^useless)',
-                         line,
-                         flags=re.IGNORECASE):
-                sys.exit("%Error: " + filename + ":" + str(linenum) + ": " +
-                         line + "\n")
+            if re.search(r'(conflicts|warning:|^useless)', line, flags=re.IGNORECASE):
+                sys.exit("%Error: " + filename + ":" + str(linenum) + ": " + line + "\n")
 ######################################################################
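
warning_check, reformatted above, promotes any conflict or warning in the bison .output report to a hard error so grammar regressions cannot slip through. The same scan as a standalone function (the report name in the commented call is hypothetical):

    import re
    import sys

    def warning_check(filename):
        # Fail hard if bison reported conflicts or warnings in its .output report
        with open(filename, "r", encoding="utf8") as fh:
            for linenum, line in enumerate(fh, start=1):
                if re.search(r'(conflicts|warning:|^useless)', line, flags=re.IGNORECASE):
                    sys.exit("%Error: " + filename + ":" + str(linenum) + ": " + line)

    # warning_check("verilog.output")  # hypothetical report name
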
@ -214,12 +202,9 @@ def clean_input(filename, outname):
         # ^/ to prevent comments from matching
         if re.match(r'^[a-zA-Z0-9_<>]+:[^/]*[a-zA-Z]', line):
             sys.exit("%Error: " + filename + ":" + str(lineno) +
-                     ": Move text on rule line to next line: " + line +
-                     "\n")
+                     ": Move text on rule line to next line: " + line + "\n")
-        matcha = re.match(r'^([a-zA-Z0-9_]+)<(\S*)>(.*)$',
-                          line,
-                          flags=re.DOTALL)
+        matcha = re.match(r'^([a-zA-Z0-9_]+)<(\S*)>(.*)$', line, flags=re.DOTALL)
         matchb = re.match(r'^([a-zA-Z0-9_]+):', line)
         if re.match(r'^%%', line):
@ -231,8 +216,8 @@ def clean_input(filename, outname):
             dtype = matcha.group(2)
             line = name + matcha.group(3)
             if name in Rules:
-                sys.exit("%Error: " + filename + ":" + str(lineno) +
-                         ": Redeclaring '" + name + "': " + line)
+                sys.exit("%Error: " + filename + ":" + str(lineno) + ": Redeclaring '" + name +
+                         "': " + line)
             if dtype not in types:
                 types[dtype] = {}
             types[dtype][name] = 1
@ -250,8 +235,8 @@ def clean_input(filename, outname):
             name = matchb.group(1)
             if name not in ('public', 'private'):
                 if name in Rules:
-                    sys.exit("%Error: " + filename + ":" + str(lineno) +
-                             ": Redeclaring '" + name + "': " + line)
+                    sys.exit("%Error: " + filename + ":" + str(lineno) + ": Redeclaring '" +
+                             name + "': " + line)
                 Rules[name] = {
                     'name': name,
                     'type': "",
@ -268,8 +253,7 @@ def clean_input(filename, outname):
         cline = re.sub(r'//.*$', '\n', line)
         if re.match(r'^\s*;', cline):
             if not last_rule:
-                sys.exit("%Error: " + filename + ":" + str(lineno) +
-                         ": Stray semicolon\n")
+                sys.exit("%Error: " + filename + ":" + str(lineno) + ": Stray semicolon\n")
             last_rule = None
         elif last_rule:
             Rules[last_rule]['rules_and_productions'] += cline
@ -279,8 +263,8 @@ def clean_input(filename, outname):
             dtype = match.group(1)
             tok = match.group(2)
             if tok in tokens:
-                sys.exit("%Error: " + filename + ":" + str(lineno) +
-                         ": Redeclaring '" + tok + "': " + line)
+                sys.exit("%Error: " + filename + ":" + str(lineno) + ": Redeclaring '" + tok +
+                         "': " + line)
             tokens[tok] = dtype
         for tok in re.split(r'[^a-zA-Z0-9_]+', cline):
@ -299,17 +283,16 @@ def clean_input(filename, outname):
         lineno += 1
         if _enaline(line) and re.search(r'BISONPRE_VERSION', line):
             # 1 2 3 4
-            match = re.search(
-                r'BISONPRE_VERSION\((\S+)\s*,\s*((\S+)\s*,)?\s*([^\),]+)\)\s*$',
-                line)
+            match = re.search(r'BISONPRE_VERSION\((\S+)\s*,\s*((\S+)\s*,)?\s*([^\),]+)\)\s*$',
+                              line)
             if not match:
                 sys.exit("%Error: " + filename + ":" + str(lineno) +
                          ": Bad form of BISONPRE_VERSION: " + line)
             ver = match.group(1)
             ver_max = match.group(3)
             cmd = match.group(4)
-            if Bison_Version >= float(ver) and (
-                    not ver_max or Bison_Version <= float(ver_max)):
+            if Bison_Version >= float(ver) and (not ver_max
+                                                or Bison_Version <= float(ver_max)):
                 line = cmd + "\n"
             else:
                 line = "//NOP: " + line
@ -323,10 +306,9 @@ def clean_input(filename, outname):
     for line in linesin:
         lineno += 1
         if _enaline(line) and re.search(r'BISONPRE_NOT', line):
-            match = re.search(
-                r'(.*)BISONPRE_NOT\((\S+)\)\s*(\{[^}]+})\s*(.*)$',
-                line,
-                flags=re.DOTALL)
+            match = re.search(r'(.*)BISONPRE_NOT\((\S+)\)\s*(\{[^}]+})\s*(.*)$',
+                              line,
+                              flags=re.DOTALL)
             if not match:
                 sys.exit("%Error: " + filename + ":" + str(lineno) +
                          ": Bad form of BISONPRE_NOT: " + line)
@ -337,8 +319,7 @@ def clean_input(filename, outname):
             for etok in endtoks:
                 if etok not in tokens:
                     sys.exit("%Error: " + filename + ":" + str(lineno) +
-                             ": Can't find definition for token: " + etok +
-                             "\n")
+                             ": Can't find definition for token: " + etok + "\n")
             # Push it all onto one line to avoid error messages changing
             pipe = ""
             for tok in sorted(tokens.keys()):
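
BISONPRE_NOT, handled above, expands to an alternation over every declared token except the ones listed, so a rule can accept "any token but these". A toy sketch of that expansion; the token names and action are invented:

    # Toy sketch of the BISONPRE_NOT expansion; token names and action are invented.
    tokens = {"yaAND": "str", "yaOR": "str", "yaXOR": "str"}
    excluded = {"yaXOR"}
    action = "{ $$ = $1; }"
    pipe = " | ".join(tok + " " + action for tok in sorted(tokens) if tok not in excluded)
    print(pipe)  # -> yaAND { $$ = $1; } | yaOR { $$ = $1; }

As the comment in the hunk above notes, the real expansion is pushed onto one line so later error messages keep stable line numbers.
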
@ -397,10 +378,8 @@ def clean_input(filename, outname):
                 # Bison doesn't have a #line directive, so we need somewhere to insert into
                 line = re.sub(r'^\s*//.*$', '', line)
                 if not re.match(r'^\s*$', line):
-                    sys.exit(
-                        "%Error: " + filename + ":" + str(lineno) + ": Need " +
-                        str(needmore) +
-                        " more blank lines to keep line numbers constant\n")
+                    sys.exit("%Error: " + filename + ":" + str(lineno) + ": Need " +
+                             str(needmore) + " more blank lines to keep line numbers constant\n")
                 needmore -= 1
         else:
             lines.append(line)
@ -418,8 +397,8 @@ def _bisonpre_copy(text, lineno, depth):
                          text,
                          flags=re.DOTALL)
        if not match:
-            sys.exit("%Error: " + Filename + ":" + str(lineno) +
-                     ": Bad form of BISONPRE_NOT: " + text)
+            sys.exit("%Error: " + Filename + ":" + str(lineno) + ": Bad form of BISONPRE_NOT: " +
+                     text)
        text = match.group(1) + '{HERE}' + match.group(5)
        once = match.group(2)
        rule = match.group(3)
@ -448,8 +427,7 @@ def _bisonpre_copy(text, lineno, depth):
        insert = re.sub(left, right, insert)
        insert = re.sub(r'[ \t\n]+\n', "\n", insert)
-        insert = re.sub(r'\n', " ",
-                        insert)  # Optional - preserve line numbering
+        insert = re.sub(r'\n', " ", insert)  # Optional - preserve line numbering
        text = re.sub(r'{HERE}', insert, text)
        depth += 1
    return text
@ -465,8 +443,7 @@ def _enaline(line):
 parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
-    description=
-    """Bisonpre is a wrapper for the Bison YACC replacement. Input to Bison is
+    description="""Bisonpre is a wrapper for the Bison YACC replacement. Input to Bison is
 preprocessed with substitution as described below under EXTENSIONS. Output
 from Bison is checked for additional errors, and corrected to work around
 various compile warnings.""",
@ -522,35 +499,17 @@ parser.add_argument('--yacc',
                     help='name of the bison executable, defaults to "bison"')
 # Arguments passed through to bison
-parser.add_argument('-b',
-                    '--file-prefix',
-                    action='store',
-                    help='Passed to bison.')
-parser.add_argument('-d',
-                    '--definitions',
-                    action='store_true',
-                    help='Passed to bison.')
-parser.add_argument('-k',
-                    '--token-table',
-                    action='store_true',
-                    help='Passed to bison.')
+parser.add_argument('-b', '--file-prefix', action='store', help='Passed to bison.')
+parser.add_argument('-d', '--definitions', action='store_true', help='Passed to bison.')
+parser.add_argument('-k', '--token-table', action='store_true', help='Passed to bison.')
 parser.add_argument('-o',
                     '--output',
                     action='store',
                     required=True,
                     help='Passed to bison. Sets output file name')
-parser.add_argument('-p',
-                    '--name-prefix',
-                    action='store',
-                    help='Passed to bison.')
-parser.add_argument('-t',
-                    '--debug',
-                    action='store_true',
-                    help='Passed to bison.')
-parser.add_argument('-v',
-                    '--verbose',
-                    action='store_true',
-                    help='Passed to bison.')
+parser.add_argument('-p', '--name-prefix', action='store', help='Passed to bison.')
+parser.add_argument('-t', '--debug', action='store_true', help='Passed to bison.')
+parser.add_argument('-v', '--verbose', action='store_true', help='Passed to bison.')
 parser.add_argument('input', help='Passed to bison. Input grammar file.')

View File

@ -115,11 +115,9 @@ def _suppress(filename, linenum, eid):
         return True
     if eid == 'constParameter' and re.search(r'gtkwave/', filename):
         return True
-    if eid == 'ctuOneDefinitionRuleViolation' and re.search(
-            r'vltstd/', filename):
+    if eid == 'ctuOneDefinitionRuleViolation' and re.search(r'vltstd/', filename):
         return True
-    if eid == 'duplicateConditionalAssign' and re.search(
-            r'gtkwave/', filename):
+    if eid == 'duplicateConditionalAssign' and re.search(r'gtkwave/', filename):
         return True
     if eid == 'knownConditionTrueFalse' and re.search(r'gtkwave/', filename):
         return True
@ -151,8 +149,7 @@ def _suppress(filename, linenum, eid):
         return True
     if not os.path.exists(filename):
-        print("%Warning: " + filename + " does not exist, ignored",
-              file=sys.stderr)
+        print("%Warning: " + filename + " does not exist, ignored", file=sys.stderr)
         return False
     with open(filename, "r", encoding="utf8") as fh:
@ -160,13 +157,11 @@ def _suppress(filename, linenum, eid):
         for line in fh:
             lineno += 1
             if (lineno + 1) == linenum:
-                match = re.search(
-                    r'(cppcheck|cppcheck-has-bug|cppverilator)-suppress((\s+\S+)+)',
-                    line)
+                match = re.search(r'(cppcheck|cppcheck-has-bug|cppverilator)-suppress((\s+\S+)+)',
+                                  line)
                 if match:
                     for supid in match.group(2).split():
-                        if (supid == eid or (eid in SuppressMap
-                                             and supid == SuppressMap[eid])):
+                        if (supid == eid or (eid in SuppressMap and supid == SuppressMap[eid])):
                             return True
     return False
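
The matching above honors inline suppression comments placed on the source line directly above the one cppcheck reported, which is why it checks (lineno + 1) == linenum. A standalone demo of the comment matching; the C++ line is a made-up example:

    import re

    # The C++ source line here is a made-up example of an inline suppression.
    line = "    // cppcheck-suppress constParameter constVariablePointer"
    match = re.search(r'(cppcheck|cppcheck-has-bug|cppverilator)-suppress((\s+\S+)+)', line)
    if match:
        print(match.group(2).split())  # -> ['constParameter', 'constVariablePointer']
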
@ -182,8 +177,7 @@ filters out unnecessary warnings related to Verilator. Run as:
    cd $VERILATOR_ROOT
    make -k cppcheck""",
-    epilog=
-    """Copyright 2014-2024 by Wilson Snyder. This program is free software; you
+    epilog="""Copyright 2014-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.

View File

@ -16,23 +16,19 @@ import sys
 for line in sys.stdin:
     # Fix flex 2.6.1 warning
-    line = re.sub(
-        r'for \( i = 0; i < _yybytes_len; \+\+i \)',
-        r'for ( i = 0; (yy_size_t)(i) < (yy_size_t)(_yybytes_len); ++i )',
-        line)
+    line = re.sub(r'for \( i = 0; i < _yybytes_len; \+\+i \)',
+                  r'for ( i = 0; (yy_size_t)(i) < (yy_size_t)(_yybytes_len); ++i )', line)
     # Fix flex 2.6.0 warning
     line = re.sub(
         r'\(\(int\) \(\(yy_n_chars\) \+ number_to_move\) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size\)',
         r'((int) ((yy_n_chars) + number_to_move) > (int) YY_CURRENT_BUFFER_LVALUE->yy_buf_size)',
         line)
-    line = re.sub(r' number_to_move == YY_MORE_ADJ ',
-                  r' (int)number_to_move == (int)YY_MORE_ADJ ', line)
-    # Fix flex 2.5.4 namespace omission
-    line = re.sub(r'^class istream;',
-                  '#include <iostream>\nusing namespace std;\n', line)
-    # Fix flex 2.5.31 redefinition
-    line = re.sub(r'(\#define\s+yyFlexLexer\s+yyFlexLexer)', r'//flexfix: \1',
-                  line)
+    line = re.sub(r' number_to_move == YY_MORE_ADJ ', r' (int)number_to_move == (int)YY_MORE_ADJ ',
+                  line)
+    # Fix flex 2.5.4 namespace omission
+    line = re.sub(r'^class istream;', '#include <iostream>\nusing namespace std;\n', line)
+    # Fix flex 2.5.31 redefinition
+    line = re.sub(r'(\#define\s+yyFlexLexer\s+yyFlexLexer)', r'//flexfix: \1', line)
     # Fix flex 2.5.1 yytext_ptr undef
     line = re.sub(r'(\#undef\s+yytext_ptr)', r'//flexfix: \1', line)
     # Fix flex 2.5.4 and GCC 4.1.0 warn_unused_result
@ -41,8 +37,7 @@ for line in sys.stdin:
     line = re.sub(r'for \( n = 0; n < max_size && ',
                   r'for ( n = 0; ((size_t)n < (size_t)max_size) && ', line)
     # Fix flex 2.5.4 and GCC 4.0.2 under FLEX_DEBUG
-    line = re.sub(r'--accepting rule at line %d ',
-                  r'--accepting rule at line %ld ', line)
+    line = re.sub(r'--accepting rule at line %d ', r'--accepting rule at line %ld ', line)
     # Fix compiler warning filenames
     line = re.sub(r'(#line \d+ ".*)_pretmp', r'\1', line)
     # Fix 'register' storage class specifier is deprecated and incompatible with C++17
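
flexfix is a pure stdin-to-stdout filter over flex-generated C++: one regex patch per known flex or GCC quirk, applied line by line. A standalone demo of the filename fix above; the sample line imitates flex output for a hypothetical V3Lexer_pretmp.cpp:

    import re

    # Made-up sample of a flex-generated line, patched the same way as above.
    line = '#line 3010 "V3Lexer_pretmp.cpp"\n'
    line = re.sub(r'(#line \d+ ".*)_pretmp', r'\1', line)
    print(line, end="")  # -> #line 3010 "V3Lexer.cpp"
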

View File

@ -19,11 +19,9 @@ def read_keys(filename):
         if re.match(r'^\s*$', line):
             continue
         if re.search(r'^\s*VLCOVGEN_ITEM', line):
-            match = re.search(r'^\s*VLCOVGEN_ITEM *\( *"([^"]+)" *\)',
-                              line)
+            match = re.search(r'^\s*VLCOVGEN_ITEM *\( *"([^"]+)" *\)', line)
             if not match:
-                sys.exit("%Error: " + filename +
-                         ": vlcovgen misformed VLCOVGEN_ITEM line")
+                sys.exit("%Error: " + filename + ": vlcovgen misformed VLCOVGEN_ITEM line")
             code = "{" + match.group(1) + "}"
             data = eval(code)
             # pprint(data)
@ -59,9 +57,8 @@ def write_keys(filename):
                 deleting = True
                 out.append(line)
                 for keyref in sorted(Items, key=lambda a: a['name']):
-                    out.append(
-                        "    if (key == \"%s\") return VL_CIK_%s;\n" %
-                        (keyref['name'], keyref['name'].upper()))
+                    out.append("    if (key == \"%s\") return VL_CIK_%s;\n" %
+                               (keyref['name'], keyref['name'].upper()))
             elif re.search(r'VLCOVGEN_.*AUTO_EDIT_END', line):
                 deleting = False
                 out.append(line)
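
write_keys, shown above, regenerates everything between the VLCOVGEN AUTO_EDIT_BEGIN/END markers in place: old generated lines are dropped and fresh ones spliced in, so the surrounding file stays hand-editable. A minimal sketch of that marker pattern; marker names are shortened and the data invented:

    import re

    # Minimal sketch of marker-based regeneration (markers shortened, data invented).
    src = ("before\n"
           "// AUTO_EDIT_BEGIN\n"
           "stale generated line\n"
           "// AUTO_EDIT_END\n"
           "after\n")
    items = [{'name': 'line'}, {'name': 'toggle'}]
    out, deleting = [], False
    for line in src.splitlines(True):
        if re.search(r'AUTO_EDIT_BEGIN', line):
            deleting = True
            out.append(line)
            for keyref in sorted(items, key=lambda a: a['name']):
                out.append('    if (key == "%s") return VL_CIK_%s;\n' %
                           (keyref['name'], keyref['name'].upper()))
        elif re.search(r'AUTO_EDIT_END', line):
            deleting = False
            out.append(line)
        elif not deleting:
            out.append(line)
    print("".join(out), end="")
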
@ -80,19 +77,15 @@ def write_keys(filename):
 parser = argparse.ArgumentParser(
     allow_abbrev=False,
     formatter_class=argparse.RawDescriptionHelpFormatter,
-    description=
-    """Generate verilated_cov headers to reduce C++ code duplication.""",
-    epilog=
-    """Copyright 2002-2024 by Wilson Snyder. This program is free software; you
+    description="""Generate verilated_cov headers to reduce C++ code duplication.""",
+    epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
 can redistribute it and/or modify it under the terms of either the GNU
 Lesser General Public License Version 3 or the Perl Artistic License
 Version 2.0.
 SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
-parser.add_argument('--srcdir',
-                    action='store',
-                    help='directory containing Verilator sources')
+parser.add_argument('--srcdir', action='store', help='directory containing Verilator sources')
 parser.set_defaults(srcdir=".")

View File

@ -42,8 +42,7 @@ for cmd in sys.stdin:
         if 'example_lint' in line:
             # We don't have a way to specify this yet, so just for now
             # sys.stderr.write($line)
-            prefixes.append("int lint_off_line_" + str(lineno) +
-                            " = 1;\n")
+            prefixes.append("int lint_off_line_" + str(lineno) + " = 1;\n")
         lineno += 1
         pos = newpos + 1