Internals: Reformat with new settings (last commit). No functional change.

Wilson Snyder 2024-08-26 21:43:34 -04:00
parent bde4097df2
commit ae35be9102
23 changed files with 433 additions and 816 deletions
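Every hunk below follows the same pattern: statements that the old settings wrapped across several short lines are rejoined into a single longer line, with no behavioral change. As a minimal sketch of the kind of settings change that can produce such a reflow, assuming a yapf-style formatter driven by a .style.yapf file (the actual settings file, base style, and column value are not shown in this commit and are illustrative only):

    # .style.yapf -- hypothetical values; the commit does not include the real settings
    [style]
    based_on_style = pep8
    column_limit = 100   # a wider limit lets previously wrapped calls fit on one line

    # Reformatting in place with the new settings (yapf -i <file>) would then turn
    #     parser.add_argument('--debug',
    #                         action='store_const',
    #                         const=9,
    #                         help='enable debug')
    # into
    #     parser.add_argument('--debug', action='store_const', const=9, help='enable debug')

Widening the column limit rather than switching base styles matches the "No functional change" note: only line breaks move; code semantics are untouched.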

View File

@@ -16,8 +16,7 @@ parser = argparse.ArgumentParser(
For documentation see
https://verilator.org/guide/latest/exe_verilator_ccache_report.html""",
epilog=
"""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
@@ -67,8 +66,7 @@ else:
wnames = max(len(_) for _ in results) + 1
wresults = max(len(_) for _ in results.values()) + 1
for k in sorted(results.keys()):
args.o.write("{:{wnames}} : {:{wresults}} : {}s\n".format(
k,
args.o.write("{:{wnames}} : {:{wresults}} : {}s\n".format(k,
results[k],
elapsed[k].total_seconds(),
wnames=wnames,
@@ -79,18 +77,12 @@ else:
total = sum(counts.values())
for k in sorted(counts.keys()):
c = counts[k]
args.o.write("{:{width}}| {} ({:.2%})\n".format(k,
c,
c / total,
width=wresults))
args.o.write("{:{width}}| {} ({:.2%})\n".format(k, c, c / total, width=wresults))
args.o.write("\nLongest:\n")
longest = sorted(list(elapsed.items()),
key=lambda kv: -kv[1].total_seconds())
longest = sorted(list(elapsed.items()), key=lambda kv: -kv[1].total_seconds())
for i, (k, v) in enumerate(longest):
args.o.write("{:{width}}| {}s\n".format(k,
v.total_seconds(),
width=wnames))
args.o.write("{:{width}}| {}s\n".format(k, v.total_seconds(), width=wnames))
if i > 4:
break

View File

@@ -47,8 +47,7 @@ def diff_dir(a, b):
diff_file(a, b)
anyfile = True
if not anyfile:
sys.stderr.write(
"%Warning: No .tree files found that have similar base names\n")
sys.stderr.write("%Warning: No .tree files found that have similar base names\n")
def diff_file(a, b):
@@ -109,18 +108,14 @@ parser = argparse.ArgumentParser(
Verilator_difftree is used for debugging Verilator tree output files.
It performs a diff between two files, or all files common between two
directories, ignoring irrelevant pointer differences.""",
epilog=
"""Copyright 2005-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2005-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('--no-lineno',
action='store_false',
help='do not show differences in line numbering')

View File

@@ -15,11 +15,7 @@ LongestVcdStrValueLength = 0
Threads = collections.defaultdict(lambda: []) # List of records per thread id
Mtasks = collections.defaultdict(lambda: {'elapsed': 0, 'end': 0})
Cpus = collections.defaultdict(lambda: {'mtask_time': 0})
Global = {
'args': {},
'cpuinfo': collections.defaultdict(lambda: {}),
'stats': {}
}
Global = {'args': {}, 'cpuinfo': collections.defaultdict(lambda: {}), 'stats': {}}
ElapsedTime = None # total elapsed time
ExecGraphTime = 0 # total elapsed time executing an exec graph
ExecGraphIntervals = [] # list of (start, end) pairs
@@ -31,8 +27,7 @@ def read_data(filename):
with open(filename, "r", encoding="utf8") as fh:
re_thread = re.compile(r'^VLPROFTHREAD (\d+)$')
re_record = re.compile(r'^VLPROFEXEC (\S+) (\d+)(.*)$')
re_payload_mtaskBegin = re.compile(
r'id (\d+) predictStart (\d+) cpu (\d+)')
re_payload_mtaskBegin = re.compile(r'id (\d+) predictStart (\d+) cpu (\d+)')
re_payload_mtaskEnd = re.compile(r'id (\d+) predictCost (\d+)')
re_arg1 = re.compile(r'VLPROF arg\s+(\S+)\+([0-9.]*)\s*')
@@ -57,8 +52,7 @@ def read_data(filename):
tick = int(tick)
payload = payload.strip()
if kind == "SECTION_PUSH":
LongestVcdStrValueLength = max(LongestVcdStrValueLength,
len(payload))
LongestVcdStrValueLength = max(LongestVcdStrValueLength, len(payload))
SectionStack.append(payload)
Sections.append((tick, tuple(SectionStack)))
elif kind == "SECTION_POP":
@@ -66,15 +60,13 @@ def read_data(filename):
SectionStack.pop()
Sections.append((tick, tuple(SectionStack)))
elif kind == "MTASK_BEGIN":
mtask, predict_start, ecpu = re_payload_mtaskBegin.match(
payload).groups()
mtask, predict_start, ecpu = re_payload_mtaskBegin.match(payload).groups()
mtask = int(mtask)
predict_start = int(predict_start)
ecpu = int(ecpu)
mTaskThread[mtask] = thread
records = Threads[thread]
assert not records or records[-1]['start'] <= records[-1][
'end'] <= tick
assert not records or records[-1]['start'] <= records[-1]['end'] <= tick
records.append({
'start': tick,
'mtask': mtask,
@@ -85,8 +77,7 @@ def read_data(filename):
Mtasks[mtask]['thread'] = thread
Mtasks[mtask]['predict_start'] = predict_start
elif kind == "MTASK_END":
mtask, predict_cost = re_payload_mtaskEnd.match(
payload).groups()
mtask, predict_cost = re_payload_mtaskEnd.match(payload).groups()
mtask = int(mtask)
predict_cost = int(predict_cost)
begin = Mtasks[mtask]['begin']
@@ -163,8 +154,7 @@ def report():
print("\nSummary:")
print(" Total elapsed time = {} rdtsc ticks".format(ElapsedTime))
print(" Parallelized code = {:.2%} of elapsed time".format(
ExecGraphTime / ElapsedTime))
print(" Parallelized code = {:.2%} of elapsed time".format(ExecGraphTime / ElapsedTime))
print(" Total threads = %d" % nthreads)
print(" Total CPUs used = %d" % ncpus)
print(" Total mtasks = %d" % len(Mtasks))
@@ -176,15 +166,12 @@ def report():
if nthreads > ncpus:
print()
print("%%Warning: There were fewer CPUs (%d) than threads (%d)." %
(ncpus, nthreads))
print("%%Warning: There were fewer CPUs (%d) than threads (%d)." % (ncpus, nthreads))
print(" : See docs on use of numactl.")
else:
if 'cpu_socket_cores_warning' in Global:
print()
print(
"%Warning: Multiple threads scheduled on same hyperthreaded core."
)
print("%Warning: Multiple threads scheduled on same hyperthreaded core.")
print(" : See docs on use of numactl.")
if 'cpu_sockets_warning' in Global:
print()
@@ -228,8 +215,7 @@ def report_mtasks():
serialTime = ElapsedTime - ExecGraphTime
def subReport(elapsed, work):
print(" Thread utilization = {:7.2%}".format(work /
(elapsed * nthreads)))
print(" Thread utilization = {:7.2%}".format(work / (elapsed * nthreads)))
print(" Speedup = {:6.3}x".format(work / elapsed))
print("\nParallelized code, measured:")
@@ -256,8 +242,7 @@ def report_mtasks():
if Mtasks[mtask]['elapsed'] > 0:
if Mtasks[mtask]['predict_cost'] == 0:
Mtasks[mtask]['predict_cost'] = 1 # don't log(0) below
p2e_ratio = math.log(Mtasks[mtask]['predict_cost'] /
Mtasks[mtask]['elapsed'])
p2e_ratio = math.log(Mtasks[mtask]['predict_cost'] / Mtasks[mtask]['elapsed'])
p2e_ratios.append(p2e_ratio)
if p2e_ratio > max_p2e:
@@ -269,18 +254,14 @@ def report_mtasks():
print("\nMTask statistics:")
print(" Longest mtask id = {}".format(long_mtask))
print(" Longest mtask time = {:.2%} of time elapsed in parallelized code".
format(long_mtask_time / ExecGraphTime))
print(" Longest mtask time = {:.2%} of time elapsed in parallelized code".format(
long_mtask_time / ExecGraphTime))
print(" min log(p2e) = %0.3f" % min_p2e, end="")
print(" from mtask %d (predict %d," %
(min_mtask, Mtasks[min_mtask]['predict_cost']),
end="")
print(" from mtask %d (predict %d," % (min_mtask, Mtasks[min_mtask]['predict_cost']), end="")
print(" elapsed %d)" % Mtasks[min_mtask]['elapsed'])
print(" max log(p2e) = %0.3f" % max_p2e, end="")
print(" from mtask %d (predict %d," %
(max_mtask, Mtasks[max_mtask]['predict_cost']),
end="")
print(" from mtask %d (predict %d," % (max_mtask, Mtasks[max_mtask]['predict_cost']), end="")
print(" elapsed %d)" % Mtasks[max_mtask]['elapsed'])
stddev = statistics.pstdev(p2e_ratios)
@@ -315,8 +296,8 @@ def report_cpus():
model = cpuinfo['model_name']
print(" {:3d} | {:7.2%} / {:16d} | {:>6s} | {:>4s} | {}".format(
cpu, Cpus[cpu]['mtask_time'] / ElapsedTime,
Cpus[cpu]['mtask_time'], socket, core, model))
cpu, Cpus[cpu]['mtask_time'] / ElapsedTime, Cpus[cpu]['mtask_time'], socket, core,
model))
if len(Global['cpu_sockets']) > 1:
Global['cpu_sockets_warning'] = True
@@ -366,8 +347,8 @@ def report_sections():
def printTree(prefix, name, entries, tree):
print(" {:7.2%} | {:7.2%} | {:8} | {:10.2f} | {}".format(
treeSum(tree) / ElapsedTime, tree[0] / ElapsedTime, tree[2],
tree[2] / entries, prefix + name))
treeSum(tree) / ElapsedTime, tree[0] / ElapsedTime, tree[2], tree[2] / entries,
prefix + name))
for k in sorted(tree[1], key=lambda _: -treeSum(tree[1][_])):
printTree(prefix + " ", k, tree[2], tree[1][k])
@@ -438,10 +419,8 @@ def write_vcd(filename):
addValue(code, start, mtask)
addValue(code, end, None)
tStart = sorted(_['start'] for records in Threads.values()
for _ in records)
tEnd = sorted(_['end'] for records in Threads.values()
for _ in records)
tStart = sorted(_['start'] for records in Threads.values() for _ in records)
tEnd = sorted(_['end'] for records in Threads.values() for _ in records)
# Predicted graph
for start, end in ExecGraphIntervals:
@@ -455,11 +434,10 @@ def write_vcd(filename):
# Predict mtasks that fill the time the execution occupied
for mtask in Mtasks:
thread = Mtasks[mtask]['thread']
pred_scaled_start = start + int(
Mtasks[mtask]['predict_start'] * measured_scaling)
pred_scaled_start = start + int(Mtasks[mtask]['predict_start'] * measured_scaling)
pred_scaled_end = start + int(
(Mtasks[mtask]['predict_start'] +
Mtasks[mtask]['predict_cost']) * measured_scaling)
(Mtasks[mtask]['predict_start'] + Mtasks[mtask]['predict_cost']) *
measured_scaling)
if pred_scaled_start == pred_scaled_end:
continue
@@ -545,8 +523,7 @@ Verilator_gantt creates a visual representation to help analyze Verilator
For documentation see
https://verilator.org/guide/latest/exe_verilator_gantt.html""",
epilog=
"""Copyright 2018-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2018-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
@@ -554,12 +531,8 @@ Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--debug', action='store_true', help='enable debug')
parser.add_argument('--no-vcd',
help='disable creating vcd',
action='store_true')
parser.add_argument('--vcd',
help='filename for vcd output',
default='profile_exec.vcd')
parser.add_argument('--no-vcd', help='disable creating vcd', action='store_true')
parser.add_argument('--vcd', help='filename for vcd output', default='profile_exec.vcd')
parser.add_argument('filename',
help='input profile_exec.dat filename to process',
default='profile_exec.dat')

View File

@@ -34,8 +34,7 @@ def profcfunc(filename):
# Older gprofs have no call column for single-call functions
# %time cumesec selfsec {stuff} name
match = re.match(
r'^\s*([0-9.]+)\s+[0-9.]+\s+([0-9.]+)\s+[^a-zA-Z_]*([a-zA-Z_].*)$',
match = re.match(r'^\s*([0-9.]+)\s+[0-9.]+\s+([0-9.]+)\s+[^a-zA-Z_]*([a-zA-Z_].*)$',
line)
if match:
pct = float(match.group(1))
@@ -143,12 +142,9 @@ def profcfunc(filename):
print(" These are split into three categories:")
print(" C++: Time in non-Verilated C++ code")
print(" Prof: Time in profile overhead")
print(" VBlock: Time attributable to a block in a" +
" Verilog file and line")
print(" VCommon: Time in a Verilated module," +
" due to all parts of the design")
print(" VLib: Time in Verilated common libraries," +
" called by the Verilated code")
print(" VBlock: Time attributable to a block in a" + " Verilog file and line")
print(" VCommon: Time in a Verilated module," + " due to all parts of the design")
print(" VLib: Time in Verilated common libraries," + " called by the Verilated code")
print()
print(" % cumulative self ")
@@ -156,13 +152,11 @@ def profcfunc(filename):
"s type filename and line number") % "design")
cume = 0
for func in sorted(vfuncs.keys(),
key=lambda f: vfuncs[f]['sec'],
reverse=True):
for func in sorted(vfuncs.keys(), key=lambda f: vfuncs[f]['sec'], reverse=True):
cume += vfuncs[func]['sec']
print(("%6.2f %9.2f %8.2f %10d %-" + str(design_width) + "s %s") %
(vfuncs[func]['pct'], cume, vfuncs[func]['sec'],
vfuncs[func]['calls'], vfuncs[func]['design'], func))
(vfuncs[func]['pct'], cume, vfuncs[func]['sec'], vfuncs[func]['calls'],
vfuncs[func]['design'], func))
######################################################################
@@ -180,18 +174,14 @@ in each Verilog block.
For documentation see
https://verilator.org/guide/latest/exe_verilator_profcfunc.html""",
epilog=
"""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('filename', help='input gprof output to process')
Args = parser.parse_args()

View File

@@ -23,9 +23,7 @@ class VlSphinxExtract:
outname = match.group(1)
print("Writing %s" % outname)
fhw = open(outname, "w", encoding="utf8") # pylint: disable=consider-using-with
fhw.write(
".. comment: generated by vl_sphinx_extract from " +
filename + "\n")
fhw.write(".. comment: generated by vl_sphinx_extract from " + filename + "\n")
fhw.write(".. code-block::\n")
elif re.match(r'^[=a-zA-Z0-9_]', line):
fhw = None
@@ -39,18 +37,14 @@ parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Read a file and extract documentation data.""",
epilog=
""" Copyright 2021-2024 by Wilson Snyder. This package is free software;
epilog=""" Copyright 2021-2024 by Wilson Snyder. This package is free software;
you can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('path', help='path to extract from')
Args = parser.parse_args()

View File

@@ -18,8 +18,7 @@ class VlSphinxFix:
if os.path.isdir(path):
for basefile in os.listdir(path):
file = os.path.join(path, basefile)
if ((basefile != ".") and (basefile != "..")
and basefile not in self.SkipBasenames
if ((basefile != ".") and (basefile != "..") and basefile not in self.SkipBasenames
and not os.path.islink(file)):
self.process(file)
elif re.search(r'\.(html|tex)$', path):
@@ -54,18 +53,14 @@ parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Post-process Sphinx HTML.""",
epilog=
""" Copyright 2021-2024 by Wilson Snyder. This package is free software;
epilog=""" Copyright 2021-2024 by Wilson Snyder. This package is free software;
you can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('path', help='path to edit')
Args = parser.parse_args()

View File

@@ -23,8 +23,7 @@ def get_vlt_version():
filename = "../../Makefile"
with open(filename, "r", encoding="utf8") as fh:
for line in fh:
match = re.search(r"PACKAGE_VERSION *= *([a-z0-9.]+) +([-0-9]+)",
line)
match = re.search(r"PACKAGE_VERSION *= *([a-z0-9.]+) +([-0-9]+)", line)
if match:
return match.group(1), match.group(2)
match = re.search(r"PACKAGE_VERSION *= *([a-z0-9.]+) +devel", line)
@@ -75,8 +74,7 @@ extensions = []
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build', 'Thumbs.db', '.DS_Store', 'internals.rst', 'xml.rst', 'gen/ex_*',
'CONTRIBUTING.rst'
'_build', 'Thumbs.db', '.DS_Store', 'internals.rst', 'xml.rst', 'gen/ex_*', 'CONTRIBUTING.rst'
]
# Warn about refs

View File

@@ -25,8 +25,7 @@ class VlFileCopy:
self.debug = debug
with NamedTemporaryFile() as tree_temp, NamedTemporaryFile(
) as meta_temp:
with NamedTemporaryFile() as tree_temp, NamedTemporaryFile() as meta_temp:
vargs = [
'--json-only-output',
tree_temp.name,
@@ -61,8 +60,7 @@ class VlFileCopy:
print("\t%s " % command)
status = subprocess.call(command, shell=True)
if status != 0:
raise RuntimeError("Command failed running Verilator with '" +
command + "', stopped")
raise RuntimeError("Command failed running Verilator with '" + command + "', stopped")
#######################################################################
@@ -71,8 +69,7 @@ if __name__ == '__main__':
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawTextHelpFormatter,
description=
"""Example of using Verilator JSON output to copy a list of files to an
description="""Example of using Verilator JSON output to copy a list of files to an
output directory (-odir, defaults to 'copied'), e.g. to easily create a
tarball of the design to pass to others.
@@ -95,11 +92,7 @@ This file ONLY is placed under the Creative Commons Public Domain, for
any use, without warranty, 2019 by Wilson Snyder.
SPDX-License-Identifier: CC0-1.0
""")
parser.add_argument('-debug',
'--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('-debug', '--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('-odir',
'--odir',
action='store',
@@ -108,9 +101,7 @@ SPDX-License-Identifier: CC0-1.0
help='target output directory')
(args, rem) = parser.parse_known_args()
print(
"NOTE: vl_file_copy is only an example starting point for writing your own tool."
)
print("NOTE: vl_file_copy is only an example starting point for writing your own tool.")
# That is:
# 1. We will accept basic patches
# 2. We are not expecting to make this globally useful. (e.g. we don't cleanup obj_dir)

View File

@@ -24,8 +24,7 @@ class VlHierGraph:
self.next_vertex_number = 0
self.addr_to_number = {}
with NamedTemporaryFile() as tree_temp, NamedTemporaryFile(
) as meta_temp:
with NamedTemporaryFile() as tree_temp, NamedTemporaryFile() as meta_temp:
vargs = [
'--json-only-output',
tree_temp.name,
@@ -45,9 +44,7 @@ class VlHierGraph:
fh.write("digraph {\n")
fh.write(" dpi=300;\n")
fh.write(" order=LR;\n")
fh.write(
" node [fontsize=8 shape=\"box\" margin=0.01 width=0 height=0]"
)
fh.write(" node [fontsize=8 shape=\"box\" margin=0.01 width=0 height=0]")
fh.write(" edge [fontsize=6]")
# Find cells
modules = self.flatten(self.tree, lambda n: n['type'] == "MODULE")
@@ -101,8 +98,7 @@ class VlHierGraph:
print("\t%s " % command)
status = subprocess.call(command, shell=True)
if status != 0:
raise RuntimeError("Command failed running Verilator with '" +
command + "', stopped")
raise RuntimeError("Command failed running Verilator with '" + command + "', stopped")
#######################################################################
@@ -111,8 +107,7 @@ if __name__ == '__main__':
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawTextHelpFormatter,
description=
"""Example of using Verilator JSON output to create a .dot file showing the
description="""Example of using Verilator JSON output to create a .dot file showing the
design module hierarchy.
Example usage:
@@ -134,11 +129,7 @@ This file ONLY is placed under the Creative Commons Public Domain, for
any use, without warranty, 2019 by Wilson Snyder.
SPDX-License-Identifier: CC0-1.0
""")
parser.add_argument('-debug',
'--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('-debug', '--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('-o',
'--o',
action='store',
@@ -147,18 +138,14 @@ SPDX-License-Identifier: CC0-1.0
help='output filename')
(args, rem) = parser.parse_known_args()
print(
"NOTE: vl_hier_graph is only an example starting point for writing your own tool."
)
print("NOTE: vl_hier_graph is only an example starting point for writing your own tool.")
# That is:
# 1. We will accept basic patches
# 2. We are not expecting to make this globally useful. (e.g. we don't cleanup obj_dir)
# 3. "make install" will not install this.
# 4. This has not had production-worthy validation.
fc = VlHierGraph(output_filename=args.o,
debug=args.debug,
verilator_args=rem)
fc = VlHierGraph(output_filename=args.o, debug=args.debug, verilator_args=rem)
######################################################################
# Local Variables:

View File

@@ -106,9 +106,8 @@ class VlAnnotations:
def is_mt_safe_call(self):
return (not self.is_mt_unsafe_call()
and (self.mt_safe or self.mt_safe_postinit or self.pure
or self.requires or self.excludes or self.acquire
or self.release))
and (self.mt_safe or self.mt_safe_postinit or self.pure or self.requires
or self.excludes or self.acquire or self.release))
def is_pure_call(self):
return self.pure
@@ -203,9 +202,7 @@ class FunctionInfo:
annotations: VlAnnotations
ftype: FunctionType
_hash: Optional[int] = dataclasses.field(default=None,
init=False,
repr=False)
_hash: Optional[int] = dataclasses.field(default=None, init=False, repr=False)
@property
def name(self):
@@ -220,15 +217,13 @@ class FunctionInfo:
return self._hash
def __eq__(self, other):
return (self.usr == other.usr and self.file == other.file
and self.line == other.line)
return (self.usr == other.usr and self.file == other.file and self.line == other.line)
def copy(self, /, **changes):
return dataclasses.replace(self, **changes)
@staticmethod
def from_decl_file_line_and_refd_node(file: str, line: int,
refd: clang.cindex.Cursor,
def from_decl_file_line_and_refd_node(file: str, line: int, refd: clang.cindex.Cursor,
annotations: VlAnnotations):
file = os.path.abspath(file)
refd = refd.canonical
@@ -277,14 +272,11 @@ class Diagnostic:
source_ctx: FunctionInfo
kind: DiagnosticKind
_hash: Optional[int] = dataclasses.field(default=None,
init=False,
repr=False)
_hash: Optional[int] = dataclasses.field(default=None, init=False, repr=False)
def __hash__(self):
if not self._hash:
self._hash = hash(
hash(self.target) ^ hash(self.source_ctx) ^ hash(self.kind))
self._hash = hash(hash(self.target) ^ hash(self.source_ctx) ^ hash(self.kind))
return self._hash
@@ -292,9 +284,9 @@ class CallAnnotationsValidator:
def __init__(self, diagnostic_cb: Callable[[Diagnostic], None],
is_ignored_top_level: Callable[[clang.cindex.Cursor], bool],
is_ignored_def: Callable[
[clang.cindex.Cursor, clang.cindex.Cursor], bool],
is_ignored_call: Callable[[clang.cindex.Cursor], bool]):
is_ignored_def: Callable[[clang.cindex.Cursor, clang.cindex.Cursor],
bool], is_ignored_call: Callable[[clang.cindex.Cursor],
bool]):
self._diagnostic_cb = diagnostic_cb
self._is_ignored_top_level = is_ignored_top_level
self._is_ignored_call = is_ignored_call
@@ -329,8 +321,7 @@ class CallAnnotationsValidator:
with open(source_file, "r", encoding="utf-8") as file:
for line in file:
line = line.strip()
match = re.fullmatch(
r"^#\s*(define\s+(\w+)(?:\s+(.*))?|include\s+.*)$", line)
match = re.fullmatch(r"^#\s*(define\s+(\w+)(?:\s+(.*))?|include\s+.*)$", line)
if match:
if match.group(1).startswith("define"):
key = match.group(2)
@@ -341,16 +332,14 @@ class CallAnnotationsValidator:
return defs
@staticmethod
def filter_out_unsupported_compiler_args(
args: list[str]) -> tuple[list[str], dict[str, str]]:
def filter_out_unsupported_compiler_args(args: list[str]) -> tuple[list[str], dict[str, str]]:
filtered_args = []
defines = {}
args_iter = iter(args)
try:
while arg := next(args_iter):
# Skip positional arguments (input file name).
if not arg.startswith("-") and (arg.endswith(".cpp")
or arg.endswith(".c")
if not arg.startswith("-") and (arg.endswith(".cpp") or arg.endswith(".c")
or arg.endswith(".h")):
continue
@@ -367,8 +356,7 @@ class CallAnnotationsValidator:
# Preserved options with separate value argument.
if arg in [
"-x"
"-Xclang", "-I", "-isystem", "-iquote", "-include",
"-include-pch"
"-Xclang", "-I", "-isystem", "-iquote", "-include", "-include-pch"
]:
filtered_args += [arg, next(args_iter)]
continue
@@ -406,14 +394,12 @@ class CallAnnotationsValidator:
return (filtered_args, defines)
def compile_and_analyze_file(self, source_file: str,
compiler_args: list[str],
def compile_and_analyze_file(self, source_file: str, compiler_args: list[str],
build_dir: Optional[str]):
filename = os.path.abspath(source_file)
initial_cwd = "."
filtered_args, defines = self.filter_out_unsupported_compiler_args(
compiler_args)
filtered_args, defines = self.filter_out_unsupported_compiler_args(compiler_args)
defines.update(self.parse_initial_defines(source_file))
if build_dir:
@@ -451,8 +437,7 @@ class CallAnnotationsValidator:
self._diagnostic_cb(Diagnostic(target, source, source_ctx, kind))
else:
self._diagnostic_cb(
Diagnostic(FunctionInfo.from_node(target), source, source_ctx,
kind))
Diagnostic(FunctionInfo.from_node(target), source, source_ctx, kind))
def iterate_children(self, children: Iterable[clang.cindex.Cursor],
handler: Callable[[clang.cindex.Cursor], None]):
@@ -465,8 +450,7 @@ class CallAnnotationsValidator:
@staticmethod
def get_referenced_node_info(
node: clang.cindex.Cursor
) -> tuple[bool, Optional[clang.cindex.Cursor], VlAnnotations,
Iterable[clang.cindex.Cursor]]:
) -> tuple[bool, Optional[clang.cindex.Cursor], VlAnnotations, Iterable[clang.cindex.Cursor]]:
if not node.spelling and not node.displayname:
return (False, None, VlAnnotations(), [])
@@ -480,8 +464,7 @@ class CallAnnotationsValidator:
annotations = VlAnnotations.from_nodes_list(children)
return (True, refd, annotations, children)
def check_mt_safe_call(self, node: clang.cindex.Cursor,
refd: clang.cindex.Cursor,
def check_mt_safe_call(self, node: clang.cindex.Cursor, refd: clang.cindex.Cursor,
annotations: VlAnnotations):
is_mt_safe = False
@@ -513,8 +496,7 @@ class CallAnnotationsValidator:
# we are calling local method. It is MT safe
# only if this method is also only calling local methods or
# MT-safe methods
self.iterate_children(refd.get_children(),
self.dispatch_node_inside_definition)
self.iterate_children(refd.get_children(), self.dispatch_node_inside_definition)
is_mt_safe = True
# class/struct member
elif refn and refn.kind == CursorKind.MEMBER_REF_EXPR and refn.referenced:
@@ -525,18 +507,15 @@ class CallAnnotationsValidator:
if self.is_constructor_context() and refn.semantic_parent:
# we are in constructor, so calling local members is MT_SAFE,
# make sure object that we are calling is local to the constructor
constructor_class = self._constructor_context[
-1].semantic_parent
constructor_class = self._constructor_context[-1].semantic_parent
if refn.semantic_parent.spelling == constructor_class.spelling:
if check_class_member_exists(constructor_class, refn):
is_mt_safe = True
else:
# check if this class inherits from some base class
base_class = get_base_class(constructor_class,
refn.semantic_parent)
base_class = get_base_class(constructor_class, refn.semantic_parent)
if base_class:
if check_class_member_exists(
base_class.get_declaration(), refn):
if check_class_member_exists(base_class.get_declaration(), refn):
is_mt_safe = True
# variable
elif refn and refn.kind == CursorKind.DECL_REF_EXPR and refn.referenced:
@@ -567,8 +546,7 @@ class CallAnnotationsValidator:
# Call handling
def process_method_call(self, node: clang.cindex.Cursor,
refd: clang.cindex.Cursor,
def process_method_call(self, node: clang.cindex.Cursor, refd: clang.cindex.Cursor,
annotations: VlAnnotations):
assert self._call_location
ctx = self._call_location.annotations
@@ -576,58 +554,48 @@ class CallAnnotationsValidator:
# MT-safe context
if ctx.is_mt_safe_context():
if not self.check_mt_safe_call(node, refd, annotations):
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.NON_MT_SAFE_CALL_IN_MT_SAFE_CTX)
# stable tree context
if ctx.is_stabe_tree_context():
if annotations.is_mt_unsafe_call() or not (
annotations.is_stabe_tree_call()
or annotations.is_pure_call()
annotations.is_stabe_tree_call() or annotations.is_pure_call()
or self.check_mt_safe_call(node, refd, annotations)):
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.NON_STABLE_TREE_CALL_IN_STABLE_TREE_CTX)
# pure context
if ctx.is_pure_context():
if not annotations.is_pure_call():
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)
def process_function_call(self, refd: clang.cindex.Cursor,
annotations: VlAnnotations):
def process_function_call(self, refd: clang.cindex.Cursor, annotations: VlAnnotations):
assert self._call_location
ctx = self._call_location.annotations
# MT-safe context
if ctx.is_mt_safe_context():
if not annotations.is_mt_safe_call():
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.NON_MT_SAFE_CALL_IN_MT_SAFE_CTX)
# stable tree context
if ctx.is_stabe_tree_context():
if annotations.is_mt_unsafe_call() or not (
annotations.is_pure_call()
if annotations.is_mt_unsafe_call() or not (annotations.is_pure_call()
or annotations.is_mt_safe_call()
or annotations.is_stabe_tree_call()):
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.NON_STABLE_TREE_CALL_IN_STABLE_TREE_CTX)
# pure context
if ctx.is_pure_context():
if not annotations.is_pure_call():
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)
def process_constructor_call(self, refd: clang.cindex.Cursor,
annotations: VlAnnotations):
def process_constructor_call(self, refd: clang.cindex.Cursor, annotations: VlAnnotations):
assert self._call_location
ctx = self._call_location.annotations
@@ -635,31 +603,26 @@ class CallAnnotationsValidator:
# only if they call local methods or MT-safe functions.
if ctx.is_mt_safe_context() or self.is_constructor_context():
self._constructor_context.append(refd)
self.iterate_children(refd.get_children(),
self.dispatch_node_inside_definition)
self.iterate_children(refd.get_children(), self.dispatch_node_inside_definition)
self._constructor_context.pop()
# stable tree context
if ctx.is_stabe_tree_context():
self._constructor_context.append(refd)
self.iterate_children(refd.get_children(),
self.dispatch_node_inside_definition)
self.iterate_children(refd.get_children(), self.dispatch_node_inside_definition)
self._constructor_context.pop()
# pure context
if ctx.is_pure_context():
if not annotations.is_pure_call(
) and not refd.is_default_constructor():
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
if not annotations.is_pure_call() and not refd.is_default_constructor():
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.NON_PURE_CALL_IN_PURE_CTX)
def dispatch_call_node(self, node: clang.cindex.Cursor):
[supported, refd, annotations, _] = self.get_referenced_node_info(node)
if not supported:
self.iterate_children(node.get_children(),
self.dispatch_node_inside_definition)
self.iterate_children(node.get_children(), self.dispatch_node_inside_definition)
return True
assert refd is not None
@@ -676,19 +639,14 @@ class CallAnnotationsValidator:
assert self._call_location is not None
node_file = os.path.abspath(node.location.file.name)
self._call_location = self._call_location.copy(file=node_file,
line=node.location.line)
self._call_location = self._call_location.copy(file=node_file, line=node.location.line)
# Standalone functions and static class methods
if (refd.kind == CursorKind.FUNCTION_DECL
or refd.kind == CursorKind.CXX_METHOD
and refd.is_static_method()):
or refd.kind == CursorKind.CXX_METHOD and refd.is_static_method()):
self.process_function_call(refd, annotations)
# Function pointer
elif refd.kind in [
CursorKind.VAR_DECL, CursorKind.FIELD_DECL,
CursorKind.PARM_DECL
]:
elif refd.kind in [CursorKind.VAR_DECL, CursorKind.FIELD_DECL, CursorKind.PARM_DECL]:
self.process_function_call(refd, annotations)
# Non-static class methods
elif refd.kind == CursorKind.CXX_METHOD:
@@ -726,14 +684,13 @@ class CallAnnotationsValidator:
if self.dispatch_call_node(node) is False:
return None
elif node.is_definition() and node.kind in [
CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL,
CursorKind.CONSTRUCTOR, CursorKind.CONVERSION_FUNCTION
CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL, CursorKind.CONSTRUCTOR,
CursorKind.CONVERSION_FUNCTION
]:
self.process_function_definition(node)
return None
return self.iterate_children(node.get_children(),
self.dispatch_node_inside_definition)
return self.iterate_children(node.get_children(), self.dispatch_node_inside_definition)
def process_function_definition(self, node: clang.cindex.Cursor):
[supported, refd, annotations, _] = self.get_referenced_node_info(node)
@@ -768,14 +725,12 @@ class CallAnnotationsValidator:
self._caller = FunctionInfo.from_node(node, refd, def_annotations)
self._call_location = self._caller
self.emit_diagnostic(
FunctionInfo.from_node(refd, refd, annotations),
self.emit_diagnostic(FunctionInfo.from_node(refd, refd, annotations),
DiagnosticKind.ANNOTATIONS_DEF_DECL_MISMATCH)
# Use concatenation of definition and declaration annotations
# for calls validation.
self._caller = FunctionInfo.from_node(node, refd,
def_annotations | annotations)
self._caller = FunctionInfo.from_node(node, refd, def_annotations | annotations)
prev_call_location = self._call_location
self._call_location = self._caller
@@ -793,8 +748,7 @@ class CallAnnotationsValidator:
if declarations:
del self._external_decls[usr]
self.iterate_children(node_children,
self.dispatch_node_inside_definition)
self.iterate_children(node_children, self.dispatch_node_inside_definition)
self._call_location = prev_call_location
self._caller = prev_call_location
@@ -805,8 +759,8 @@ class CallAnnotationsValidator:
def dispatch_node(self, node: clang.cindex.Cursor):
if node.kind in [
CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL,
CursorKind.CONSTRUCTOR, CursorKind.CONVERSION_FUNCTION
CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL, CursorKind.CONSTRUCTOR,
CursorKind.CONVERSION_FUNCTION
]:
if node.is_definition():
return self.process_function_definition(node)
@@ -815,14 +769,12 @@ class CallAnnotationsValidator:
return self.iterate_children(node.get_children(), self.dispatch_node)
def process_translation_unit(
self, translation_unit: clang.cindex.TranslationUnit):
def process_translation_unit(self, translation_unit: clang.cindex.TranslationUnit):
self._level += 1
kv_defines = sorted([f"{k}={v}" for k, v in self._defines.items()])
concat_defines = '\n'.join(kv_defines)
# List of headers already processed in a TU with specified set of defines.
tu_processed_headers = self._processed_headers.setdefault(
concat_defines, set())
tu_processed_headers = self._processed_headers.setdefault(concat_defines, set())
for child in translation_unit.cursor.get_children():
if self._is_ignored_top_level(child):
continue
@@ -833,10 +785,8 @@ class CallAnnotationsValidator:
self.dispatch_node(child)
self._level -= 1
tu_processed_headers.update([
os.path.abspath(str(hdr.source))
for hdr in translation_unit.get_includes()
])
tu_processed_headers.update(
[os.path.abspath(str(hdr.source)) for hdr in translation_unit.get_includes()])
@dataclass
@@ -857,8 +807,7 @@ def get_filter_funcs(verilator_root: str):
filename = os.path.abspath(node.location.file.name)
return not filename.startswith(verilator_root)
def is_ignored_def(node: clang.cindex.Cursor,
refd: clang.cindex.Cursor) -> bool:
def is_ignored_def(node: clang.cindex.Cursor, refd: clang.cindex.Cursor) -> bool:
# __*
if str(refd.spelling).startswith("__"):
return True
@@ -901,8 +850,7 @@ def precompile_header(compile_command: CompileCommand, tmp_dir: str) -> str:
os.chdir(compile_command.directory)
index = Index.create()
translation_unit = index.parse(compile_command.filename,
compile_command.args)
translation_unit = index.parse(compile_command.filename, compile_command.args)
for diag in translation_unit.diagnostics:
if diag.severity >= clang.cindex.Diagnostic.Error:
errors.append(str(diag))
@@ -910,22 +858,19 @@ def precompile_header(compile_command: CompileCommand, tmp_dir: str) -> str:
if len(errors) == 0:
pch_file = os.path.join(
tmp_dir,
f"{compile_command.refid:02}_{os.path.basename(compile_command.filename)}.pch"
)
f"{compile_command.refid:02}_{os.path.basename(compile_command.filename)}.pch")
translation_unit.save(pch_file)
if pch_file:
return pch_file
except (TranslationUnitSaveError, TranslationUnitLoadError,
OSError) as exception:
except (TranslationUnitSaveError, TranslationUnitLoadError, OSError) as exception:
print(f"%Warning: {exception}", file=sys.stderr)
finally:
os.chdir(initial_cwd)
print(
f"%Warning: Precompilation failed, skipping: {compile_command.filename}",
print(f"%Warning: Precompilation failed, skipping: {compile_command.filename}",
file=sys.stderr)
for error in errors:
print(f" {error}", file=sys.stderr)
@@ -934,10 +879,8 @@ def precompile_header(compile_command: CompileCommand, tmp_dir: str) -> str:
# Compile and analyze inputs in a single process.
def run_analysis(ccl: Iterable[CompileCommand], pccl: Iterable[CompileCommand],
diagnostic_cb: Callable[[Diagnostic],
None], verilator_root: str):
(is_ignored_top_level, is_ignored_def,
is_ignored_call) = get_filter_funcs(verilator_root)
diagnostic_cb: Callable[[Diagnostic], None], verilator_root: str):
(is_ignored_top_level, is_ignored_def, is_ignored_call) = get_filter_funcs(verilator_root)
prefix = "verilator_clang_check_attributes_"
with tempfile.TemporaryDirectory(prefix=prefix) as tmp_dir:
@@ -947,8 +890,8 @@ def run_analysis(ccl: Iterable[CompileCommand], pccl: Iterable[CompileCommand],
if pch_file:
extra_args += ["-include-pch", pch_file]
cav = CallAnnotationsValidator(diagnostic_cb, is_ignored_top_level,
is_ignored_def, is_ignored_call)
cav = CallAnnotationsValidator(diagnostic_cb, is_ignored_top_level, is_ignored_def,
is_ignored_call)
for compile_command in ccl:
cav.compile_and_analyze_file(compile_command.filename,
extra_args + compile_command.args,
@@ -963,12 +906,11 @@ class ParallelAnalysisProcess:
@staticmethod
def init_data(verilator_root: str, tmp_dir: str):
(is_ignored_top_level, is_ignored_def,
is_ignored_call) = get_filter_funcs(verilator_root)
(is_ignored_top_level, is_ignored_def, is_ignored_call) = get_filter_funcs(verilator_root)
ParallelAnalysisProcess.cav = CallAnnotationsValidator(
ParallelAnalysisProcess._diagnostic_handler, is_ignored_top_level,
is_ignored_def, is_ignored_call)
ParallelAnalysisProcess._diagnostic_handler, is_ignored_top_level, is_ignored_def,
is_ignored_call)
ParallelAnalysisProcess.tmp_dir = tmp_dir
@staticmethod
@@ -979,31 +921,27 @@ class ParallelAnalysisProcess:
def analyze_cpp_file(compile_command: CompileCommand) -> set[Diagnostic]:
ParallelAnalysisProcess.diags = set()
assert ParallelAnalysisProcess.cav is not None
ParallelAnalysisProcess.cav.compile_and_analyze_file(
compile_command.filename, compile_command.args,
ParallelAnalysisProcess.cav.compile_and_analyze_file(compile_command.filename,
compile_command.args,
compile_command.directory)
return ParallelAnalysisProcess.diags
@staticmethod
def precompile_header(compile_command: CompileCommand) -> str:
return precompile_header(compile_command,
ParallelAnalysisProcess.tmp_dir)
return precompile_header(compile_command, ParallelAnalysisProcess.tmp_dir)
# Compile and analyze inputs in multiple processes.
def run_parallel_analysis(ccl: Iterable[CompileCommand],
pccl: Iterable[CompileCommand],
diagnostic_cb: Callable[[Diagnostic], None],
jobs_count: int, verilator_root: str):
def run_parallel_analysis(ccl: Iterable[CompileCommand], pccl: Iterable[CompileCommand],
diagnostic_cb: Callable[[Diagnostic],
None], jobs_count: int, verilator_root: str):
prefix = "verilator_clang_check_attributes_"
with tempfile.TemporaryDirectory(prefix=prefix) as tmp_dir:
with multiprocessing.Pool(
processes=jobs_count,
with multiprocessing.Pool(processes=jobs_count,
initializer=ParallelAnalysisProcess.init_data,
initargs=[verilator_root, tmp_dir]) as pool:
extra_args = []
for pch_file in pool.imap_unordered(
ParallelAnalysisProcess.precompile_header, pccl):
for pch_file in pool.imap_unordered(ParallelAnalysisProcess.precompile_header, pccl):
if pch_file:
extra_args += ["-include-pch", pch_file]
@@ -1011,8 +949,7 @@ def run_parallel_analysis(ccl: Iterable[CompileCommand],
for compile_command in ccl:
compile_command.args = compile_command.args + extra_args
for diags in pool.imap_unordered(
ParallelAnalysisProcess.analyze_cpp_file, ccl, 1):
for diags in pool.imap_unordered(ParallelAnalysisProcess.analyze_cpp_file, ccl, 1):
for diag in diags:
diagnostic_cb(diag)
@@ -1057,8 +994,7 @@ class TopDownSummaryPrinter():
row_groups: dict[str, list[list[str]]] = {}
column_widths = [0, 0]
for func in sorted(self._funcs.values(),
key=lambda func:
(func.info.file, func.info.line, func.info.usr)):
key=lambda func: (func.info.file, func.info.line, func.info.usr)):
func_info = func.info
relfile = os.path.relpath(func_info.file, root_dir)
@@ -1082,31 +1018,23 @@ class TopDownSummaryPrinter():
if func.mismatch:
mrelfile = os.path.relpath(func.mismatch.file, root_dir)
row_group.append([
f"{mrelfile}:{func.mismatch.line}:",
f"[{func.mismatch.annotations}]",
f"{mrelfile}:{func.mismatch.line}:", f"[{func.mismatch.annotations}]",
func.mismatch.name + " [declaration]"
])
row_group.append([
f"{relfile}:{func_info.line}:", f"[{func_info.annotations}]",
func_info.name
])
row_group.append(
[f"{relfile}:{func_info.line}:", f"[{func_info.annotations}]", func_info.name])
for callee in sorted(func.calees,
key=lambda func:
(func.file, func.line, func.usr)):
for callee in sorted(func.calees, key=lambda func: (func.file, func.line, func.usr)):
crelfile = os.path.relpath(callee.file, root_dir)
row_group.append([
f"{crelfile}:{callee.line}:", f"[{callee.annotations}]",
" " + callee.name
])
row_group.append(
[f"{crelfile}:{callee.line}:", f"[{callee.annotations}]", " " + callee.name])
row_groups[name] = row_group
for row in row_group:
for row_id, value in enumerate(row[0:-1]):
column_widths[row_id] = max(column_widths[row_id],
len(value))
column_widths[row_id] = max(column_widths[row_id], len(value))
for label, rows in sorted(row_groups.items(), key=lambda kv: kv[0]):
self.begin_group(label)
@@ -1114,21 +1042,17 @@ class TopDownSummaryPrinter():
print(f"{row[0]:<{column_widths[0]}} "
f"{row[1]:<{column_widths[1]}} "
f"{row[2]}")
print(
f"Number of functions reported unsafe: {len(self._unsafe_in_safe)}"
)
print(f"Number of functions reported unsafe: {len(self._unsafe_in_safe)}")
def main():
default_verilator_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), ".."))
default_verilator_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Check function annotations for correctness""",
epilog=
"""Copyright 2022-2024 by Wilson Snyder. Verilator is free software;
epilog="""Copyright 2022-2024 by Wilson Snyder. Verilator is free software;
you can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Apache License 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Apache-2.0""")
@@ -1142,8 +1066,7 @@ def main():
type=int,
default=0,
help="Number of parallel jobs to use.")
parser.add_argument(
"--compile-commands-dir",
parser.add_argument("--compile-commands-dir",
type=str,
default=None,
help="Path to directory containing compile_commands.json.")
@@ -1151,20 +1074,15 @@ def main():
type=str,
default=None,
help="Extra flags passed to clang++.")
parser.add_argument(
"--compilation-root",
parser.add_argument("--compilation-root",
type=str,
default=os.getcwd(),
help="Directory used as CWD when compiling source files.")
parser.add_argument(
"-c",
parser.add_argument("-c",
"--precompile",
action="append",
help="Header file to be precompiled and cached at the start.")
parser.add_argument("file",
type=str,
nargs="+",
help="Source file to analyze.")
parser.add_argument("file", type=str, nargs="+", help="Source file to analyze.")
cmdline = parser.parse_args()
@@ -1179,8 +1097,7 @@ def main():
compdb: Optional[CompilationDatabase] = None
if cmdline.compile_commands_dir:
compdb = CompilationDatabase.fromDirectory(
cmdline.compile_commands_dir)
compdb = CompilationDatabase.fromDirectory(cmdline.compile_commands_dir)
if cmdline.cxxflags is not None:
common_cxxflags = shlex.split(cmdline.cxxflags)
@@ -1230,8 +1147,7 @@ def main():
summary_printer.handle_diagnostic, verilator_root)
else:
run_parallel_analysis(compile_commands_list, precompile_commands_list,
summary_printer.handle_diagnostic, cmdline.jobs,
verilator_root)
summary_printer.handle_diagnostic, cmdline.jobs, verilator_root)
summary_printer.print_summary(verilator_root)

View File

@@ -58,15 +58,12 @@ def test():
if not Args.scenarios or re.match('dist', Args.scenarios):
run("make examples VERILATOR_NO_OPT_BUILD=1")
run("make test_regress VERILATOR_NO_OPT_BUILD=1" +
(" SCENARIOS='" + Args.scenarios +
"'" if Args.scenarios else "") +
(" DRIVER_HASHSET='--hashset=" + Args.hashset +
"'" if Args.hashset else "") +
(" SCENARIOS='" + Args.scenarios + "'" if Args.scenarios else "") +
(" DRIVER_HASHSET='--hashset=" + Args.hashset + "'" if Args.hashset else "") +
('' if Args.stop else ' || true'))
else:
for test in Args.tests:
if not os.path.exists(test) and os.path.exists(
"test_regress/t/" + test):
if not os.path.exists(test) and os.path.exists("test_regress/t/" + test):
test = "test_regress/t/" + test
run(test)
ci_fold_end()
@@ -78,8 +75,7 @@ def test():
os.makedirs(cc_dir, exist_ok=True)
os.makedirs(cc_dir + "/info", exist_ok=True)
with subprocess.Popen("find . -print | grep .gcda",
shell=True,
with subprocess.Popen("find . -print | grep .gcda", shell=True,
stdout=subprocess.PIPE) as sp:
datout = sp.stdout.read()
@@ -98,8 +94,7 @@ def test():
del dats[dat]
break
with subprocess.Popen("find . -print | grep .gcno",
shell=True,
with subprocess.Popen("find . -print | grep .gcno", shell=True,
stdout=subprocess.PIPE) as sp:
datout = sp.stdout.read()
@@ -116,8 +111,7 @@ def test():
if gbase in gcnos:
os.symlink(gcnos[gbase], gcno)
else:
print("MISSING .gcno for a .gcda: " + gcno,
file=sys.stderr)
print("MISSING .gcno for a .gcda: " + gcno, file=sys.stderr)
ci_fold_end()
if Args.stage_enabled[5]:
@@ -142,8 +136,7 @@ def test():
if Args.stage_enabled[11]:
ci_fold_start("dirs")
print("Stage 11: Cleanup paths")
cleanup_abs_paths_info(cc_dir, cc_dir + "/app_total.info",
cc_dir + "/app_total.info")
cleanup_abs_paths_info(cc_dir, cc_dir + "/app_total.info", cc_dir + "/app_total.info")
ci_fold_end()
if Args.stage_enabled[12]:
@@ -164,17 +157,15 @@ def test():
inc = "--include " + inc
if exc != '':
exc = "--exclude " + exc
run("cd " + cc_dir + " ; " + RealPath +
"/fastcov.py -C app_total.info " + inc + " " + exc +
" -x --lcov -o app_total_f.info")
run("cd " + cc_dir + " ; " + RealPath + "/fastcov.py -C app_total.info " + inc + " " +
exc + " -x --lcov -o app_total_f.info")
ci_fold_end()
if Args.stage_enabled[17]:
ci_fold_start("report")
print("Stage 17: Create HTML")
run("cd " + cc_dir + " ; genhtml app_total_f.info --demangle-cpp" +
" --rc lcov_branch_coverage=1 --rc genhtml_hi_limit=100 --output-directory html"
)
" --rc lcov_branch_coverage=1 --rc genhtml_hi_limit=100 --output-directory html")
ci_fold_end()
if Args.stage_enabled[18]:
@@ -186,8 +177,7 @@ def test():
# So, remove gcno files before calling codecov
upload_dir = "nodist/obj_dir/upload"
os.makedirs(upload_dir, exist_ok=True)
cmd = ("ci/codecov -v upload-process -Z" + " -f " + cc_dir +
"/app_total.info )")
cmd = ("ci/codecov -v upload-process -Z" + " -f " + cc_dir + "/app_total.info )")
print("print: Not running:")
print(" export CODECOV_TOKEN=<hidden>")
print(" find . -name '*.gcno' -exec rm {} \\;")
@@ -198,9 +188,7 @@ def test():
print("*-* All Finished *-*")
print("")
print("* See report in " + cc_dir + "/html/index.html")
print(
"* Remember to make distclean && ./configure before working on non-coverage"
)
print("* Remember to make distclean && ./configure before working on non-coverage")
def clone_sources(cc_dir):
@@ -209,9 +197,8 @@ def clone_sources(cc_dir):
for globf in Source_Globs:
for infile in glob.glob(globf):
if re.match(r'^/', infile):
sys.exit(
"%Error: source globs should be relative not absolute filenames, "
+ infile)
sys.exit("%Error: source globs should be relative not absolute filenames, " +
infile)
outfile = cc_dir + "/" + infile
outpath = re.sub(r'/[^/]*$', '', outfile, count=1)
os.makedirs(outpath, exist_ok=True)
@@ -252,10 +239,8 @@ def clone_sources(cc_dir):
done = True
ofh.write(line + "\n")
print("Number of source lines automatically LCOV_EXCL_LINE'ed: %d" %
excluded_lines)
print("Number of source lines automatically LCOV_EXCL_BR_LINE'ed: %d" %
excluded_br_lines)
print("Number of source lines automatically LCOV_EXCL_LINE'ed: %d" % excluded_lines)
print("Number of source lines automatically LCOV_EXCL_BR_LINE'ed: %d" % excluded_br_lines)
def cleanup_abs_paths_info(cc_dir, infile, outfile):
@@ -263,20 +248,11 @@ def cleanup_abs_paths_info(cc_dir, infile, outfile):
with open(infile, "r", encoding="utf8") as fh:
for line in fh:
if re.search(r'^SF:', line) and not re.search(r'^SF:/usr/', line):
line = re.sub(os.environ['VERILATOR_ROOT'] + '/',
'',
line,
count=1)
line = re.sub(os.environ['VERILATOR_ROOT'] + '/', '', line, count=1)
line = re.sub(cc_dir + '/', '', line, count=1)
line = re.sub(r'^SF:.*?/include/',
'SF:include/',
line,
count=1)
line = re.sub(r'^SF:.*?/include/', 'SF:include/', line, count=1)
line = re.sub(r'^SF:.*?/src/', 'SF:src/', line, count=1)
line = re.sub(r'^SF:.*?/test_regress/',
'SF:test_regress/',
line,
count=1)
line = re.sub(r'^SF:.*?/test_regress/', 'SF:test_regress/', line, count=1)
line = re.sub(r'obj_dbg/verilog.y$', 'verilog.y', line)
# print("Remaining SF: "+line)
lines.append(line)
@@ -358,15 +334,13 @@ def ci_fold_end():
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=
"""code_coverage builds Verilator with C++ coverage support and runs
description="""code_coverage builds Verilator with C++ coverage support and runs
tests with coverage enabled. This will rebuild the current object
files. Run as:
cd $VERILATOR_ROOT
nodist/code_coverage""",
epilog=
"""Copyright 2019-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2019-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
@@ -380,8 +354,7 @@ parser.add_argument('--hashset',
parser.add_argument('--scenarios',
action='store',
help='pass test scenarios onto driver.pl test harness')
parser.add_argument(
'--stages',
parser.add_argument('--stages',
'--stage',
action='store',
help='runs a specific stage or range of stages (see the script)')
@@ -390,9 +363,7 @@ parser.add_argument(
'--test',
action='append',
default=[],
help=
'Instead of normal regressions, run the specified test(s), may be used multiple times'
)
help='Instead of normal regressions, run the specified test(s), may be used multiple times')
parser.add_argument('--no-stop',
dest='stop',
action='store_false',

View File

@@ -20,8 +20,7 @@ def dotread(filename):
vnum = 0
vertex_re = re.compile(r'^\t([a-zA-Z0-9_]+)\t(.*)$')
edge_re = re.compile(
r'^\t([a-zA-Z0-9_]+)\s+->\s+([a-zA-Z0-9_]+)\s*(.*)$')
edge_re = re.compile(r'^\t([a-zA-Z0-9_]+)\s+->\s+([a-zA-Z0-9_]+)\s*(.*)$')
for line in fh:
vertex_match = re.search(vertex_re, line)
@@ -29,11 +28,7 @@ def dotread(filename):
if vertex_match:
if vertex_match.group(1) != 'nTITLE':
header = False
Vertexes.append({
'num': vnum,
'line': line,
'name': vertex_match.group(1)
})
Vertexes.append({'num': vnum, 'line': line, 'name': vertex_match.group(1)})
vnum += 1
elif edge_match:
fromv = edge_match.group(1)
@@ -65,14 +60,13 @@ def cwrite(filename):
fh.write("void V3GraphTestImport::dotImport() {\n")
fh.write(" auto* gp = &m_graph;\n")
for ver in sorted(Vertexes, key=lambda ver: ver['num']):
fh.write(
" auto* %s = new V3GraphTestVertex{gp, \"%s\"}; if (%s) {}\n"
% (ver['name'], ver['name'], ver['name']))
fh.write(" auto* %s = new V3GraphTestVertex{gp, \"%s\"}; if (%s) {}\n" %
(ver['name'], ver['name'], ver['name']))
fh.write("\n")
for edge in Edges:
fh.write(" new V3GraphEdge{gp, %s, %s, %s, %s};\n" %
(edge['from'], edge['to'], edge['weight'],
"true" if edge['cutable'] else "false"))
fh.write(
" new V3GraphEdge{gp, %s, %s, %s, %s};\n" %
(edge['from'], edge['to'], edge['weight'], "true" if edge['cutable'] else "false"))
fh.write("}\n")
@@ -82,22 +76,17 @@ def cwrite(filename):
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=
"""dot_importer takes a graphvis .dot file and converts into .cpp file.
description="""dot_importer takes a graphvis .dot file and converts into .cpp file.
This x.cpp file is then manually included in V3GraphTest.cpp to verify
various sub-algorithms.""",
epilog=
"""Copyright 2005-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2005-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('filename', help='input .dot filename to process')
Args = parser.parse_args()

View File

@@ -19,9 +19,12 @@ from argparse import ArgumentParser
def interesting(s):
if 'assert' in s: return 1
if 'Assert' in s: return 1
if 'Aborted' in s: return 1
if 'assert' in s:
return 1
if 'Assert' in s:
return 1
if 'Aborted' in s:
return 1
if 'terminate' in s:
if 'unterminated' in s:
return 0
@@ -41,8 +44,7 @@ def main():
for infile in glob(args.dir + '/*'):
# Input filenames are known not to contain spaces or other unusual
# characters, therefore this works.
status, output = getstatusoutput('../../bin/verilator_bin --cc ' +
infile)
status, output = getstatusoutput('../../bin/verilator_bin --cc ' + infile)
if interesting(output):
print(infile)
print(status)

View File

@@ -51,9 +51,11 @@ def write_file(filename, contents):
def parse_line(s):
# str->maybe str
if len(s) == 0: return None
if len(s) == 0:
return None
part = skip_while(lambda x: x != '"', s)
if len(part) == 0 or part[0] != '"': return None
if len(part) == 0 or part[0] != '"':
return None
literal_part = take_while(lambda x: x != '"', part[1:])
return ''.join(filter(lambda x: x != '\\', literal_part))

View File

@@ -41,8 +41,7 @@ def test():
run("/bin/mkdir -p " + prefix)
run("cd " + blddir + " && make install")
run("test -e " + prefix + "/share/man/man1/verilator.1")
run("test -e " + prefix +
"/share/verilator/examples/make_tracing_c/Makefile")
run("test -e " + prefix + "/share/verilator/examples/make_tracing_c/Makefile")
run("test -e " + prefix + "/share/verilator/include/verilated.h")
run("test -e " + prefix + "/bin/verilator")
run("test -e " + prefix + "/bin/verilator_bin")
@@ -58,10 +57,8 @@ def test():
run("/bin/mkdir -p " + odir)
path = prefix + "/bin" + ":" + prefix + "/share/bin"
write_verilog(odir)
run("cd " + odir + " && PATH=" + path +
":$PATH verilator --cc top.v --exe sim_main.cpp")
run("cd " + odir + "/obj_dir && PATH=" + path +
":$PATH make -f Vtop.mk")
run("cd " + odir + " && PATH=" + path + ":$PATH verilator --cc top.v --exe sim_main.cpp")
run("cd " + odir + "/obj_dir && PATH=" + path + ":$PATH make -f Vtop.mk")
run("cd " + odir + " && PATH=" + path + ":$PATH obj_dir/Vtop")
# run a test using exact path to binary
@@ -72,8 +69,7 @@ def test():
run("/bin/mkdir -p " + odir)
write_verilog(odir)
bin1 = prefix + "/bin"
run("cd " + odir + " && " + bin1 +
"/verilator --cc top.v --exe sim_main.cpp")
run("cd " + odir + " && " + bin1 + "/verilator --cc top.v --exe sim_main.cpp")
run("cd " + odir + "/obj_dir && make -f Vtop.mk")
run("cd " + odir + "/obj_dir && ./Vtop")
@@ -88,8 +84,7 @@ def write_verilog(odir):
def cleanenv():
for var in os.environ:
if var in ('VERILATOR_ROOT', 'VERILATOR_INCLUDE',
'VERILATOR_NO_OPT_BUILD'):
if var in ('VERILATOR_ROOT', 'VERILATOR_INCLUDE', 'VERILATOR_NO_OPT_BUILD'):
print("unset %s # Was '%s'" % (var, os.environ[var]))
del os.environ[var]
@@ -113,21 +108,16 @@ def run(command):
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=
"""install_test performs several make-and-install iterations to verify the
description="""install_test performs several make-and-install iterations to verify the
Verilator kit. It isn't part of the normal "make test" due to the number
of builds required.""",
epilog=
"""Copyright 2009-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2009-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--debug',
action='store_const',
const=9,
help='enable debug')
parser.add_argument('--debug', action='store_const', const=9, help='enable debug')
parser.add_argument('--stage',
type=int,
default=0,


@@ -101,8 +101,7 @@ parser = argparse.ArgumentParser(
allow_abbrev=False,
prog="log_changes",
description="Create example entries for 'Changes' from parsing 'git log'",
epilog=
"""Copyright 2019-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2019-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.


@@ -47,9 +47,12 @@ class AstseeCmd(gdb.Command):
def _null_check(self, old, new):
err = ""
if old == "<nullptr>\n": err += "old == <nullptr>\n"
if new == "<nullptr>\n": err += "new == <nullptr>"
if err: raise gdb.GdbError(err.strip("\n"))
if old == "<nullptr>\n":
err += "old == <nullptr>\n"
if new == "<nullptr>\n":
err += "new == <nullptr>"
if err:
raise gdb.GdbError(err.strip("\n"))
def invoke(self, arg_str, from_tty):
from astsee import verilator_cli as astsee # pylint: disable=import-error,import-outside-toplevel
@@ -58,8 +61,8 @@ class AstseeCmd(gdb.Command):
# We hack `astsee_verilator`'s arg parser to find arguments with nodes
# After finding them, we replace them with proper files
astsee_args = astsee.parser.parse_args(gdb.string_to_argv(arg_str))
with _vltgdb_tmpfile() as oldfile, _vltgdb_tmpfile(
) as newfile, _vltgdb_tmpfile() as metafile:
with _vltgdb_tmpfile() as oldfile, _vltgdb_tmpfile() as newfile, _vltgdb_tmpfile(
) as metafile:
if astsee_args.file:
_vltgdb_fwrite(oldfile, _vltgdb_get_dump(astsee_args.file))
astsee_args.file = oldfile.name
@@ -68,8 +71,7 @@ class AstseeCmd(gdb.Command):
astsee_args.newfile = newfile.name
if astsee_args.meta is None:
# pass
gdb.execute(
f'call AstNode::dumpJsonMetaFileGdb("{metafile.name}")')
gdb.execute(f'call AstNode::dumpJsonMetaFileGdb("{metafile.name}")')
astsee_args.meta = metafile.name
try:
astsee.main(astsee_args)
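For context, _vltgdb_tmpfile and _vltgdb_fwrite are helpers defined earlier in this file, outside the hunks shown. A plausible minimal sketch of the tmpfile helper (an assumption, not the file's actual code):

import tempfile

def _vltgdb_tmpfile():
    # named temporary file that astsee can reopen by path; removed on close
    return tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=True)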


@@ -89,8 +89,7 @@ class Node:
assert not self.isCompleted
# Sort sub-classes and convert to tuple, which marks completion
self._subClasses = tuple(
sorted(self._subClasses,
key=lambda _: (bool(_._subClasses), _.name))) # pylint: disable=protected-access
sorted(self._subClasses, key=lambda _: (bool(_._subClasses), _.name))) # pylint: disable=protected-access
self._ordIdx = ordIdx
ordIdx = ordIdx + 1
@@ -128,8 +127,7 @@ class Node:
if self.superClass is None:
self._allSuperClasses = ()
else:
self._allSuperClasses = self.superClass.allSuperClasses + (
self.superClass, )
self._allSuperClasses = self.superClass.allSuperClasses + (self.superClass, )
return self._allSuperClasses
@property
@@ -139,8 +137,7 @@ class Node:
if self.isLeaf:
self._allSubClasses = ()
else:
self._allSubClasses = self.subClasses + tuple(
_ for subClass in self.subClasses
self._allSubClasses = self.subClasses + tuple(_ for subClass in self.subClasses
for _ in subClass.allSubClasses)
return self._allSubClasses
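allSuperClasses and allSubClasses above share one compute-once idiom: a member initialized to None is filled on first access, and the cached tuple is returned afterwards. A stripped-down illustration of the pattern (hypothetical class, not from astgen):

class Chain:
    def __init__(self, superClass=None):
        self.superClass = superClass
        self._allSuperClasses = None

    @property
    def allSuperClasses(self):
        # computed lazily on first access, then memoized on the instance
        if self._allSuperClasses is None:
            if self.superClass is None:
                self._allSuperClasses = ()
            else:
                self._allSuperClasses = self.superClass.allSuperClasses + (self.superClass, )
        return self._allSuperClasses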
@@ -210,8 +207,7 @@ class Cpt:
self._exec_syms = {}
def error(self, txt):
sys.exit("%%Error: %s:%d: %s" %
(self.in_filename, self.in_linenum, txt))
sys.exit("%%Error: %s:%d: %s" % (self.in_filename, self.in_linenum, txt))
def print(self, txt):
self.out_lines.append(txt)
@@ -220,8 +216,7 @@ class Cpt:
self.out_lines.append(func)
def _output_line(self):
self.print("#line " + str(self.out_linenum + 2) + " \"" +
self.out_filename + "\"\n")
self.print("#line " + str(self.out_linenum + 2) + " \"" + self.out_filename + "\"\n")
def process(self, in_filename, out_filename):
self.in_filename = in_filename
@@ -234,8 +229,7 @@ class Cpt:
for line in fhi:
ln += 1
if not didln:
self.print("#line " + str(ln) + " \"" + self.in_filename +
"\"\n")
self.print("#line " + str(ln) + " \"" + self.in_filename + "\"\n")
didln = True
match = re.match(r'^\s+(TREE.*)$', line)
if match:
@@ -245,8 +239,8 @@ class Cpt:
self.output_func(lambda self: self._output_line())
self.tree_line(func)
didln = False
elif not re.match(r'^\s*(#define|/[/\*])\s*TREE',
line) and re.search(r'\s+TREE', line):
elif not re.match(r'^\s*(#define|/[/\*])\s*TREE', line) and re.search(
r'\s+TREE', line):
self.error("Unknown astgen line: " + line)
else:
self.print(line)
@@ -275,8 +269,7 @@ class Cpt:
# 1 2 3 4
r'TREEOP(1?)([ACSV]?)\s*\(\s*\"([^\"]*)\"\s*,\s*\"([^\"]*)\"\s*\)',
func)
match_skip = re.search(r'TREE_SKIP_VISIT\s*\(\s*\"([^\"]*)\"\s*\)',
func)
match_skip = re.search(r'TREE_SKIP_VISIT\s*\(\s*\"([^\"]*)\"\s*\)', func)
if match:
order = match.group(1)
@@ -314,19 +307,17 @@ class Cpt:
if re.match(r'^\$([a-zA-Z0-9]+)$', subnode):
continue # "$lhs" is just a comment that this op has a lhs
subnodeif = subnode
subnodeif = re.sub(
r'\$([a-zA-Z0-9]+)\.cast([A-Z][A-Za-z0-9]+)$',
subnodeif = re.sub(r'\$([a-zA-Z0-9]+)\.cast([A-Z][A-Za-z0-9]+)$',
r'VN_IS(nodep->\1(),\2)', subnodeif)
subnodeif = re.sub(r'\$([a-zA-Z0-9]+)\.([a-zA-Z0-9]+)$',
r'nodep->\1()->\2()', subnodeif)
subnodeif = re.sub(r'\$([a-zA-Z0-9]+)\.([a-zA-Z0-9]+)$', r'nodep->\1()->\2()',
subnodeif)
subnodeif = self.add_nodep(subnodeif)
if mif != "" and subnodeif != "":
mif += " && "
mif += subnodeif
exec_func = self.treeop_exec_func(to)
exec_func = re.sub(
r'([-()a-zA-Z0-9_>]+)->cast([A-Z][A-Za-z0-9]+)\(\)',
exec_func = re.sub(r'([-()a-zA-Z0-9_>]+)->cast([A-Z][A-Za-z0-9]+)\(\)',
r'VN_CAST(\1,\2)', exec_func)
if typen not in self.treeop:
@@ -431,14 +422,12 @@ class Cpt:
self._exec_nsyms = 0
self._exec_syms_recurse(aref)
for sym in sorted(self._exec_syms.keys(),
key=lambda val: self._exec_syms[val]):
for sym in sorted(self._exec_syms.keys(), key=lambda val: self._exec_syms[val]):
argnp = self._exec_syms[sym]
arg = self.add_nodep(sym)
out += "AstNodeExpr* " + argnp + " = " + arg + "->unlinkFrBack();\n"
out += "AstNodeExpr* newp = " + self._exec_new_recurse(
aref) + ";\n"
out += "AstNodeExpr* newp = " + self._exec_new_recurse(aref) + ";\n"
out += "nodep->replaceWith(newp);"
out += "VL_DO_DANGLING(nodep->deleteTree(), nodep);"
elif func == "NEVER":
@@ -454,19 +443,15 @@ class Cpt:
self.tree_base()
def tree_match(self):
self.print(
" // TREEOP functions, each return true if they matched & transformed\n"
)
self.print(" // TREEOP functions, each return true if they matched & transformed\n")
for base in sorted(self.treeop.keys()):
for typefunc in self.treeop[base]:
self.print(" // Generated by astgen\n")
self.print(" bool " + typefunc['match_func'] + "(Ast" +
base + "* nodep) {\n")
self.print(" bool " + typefunc['match_func'] + "(Ast" + base + "* nodep) {\n")
self.print("\t// " + typefunc['comment'] + "\n")
self.print("\tif (" + typefunc['match_if'] + ") {\n")
self.print("\t UINFO(" + str(typefunc['uinfo_level']) +
", cvtToHex(nodep)" + " << \" " +
typefunc['uinfo'] + "\\n\");\n")
self.print("\t UINFO(" + str(typefunc['uinfo_level']) + ", cvtToHex(nodep)" +
" << \" " + typefunc['uinfo'] + "\\n\");\n")
self.print("\t " + typefunc['exec_func'] + "\n")
self.print("\t return true;\n")
self.print("\t}\n")
@@ -475,9 +460,7 @@ class Cpt:
def tree_base(self):
self.print(" // TREEOP visitors, call each base type's match\n")
self.print(
" // Bottom class up, as more simple transforms are generally better\n"
)
self.print(" // Bottom class up, as more simple transforms are generally better\n")
for node in AstNodeList:
out_for_type_sc = []
out_for_type = []
@@ -488,15 +471,11 @@ class Cpt:
if base not in self.treeop:
continue
for typefunc in self.treeop[base]:
lines = [
" if (" + typefunc['match_func'] +
"(nodep)) return;\n"
]
lines = [" if (" + typefunc['match_func'] + "(nodep)) return;\n"]
if typefunc['short_circuit']: # short-circuit match fn
out_for_type_sc.extend(lines)
else: # Standard match fn
if typefunc[
'order']: # TREEOP1's go in front of others
if typefunc['order']: # TREEOP1's go in front of others
out_for_type = lines + out_for_type
else:
out_for_type.extend(lines)
@@ -509,30 +488,24 @@ class Cpt:
# For types without short-circuits, we just use iterateChildren, which
# saves one comparison.
if len(out_for_type_sc) > 0: # Short-circuited types
self.print(
" // Generated by astgen with short-circuiting\n" +
" void visit(Ast" + node.name +
"* nodep) override {\n" +
self.print(" // Generated by astgen with short-circuiting\n" +
" void visit(Ast" + node.name + "* nodep) override {\n" +
" iterateAndNextNull(nodep->{op1}());\n".format(
op1=node.getOp(1)[0]) + "".join(out_for_type_sc))
if out_for_type[0]:
self.print(
" iterateAndNextNull(nodep->{op2}());\n".format(
op2=node.getOp(2)[0]))
" iterateAndNextNull(nodep->{op2}());\n".format(op2=node.getOp(2)[0]))
if node.isSubClassOf(AstNodes["NodeTriop"]):
self.print(
" iterateAndNextNull(nodep->{op3}());\n".
format(op3=node.getOp(3)[0]))
self.print(" iterateAndNextNull(nodep->{op3}());\n".format(
op3=node.getOp(3)[0]))
self.print("".join(out_for_type) + " }\n")
elif len(out_for_type) > 0: # Other types with something to print
skip = node.name in self.tree_skip_visit
gen = "Gen" if skip else ""
virtual = "virtual " if skip else ""
override = "" if skip else " override"
self.print(
" // Generated by astgen\n" + " " + virtual +
"void visit" + gen + "(Ast" + node.name + "* nodep)" +
override + " {\n" +
self.print(" // Generated by astgen\n" + " " + virtual + "void visit" + gen +
"(Ast" + node.name + "* nodep)" + override + " {\n" +
("" if skip else " iterateChildren(nodep);\n") +
''.join(out_for_type) + " }\n")
@@ -565,8 +538,7 @@ def read_types(filename, Nodes, prefix):
def error(lineno, message):
nonlocal hasErrors
print(filename + ":" + str(lineno) + ": %Error: " + message,
file=sys.stderr)
print(filename + ":" + str(lineno) + ": %Error: " + message, file=sys.stderr)
hasErrors = True
node = None
@@ -579,8 +551,7 @@ def read_types(filename, Nodes, prefix):
if not hasAstgenMembers:
error(
node.lineno,
"'{p}{n}' does not contain 'ASTGEN_MEMBERS_{p}{n};'".format(
p=prefix, n=node.name))
"'{p}{n}' does not contain 'ASTGEN_MEMBERS_{p}{n};'".format(p=prefix, n=node.name))
hasAstgenMembers = False
with open(filename, "r", encoding="utf8") as fh:
@@ -598,8 +569,7 @@ def read_types(filename, Nodes, prefix):
classn = re.sub(r'^' + prefix, '', classn)
supern = re.sub(r'^' + prefix, '', supern)
if not supern:
sys.exit("%Error: '{p}{c}' has no super-class".format(
p=prefix, c=classn))
sys.exit("%Error: '{p}{c}' has no super-class".format(p=prefix, c=classn))
checkFinishedNode(node)
superClass = Nodes[supern]
node = Node(classn, superClass, filename, lineno)
@@ -608,8 +578,7 @@ def read_types(filename, Nodes, prefix):
if not node:
continue
if re.match(r'^\s*ASTGEN_MEMBERS_' + prefix + node.name + ';',
line):
if re.match(r'^\s*ASTGEN_MEMBERS_' + prefix + node.name + ';', line):
hasAstgenMembers = True
if prefix != "Ast":
@@ -631,36 +600,29 @@ def read_types(filename, Nodes, prefix):
ident = ident.strip()
if not sep or not re.match(r'^\w+$', ident):
error(
lineno, "Malformed '@astgen " + what +
"' directive (expecting '" + what +
" := <identifier> : <type>': " + decl)
lineno, "Malformed '@astgen " + what + "' directive (expecting '" +
what + " := <identifier> : <type>': " + decl)
else:
kind = parseOpType(kind)
if not kind:
error(
lineno, "Bad type for '@astgen " + what +
"' (expecting Ast*, Optional[Ast*], or List[Ast*]):"
+ decl)
"' (expecting Ast*, Optional[Ast*], or List[Ast*]):" + decl)
elif node.getOp(n) is not None:
error(
lineno, "Already defined " + what + " for " +
node.name)
error(lineno, "Already defined " + what + " for " + node.name)
else:
node.addOp(n, ident, *kind)
elif what in ("alias op1", "alias op2", "alias op3",
"alias op4"):
elif what in ("alias op1", "alias op2", "alias op3", "alias op4"):
n = int(what[-1])
ident = rest.strip()
if not re.match(r'^\w+$', ident):
error(
lineno, "Malformed '@astgen " + what +
"' directive (expecting '" + what +
" := <identifier>': " + decl)
lineno, "Malformed '@astgen " + what + "' directive (expecting '" +
what + " := <identifier>': " + decl)
else:
op = node.getOp(n)
if op is None:
error(lineno,
"Aliased op" + str(n) + " is not defined")
error(lineno, "Aliased op" + str(n) + " is not defined")
else:
node.addOp(n, ident, *op[1:])
elif what == "ptr":
@@ -670,8 +632,7 @@ def read_types(filename, Nodes, prefix):
if not kind:
error(
lineno, "Bad type for '@astgen " + what +
"' (expecting Ast*, Optional[Ast*], or List[Ast*]):"
+ decl)
"' (expecting Ast*, Optional[Ast*], or List[Ast*]):" + decl)
if not re.match(r'^m_(\w+)$', ident):
error(
lineno, "Malformed '@astgen ptr'"
@@ -680,20 +641,15 @@ def read_types(filename, Nodes, prefix):
node.addPtr(ident, *kind)
else:
error(
lineno,
"Malformed @astgen what (expecting 'op1'..'op4'," +
lineno, "Malformed @astgen what (expecting 'op1'..'op4'," +
" 'alias op1'.., 'ptr'): " + what)
else:
line = re.sub(r'//.*$', '', line)
if re.match(r'.*[Oo]p[1-9].*', line):
error(lineno,
"Use generated accessors to access op<N> operands")
error(lineno, "Use generated accessors to access op<N> operands")
if re.match(
r'^\s*Ast[A-Z][A-Za-z0-9_]+\s*\*(\s*const)?\s+m_[A-Za-z0-9_]+\s*;',
line):
error(lineno,
"Use '@astgen ptr' for Ast pointer members: " + line)
if re.match(r'^\s*Ast[A-Z][A-Za-z0-9_]+\s*\*(\s*const)?\s+m_[A-Za-z0-9_]+\s*;', line):
error(lineno, "Use '@astgen ptr' for Ast pointer members: " + line)
checkFinishedNode(node)
if hasErrors:
@@ -707,33 +663,24 @@ def check_types(sortedTypes, prefix, abstractPrefix):
for node in sortedTypes:
if re.match(r'^' + abstractPrefix, node.name):
if node.isLeaf:
sys.exit(
"%Error: Final {b} subclasses must not be named {b}*: {p}{n}"
.format(b=baseClass, p=prefix, n=node.name))
sys.exit("%Error: Final {b} subclasses must not be named {b}*: {p}{n}".format(
b=baseClass, p=prefix, n=node.name))
else:
if not node.isLeaf:
sys.exit(
"%Error: Non-final {b} subclasses must be named {b}*: {p}{n}"
.format(b=baseClass, p=prefix, n=node.name))
sys.exit("%Error: Non-final {b} subclasses must be named {b}*: {p}{n}".format(
b=baseClass, p=prefix, n=node.name))
# Check ordering of node definitions
hasOrderingError = False
files = tuple(
sorted(set(_.file for _ in sortedTypes if _.file is not None)))
files = tuple(sorted(set(_.file for _ in sortedTypes if _.file is not None)))
for file in files:
nodes = tuple(filter(lambda _, f=file: _.file == f, sortedTypes))
expectOrder = tuple(sorted(nodes, key=lambda _: (_.isLeaf, _.ordIdx)))
actualOrder = tuple(sorted(nodes, key=lambda _: _.lineno))
expect = {
node: pred
for pred, node in zip((None, ) + expectOrder[:-1], expectOrder)
}
actual = {
node: pred
for pred, node in zip((None, ) + actualOrder[:-1], actualOrder)
}
expect = {node: pred for pred, node in zip((None, ) + expectOrder[:-1], expectOrder)}
actual = {node: pred for pred, node in zip((None, ) + actualOrder[:-1], actualOrder)}
for node in nodes:
if expect[node] != actual[node]:
hasOrderingError = True
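The expect/actual maps above pair each node with its predecessor by zipping the ordering against itself shifted by one; a definition is out of order exactly when the two predecessors differ. A tiny demo of the idiom (made-up names):

order = ('NodeExpr', 'Add', 'Sub')
pred = {node: p for p, node in zip((None, ) + order[:-1], order)}
assert pred == {'NodeExpr': None, 'Add': 'NodeExpr', 'Sub': 'Add'}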
@@ -749,8 +696,7 @@ def check_types(sortedTypes, prefix, abstractPrefix):
file=sys.stderr)
if hasOrderingError:
sys.exit(
"%Error: Stopping due to out of order definitions listed above")
sys.exit("%Error: Stopping due to out of order definitions listed above")
def read_stages(filename):
@@ -783,8 +729,7 @@ def read_refs(filename):
if ref not in ClassRefs:
ClassRefs[ref] = {'newed': {}, 'used': {}}
ClassRefs[ref]['used'][basename] = 1
for match in re.finditer(
r'(VN_IS|VN_AS|VN_CAST)\([^.]+, ([A-Za-z0-9_]+)', line):
for match in re.finditer(r'(VN_IS|VN_AS|VN_CAST)\([^.]+, ([A-Za-z0-9_]+)', line):
ref = "Ast" + match.group(2)
if ref not in ClassRefs:
ClassRefs[ref] = {'newed': {}, 'used': {}}
@@ -796,9 +741,7 @@ def open_file(filename):
if re.search(r'\.txt$', filename):
fh.write("// Generated by astgen\n")
else:
fh.write(
'// Generated by astgen // -*- mode: C++; c-file-style: "cc-mode" -*-'
+ "\n")
fh.write('// Generated by astgen // -*- mode: C++; c-file-style: "cc-mode" -*-' + "\n")
return fh
@@ -808,9 +751,7 @@ def open_file(filename):
def write_report(filename):
with open_file(filename) as fh:
fh.write(
"Processing stages (approximate, based on order in Verilator.cpp):\n"
)
fh.write("Processing stages (approximate, based on order in Verilator.cpp):\n")
for classn in sorted(Stages.keys(), key=lambda val: Stages[val]):
fh.write(" " + classn + "\n")
@@ -831,14 +772,12 @@ def write_report(filename):
refs = ClassRefs["Ast" + node.name]
fh.write(" newed: ")
for stage in sorted(refs['newed'].keys(),
key=lambda val: Stages[val]
if (val in Stages) else -1):
key=lambda val: Stages[val] if (val in Stages) else -1):
fh.write(stage + " ")
fh.write("\n")
fh.write(" used: ")
for stage in sorted(refs['used'].keys(),
key=lambda val: Stages[val]
if (val in Stages) else -1):
key=lambda val: Stages[val] if (val in Stages) else -1):
fh.write(stage + " ")
fh.write("\n")
fh.write("\n")
@@ -852,8 +791,7 @@ def write_report(filename):
def write_forward_class_decls(prefix, nodeList):
with open_file("V3{p}__gen_forward_class_decls.h".format(p=prefix)) as fh:
for node in nodeList:
fh.write("class {p}{n:<17} // ".format(p=prefix,
n=node.name + ";"))
fh.write("class {p}{n:<17} // ".format(p=prefix, n=node.name + ";"))
for superClass in node.allSuperClasses:
fh.write("{p}{n:<12} ".format(p=prefix, n=superClass.name))
fh.write("\n")
@@ -863,8 +801,7 @@ def write_visitor_decls(prefix, nodeList):
with open_file("V3{p}__gen_visitor_decls.h".format(p=prefix)) as fh:
for node in nodeList:
if not node.isRoot:
fh.write("virtual void visit({p}{n}*);\n".format(p=prefix,
n=node.name))
fh.write("virtual void visit({p}{n}*);\n".format(p=prefix, n=node.name))
def write_visitor_defns(prefix, nodeList, visitor):
@@ -873,13 +810,8 @@ def write_visitor_defns(prefix, nodeList, visitor):
for node in nodeList:
base = node.superClass
if base is not None:
fh.write(
"void {c}::visit({p}{n}* {v}) {{ visit(static_cast<{p}{b}*>({v})); }}\n"
.format(c=visitor,
p=prefix,
n=node.name,
b=base.name,
v=variable))
fh.write("void {c}::visit({p}{n}* {v}) {{ visit(static_cast<{p}{b}*>({v})); }}\n".
format(c=visitor, p=prefix, n=node.name, b=base.name, v=variable))
def write_type_enum(prefix, nodeList):
@@ -887,27 +819,21 @@ def write_type_enum(prefix, nodeList):
with open_file("V3{p}__gen_type_enum.h".format(p=prefix)) as fh:
fh.write(" enum en : uint16_t {\n")
for node in sorted(filter(lambda _: _.isLeaf, nodeList),
key=lambda _: _.typeId):
fh.write(" at{t} = {n},\n".format(t=node.name,
n=node.typeId))
for node in sorted(filter(lambda _: _.isLeaf, nodeList), key=lambda _: _.typeId):
fh.write(" at{t} = {n},\n".format(t=node.name, n=node.typeId))
fh.write(" _ENUM_END = {n}\n".format(n=root.typeIdMax + 1))
fh.write(" };\n")
fh.write(" enum bounds : uint16_t {\n")
for node in sorted(filter(lambda _: not _.isLeaf, nodeList),
key=lambda _: _.typeIdMin):
fh.write(" first{t} = {n},\n".format(t=node.name,
n=node.typeIdMin))
fh.write(" last{t} = {n},\n".format(t=node.name,
n=node.typeIdMax))
for node in sorted(filter(lambda _: not _.isLeaf, nodeList), key=lambda _: _.typeIdMin):
fh.write(" first{t} = {n},\n".format(t=node.name, n=node.typeIdMin))
fh.write(" last{t} = {n},\n".format(t=node.name, n=node.typeIdMax))
fh.write(" _BOUNDS_END\n")
fh.write(" };\n")
fh.write(" const char* ascii() const VL_MT_SAFE {\n")
fh.write(" static const char* const names[_ENUM_END + 1] = {\n")
for node in sorted(filter(lambda _: _.isLeaf, nodeList),
key=lambda _: _.typeId):
for node in sorted(filter(lambda _: _.isLeaf, nodeList), key=lambda _: _.typeId):
fh.write(' "{T}",\n'.format(T=node.name.upper()))
fh.write(" \"_ENUM_END\"\n")
fh.write(" };\n")
@@ -928,8 +854,8 @@ def write_type_tests(prefix, nodeList):
enum = "VDfgType"
for node in nodeList:
fh.write(
"template<> inline bool {b}::privateTypeTest<{p}{n}>(const {b}* {v}) {{ "
.format(b=base, p=prefix, n=node.name, v=variable))
"template<> inline bool {b}::privateTypeTest<{p}{n}>(const {b}* {v}) {{ ".format(
b=base, p=prefix, n=node.name, v=variable))
if node.isRoot:
fh.write("return true;")
elif not node.isLeaf:
@@ -937,8 +863,9 @@ def write_type_tests(prefix, nodeList):
"return static_cast<int>({v}->type()) >= static_cast<int>({e}::first{t}) && static_cast<int>({v}->type()) <= static_cast<int>({e}::last{t});"
.format(v=variable, e=enum, t=node.name))
else:
fh.write("return {v}->type() == {e}::at{t};".format(
v=variable, e=enum, t=node.name))
fh.write("return {v}->type() == {e}::at{t};".format(v=variable,
e=enum,
t=node.name))
fh.write(" }\n")
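The non-leaf branch above compiles an isinstance-style test down to two integer comparisons. That works because leaf types are numbered depth-first, so every abstract class spans a contiguous [first, last] range of type IDs. A minimal model of such numbering (hypothetical node names, not astgen's real code):

def assign_ids(tree, root):
    # tree maps each class to its subclasses; leaves get one ID each,
    # and every class records the (min, max) IDs covering its subtree
    ids = {}
    n = 0
    def walk(name):
        nonlocal n
        lo = n
        if not tree[name]:
            n += 1
        for child in tree[name]:
            walk(child)
        ids[name] = (lo, n - 1)
    walk(root)
    return ids

tree = {'Node': ('NodeExpr', 'Cell'), 'NodeExpr': ('Add', 'Sub'),
        'Add': (), 'Sub': (), 'Cell': ()}
ids = assign_ids(tree, 'Node')
assert ids['NodeExpr'] == (0, 1)  # the 'first'/'last' bounds of its subtree
assert ids['Node'] == (0, 2)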
@@ -949,8 +876,7 @@
def write_ast_type_info(filename):
with open_file(filename) as fh:
for node in sorted(filter(lambda _: _.isLeaf, AstNodeList),
key=lambda _: _.typeId):
for node in sorted(filter(lambda _: _.isLeaf, AstNodeList), key=lambda _: _.typeId):
opTypeList = []
opNameList = []
for n in range(1, 5):
@@ -968,12 +894,11 @@
opTypeList.append('OP_LIST')
opNameList.append(name)
# opTypeStr = ', '.join(opTypeList)
opTypeStr = ', '.join(
['VNTypeInfo::{0}'.format(s) for s in opTypeList])
opTypeStr = ', '.join(['VNTypeInfo::{0}'.format(s) for s in opTypeList])
opNameStr = ', '.join(['"{0}"'.format(s) for s in opNameList])
fh.write(
' {{ "Ast{name}", {{{opTypeStr}}}, {{{opNameStr}}}, sizeof(Ast{name}) }},\n'
.format(
' {{ "Ast{name}", {{{opTypeStr}}}, {{{opNameStr}}}, sizeof(Ast{name}) }},\n'.
format(
name=node.name,
opTypeStr=opTypeStr,
opNameStr=opNameStr,
@@ -984,22 +909,18 @@ def write_ast_impl(filename):
with open_file(filename) as fh:
def emitBlock(pattern, **fmt):
fh.write(
textwrap.indent(textwrap.dedent(pattern),
" ").format(**fmt))
fh.write(textwrap.indent(textwrap.dedent(pattern), " ").format(**fmt))
for node in AstNodeList:
if node.name == "Node":
continue
emitBlock("const char* Ast{t}::brokenGen() const {{\n",
t=node.name)
emitBlock("const char* Ast{t}::brokenGen() const {{\n", t=node.name)
if node.superClass.name != 'Node':
emitBlock(" BROKEN_BASE_RTN(Ast{base}::brokenGen());\n",
base=node.superClass.name)
for ptr in node.ptrs:
if ptr['monad'] == 'Optional':
emitBlock(
" BROKEN_RTN(m_{name} && !m_{name}->brokeExists());\n",
emitBlock(" BROKEN_RTN(m_{name} && !m_{name}->brokeExists());\n",
name=ptr['name'])
else:
emitBlock(" BROKEN_RTN(!m_{name});\n" +
@@ -1010,8 +931,7 @@ def write_ast_impl(filename):
emitBlock("void Ast{t}::cloneRelinkGen() {{\n", t=node.name)
if node.superClass.name != 'Node':
emitBlock(" Ast{base}::cloneRelinkGen();\n",
base=node.superClass.name)
emitBlock(" Ast{base}::cloneRelinkGen();\n", base=node.superClass.name)
for ptr in node.ptrs:
emitBlock(
" if (m_{name} && m_{name}->clonep()) m_{name} = m_{name}->clonep();\n",
@@ -1020,14 +940,11 @@ def write_ast_impl(filename):
emitBlock("}}\n")
emitBlock("void Ast{t}::dumpJsonGen(std::ostream& str) const {{\n",
t=node.name)
emitBlock("void Ast{t}::dumpJsonGen(std::ostream& str) const {{\n", t=node.name)
if node.superClass.name != 'Node':
emitBlock(" Ast{base}::dumpJson(str);\n",
base=node.superClass.name)
emitBlock(" Ast{base}::dumpJson(str);\n", base=node.superClass.name)
for ptr in node.ptrs:
emitBlock(" dumpJsonPtr(str, \"{name}\", m_{name});\n",
name=ptr['name'])
emitBlock(" dumpJsonPtr(str, \"{name}\", m_{name});\n", name=ptr['name'])
emitBlock("}}\n")
emitBlock(
@@ -1038,9 +955,7 @@ def write_ast_impl(filename):
if op is None:
continue
name, _, _ = op
emitBlock(
" dumpNodeListJson(str, {name}(), \"{name}\", indent);\n",
name=name)
emitBlock(" dumpNodeListJson(str, {name}(), \"{name}\", indent);\n", name=name)
emitBlock("}}\n")
@@ -1059,9 +974,7 @@ def write_ast_macros(filename):
if not any_ptr:
fh.write("private: \\\n")
any_ptr = True
emitBlock("Ast{kind}* m_{name} = nullptr;",
name=ptr['name'],
kind=ptr['kind'])
emitBlock("Ast{kind}* m_{name} = nullptr;", name=ptr['name'], kind=ptr['kind'])
if any_ptr:
fh.write("public: \\\n")
# TODO pointer accessors
@@ -1108,8 +1021,8 @@ def write_ast_macros(filename):
if not op:
continue
name, monad, kind = op
retrieve = ("VN_DBG_AS(op{n}p(), {kind})" if kind != "Node"
else "op{n}p()").format(n=n, kind=kind)
retrieve = ("VN_DBG_AS(op{n}p(), {kind})"
if kind != "Node" else "op{n}p()").format(n=n, kind=kind)
superOp = node.superClass.getOp(n)
superName = None
if superOp:
@@ -1126,8 +1039,7 @@ def write_ast_macros(filename):
n=n,
retrieve=retrieve)
if superOp:
hiddenMethods.append("add" + superName[0].upper() +
superName[1:])
hiddenMethods.append("add" + superName[0].upper() + superName[1:])
elif monad == "Optional":
emitBlock('''\
Ast{kind}* {name}() const VL_MT_STABLE {{ return {retrieve}; }}
@@ -1150,27 +1062,24 @@ def write_ast_macros(filename):
if hiddenMethods:
fh.write("private: \\\n")
for method in hiddenMethods:
fh.write(" using Ast{sup}::{method}; \\\n".format(
sup=node.superClass.name, method=method))
fh.write(" using Ast{sup}::{method}; \\\n".format(sup=node.superClass.name,
method=method))
fh.write("public: \\\n")
fh.write(
" static_assert(true, \"\")\n") # Swallowing the semicolon
fh.write(" static_assert(true, \"\")\n") # Swallowing the semicolon
# Only care about leaf classes for the rest
if node.isLeaf:
fh.write(
"#define ASTGEN_SUPER_{t}(...) Ast{b}(VNType::at{t}, __VA_ARGS__)\n"
.format(t=node.name, b=node.superClass.name))
"#define ASTGEN_SUPER_{t}(...) Ast{b}(VNType::at{t}, __VA_ARGS__)\n".format(
t=node.name, b=node.superClass.name))
fh.write("\n")
def write_ast_yystype(filename):
with open_file(filename) as fh:
for node in AstNodeList:
fh.write("Ast{t}* {m}p;\n".format(t=node.name,
m=node.name[0].lower() +
node.name[1:]))
fh.write("Ast{t}* {m}p;\n".format(t=node.name, m=node.name[0].lower() + node.name[1:]))
################################################################################
@@ -1207,8 +1116,7 @@ def write_dfg_macros(filename):
name=name,
n=n - 1)
operandNames = tuple(
node.getOp(n)[0] for n in range(1, node.arity + 1))
operandNames = tuple(node.getOp(n)[0] for n in range(1, node.arity + 1))
if operandNames:
emitBlock('''\
const std::string srcName(size_t idx) const override {{
@ -1217,10 +1125,8 @@ def write_dfg_macros(filename):
}}
''',
a=node.arity,
ns=", ".join(
map(lambda _: '"' + _ + '"', operandNames)))
fh.write(
" static_assert(true, \"\")\n") # Swallowing the semicolon
ns=", ".join(map(lambda _: '"' + _ + '"', operandNames)))
fh.write(" static_assert(true, \"\")\n") # Swallowing the semicolon
def write_dfg_auto_classes(filename):
@@ -1254,11 +1160,8 @@ def write_dfg_ast_to_dfg(filename):
if (node.file is not None) or (not node.isLeaf):
continue
fh.write(
"void visit(Ast{t}* nodep) override {{\n".format(t=node.name))
fh.write(
' UASSERT_OBJ(!nodep->user1p(), nodep, "Already has Dfg vertex");\n\n'
)
fh.write("void visit(Ast{t}* nodep) override {{\n".format(t=node.name))
fh.write(' UASSERT_OBJ(!nodep->user1p(), nodep, "Already has Dfg vertex");\n\n')
fh.write(" if (unhandled(nodep)) return;\n\n")
for i in range(node.arity):
fh.write(" iterate(nodep->op{j}p());\n".format(j=i + 1))
@@ -1267,9 +1170,8 @@ def write_dfg_ast_to_dfg(filename):
' UASSERT_OBJ(nodep->op{j}p()->user1p(), nodep, "Child {j} missing Dfg vertex");\n'
.format(j=i + 1))
fh.write("\n")
fh.write(
" Dfg{t}* const vtxp = makeVertex<Dfg{t}>(nodep, *m_dfgp);\n"
.format(t=node.name))
fh.write(" Dfg{t}* const vtxp = makeVertex<Dfg{t}>(nodep, *m_dfgp);\n".format(
t=node.name))
fh.write(" if (!vtxp) {\n")
fh.write(" m_foundUnhandled = true;\n")
fh.write(" ++m_ctx.m_nonRepNode;\n")
@@ -1277,8 +1179,8 @@ def write_dfg_ast_to_dfg(filename):
fh.write(" }\n\n")
for i in range(node.arity):
fh.write(
" vtxp->relinkSource<{i}>(nodep->op{j}p()->user1u().to<DfgVertex*>());\n"
.format(i=i, j=i + 1))
" vtxp->relinkSource<{i}>(nodep->op{j}p()->user1u().to<DfgVertex*>());\n".
format(i=i, j=i + 1))
fh.write("\n")
fh.write(" m_uncommittedVertices.push_back(vtxp);\n")
fh.write(" nodep->user1p(vtxp);\n")
@@ -1292,14 +1194,12 @@ def write_dfg_dfg_to_ast(filename):
if (node.file is not None) or (not node.isLeaf):
continue
fh.write(
"void visit(Dfg{t}* vtxp) override {{\n".format(t=node.name))
fh.write("void visit(Dfg{t}* vtxp) override {{\n".format(t=node.name))
for i in range(node.arity):
fh.write(
" AstNodeExpr* const op{j}p = convertDfgVertexToAstNodeExpr(vtxp->source<{i}>());\n"
.format(i=i, j=i + 1))
fh.write(
" m_resultp = makeNode<Ast{t}>(vtxp".format(t=node.name))
fh.write(" m_resultp = makeNode<Ast{t}>(vtxp".format(t=node.name))
for i in range(node.arity):
fh.write(", op{j}p".format(j=i + 1))
fh.write(");\n")
@@ -1313,8 +1213,7 @@ parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Generate V3Ast headers to reduce C++ code duplication.""",
epilog=
"""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
@@ -1322,15 +1221,9 @@ Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('-I', action='store', help='source code include directory')
parser.add_argument('--astdef',
action='append',
help='add AST definition file (relative to -I)')
parser.add_argument('--dfgdef',
action='append',
help='add DFG definition file (relative to -I)')
parser.add_argument('--classes',
action='store_true',
help='makes class declaration files')
parser.add_argument('--astdef', action='append', help='add AST definition file (relative to -I)')
parser.add_argument('--dfgdef', action='append', help='add DFG definition file (relative to -I)')
parser.add_argument('--classes', action='store_true', help='makes class declaration files')
parser.add_argument('--debug', action='store_true', help='enable debug')
parser.add_argument('infiles', nargs='*', help='list of input .cpp filenames')
@@ -1409,8 +1302,7 @@ for node in AstNodeList:
# Compute derived properties over the whole DfgVertex hierarchy
DfgVertices["Vertex"].complete()
DfgVertexList = tuple(map(lambda _: DfgVertices[_],
sorted(DfgVertices.keys())))
DfgVertexList = tuple(map(lambda _: DfgVertices[_], sorted(DfgVertices.keys())))
check_types(DfgVertexList, "Dfg", "Vertex")
@@ -1457,8 +1349,7 @@ for cpt in Args.infiles:
if not re.search(r'.cpp$', cpt):
sys.exit("%Error: Expected argument to be .cpp file: " + cpt)
cpt = re.sub(r'.cpp$', '', cpt)
Cpt().process(in_filename=Args.I + "/" + cpt + ".cpp",
out_filename=cpt + "__gen.cpp")
Cpt().process(in_filename=Args.I + "/" + cpt + ".cpp", out_filename=cpt + "__gen.cpp")
######################################################################
# Local Variables:


@@ -28,15 +28,13 @@ def process():
+ (" -d" if Args.definitions else "") #
+ (" -k" if Args.token_table else "") #
+ (" -v" if Args.verbose else "") #
+ (" --report=itemset --report=lookahead" if
(Args.verbose and supports_report) else "")
+ (" --report=itemset --report=lookahead" if (Args.verbose and supports_report) else "")
# Useful but slow:
# (" -Wcounterexamples" if
# (Args.verbose and supports_counter_examples) else "")
#
# -p required for GLR parsers; they write to -p basename, not -o
+ ((" -p " + Args.name_prefix) if Args.name_prefix else "") + " -b " +
tmp_prefix() #
+ ((" -p " + Args.name_prefix) if Args.name_prefix else "") + " -b " + tmp_prefix() #
+ " -o " + tmp_prefix() + ".c" #
+ " " + tmp_prefix() + ".y")
@@ -44,11 +42,10 @@ def process():
status = subprocess.call(command, shell=True)
if status != 0:
unlink_outputs()
sys.exit("bisonpre: %Error: " + Args.yacc + " version " +
str(Bison_Version) + " run failed due to errors\n")
sys.exit("bisonpre: %Error: " + Args.yacc + " version " + str(Bison_Version) +
" run failed due to errors\n")
clean_output(tmp_prefix() + ".output",
output_prefix() + ".output", True, False)
clean_output(tmp_prefix() + ".output", output_prefix() + ".output", True, False)
warning_check(output_prefix() + ".output")
clean_output(tmp_prefix() + ".c", output_prefix() + ".c", False, True)
@@ -88,9 +85,7 @@ def unlink_outputs():
def bison_version_check():
with subprocess.Popen(Args.yacc + " --version",
shell=True,
stdout=subprocess.PIPE) as sp:
with subprocess.Popen(Args.yacc + " --version", shell=True, stdout=subprocess.PIPE) as sp:
out = str(sp.stdout.read())
match = re.search(r'([0-9]+\.[0-9]+)', out)
if match:
@@ -102,8 +97,7 @@ def bison_version_check():
Bison_Version = v
return
sys.exit("bisonpre: %Error: '" + Args.yacc +
"' is not installed, or not working\n")
sys.exit("bisonpre: %Error: '" + Args.yacc + "' is not installed, or not working\n")
def clean_output(filename, outname, is_output, is_c):
@@ -141,8 +135,7 @@ def clean_output(filename, outname, is_output, is_c):
if is_c:
token_values = {}
for line in lines:
if re.search(r'enum\s+yytokentype',
line) and not re.search(r';', line):
if re.search(r'enum\s+yytokentype', line) and not re.search(r';', line):
match = re.search(r'\b(\S+) = (\d+)', line)
if match:
token_values[match.group(2)] = match.group(1)
@@ -151,8 +144,7 @@ def clean_output(filename, outname, is_output, is_c):
if _enaline(line) and re.search(r'BISONPRE_TOKEN_NAMES', line):
out.append(line)
for tv in sorted(token_values.keys()):
out.append("\tcase %d: return \"%s\";\n" %
(tv, token_values[tv]))
out.append("\tcase %d: return \"%s\";\n" % (tv, token_values[tv]))
continue
out.append(line)
lines = out
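For reference, the BISONPRE_TOKEN_NAMES expansion above is driven by the 'NAME = value' pairs scraped from bison's generated yytokentype enum. A quick demo of the scrape (the token name is hypothetical):

import re
line = " yaFLOATNUM = 258,"
match = re.search(r'\b(\S+) = (\d+)', line)
assert (match.group(1), match.group(2)) == ('yaFLOATNUM', '258')
# which the loop above emits as: case 258: return "yaFLOATNUM";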
@@ -165,8 +157,7 @@ def clean_output(filename, outname, is_output, is_c):
# Fix bison 2.3 and GCC 4.2.1
line = re.sub(r'\(YY_\("', '(YY_((char*)"', line)
# Fix bison 2.3 glr-parser warning about yyerrorloc.YYTYPE::yydummy uninit
line = re.sub(r'(YYLTYPE yyerrloc;)',
r'\1 yyerrloc.yydummy=0;/*bisonpre*/', line)
line = re.sub(r'(YYLTYPE yyerrloc;)', r'\1 yyerrloc.yydummy=0;/*bisonpre*/', line)
# Fix bison 3.6.1 unexpected nested-comment
line = re.sub(r'/\* "/\*.*\*/" \*/', '', line)
fh.write(line)
@@ -177,11 +168,8 @@ def warning_check(filename):
linenum = 0
for line in fh:
linenum += 1
if re.search(r'(conflicts|warning:|^useless)',
line,
flags=re.IGNORECASE):
sys.exit("%Error: " + filename + ":" + str(linenum) + ": " +
line + "\n")
if re.search(r'(conflicts|warning:|^useless)', line, flags=re.IGNORECASE):
sys.exit("%Error: " + filename + ":" + str(linenum) + ": " + line + "\n")
######################################################################
@@ -214,12 +202,9 @@ def clean_input(filename, outname):
# ^/ to prevent comments from matching
if re.match(r'^[a-zA-Z0-9_<>]+:[^/]*[a-zA-Z]', line):
sys.exit("%Error: " + filename + ":" + str(lineno) +
": Move text on rule line to next line: " + line +
"\n")
": Move text on rule line to next line: " + line + "\n")
matcha = re.match(r'^([a-zA-Z0-9_]+)<(\S*)>(.*)$',
line,
flags=re.DOTALL)
matcha = re.match(r'^([a-zA-Z0-9_]+)<(\S*)>(.*)$', line, flags=re.DOTALL)
matchb = re.match(r'^([a-zA-Z0-9_]+):', line)
if re.match(r'^%%', line):
@@ -231,8 +216,8 @@ def clean_input(filename, outname):
dtype = matcha.group(2)
line = name + matcha.group(3)
if name in Rules:
sys.exit("%Error: " + filename + ":" + str(lineno) +
": Redeclaring '" + name + "': " + line)
sys.exit("%Error: " + filename + ":" + str(lineno) + ": Redeclaring '" + name +
"': " + line)
if dtype not in types:
types[dtype] = {}
types[dtype][name] = 1
@@ -250,8 +235,8 @@ def clean_input(filename, outname):
name = matchb.group(1)
if name not in ('public', 'private'):
if name in Rules:
sys.exit("%Error: " + filename + ":" + str(lineno) +
": Redeclaring '" + name + "': " + line)
sys.exit("%Error: " + filename + ":" + str(lineno) + ": Redeclaring '" +
name + "': " + line)
Rules[name] = {
'name': name,
'type': "",
@@ -268,8 +253,7 @@ def clean_input(filename, outname):
cline = re.sub(r'//.*$', '\n', line)
if re.match(r'^\s*;', cline):
if not last_rule:
sys.exit("%Error: " + filename + ":" + str(lineno) +
": Stray semicolon\n")
sys.exit("%Error: " + filename + ":" + str(lineno) + ": Stray semicolon\n")
last_rule = None
elif last_rule:
Rules[last_rule]['rules_and_productions'] += cline
@@ -279,8 +263,8 @@ def clean_input(filename, outname):
dtype = match.group(1)
tok = match.group(2)
if tok in tokens:
sys.exit("%Error: " + filename + ":" + str(lineno) +
": Redeclaring '" + tok + "': " + line)
sys.exit("%Error: " + filename + ":" + str(lineno) + ": Redeclaring '" + tok +
"': " + line)
tokens[tok] = dtype
for tok in re.split(r'[^a-zA-Z0-9_]+', cline):
@@ -299,8 +283,7 @@ def clean_input(filename, outname):
lineno += 1
if _enaline(line) and re.search(r'BISONPRE_VERSION', line):
# 1 2 3 4
match = re.search(
r'BISONPRE_VERSION\((\S+)\s*,\s*((\S+)\s*,)?\s*([^\),]+)\)\s*$',
match = re.search(r'BISONPRE_VERSION\((\S+)\s*,\s*((\S+)\s*,)?\s*([^\),]+)\)\s*$',
line)
if not match:
sys.exit("%Error: " + filename + ":" + str(lineno) +
@@ -308,8 +291,8 @@ def clean_input(filename, outname):
ver = match.group(1)
ver_max = match.group(3)
cmd = match.group(4)
if Bison_Version >= float(ver) and (
not ver_max or Bison_Version <= float(ver_max)):
if Bison_Version >= float(ver) and (not ver_max
or Bison_Version <= float(ver_max)):
line = cmd + "\n"
else:
line = "//NOP: " + line
@@ -323,8 +306,7 @@ def clean_input(filename, outname):
for line in linesin:
lineno += 1
if _enaline(line) and re.search(r'BISONPRE_NOT', line):
match = re.search(
r'(.*)BISONPRE_NOT\((\S+)\)\s*(\{[^}]+})\s*(.*)$',
match = re.search(r'(.*)BISONPRE_NOT\((\S+)\)\s*(\{[^}]+})\s*(.*)$',
line,
flags=re.DOTALL)
if not match:
@@ -337,8 +319,7 @@ def clean_input(filename, outname):
for etok in endtoks:
if etok not in tokens:
sys.exit("%Error: " + filename + ":" + str(lineno) +
": Can't find definition for token: " + etok +
"\n")
": Can't find definition for token: " + etok + "\n")
# Push it all onto one line to avoid error messages changing
pipe = ""
for tok in sorted(tokens.keys()):
@@ -397,10 +378,8 @@ def clean_input(filename, outname):
# Bison doesn't have a #line directive, so we need somewhere to insert into
line = re.sub(r'^\s*//.*$', '', line)
if not re.match(r'^\s*$', line):
sys.exit(
"%Error: " + filename + ":" + str(lineno) + ": Need " +
str(needmore) +
" more blank lines to keep line numbers constant\n")
sys.exit("%Error: " + filename + ":" + str(lineno) + ": Need " +
str(needmore) + " more blank lines to keep line numbers constant\n")
needmore -= 1
else:
lines.append(line)
@@ -418,8 +397,8 @@ def _bisonpre_copy(text, lineno, depth):
text,
flags=re.DOTALL)
if not match:
sys.exit("%Error: " + Filename + ":" + str(lineno) +
": Bad form of BISONPRE_NOT: " + text)
sys.exit("%Error: " + Filename + ":" + str(lineno) + ": Bad form of BISONPRE_NOT: " +
text)
text = match.group(1) + '{HERE}' + match.group(5)
once = match.group(2)
rule = match.group(3)
@@ -448,8 +427,7 @@ def _bisonpre_copy(text, lineno, depth):
insert = re.sub(left, right, insert)
insert = re.sub(r'[ \t\n]+\n', "\n", insert)
insert = re.sub(r'\n', " ",
insert) # Optional - preserve line numbering
insert = re.sub(r'\n', " ", insert) # Optional - preserve line numbering
text = re.sub(r'{HERE}', insert, text)
depth += 1
return text
@@ -465,8 +443,7 @@ def _enaline(line):
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=
"""Bisonpre is a wrapper for the Bison YACC replacement. Input to Bison is
description="""Bisonpre is a wrapper for the Bison YACC replacement. Input to Bison is
preprocessed with substitution as described below under EXTENSIONS. Output
from Bison is checked for additional errors, and corrected to work around
various compile warnings.""",
@@ -522,35 +499,17 @@ parser.add_argument('--yacc',
help='name of the bison executable, defaults to "bison"')
# Arguments passed through to bison
parser.add_argument('-b',
'--file-prefix',
action='store',
help='Passed to bison.')
parser.add_argument('-d',
'--definitions',
action='store_true',
help='Passed to bison.')
parser.add_argument('-k',
'--token-table',
action='store_true',
help='Passed to bison.')
parser.add_argument('-b', '--file-prefix', action='store', help='Passed to bison.')
parser.add_argument('-d', '--definitions', action='store_true', help='Passed to bison.')
parser.add_argument('-k', '--token-table', action='store_true', help='Passed to bison.')
parser.add_argument('-o',
'--output',
action='store',
required=True,
help='Passed to bison. Sets output file name')
parser.add_argument('-p',
'--name-prefix',
action='store',
help='Passed to bison.')
parser.add_argument('-t',
'--debug',
action='store_true',
help='Passed to bison.')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Passed to bison.')
parser.add_argument('-p', '--name-prefix', action='store', help='Passed to bison.')
parser.add_argument('-t', '--debug', action='store_true', help='Passed to bison.')
parser.add_argument('-v', '--verbose', action='store_true', help='Passed to bison.')
parser.add_argument('input', help='Passed to bison. Input grammar file.')


@@ -115,11 +115,9 @@ def _suppress(filename, linenum, eid):
return True
if eid == 'constParameter' and re.search(r'gtkwave/', filename):
return True
if eid == 'ctuOneDefinitionRuleViolation' and re.search(
r'vltstd/', filename):
if eid == 'ctuOneDefinitionRuleViolation' and re.search(r'vltstd/', filename):
return True
if eid == 'duplicateConditionalAssign' and re.search(
r'gtkwave/', filename):
if eid == 'duplicateConditionalAssign' and re.search(r'gtkwave/', filename):
return True
if eid == 'knownConditionTrueFalse' and re.search(r'gtkwave/', filename):
return True
@@ -151,8 +149,7 @@ def _suppress(filename, linenum, eid):
return True
if not os.path.exists(filename):
print("%Warning: " + filename + " does not exist, ignored",
file=sys.stderr)
print("%Warning: " + filename + " does not exist, ignored", file=sys.stderr)
return False
with open(filename, "r", encoding="utf8") as fh:
@@ -160,13 +157,11 @@ def _suppress(filename, linenum, eid):
for line in fh:
lineno += 1
if (lineno + 1) == linenum:
match = re.search(
r'(cppcheck|cppcheck-has-bug|cppverilator)-suppress((\s+\S+)+)',
match = re.search(r'(cppcheck|cppcheck-has-bug|cppverilator)-suppress((\s+\S+)+)',
line)
if match:
for supid in match.group(2).split():
if (supid == eid or (eid in SuppressMap
and supid == SuppressMap[eid])):
if (supid == eid or (eid in SuppressMap and supid == SuppressMap[eid])):
return True
return False
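The scan above looks one line above the reported line for any of three suppression spellings, with several IDs allowed after the keyword. A quick demo of the match (the sample comment is made up, though both IDs appear in this file):

import re
line = '// cppcheck-suppress constParameter knownConditionTrueFalse'
m = re.search(r'(cppcheck|cppcheck-has-bug|cppverilator)-suppress((\s+\S+)+)', line)
assert m.group(1) == 'cppcheck'
assert m.group(2).split() == ['constParameter', 'knownConditionTrueFalse']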
@@ -182,8 +177,7 @@ filters out unnecessary warnings related to Verilator. Run as:
cd $VERILATOR_ROOT
make -k cppcheck""",
epilog=
"""Copyright 2014-2024 by Wilson Snyder. This program is free software; you
epilog="""Copyright 2014-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.


@@ -16,23 +16,19 @@ import sys
for line in sys.stdin:
# Fix flex 2.6.1 warning
line = re.sub(
r'for \( i = 0; i < _yybytes_len; \+\+i \)',
r'for ( i = 0; (yy_size_t)(i) < (yy_size_t)(_yybytes_len); ++i )',
line)
line = re.sub(r'for \( i = 0; i < _yybytes_len; \+\+i \)',
r'for ( i = 0; (yy_size_t)(i) < (yy_size_t)(_yybytes_len); ++i )', line)
# Fix flex 2.6.0 warning
line = re.sub(
r'\(\(int\) \(\(yy_n_chars\) \+ number_to_move\) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size\)',
r'((int) ((yy_n_chars) + number_to_move) > (int) YY_CURRENT_BUFFER_LVALUE->yy_buf_size)',
line)
line = re.sub(r' number_to_move == YY_MORE_ADJ ',
r' (int)number_to_move == (int)YY_MORE_ADJ ', line)
# Fix flex 2.5.4 namespace omission
line = re.sub(r'^class istream;',
'#include <iostream>\nusing namespace std;\n', line)
# Fix flex 2.5.31 redefinition
line = re.sub(r'(\#define\s+yyFlexLexer\s+yyFlexLexer)', r'//flexfix: \1',
line = re.sub(r' number_to_move == YY_MORE_ADJ ', r' (int)number_to_move == (int)YY_MORE_ADJ ',
line)
# Fix flex 2.5.4 namespace omission
line = re.sub(r'^class istream;', '#include <iostream>\nusing namespace std;\n', line)
# Fix flex 2.5.31 redefinition
line = re.sub(r'(\#define\s+yyFlexLexer\s+yyFlexLexer)', r'//flexfix: \1', line)
# Fix flex 2.5.1 yytext_ptr undef
line = re.sub(r'(\#undef\s+yytext_ptr)', r'//flexfix: \1', line)
# Fix flex 2.5.4 and GCC 4.1.0 warn_unused_result
@@ -41,8 +37,7 @@ for line in sys.stdin:
line = re.sub(r'for \( n = 0; n < max_size && ',
r'for ( n = 0; ((size_t)n < (size_t)max_size) && ', line)
# Fix flex 2.5.4 and GCC 4.0.2 under FLEX_DEBUG
line = re.sub(r'--accepting rule at line %d ',
r'--accepting rule at line %ld ', line)
line = re.sub(r'--accepting rule at line %d ', r'--accepting rule at line %ld ', line)
# Fix compiler warning filenames
line = re.sub(r'(#line \d+ ".*)_pretmp', r'\1', line)
# Fix 'register' storage class specifier is deprecated and incompatible with C++17
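Since this script is a pure stdin-to-stdout filter, it is meant to sit between the flex-generated scanner and the file the compiler sees; one possible invocation shape (file and script names here are hypothetical):

import subprocess, sys
with open('lexer_pretmp.cpp') as src, open('lexer.cpp', 'w') as dst:
    subprocess.run([sys.executable, 'flexfix.py'], stdin=src, stdout=dst, check=True)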


@@ -19,11 +19,9 @@ def read_keys(filename):
if re.match(r'^\s*$', line):
continue
if re.search(r'^\s*VLCOVGEN_ITEM', line):
match = re.search(r'^\s*VLCOVGEN_ITEM *\( *"([^"]+)" *\)',
line)
match = re.search(r'^\s*VLCOVGEN_ITEM *\( *"([^"]+)" *\)', line)
if not match:
sys.exit("%Error: " + filename +
": vlcovgen misformed VLCOVGEN_ITEM line")
sys.exit("%Error: " + filename + ": vlcovgen misformed VLCOVGEN_ITEM line")
code = "{" + match.group(1) + "}"
data = eval(code)
# pprint(data)
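VLCOVGEN_ITEM lines carry the body of a Python dict literal inside a C macro; read_keys wraps the captured text in braces and evals it, as this demo shows (the item itself is made up):

import re
line = "VLCOVGEN_ITEM(\"'name':'thresh', 'default':32\")"
match = re.search(r'^\s*VLCOVGEN_ITEM *\( *"([^"]+)" *\)', line)
assert eval("{" + match.group(1) + "}") == {'name': 'thresh', 'default': 32}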
@@ -59,8 +57,7 @@ def write_keys(filename):
deleting = True
out.append(line)
for keyref in sorted(Items, key=lambda a: a['name']):
out.append(
" if (key == \"%s\") return VL_CIK_%s;\n" %
out.append(" if (key == \"%s\") return VL_CIK_%s;\n" %
(keyref['name'], keyref['name'].upper()))
elif re.search(r'VLCOVGEN_.*AUTO_EDIT_END', line):
deleting = False
@@ -80,19 +77,15 @@ def write_keys(filename):
parser = argparse.ArgumentParser(
allow_abbrev=False,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=
"""Generate verilated_cov headers to reduce C++ code duplication.""",
epilog=
"""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
description="""Generate verilated_cov headers to reduce C++ code duplication.""",
epilog="""Copyright 2002-2024 by Wilson Snyder. This program is free software; you
can redistribute it and/or modify it under the terms of either the GNU
Lesser General Public License Version 3 or the Perl Artistic License
Version 2.0.
SPDX-License-Identifier: LGPL-3.0-only OR Artistic-2.0""")
parser.add_argument('--srcdir',
action='store',
help='directory containing Verilator sources')
parser.add_argument('--srcdir', action='store', help='directory containing Verilator sources')
parser.set_defaults(srcdir=".")


@@ -42,8 +42,7 @@ for cmd in sys.stdin:
if 'example_lint' in line:
# We don't have a way to specify this yet, so just for now
# sys.stderr.write($line)
prefixes.append("int lint_off_line_" + str(lineno) +
" = 1;\n")
prefixes.append("int lint_off_line_" + str(lineno) + " = 1;\n")
lineno += 1
pos = newpos + 1