3
0
Fork 0
mirror of https://github.com/YosysHQ/sby.git synced 2025-08-14 08:55:27 +00:00

Merge pull request #325 from YosysHQ/krys/csv_statuses

This commit is contained in:
N. Engelhardt 2025-07-08 20:49:32 +02:00 committed by GitHub
commit dd008ec7f7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 714 additions and 210 deletions

View file

@ -61,8 +61,11 @@ jobcount = args.jobcount
init_config_file = args.init_config_file
status_show = args.status
status_reset = args.status_reset
status_live_csv = args.livecsv
status_show_csv = args.statuscsv
status_latest = args.status_latest
if status_show or status_reset:
if status_show or status_reset or status_show_csv:
target = workdir_prefix or workdir or sbyfile
if target is None:
print("ERROR: Specify a .sby config file or working directory to use --status.")
@ -85,15 +88,26 @@ if status_show or status_reset:
status_db = SbyStatusDb(status_path, task=None)
if status_show:
status_db.print_status_summary()
sys.exit(0)
if status_reset:
status_db.reset()
elif status_db.test_schema():
print(f"ERROR: Status database does not match expected formatted. Use --statusreset to reset.")
sys.exit(1)
if status_show:
status_db.print_status_summary(status_latest)
if status_show_csv:
status_db.print_status_summary_csv(tasknames, status_latest)
status_db.db.close()
if status_live_csv:
print(f"WARNING: --livecsv flag found but not used.")
sys.exit(0)
elif status_latest:
print(f"WARNING: --latest flag found but not used.")
if sbyfile is not None:
@ -465,7 +479,7 @@ def start_task(taskloop, taskname):
else:
junit_filename = "junit"
task = SbyTask(sbyconfig, my_workdir, early_logmsgs, reusedir, taskloop)
task = SbyTask(sbyconfig, my_workdir, early_logmsgs, reusedir, taskloop, name=taskname, live_csv=status_live_csv)
for k, v in exe_paths.items():
task.exe_paths[k] = v

View file

@ -29,6 +29,8 @@ def parser_func(release_version='unknown SBY version'):
help="maximum number of processes to run in parallel")
parser.add_argument("--sequential", action="store_true", dest="sequential",
help="run tasks in sequence, not in parallel")
parser.add_argument("--livecsv", action="store_true", dest="livecsv",
help="print live updates of property statuses during task execution in csv format")
parser.add_argument("--autotune", action="store_true", dest="autotune",
help="automatically find a well performing engine and engine configuration for each task")
@ -73,6 +75,10 @@ def parser_func(release_version='unknown SBY version'):
parser.add_argument("--status", action="store_true", dest="status",
help="summarize the contents of the status database")
parser.add_argument("--statuscsv", action="store_true", dest="statuscsv",
help="print the most recent status for each property in csv format")
parser.add_argument("--latest", action="store_true", dest="status_latest",
help="only check statuses from the most recent run of a task")
parser.add_argument("--statusreset", action="store_true", dest="status_reset",
help="reset the contents of the status database")

View file

@ -20,6 +20,7 @@ import os, re, sys, signal, platform, click
if os.name == "posix":
import resource, fcntl
import subprocess
from pathlib import Path
from dataclasses import dataclass, field
from collections import defaultdict
from typing import Optional
@ -631,6 +632,8 @@ class SbyTraceSummary:
path: Optional[str] = field(default=None)
engine_case: Optional[str] = field(default=None)
events: dict = field(default_factory=lambda: defaultdict(lambda: defaultdict(list)))
trace_ids: dict[str, int] = field(default_factory=lambda: dict())
last_ext: Optional[str] = field(default=None)
@property
def kind(self):
@ -682,42 +685,62 @@ class SbySummary:
if update_status:
status_metadata = dict(source="summary_event", engine=engine.engine)
if event.step:
status_metadata["step"] = event.step
add_trace = False
if event.prop:
if event.type == "$assert":
if event.type is None:
event.type = event.prop.celltype
elif event.type == "$assert":
event.prop.status = "FAIL"
if event.path:
event.prop.tracefiles.append(event.path)
if update_status:
self.task.status_db.add_task_property_data(
event.prop,
"trace",
data=dict(path=event.path, step=event.step, **status_metadata),
)
if event.prop:
if event.type == "$cover":
add_trace = True
elif event.type == "$cover":
event.prop.status = "PASS"
if event.path:
event.prop.tracefiles.append(event.path)
if update_status:
self.task.status_db.add_task_property_data(
event.prop,
"trace",
data=dict(path=event.path, step=event.step, **status_metadata),
)
add_trace = True
trace_id = None
trace_path = None
if event.trace:
# get or create trace summary
try:
trace_summary = engine.traces[event.trace]
except KeyError:
trace_summary = SbyTraceSummary(event.trace, path=event.path, engine_case=event.engine_case)
engine.traces[event.trace] = trace_summary
if event.path:
trace_path = Path(event.path)
trace_ext = trace_path.suffix
trace_summary.last_ext = trace_ext
try:
# use existing tracefile for this extension
trace_id = trace_summary.trace_ids[trace_ext]
except KeyError:
# add tracefile to database
trace_id = self.task.status_db.add_task_trace(event.trace, event.path, trace_ext[1:], event.engine_case)
trace_summary.trace_ids[trace_ext] = trace_id
elif trace_summary.path:
# use existing tracefile for last extension
trace_path = Path(trace_summary.path)
trace_ext = trace_summary.last_ext
trace_id = trace_summary.trace_ids[trace_ext]
if event.type:
by_type = trace_summary.events[event.type]
if event.hdlname:
by_type[event.hdlname].append(event)
if event.prop and update_status:
# update property status in database
self.task.status_db.set_task_property_status(
event.prop,
data=status_metadata
trace_id=trace_id,
data=status_metadata,
)
if event.trace not in engine.traces:
engine.traces[event.trace] = SbyTraceSummary(event.trace, path=event.path, engine_case=event.engine_case)
if event.type:
by_type = engine.traces[event.trace].events[event.type]
if event.hdlname:
by_type[event.hdlname].append(event)
if trace_path and add_trace:
event.prop.tracefiles.append(str(trace_path))
def set_engine_status(self, engine_idx, status, case=None):
engine_summary = self.engine_summary(engine_idx)
@ -759,10 +782,21 @@ class SbySummary:
break
case_suffix = f" [{trace.engine_case}]" if trace.engine_case else ""
if trace.path:
if short:
yield f"{trace.kind}{case_suffix}: {self.task.workdir}/{trace.path}"
else:
yield f"{trace.kind}{case_suffix}: {trace.path}"
# print single preferred trace
preferred_exts = [".fst", ".vcd"]
if trace.last_ext not in preferred_exts: preferred_exts.append(trace.last_ext)
for ext in trace.trace_ids.keys():
if ext not in preferred_exts: preferred_exts.append(ext)
for ext in preferred_exts:
if ext not in trace.trace_ids:
continue
if short:
path = Path(self.task.workdir) / trace.path
else:
path = Path(trace.path)
yield f"{trace.kind}{case_suffix}: {path.with_suffix(ext)}"
if short:
break
else:
yield f"{trace.kind}{case_suffix}: <{trace.trace}>"
produced_traces = True
@ -785,15 +819,18 @@ class SbySummary:
break
event = same_events[0]
steps = sorted(e.step for e in same_events)
# uniquify steps and ignore events with missing steps
steps = sorted(set(e.step for e in same_events if e.step))
if short and len(steps) > step_limit:
excess = len(steps) - step_limit
steps = [str(step) for step in steps[:step_limit]]
omitted_excess = True
steps[-1] += f" and {excess} further step{'s' if excess != 1 else ''}"
steps = f"step{'s' if len(steps) > 1 else ''} {', '.join(map(str, steps))}"
yield f" {desc} {event.hdlname} at {event.src} in {steps}"
event_string = f" {desc} {hdlname} at {event.src}"
if steps:
event_string += f" step{'s' if len(steps) > 1 else ''} {', '.join(map(str, steps))}"
yield event_string
if not produced_traces:
yield f"{engine.engine} did not produce any traces"
@ -801,7 +838,7 @@ class SbySummary:
if self.unreached_covers is None and self.task.opt_mode == 'cover' and self.task.status != "PASS" and self.task.design:
self.unreached_covers = []
for prop in self.task.design.hierarchy:
if prop.type == prop.Type.COVER and prop.status == "UNKNOWN":
if prop.type == prop.Type.COVER and prop.status in ["UNKNOWN", "FAIL"]:
self.unreached_covers.append(prop)
if self.unreached_covers:
@ -825,12 +862,14 @@ class SbySummary:
class SbyTask(SbyConfig):
def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logfile=None):
def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logfile=None, name=None, live_csv=False):
super().__init__()
self.used_options = set()
self.models = dict()
self.workdir = workdir
self.reusedir = reusedir
self.name = name
self.live_csv = live_csv
self.status = "UNKNOWN"
self.total_time = 0
self.expect = list()
@ -1215,17 +1254,27 @@ class SbyTask(SbyConfig):
proc.terminate(timeout=timeout)
for proc in list(self.procs_pending):
proc.terminate(timeout=timeout)
if timeout:
self.update_unknown_props(dict(source="timeout"))
def proc_failed(self, proc):
# proc parameter used by autotune override
self.status = "ERROR"
self.terminate()
def update_unknown_props(self, data):
for prop in self.design.hierarchy:
if prop.status != "UNKNOWN":
continue
if ((prop.type == prop.Type.ASSERT and self.opt_mode in ["bmc", "prove"]) or
(prop.type == prop.Type.COVER and self.opt_mode == "cover")):
self.status_db.set_task_property_status(prop, data=data)
def pass_unknown_asserts(self, data):
for prop in self.design.pass_unknown_asserts():
self.status_db.set_task_property_status(prop, data=data)
def update_status(self, new_status):
def update_status(self, new_status, step = None):
assert new_status in ["PASS", "FAIL", "UNKNOWN", "ERROR"]
self.status_db.set_task_status(new_status)
@ -1239,7 +1288,10 @@ class SbyTask(SbyConfig):
assert self.status != "FAIL"
self.status = "PASS"
if self.opt_mode in ("bmc", "prove") and self.design:
self.pass_unknown_asserts(dict(source="task_status"))
data = {"source": "task_status"}
if step:
data["step"] = step
self.pass_unknown_asserts(data)
elif new_status == "FAIL":
assert self.status != "PASS"
@ -1312,7 +1364,7 @@ class SbyTask(SbyConfig):
except FileNotFoundError:
status_path = f"{self.workdir}/status.sqlite"
self.status_db = SbyStatusDb(status_path, self)
self.status_db = SbyStatusDb(status_path, self, live_csv=self.live_csv)
def setup_procs(self, setupmode):
self.handle_non_engine_options()

View file

@ -111,6 +111,10 @@ class SbyProperty:
def celltype(self):
return f"${str(self.type).lower()}"
@property
def kind(self):
return str(self.type)
@property
def hdlname(self):
return pretty_path(self.path).rstrip()

View file

@ -18,6 +18,7 @@
import re, getopt
import json
import os
from sby_core import SbyProc
from sby_engine_aiger import aigsmt_exit_callback, aigsmt_trace_callback
@ -173,18 +174,25 @@ def run(mode, task, engine_idx, engine):
aiger_props.append(task.design.properties_by_path.get(tuple(path)))
if keep_going:
match = re.match(r"Writing CEX for output ([0-9]+) to engine_[0-9]+/(.*)\.aiw", line)
match = re.match(r"Writing CEX for output ([0-9]+) to (engine_[0-9]+/(.*)\.aiw)", line)
if match:
output = int(match[1])
tracefile = match[2]
name = match[3]
trace, _ = os.path.splitext(name)
task.summary.add_event(engine_idx=engine_idx, trace=trace, path=tracefile)
prop = aiger_props[output]
if prop:
prop.status = "FAIL"
task.status_db.set_task_property_status(prop, data=dict(source="abc pdr", engine=f"engine_{engine_idx}"))
task.summary.add_event(
engine_idx=engine_idx, trace=trace,
hdlname=prop.hdlname, src=prop.location, prop=prop,
)
disproved.add(output)
proc_status = "FAIL"
proc = aigsmt_trace_callback(task, engine_idx, proc_status,
run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append,
name=match[2],
name=name,
)
proc.register_exit_callback(exit_callback)
procs_running += 1
@ -198,7 +206,10 @@ def run(mode, task, engine_idx, engine):
prop = aiger_props[output]
if prop:
prop.status = "PASS"
task.status_db.set_task_property_status(prop, data=dict(source="abc pdr", engine=f"engine_{engine_idx}"))
task.summary.add_event(
engine_idx=engine_idx, trace=None,
hdlname=prop.hdlname, src=prop.location, prop=prop,
)
proved.add(output)
match = re.match(r"^Simulation of [0-9]+ frames for [0-9]+ rounds with [0-9]+ restarts did not assert POs.", line)

View file

@ -202,11 +202,13 @@ def aigsmt_trace_callback(task, engine_idx, proc_status, *, run_aigsmt, smtbmc_v
proc2_status = None
last_prop = []
recorded_last = False
current_step = None
def output_callback2(line):
nonlocal proc2_status
nonlocal last_prop
nonlocal recorded_last
nonlocal current_step
smt2_trans = {'\\':'/', '|':'/'}
@ -218,6 +220,8 @@ def aigsmt_trace_callback(task, engine_idx, proc_status, *, run_aigsmt, smtbmc_v
match = re.match(r"^## [0-9: ]+ .* in step ([0-9]+)\.\.", line)
if match:
last_prop = []
recorded_last = False
current_step = int(match[1])
return line
@ -233,32 +237,33 @@ def aigsmt_trace_callback(task, engine_idx, proc_status, *, run_aigsmt, smtbmc_v
cell_name = match[3] or match[2]
prop = task.design.hierarchy.find_property(path, cell_name, trans_dict=smt2_trans)
prop.status = "FAIL"
task.status_db.set_task_property_status(prop, data=dict(source="aigsmt", engine=f"engine_{engine_idx}"))
last_prop.append(prop)
return line
match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line)
match = re.match(r"^## [0-9: ]+ Writing trace to (VCD|Yosys witness) file: (\S+)", line)
if match:
tracefile = match[1]
trace = os.path.basename(tracefile)[:-4]
tracefile = match[2]
trace, _ = os.path.splitext(os.path.basename(tracefile))
task.summary.add_event(engine_idx=engine_idx, trace=trace, path=tracefile)
if match and last_prop:
for p in last_prop:
task.summary.add_event(
engine_idx=engine_idx, trace=trace,
type=p.celltype, hdlname=p.hdlname, src=p.location, step=current_step)
p.tracefiles.append(tracefile)
last_prop = []
type=p.celltype, hdlname=p.hdlname, src=p.location,
step=current_step, prop=p,
)
recorded_last = True
return line
return line
def exit_callback2(retcode):
nonlocal last_prop, recorded_last
if proc2_status is None:
task.error(f"engine_{engine_idx}: Could not determine aigsmt status.")
if proc2_status != "FAIL":
task.error(f"engine_{engine_idx}: Unexpected aigsmt status.")
if len(last_prop) and not recorded_last:
task.error(f"engine_{engine_idx}: Found properties without trace.")
proc2.output_callback = output_callback2
proc2.register_exit_callback(exit_callback2)

View file

@ -77,6 +77,7 @@ def run(mode, task, engine_idx, engine):
common_state.wit_file = None
common_state.assert_fail = False
common_state.running_procs = 0
common_state.current_step = None
def print_traces_and_terminate():
if mode == "cover":
@ -90,6 +91,7 @@ def run(mode, task, engine_idx, engine):
proc_status = "FAIL"
else:
task.error(f"engine_{engine_idx}: Engine terminated without status.")
task.update_unknown_props(dict(source="btor", engine=f"engine_{engine_idx}"))
else:
if common_state.expected_cex == 0:
proc_status = "pass"
@ -100,7 +102,7 @@ def run(mode, task, engine_idx, engine):
else:
task.error(f"engine_{engine_idx}: Engine terminated without status.")
task.update_status(proc_status.upper())
task.update_status(proc_status.upper(), common_state.current_step)
task.summary.set_engine_status(engine_idx, proc_status)
task.terminate()
@ -117,9 +119,11 @@ def run(mode, task, engine_idx, engine):
def make_exit_callback(suffix):
def exit_callback2(retcode):
vcdpath = f"engine_{engine_idx}/trace{suffix}.vcd"
if os.path.exists(f"{task.workdir}/{vcdpath}"):
task.summary.add_event(engine_idx=engine_idx, trace=f'trace{suffix}', path=vcdpath, type="$cover" if mode == "cover" else "$assert")
trace = f"trace{suffix}"
vcdpath = f"engine_{engine_idx}/{trace}.vcd"
trace_path = f"{task.workdir}/{vcdpath}"
if os.path.exists(trace_path):
task.summary.add_event(engine_idx=engine_idx, trace=trace, path=vcdpath, type="$cover" if mode == "cover" else "$assert")
common_state.running_procs -= 1
if (common_state.running_procs == 0):
@ -140,6 +144,7 @@ def run(mode, task, engine_idx, engine):
common_state.expected_cex = int(match[1])
if common_state.produced_cex != 0:
task.error(f"engine_{engine_idx}: Unexpected engine output (property count).")
task.update_unknown_props(dict(source="btor_init", engine=f"engine_{engine_idx}"))
else:
task.error(f"engine_{engine_idx}: BTOR solver '{solver_args[0]}' is currently not supported in cover mode.")
@ -205,6 +210,9 @@ def run(mode, task, engine_idx, engine):
if solver_args[0] == "btormc":
if "calling BMC on" in line:
return line
match = re.match(r".*at bound k = (\d+).*", line)
if match:
common_state.current_step = int(match[1])
if "SATISFIABLE" in line:
return line
if "bad state properties at bound" in line:
@ -215,6 +223,9 @@ def run(mode, task, engine_idx, engine):
return line
elif solver_args[0] == "pono":
match = re.match(r".*at bound (\d+).*", line)
if match:
common_state.current_step = int(match[1])
if line == "unknown":
if common_state.solver_status is None:
common_state.solver_status = "unsat"

View file

@ -184,6 +184,7 @@ def run(mode, task, engine_idx, engine):
proc_status = None
last_prop = []
recorded_last = False
pending_sim = None
current_step = None
procs_running = 1
@ -192,6 +193,7 @@ def run(mode, task, engine_idx, engine):
def output_callback(line):
nonlocal proc_status
nonlocal last_prop
nonlocal recorded_last
nonlocal pending_sim
nonlocal current_step
nonlocal procs_running
@ -212,7 +214,14 @@ def run(mode, task, engine_idx, engine):
match = re.match(r"^## [0-9: ]+ .* in step ([0-9]+)\.\.", line)
if match:
last_prop = []
recorded_last = False
if mode == "prove_induction":
return line
last_step = current_step
current_step = int(match[1])
if current_step != last_step and last_step is not None:
task.update_unknown_props(dict(source="smtbmc", engine=f"engine_{engine_idx}", step=last_step))
return line
match = re.match(r"^## [0-9: ]+ Status: FAILED", line)
@ -242,7 +251,6 @@ def run(mode, task, engine_idx, engine):
cell_name = match[3] or match[2]
prop = task.design.hierarchy.find_property(path, cell_name, trans_dict=smt2_trans)
prop.status = "FAIL"
task.status_db.set_task_property_status(prop, data=dict(source="smtbmc", engine=f"engine_{engine_idx}"))
last_prop.append(prop)
return line
@ -252,31 +260,25 @@ def run(mode, task, engine_idx, engine):
cell_name = match[3] or match[2]
prop = task.design.hierarchy.find_property(path, cell_name, trans_dict=smt2_trans)
prop.status = "PASS"
task.status_db.set_task_property_status(prop, data=dict(source="smtbmc", engine=f"engine_{engine_idx}"))
last_prop.append(prop)
return line
if smtbmc_vcd and not task.opt_fst:
match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line)
if match:
tracefile = match[1]
trace = os.path.basename(tracefile)[:-4]
engine_case = mode.split('_')[1] if '_' in mode else None
task.summary.add_event(engine_idx=engine_idx, trace=trace, path=tracefile, engine_case=engine_case)
if match and last_prop:
for p in last_prop:
task.summary.add_event(
engine_idx=engine_idx, trace=trace,
type=p.celltype, hdlname=p.hdlname, src=p.location, step=current_step)
p.tracefiles.append(tracefile)
last_prop = []
return line
else:
match = re.match(r"^## [0-9: ]+ Writing trace to Yosys witness file: (\S+)", line)
if match:
tracefile = match[1]
match = re.match(r"^## [0-9: ]+ Writing trace to (VCD|Yosys witness) file: (\S+)", line)
if match:
tracefile = match[2]
if match[1] == "Yosys witness" and (task.opt_fst or task.opt_vcd_sim):
pending_sim = tracefile
trace, _ = os.path.splitext(os.path.basename(tracefile))
engine_case = mode.split('_')[1] if '_' in mode else None
task.summary.add_event(engine_idx=engine_idx, trace=trace, path=tracefile, engine_case=engine_case)
for p in last_prop:
task.summary.add_event(
engine_idx=engine_idx, trace=trace,
type=p.celltype, hdlname=p.hdlname, src=p.location,
step=current_step, prop=p,
)
recorded_last = True
return line
match = re.match(r"^## [0-9: ]+ Unreached cover statement at ([^:]+): (\S+)(?: \((\S+)\))?", line)
if match and not failed_assert:
@ -284,7 +286,11 @@ def run(mode, task, engine_idx, engine):
cell_name = match[3] or match[2]
prop = task.design.hierarchy.find_property(path, cell_name, trans_dict=smt2_trans)
prop.status = "FAIL"
task.status_db.set_task_property_status(prop, data=dict(source="smtbmc", engine=f"engine_{engine_idx}"))
task.summary.add_event(
engine_idx=engine_idx, trace=None,
hdlname=prop.hdlname, src=prop.location,
step=current_step, prop=prop,
)
return line
@ -295,15 +301,19 @@ def run(mode, task, engine_idx, engine):
last_exit_callback()
def exit_callback(retcode):
nonlocal last_prop, recorded_last
if proc_status is None:
task.error(f"engine_{engine_idx}: Engine terminated without status.")
if len(last_prop) and not recorded_last:
task.error(f"engine_{engine_idx}: Found properties without trace.")
simple_exit_callback(retcode)
def last_exit_callback():
nonlocal current_step
if mode == "bmc" or mode == "cover":
task.update_status(proc_status)
task.update_status(proc_status, current_step)
if proc_status == "FAIL" and mode == "bmc" and keep_going:
task.pass_unknown_asserts(dict(source="smtbmc", keep_going=True, engine=f"engine_{engine_idx}"))
task.pass_unknown_asserts(dict(source="smtbmc", keep_going=True, engine=f"engine_{engine_idx}", step=current_step))
proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status
task.summary.set_engine_status(engine_idx, proc_status_lower)
if not keep_going:
@ -335,7 +345,7 @@ def run(mode, task, engine_idx, engine):
assert False
if task.basecase_pass and task.induction_pass:
task.update_status("PASS")
task.update_status("PASS", current_step)
task.summary.append("successful proof by k-induction.")
if not keep_going:
task.terminate()

View file

@ -4,6 +4,8 @@ import sqlite3
import os
import time
import json
import click
import re
from collections import defaultdict
from functools import wraps
from pathlib import Path
@ -13,69 +15,126 @@ from sby_design import SbyProperty, pretty_path
Fn = TypeVar("Fn", bound=Callable[..., Any])
SQLSCRIPT = """\
CREATE TABLE task (
id INTEGER PRIMARY KEY,
workdir TEXT,
name TEXT,
mode TEXT,
created REAL
);
CREATE TABLE task_status (
id INTEGER PRIMARY KEY,
task INTEGER,
status TEXT,
data TEXT,
created REAL,
FOREIGN KEY(task) REFERENCES task(id)
);
CREATE TABLE task_property (
id INTEGER PRIMARY KEY,
task INTEGER,
src TEXT,
name TEXT,
hdlname TEXT,
kind TEXT,
created REAL,
FOREIGN KEY(task) REFERENCES task(id)
);
CREATE TABLE task_property_status (
id INTEGER PRIMARY KEY,
task_property INTEGER,
task_trace INTEGER,
status TEXT,
data TEXT,
created REAL,
FOREIGN KEY(task_property) REFERENCES task_property(id),
FOREIGN KEY(task_trace) REFERENCES task_trace(id)
);
CREATE TABLE task_trace (
id INTEGER PRIMARY KEY,
task INTEGER,
trace TEXT,
path TEXT,
kind TEXT,
engine_case TEXT,
created REAL,
FOREIGN KEY(task) REFERENCES task(id)
);"""
def transaction(method: Fn) -> Fn:
@wraps(method)
def wrapper(self: SbyStatusDb, *args: Any, **kwargs: Any) -> Any:
if self._transaction_active:
if self.con.in_transaction:
return method(self, *args, **kwargs)
try:
self.log_debug(f"begin {method.__name__!r} transaction")
self.db.execute("begin")
self._transaction_active = True
result = method(self, *args, **kwargs)
self.db.execute("commit")
self._transaction_active = False
self.log_debug(f"comitted {method.__name__!r} transaction")
return result
except sqlite3.OperationalError as err:
self.log_debug(f"failed {method.__name__!r} transaction {err}")
self.db.rollback()
self._transaction_active = False
with self.con:
self.log_debug(f"begin {method.__name__!r} transaction")
self.db.execute("begin")
result = method(self, *args, **kwargs)
except Exception as err:
self.log_debug(f"failed {method.__name__!r} transaction {err}")
self.db.rollback()
self._transaction_active = False
raise
if not isinstance(err, sqlite3.OperationalError):
raise
if re.match(r"table \w+ has no column named \w+", err.args[0]):
err.add_note("SBY status database can be reset with --statusreset")
raise
else:
self.log_debug(f"comitted {method.__name__!r} transaction")
return result
try:
self.log_debug(
f"retrying {method.__name__!r} transaction once in immediate mode"
)
self.db.execute("begin immediate")
self._transaction_active = True
result = method(self, *args, **kwargs)
self.db.execute("commit")
self._transaction_active = False
self.log_debug(f"comitted {method.__name__!r} transaction")
return result
with self.con:
self.log_debug(
f"retrying {method.__name__!r} transaction once in immediate mode"
)
self.db.execute("begin immediate")
result = method(self, *args, **kwargs)
except Exception as err:
self.log_debug(f"failed {method.__name__!r} transaction {err}")
self.db.rollback()
self._transaction_active = False
raise
else:
self.log_debug(f"comitted {method.__name__!r} transaction")
return result
return wrapper # type: ignore
class SbyStatusDb:
def __init__(self, path: Path, task, timeout: float = 5.0):
def __init__(self, path: Path, task, timeout: float = 5.0, live_csv = False):
self.debug = False
self.task = task
self._transaction_active = False
self.live_csv = live_csv
setup = not os.path.exists(path)
self.db = sqlite3.connect(path, isolation_level=None, timeout=timeout)
self.con = sqlite3.connect(path, isolation_level=None, timeout=timeout)
self.db = self.con.cursor()
self.db.row_factory = sqlite3.Row
self.db.execute("PRAGMA journal_mode=WAL").fetchone()
self.db.execute("PRAGMA synchronous=0").fetchone()
err_count = 0
err_max = 3
while True:
try:
self.db.execute("PRAGMA journal_mode=WAL")
self.db.execute("PRAGMA synchronous=0")
self.db.execute("PRAGMA foreign_keys=ON")
except sqlite3.OperationalError as err:
if "database is locked" not in err.args[0]:
raise
err_count += 1
if err_count > err_max:
err.add_note(f"Failed to acquire lock after {err_count} attempts, aborting")
raise
backoff = err_count / 10.0
self.log_debug(f"Database locked, retrying in {backoff}s")
time.sleep(backoff)
else:
break
if setup:
self._setup()
self._setup()
if task is not None:
self.task_id = self.create_task(workdir=task.workdir, mode=task.opt_mode)
self.start_time = time.time()
self.task_id = self.create_task(workdir=task.workdir, name=task.name, mode=task.opt_mode, now=self.start_time)
def log_debug(self, *args):
if self.debug:
@ -86,59 +145,25 @@ class SbyStatusDb:
@transaction
def _setup(self):
script = """
CREATE TABLE task (
id INTEGER PRIMARY KEY,
workdir TEXT,
mode TEXT,
created REAL
);
CREATE TABLE task_status (
id INTEGER PRIMARY KEY,
task INTEGER,
status TEXT,
data TEXT,
created REAL,
FOREIGN KEY(task) REFERENCES task(id)
);
CREATE TABLE task_property (
id INTEGER PRIMARY KEY,
task INTEGER,
src TEXT,
name TEXT,
created REAL,
FOREIGN KEY(task) REFERENCES task(id)
);
CREATE TABLE task_property_status (
id INTEGER PRIMARY KEY,
task_property INTEGER,
status TEXT,
data TEXT,
created REAL,
FOREIGN KEY(task_property) REFERENCES task_property(id)
);
CREATE TABLE task_property_data (
id INTEGER PRIMARY KEY,
task_property INTEGER,
kind TEXT,
data TEXT,
created REAL,
FOREIGN KEY(task_property) REFERENCES task_property(id)
);
"""
for statement in script.split(";\n"):
statement = statement.strip()
for statement in SQLSCRIPT.split(";\n"):
statement = statement.strip().replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS")
if statement:
self.db.execute(statement)
def test_schema(self) -> bool:
schema = self.db.execute("SELECT sql FROM sqlite_master;").fetchall()
schema_script = '\n'.join(str(sql[0] + ';') for sql in schema)
self._tables = re.findall(r"CREATE TABLE (\w+) \(", schema_script)
return schema_script != SQLSCRIPT
@transaction
def create_task(self, workdir: str, mode: str) -> int:
def create_task(self, workdir: str, name: str, mode: str, now:float) -> int:
return self.db.execute(
"""
INSERT INTO task (workdir, mode, created)
VALUES (:workdir, :mode, :now)
INSERT INTO task (workdir, name, mode, created)
VALUES (:workdir, :name, :mode, :now)
""",
dict(workdir=workdir, mode=mode, now=time.time()),
dict(workdir=workdir, name=name, mode=mode, now=now),
).lastrowid
@transaction
@ -150,14 +175,16 @@ class SbyStatusDb:
now = time.time()
self.db.executemany(
"""
INSERT INTO task_property (name, src, task, created)
VALUES (:name, :src, :task, :now)
INSERT INTO task_property (name, src, hdlname, task, kind, created)
VALUES (:name, :src, :hdlname, :task, :kind, :now)
""",
[
dict(
name=json.dumps(prop.path),
src=prop.location or "",
hdlname=prop.hdlname,
task=task_id,
kind=prop.kind,
now=now,
)
for prop in properties
@ -195,55 +222,66 @@ class SbyStatusDb:
def set_task_property_status(
self,
property: SbyProperty,
status: Optional[str] = None,
trace_id: Optional[int] = None,
data: Any = None,
):
if status is None:
status = property.status
now = time.time()
self.db.execute(
"""
INSERT INTO task_property_status (
task_property, status, data, created
task_property, task_trace, status, data, created
)
VALUES (
(SELECT id FROM task_property WHERE task = :task AND name = :name),
:status, :data, :now
:trace_id, :status, :data, :now
)
""",
dict(
task=self.task_id,
trace_id=trace_id,
name=json.dumps(property.path),
status=status,
status=property.status,
data=json.dumps(data),
now=now,
),
)
if self.live_csv:
row = self.get_status_data_joined(self.db.lastrowid)
csvline = format_status_data_csvline(row)
self.task.log(f"{click.style('csv', fg='yellow')}: {csvline}")
@transaction
def add_task_property_data(self, property: SbyProperty, kind: str, data: Any):
def add_task_trace(
self,
trace: str,
path: str,
kind: str,
engine_case: Optional[str] = None,
task_id: Optional[int] = None,
):
if task_id is None:
task_id = self.task_id
now = time.time()
self.db.execute(
return self.db.execute(
"""
INSERT INTO task_property_data (
task_property, kind, data, created
INSERT INTO task_trace (
trace, task, path, engine_case, kind, created
)
VALUES (
(SELECT id FROM task_property WHERE task = :task AND name = :name),
:kind, :data, :now
:trace, :task, :path, :engine_case, :kind, :now
)
""",
dict(
task=self.task_id,
name=json.dumps(property.path),
trace=trace,
task=task_id,
path=path,
engine_case=engine_case,
kind=kind,
data=json.dumps(data),
now=now,
),
)
now=now
)
).lastrowid
@transaction
def all_tasks(self):
rows = self.db.execute(
"""
@ -253,7 +291,6 @@ class SbyStatusDb:
return {row["id"]: dict(row) for row in rows}
@transaction
def all_task_properties(self):
rows = self.db.execute(
"""
@ -264,12 +301,10 @@ class SbyStatusDb:
def get_result(row):
row = dict(row)
row["name"] = tuple(json.loads(row.get("name", "[]")))
row["data"] = json.loads(row.get("data", "null"))
return row
return {row["id"]: get_result(row) for row in rows}
@transaction
def all_task_property_statuses(self):
rows = self.db.execute(
"""
@ -285,7 +320,6 @@ class SbyStatusDb:
return {row["id"]: get_result(row) for row in rows}
@transaction
def all_status_data(self):
return (
self.all_tasks(),
@ -294,20 +328,35 @@ class SbyStatusDb:
)
@transaction
def reset(self):
self.db.execute("""DELETE FROM task_property_status""")
self.db.execute("""DELETE FROM task_property_data""")
self.db.execute("""DELETE FROM task_property""")
self.db.execute("""DELETE FROM task_status""")
self.db.execute("""DELETE FROM task""")
def _reset(self):
hard_reset = self.test_schema()
# table names can't be parameters, so we need to use f-strings
# but it is safe to use here because it comes from the regex "\w+"
for table in self._tables:
if hard_reset:
self.log_debug(f"dropping {table}")
self.db.execute(f"DROP TABLE {table}")
else:
self.log_debug(f"clearing {table}")
self.db.execute(f"DELETE FROM {table}")
if hard_reset:
self._setup()
def print_status_summary(self):
def reset(self):
self.db.execute("PRAGMA foreign_keys=OFF")
self._reset()
self.db.execute("PRAGMA foreign_keys=ON")
def print_status_summary(self, latest: bool):
tasks, task_properties, task_property_statuses = self.all_status_data()
latest_task_ids = filter_latest_task_ids(tasks)
properties = defaultdict(set)
uniquify_paths = defaultdict(dict)
def add_status(task_property, status):
if latest and task_property["task"] not in latest_task_ids:
return
display_name = task_property["name"]
if display_name[-1].startswith("$"):
@ -334,6 +383,90 @@ class SbyStatusDb:
for display_name, statuses in sorted(properties.items()):
print(pretty_path(display_name), combine_statuses(statuses))
def get_status_data_joined(self, status_id: int):
    """Fetch one property status by id, joined with its task, property and
    (optional) trace, and return it as a parsed dict.

    The trace is joined with LEFT JOIN because a status need not have an
    associated trace; `path`/`trace_kind` are then NULL.
    """
    row = self.db.execute(
        """
            SELECT task.name as 'task_name', task.mode, task.workdir, task.created, task_property.kind,
            task_property.src as 'location', task_property.name, task_property.hdlname, task_property_status.status,
            task_property_status.data, task_property_status.created as 'status_created',
            task_property_status.id, task_trace.path, task_trace.kind as trace_kind
            FROM task
            INNER JOIN task_property ON task_property.task=task.id
            INNER JOIN task_property_status ON task_property_status.task_property=task_property.id
            LEFT JOIN task_trace ON task_property_status.task_trace=task_trace.id
            WHERE task_property_status.id=:status_id;
        """,
        dict(status_id=status_id)
    ).fetchone()
    return parse_status_data_row(row)
def all_status_data_joined(self):
    """Return every property status joined with its task, property and
    (optional) trace, keyed by the status row id.

    Same join shape as `get_status_data_joined` but without a WHERE
    clause, plus `task_id` so callers can filter by task.
    """
    rows = self.db.execute(
        """
            SELECT task.id as 'task_id', task.name as 'task_name', task.mode, task.workdir, task.created, task_property.kind,
            task_property.src as 'location', task_property.name, task_property.hdlname, task_property_status.status,
            task_property_status.data, task_property_status.created as 'status_created',
            task_property_status.id, task_trace.path, task_trace.kind as trace_kind
            FROM task
            INNER JOIN task_property ON task_property.task=task.id
            INNER JOIN task_property_status ON task_property_status.task_property=task_property.id
            LEFT JOIN task_trace ON task_property_status.task_trace=task_trace.id;
        """
    ).fetchall()
    return {row["id"]: parse_status_data_row(row) for row in rows}
def print_status_summary_csv(self, tasknames: list[str], latest: bool):
    """Print a CSV summary of property statuses to stdout.

    For each (task, property) pair, at most one row per distinct status
    is printed, chosen by the rules below; UNKNOWN rows are suppressed
    when any other status exists for the same property.

    Args:
        tasknames: if non-empty, restrict output to these task names.
        latest: if True, only consider the newest task per workdir.
    """
    # get all statuses
    all_properties = self.all_status_data_joined()
    latest_task_ids = filter_latest_task_ids(self.all_tasks())
    # print csv header
    csvheader = format_status_data_csvline(None)
    print(csvheader)
    # find summary for each task/property combo
    # maps (task_name, hdlname) -> {status: (depth, status row id)}
    prop_map: dict[tuple[str, str], dict[str, tuple[int | None, int]]] = {}
    for row, prop_status in all_properties.items():
        if tasknames and prop_status['task_name'] not in tasknames:
            continue
        if latest and prop_status['task_id'] not in latest_task_ids:
            continue
        status = prop_status['status']
        # depth ('step') may be absent, e.g. for statuses without solver data
        this_depth = prop_status['data'].get('step')
        this_kind = prop_status['trace_kind']
        key = (prop_status['task_name'], prop_status['hdlname'])
        try:
            prop_status_map = prop_map[key]
        except KeyError:
            prop_map[key] = prop_status_map = {}
        current_depth, _ = prop_status_map.get(status, (None, None))
        update_map = False
        if current_depth is None:
            # first occurrence of this status (or the stored one had no depth)
            update_map = True
        elif this_depth is not None:
            if status == 'FAIL' and this_depth < current_depth:
                # earliest fail
                update_map = True
            elif status != 'FAIL' and this_depth > current_depth:
                # latest non-FAIL
                update_map = True
            elif this_depth == current_depth and this_kind in ['fst', 'vcd']:
                # prefer traces over witness files
                update_map = True
        if update_map:
            prop_status_map[status] = (this_depth, row)
    for prop in prop_map.values():
        # ignore UNKNOWNs if there are other statuses
        if len(prop) > 1 and "UNKNOWN" in prop:
            del prop["UNKNOWN"]
        for _, row in prop.values():
            csvline = format_status_data_csvline(all_properties[row])
            print(csvline)
def combine_statuses(statuses):
statuses = set(statuses)
@ -342,3 +475,57 @@ def combine_statuses(statuses):
statuses.discard("UNKNOWN")
return ",".join(sorted(statuses))
def parse_status_data_row(raw: sqlite3.Row) -> dict:
    """Convert a joined status row into a plain dict, decoding JSON columns.

    `name` holds a JSON-encoded property path and `data` a JSON object of
    solver metadata; either column may be SQL NULL (e.g. from the LEFT
    JOIN side), in which case they decode to None and {} respectively.
    """
    row_dict = dict(raw)
    # Guard with `or`: row_dict.get(...) returns None for a present-but-NULL
    # column, and json.loads(None) would raise TypeError.
    row_dict["name"] = json.loads(row_dict.get("name") or "null")
    row_dict["data"] = json.loads(row_dict.get("data") or "{}")
    return row_dict
def format_status_data_csvline(row: dict|None) -> str:
if row is None:
csv_header = [
"time",
"task_name",
"mode",
"engine",
"name",
"location",
"kind",
"status",
"trace",
"depth",
]
return ','.join(csv_header)
else:
engine = row['data'].get('engine', row['data'].get('source'))
try:
time = row['status_created'] - row['created']
except TypeError:
time = 0
name = row['hdlname']
depth = row['data'].get('step')
try:
trace_path = Path(row['workdir']) / row['path']
except TypeError:
trace_path = None
csv_line = [
round(time, 2),
row['task_name'],
row['mode'],
engine,
name or pretty_path(row['name']),
row['location'],
row['kind'],
row['status'] or "UNKNOWN",
trace_path,
depth,
]
return ','.join("" if v is None else str(v) for v in csv_line)
def filter_latest_task_ids(all_tasks: dict[int, dict[str]]):
    """Return the id of the most recent task for each distinct workdir.

    Assumes `all_tasks` iterates oldest-to-newest (insertion order by
    ascending task id) — later entries overwrite earlier ones, leaving
    the newest task id per workdir.
    """
    newest_by_workdir = {task["workdir"]: task_id for task_id, task in all_tasks.items()}
    return list(newest_by_workdir.values())

View file

@ -5,7 +5,7 @@ import sys
def get_prop_type(prop: str):
prop = json.loads(prop or "[]")
name_parts = prop[-1].split("_")
if name_parts[0] == "\check":
if name_parts[0] == "\\check":
# read_verilog style
# \check_cover_mixed_v...
return name_parts[1]
@ -24,22 +24,28 @@ def main():
# custom sql to get all task property statuses for the current workdir
rows = db.execute(
"""
SELECT task.id, task_property.name, task_property.src, task_property_status.status
SELECT task.id, task_property.id, task_property.name, task_property.src, task_property_status.status
FROM task
LEFT JOIN task_property ON task_property.task=task.id
LEFT JOIN task_property_status ON task_property_status.task_property=task_property.id
WHERE task.workdir=:workdir;
WHERE task.workdir=:workdir
ORDER BY task_property_status.id DESC;
""",
{"workdir": workdir}
).fetchall()
# only check the most recent iteration of the test
last_id = 0
for row_id, _, _, _ in rows:
for row_id, _, _, _, _ in rows:
if row_id > last_id:
last_id = row_id
for row_id, prop, src, status in rows:
# only check the last status of a property
checked_props = set()
for row_id, prop_id, prop, src, status in rows:
if row_id != last_id:
continue
if prop_id in checked_props:
continue
checked_props.add(prop_id)
prop_type = get_prop_type(prop)
valid_status: list[None|str] = []
if workdir in ["mixed_bmc", "mixed_assert"] and prop_type == "assert":

14
tests/statusdb/reset.sby Normal file
View file

@ -0,0 +1,14 @@
[options]
mode bmc
depth 100
expect fail
[engines]
smtbmc --keep-going boolector
[script]
read -formal reset.sv
prep -top demo
[files]
reset.sv

11
tests/statusdb/reset.sh Normal file
View file

@ -0,0 +1,11 @@
#!/bin/bash
# Regression test: --statusreset must leave the status database empty.
set -e
# run task
python3 $SBY_MAIN -f $SBY_FILE $TASK
# task has 3 properties
# (--status prints one line per property, so expect 3 lines)
python3 $SBY_MAIN -f $SBY_FILE --status | wc -l | grep -q '3'
# resetting task should work
python3 $SBY_MAIN -f $SBY_FILE --statusreset
# clean database should have no properties
python3 $SBY_MAIN -f $SBY_FILE --status | wc -l | grep -q '0'

21
tests/statusdb/reset.sv Normal file
View file

@ -0,0 +1,21 @@
// Small counter fixture for reset.sby; the config expects FAIL because
// one of the assertions below is trivially false.
module demo (
    input clk,
    output reg [5:0] counter
);
    initial counter = 0;
    // count 0..15, then wrap back to 0
    always @(posedge clk) begin
        if (counter == 15)
            counter <= 0;
        else
            counter <= counter + 1;
    end
`ifdef FORMAL
    // three properties: one trivially true, one that fails once the
    // counter reaches 7, and one that always fails
    always @(posedge clk) begin
        assert (1);
        assert (counter < 7);
        assert (0);
    end
`endif
endmodule

130
tests/statusdb/timeout.sby Normal file
View file

@ -0,0 +1,130 @@
[tasks]
btor_bmc: btor bmc
btor_fin_bmc: btor bmc fin
pono_bmc: pono bmc
pono_fin_bmc: pono bmc fin
btor_cover: btor cover
btor_fin_cover: btor cover fin
smt_bmc: smt bmc
smt_fin_bmc: smt bmc fin
smt_cover: smt cover
smt_fin_cover: smt cover fin
smt_prove: smt prove
smt_fin_prove: smt prove fin
smt_fail: smtfail bmc fail
smt_fin_fail: smtfail bmc fail fin
aig_bmc: aigbmc bmc
aig_fin_bmc: aigbmc bmc fin
aig_prove: aiger prove
aig_fin_prove: aiger prove fin
abc_bmc: abcbmc bmc
abc_fin_bmc: abcbmc bmc fin
abc_prove: abc prove
abc_fin_prove: abc prove fin
abc_fail: abcfail prove fail
abc_fin_fail: abcfail prove fail fin
[options]
bmc: mode bmc
cover: mode cover
prove: mode prove
fin:
expect PASS,FAIL,UNKNOWN
depth 10
--
~fin:
expect TIMEOUT
depth 40000
timeout 1
--
[engines]
btor: btor btormc
pono: btor pono
smt: smtbmc bitwuzla
smtfail: smtbmc --keep-going bitwuzla
aigbmc: aiger aigbmc
aiger: aiger suprove
abcbmc: abc bmc3
abc: abc pdr
abcfail: abc --keep-going pdr
[script]
fin: read -define WIDTH=4
~fin: read -define WIDTH=8
fail: read -define FAIL=1
read -sv timeout.sv
prep -top top
[file timeout.sv]
// Iterative shift-and-subtract divider: after `load`, it repeatedly
// subtracts shifted multiples of b_reg from r while accumulating q.
// The assertions check the division invariants (q*b + r == a, etc.);
// per the [options] section, the wide (~fin) variants are expected to
// hit TIMEOUT while the narrow (fin) variants complete.
module top #(
    parameter WIDTH = `WIDTH
) (
    input clk,
    input load,
    input [WIDTH-1:0] a,
    input [WIDTH-1:0] b,
    output reg [WIDTH-1:0] q,
    output reg [WIDTH-1:0] r,
    output reg done
);
    // latched operands of the current division
    reg [WIDTH-1:0] a_reg = 0;
    reg [WIDTH-1:0] b_reg = 1;
    initial begin
        q <= 0;
        r <= 0;
        done <= 1;
    end
    // current quotient increment and matching subtrahend (r_step = b * q_step)
    reg [WIDTH-1:0] q_step = 1;
    reg [WIDTH-1:0] r_step = 1;
    // This is not how you design a good divider circuit!
    always @(posedge clk) begin
        if (load) begin
            a_reg <= a;
            b_reg <= b;
            q <= 0;
            r <= a;
            q_step <= 1;
            r_step <= b;
            // b == 0 finishes immediately (division by zero)
            done <= b == 0;
        end else begin
            if (r_step <= r) begin
                // subtract the current multiple and scale it up
                q <= q + q_step;
                r <= r - r_step;
                if (!r_step[WIDTH-1]) begin
                    r_step <= r_step << 1;
                    q_step <= q_step << 1;
                end
            end else begin
                if (!q_step[0]) begin
                    // scale back down until q_step reaches 1
                    r_step <= r_step >> 1;
                    q_step <= q_step >> 1;
                end else begin
                    done <= 1;
                end
            end
        end
    end
    always @(posedge clk) begin
        assert (1); // trivial
`ifdef FAIL
        assert (0);
`endif
        assert (r_step == b_reg * q_step); // Helper invariant
        assert (q * b_reg + r == a_reg); // Main invariant & correct output relationship
        if (done) assert (r <= b_reg - 1); // Output range
        cover (done);
        cover (done && b_reg == 0);
        cover (r != a_reg);
        cover (r == a_reg);
        cover (0); // unreachable
    end
endmodule

22
tests/statusdb/timeout.sh Normal file
View file

@ -0,0 +1,22 @@
#!/bin/bash
# Regression test for --statuscsv/--latest: run a task, dump the CSV
# status summary, and check the expected row counts per property kind.
set -e
python3 $SBY_MAIN -f $SBY_FILE $TASK
STATUS_CSV=${WORKDIR}/status.csv
# summarize only the most recent run of this task as CSV
python3 $SBY_MAIN -f $SBY_FILE $TASK --statuscsv --latest | tee $STATUS_CSV
if [[ $TASK =~ "_cover" ]]; then
    # header plus one line per cover property (5 covers in timeout.sv)
    wc -l $STATUS_CSV | grep -q '6'
    grep "COVER" $STATUS_CSV | wc -l | grep -q '5'
elif [[ $TASK =~ "_fail" ]]; then
    # header plus 5 asserts (the FAIL define enables `assert (0)`), one FAIL
    wc -l $STATUS_CSV | grep -q '6'
    grep "ASSERT" $STATUS_CSV | wc -l | grep -q '5'
    grep "FAIL" $STATUS_CSV | wc -l | grep -q '1'
else
    # header plus the 4 asserts present without the FAIL define
    wc -l $STATUS_CSV | grep -q '5'
    grep "ASSERT" $STATUS_CSV | wc -l | grep -q '4'
fi
if [[ $TASK == "smt_cover" ]]; then
    grep "PASS" $STATUS_CSV | wc -l | grep -q '4'
fi