Mirror of https://github.com/Z3Prover/z3, synced 2025-09-12 12:41:27 +00:00
Merge remote-tracking branch 'upstream/ilana' into parallel-solving
Commit 05e8c2e713
2 changed files with 60 additions and 14 deletions
@@ -4,6 +4,7 @@ import z3
MAX_CONFLICTS = 5000
MAX_EXAMPLES = 5
bench_dir = "/home/t-ilshapiro/z3-poly-testing/inputs/QF_LIA"
bench_dir = "C:/tmp/parameter-tuning"

params = [
    ("smt.arith.eager_eq_axioms", False),
@@ -12,20 +13,24 @@ params = [
    ("smt.relevancy", 0),
    ("smt.phase_caching_off", 200),
    ("smt.phase_caching_on", 600),

    # For LIA:
    # arith.eager_eq_axioms
    # arith.branch_cut_ratio
    # arith.bprop_on_pivoted_rows
    # arith.int_eq_branch
    # arith.propagate_eqs
    # restart_strategy

    # For NIA, Certora
    # arith.nl.branching
    # etc. many arith.nl options
]

# Iterate through all .smt2 files in the directory
num_examples = 0
for benchmark in os.listdir(bench_dir):
    if num_examples > MAX_EXAMPLES:
        break
    if not benchmark.endswith(".smt2"):
        continue

    filepath = os.path.join(bench_dir, benchmark)
    print(f"Running {filepath}\n")

def score_benchmark(filepath):
    scores = {}
    print(f"Running {filepath}\n")
    for n, v in params:
        s = z3.SimpleSolver()
        s.from_file(filepath)
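The lines between this hunk and the next are not shown. Presumably they apply the parameter under test to the fresh solver before the check() call that follows; a minimal sketch of how that could look with the z3 Python API (whether MAX_CONFLICTS is wired in as a solver option, and under which name, is an assumption, not something the visible diff confirms):

        # Hedged sketch, not part of the visible diff: apply the parameter
        # under test and a conflict budget before checking.
        z3.set_param(n, v)                      # fully qualified name, e.g. "smt.arith.eager_eq_axioms"
        s.set("max_conflicts", MAX_CONFLICTS)   # assumed option name; bounds each run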
@@ -37,16 +42,43 @@ for benchmark in os.listdir(bench_dir):
        r = s.check()
        st = s.statistics()

        # TODO: if r != unknown, the score should be better than
        # scores of runs that don't finish

        try:
            conf = st.get_key_value('conflicts')
            d = st.get_key_value('decisions')
        except:
            try:
                conf = st.num_conflicts()
                d = st.decisions()
            except AttributeError:
                conf = None
                d = None

        scores[(n, v)] = conf
        print(n, v, d, st)

        scores[(n, v)] = d
    return scores

# Iterate through all .smt2 files in the directory
num_examples = 0
for benchmark in os.listdir(bench_dir):
    if num_examples > MAX_EXAMPLES:
        break
    if not benchmark.endswith(".smt2"):
        continue

    filepath = os.path.join(bench_dir, benchmark)

    scores = score_benchmark(filepath)

    evaluate_score(filepath, scores)

    num_examples += 1

    print(f"Scores for {benchmark}: {scores}")

def evaluate_score(filepath, scores):
    # Pick the top score,
    # Run the benchmark with default config and with updated config based on best score.
    # Check if time improves.
    pass
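The commit leaves evaluate_score as a stub. A minimal sketch of what its TODO comments describe, assuming the scores dict maps each (name, value) pair to the decision count collected above, that None marks a run that produced no statistics, and that fewer decisions counts as a better score (all of these are assumptions, not part of the commit):

import time

def evaluate_score(filepath, scores):
    # Drop runs that did not produce a score; treat them as worst.
    finished = {k: v for k, v in scores.items() if v is not None}
    if not finished:
        return
    # Lower decision count is taken as the better score here.
    best_param, best_value = min(finished, key=finished.get)

    def timed_check(param=None):
        if param is not None:
            z3.set_param(*param)   # global setting; a real harness would reset it afterwards
        s = z3.SimpleSolver()
        s.from_file(filepath)
        t0 = time.time()
        s.check()
        return time.time() - t0

    default_time = timed_check()
    tuned_time = timed_check((best_param, best_value))
    print(f"{filepath}: default {default_time:.2f}s, "
          f"tuned {tuned_time:.2f}s with {best_param}={best_value}")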
@@ -500,6 +500,20 @@ namespace smt {
    }

    expr_ref_vector parallel::worker::get_split_atoms() {
#if false
        for (bool_var v = 0; v < ctx->get_num_bool_vars(); ++v) {
            if (ctx->get_assignment(v) != l_undef)
                continue;
            expr* e = ctx->bool_var2expr(v);
            if (!e)
                continue;
            auto v_score = ctx->m_lit_scores[0][v] * ctx->m_lit_scores[1][v];
            // if v_score is maximal then v is our split atom..

            ctx->m_lit_scores[0][v] /= 2;
            ctx->m_lit_scores[1][v] /= 2;
        }
#endif
        unsigned k = 2;

        auto candidates = ctx->m_pq_scores.get_heap();
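For intuition only, and not the committed C++: a small Python sketch of the selection scheme that both the disabled loop and the m_pq_scores heap encode, namely rank undecided atoms by the product of their positive and negative literal scores, keep the top k, and halve the scores so the ranking decays between calls. The pick_split_atoms name and the dict representation are illustrative assumptions.

import heapq

def pick_split_atoms(lit_scores, k=2):
    # lit_scores: atom -> [positive_score, negative_score]
    product = {a: s[0] * s[1] for a, s in lit_scores.items()}
    best = heapq.nlargest(k, product, key=product.get)
    for s in lit_scores.values():   # decay, mirroring the /= 2 in the disabled loop
        s[0] /= 2
        s[1] /= 2
    return best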