From 394173f98ccc67687d47fd328a43bcfca9a85e90 Mon Sep 17 00:00:00 2001
From: Nikolaj Bjorner
Date: Fri, 22 Aug 2025 15:08:31 -0700
Subject: [PATCH] add notes to script

Signed-off-by: Nikolaj Bjorner
---
 parameter_tuning.py | 60 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 14 deletions(-)

diff --git a/parameter_tuning.py b/parameter_tuning.py
index 1cea305ea..2dbcf9ad0 100644
--- a/parameter_tuning.py
+++ b/parameter_tuning.py
@@ -4,6 +4,7 @@ import z3
 MAX_CONFLICTS = 5000
 MAX_EXAMPLES = 5
 bench_dir = "/home/t-ilshapiro/z3-poly-testing/inputs/QF_LIA"
+bench_dir = "C:/tmp/parameter-tuning"
 
 params = [
     ("smt.arith.eager_eq_axioms", False),
@@ -12,20 +13,24 @@ params = [
     ("smt.relevancy", 0),
     ("smt.phase_caching_off", 200),
     ("smt.phase_caching_on", 600),
+
+# For LIA:
+# arith.eager_eq_axioms
+# arith.branch_cut_ratio
+# arith.bprop_on_pivoted_rows
+# arith.int_eq_branch
+# arith.propagate_eqs
+# restart_strategy
+
+# For NIA, Certora
+# arith.nl.branching
+# etc many arith.nl options
 ]
 
-# Iterate through all .smt2 files in the directory
-num_examples = 0
-for benchmark in os.listdir(bench_dir):
-    if num_examples > MAX_EXAMPLES:
-        break
-    if not benchmark.endswith(".smt2"):
-        continue
-
-    filepath = os.path.join(bench_dir, benchmark)
-    print(f"Running {filepath}\n")
+def score_benchmark(filepath):
     scores = {}
+    print(f"Running {filepath}\n")
     for n, v in params:
         s = z3.SimpleSolver()
         s.from_file(filepath)
@@ -37,16 +42,43 @@
         r = s.check()
         st = s.statistics()
 
+        # TODO: if r != unknown, the score should be better than
+        # scores of runs that don't finish
+
         try:
-            conf = st.get_key_value('conflicts')
+            d = st.get_key_value('decisions')
         except:
             try:
-                conf = st.num_conflicts()
+                d = st.decisions()
             except AttributeError:
-                conf = None
+                d = None
 
-        scores[(n, v)] = conf
+        print(n, v, d, st)
+
+        scores[(n, v)] = d
+    return scores
+
+# Iterate through all .smt2 files in the directory
+num_examples = 0
+for benchmark in os.listdir(bench_dir):
+    if num_examples > MAX_EXAMPLES:
+        break
+    if not benchmark.endswith(".smt2"):
+        continue
+
+    filepath = os.path.join(bench_dir, benchmark)
+
+    scores = score_benchmark(filepath)
+
+    evaluate_score(filepath, scores)
 
     num_examples += 1
     print(f"Scores for {benchmark}: {scores}")
+
+def evaluate_score(filepath, scores):
+    # Pick the top score,
+    # Run the benchmark with default config and with updated config based on best score.
+    # Check if time improves.
+    pass
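
Note on the patched script: evaluate_score is called inside the benchmark loop but its def only appears at the bottom of the file, so the script as patched raises a NameError at runtime; moving the definition above the loop fixes this. Below is a minimal sketch of what the stub's comments describe, not part of the patch. It assumes a lower decision count is the better score, that None entries (runs with no usable statistic) should lose to finished runs, matching the TODO in score_benchmark, and that parameters can be applied globally via z3.set_param, since the patch does not show how the scoring loop sets them.

import time
import z3

def evaluate_score(filepath, scores):
    # Keep only runs that produced a decision count; None marks runs
    # where no statistic was available, so they can never win.
    finished = {k: d for k, d in scores.items() if d is not None}
    if not finished:
        return
    # Assumption: fewer decisions is the better score.
    best_param, best_value = min(finished, key=finished.get)

    def timed_check(overrides):
        # Hypothetical application of parameters via global set_param;
        # the patch does not show how the scoring loop applies them.
        for name, value in overrides:
            z3.set_param(name, value)
        s = z3.SimpleSolver()
        s.from_file(filepath)
        t0 = time.time()
        s.check()
        elapsed = time.time() - t0
        z3.reset_params()  # restore defaults before the next run
        return elapsed

    default_time = timed_check([])
    tuned_time = timed_check([(best_param, best_value)])
    print(f"{filepath}: default {default_time:.2f}s, "
          f"{best_param}={best_value}: {tuned_time:.2f}s")

The z3.reset_params() call restores global defaults between the two timed runs, so the tuned measurement is compared against a clean default configuration.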