This commit is contained in:
Ilana Shapiro 2025-08-17 16:56:09 -07:00
commit 5ad5899b0a
8 changed files with 218 additions and 49 deletions


@@ -215,4 +215,80 @@ SMT parameters that could be tuned:
seq.split_w_len (bool) (default: true)
</pre>
# Benchmark setup
## Prepare benchmark data
Data:
<pre>
QF_LIA - heavily skewed for selected benchmarks
QF_NIA_small
Others we should try:
Certora
QF_ABV
QF_UFBV
QF_AUFBV
QF_UFLIA (might be too small)
Push more instances from smtlib.org benchmarks into z3-poly-testing/inputs, or upload tgz files directly in setup.
</pre>
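A minimal sketch for unpacking a benchmark tgz into the layout assumed above; the URL, helper name, and destination are illustrative assumptions, not part of the repo.
<pre>
import pathlib, tarfile, urllib.request

def fetch_benchmarks(url, dest="z3-poly-testing/inputs"):
    # download a .tgz of SMT-LIB instances and unpack it into the inputs dir
    dest_dir = pathlib.Path(dest)
    dest_dir.mkdir(parents=True, exist_ok=True)
    tgz_path = dest_dir / pathlib.Path(url).name
    urllib.request.urlretrieve(url, tgz_path)
    with tarfile.open(tgz_path) as tgz:
        tgz.extractall(dest_dir)  # assumes the archive is trusted

# fetch_benchmarks("https://example.org/QF_LIA.tgz")  # placeholder URL
</pre>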
## Naming conventions and basic parameters
The first rounds created a baseline where all features were turned off, then variants where one feature was turned on at a time.
A data point where all but one feature is turned off can tell us whether a feature has a chance of being useful in isolation.
The following is a sample naming scheme for associated settings.
<pre>
threads-1
-T:30 smt.threads=1 tactic.default_tactic=smt
threads-4-none-unbounded
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=true smt_parallel.share_conflicts=false smt_parallel.share_units=false smt.threads.max_conflicts=4294967295
threads-4-none
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=true smt_parallel.share_conflicts=false smt_parallel.share_units=false
threads-4-shareunits
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=true smt_parallel.share_conflicts=false smt_parallel.share_units=true
threads-4-cube-frugal
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=false smt_parallel.frugal_cube_only=true smt_parallel.share_conflicts=false smt_parallel.share_units=false
threads-4-cube-frugal-shareconflicts
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=false smt_parallel.frugal_cube_only=true smt_parallel.share_conflicts=true smt_parallel.share_units=false
threads-4-shareunits-nonrelevant
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=true smt_parallel.share_conflicts=true smt_parallel.share_units=false smt_parallel.relevant_units_only=false
threads-4-cube
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=false smt_parallel.frugal_cube_only=false smt_parallel.share_conflicts=false smt_parallel.share_units=false
threads-4-cube-shareconflicts
-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=false smt_parallel.frugal_cube_only=false smt_parallel.share_conflicts=true smt_parallel.share_units=false
</pre>
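A small driver can turn the naming scheme into actual runs. The following sketch is illustrative only: it assumes a <b>z3</b> binary on PATH and a directory of .smt2 files, and the CONFIGS table just transcribes two of the rows above.
<pre>
import pathlib, subprocess

# transcription of two sample rows from the naming scheme above
CONFIGS = {
    "threads-1":
        "smt.threads=1 tactic.default_tactic=smt",
    "threads-4-shareunits":
        "smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=true "
        "smt_parallel.share_conflicts=false smt_parallel.share_units=true",
}

def run_all(input_dir="z3-poly-testing/inputs", timeout=30):
    for smt2 in sorted(pathlib.Path(input_dir).glob("**/*.smt2")):
        for name, opts in CONFIGS.items():
            cmd = ["z3", f"-T:{timeout}", *opts.split(), str(smt2)]
            res = subprocess.run(cmd, capture_output=True, text=True)
            lines = res.stdout.strip().splitlines()
            print(name, smt2.name, lines[-1] if lines else "no output")
</pre>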
Ideas for other knobs that can be tested:
<ul>
<li> Only cube on literals that exist in the initial formula. Don't cube on literals created during search (such as by theory lemmas).
<li> Only share units for literals that exist in the initial formula.
<li> Vary the backoff multiplier <b>max_conflict_mul</b> from its default of 1.5 to lower and higher values.
<li> Vary <b>smt.threads.max_conflicts</b>.
<li> Replace the multiplicative backoff by an additive scheme: add <b>conflict_inc</b> (a new parameter) to the budget every round, and increment <b>conflict_inc</b> itself (see the sketch after this list).
</ul>
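To make the last two items concrete, here is a sketch of the current multiplicative backoff next to the proposed additive one. <b>conflict_inc</b> and its step size are assumptions to be tuned, not existing parameters.
<pre>
import itertools

def multiplicative_backoff(budget=1000, max_conflict_mul=1.5):
    # current scheme: scale the per-cube conflict budget on every cube timeout
    while True:
        yield budget
        budget = int(budget * max_conflict_mul)

def additive_backoff(budget=1000, conflict_inc=500, step=500):
    # proposed scheme: add conflict_inc each round and grow conflict_inc itself
    while True:
        yield budget
        budget += conflict_inc
        conflict_inc += step

print(list(itertools.islice(multiplicative_backoff(), 5)))  # [1000, 1500, 2250, 3375, 5062]
print(list(itertools.islice(additive_backoff(), 5)))        # [1000, 1500, 2500, 4000, 6000]
</pre>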
<pre>
cube_initial_only (bool) (default: false) only useful when never_cube=false
frugal_cube_only (bool) (default: false) only useful when never_cube=false
max_conflict_mul (double) (default: 1.5)
never_cube (bool) (default: false)
relevant_units_only (bool) (default: true) only useful when share_units=true
share_conflicts (bool) (default: true) only useful when never_cube=false
share_units (bool) (default: true)
share_units_initial_only (bool) (default: false) only useful when share_units=true
</pre>
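The "only useful when" annotations induce dependencies between the flags, so a parameter sweep can skip combinations that are no-ops. A hedged sketch that enumerates only meaningful Boolean combinations; the filtering rules are simply a reading of the table above.
<pre>
import itertools

BOOLS = ["never_cube", "frugal_cube_only", "cube_initial_only",
         "share_conflicts", "share_units",
         "relevant_units_only", "share_units_initial_only"]

def is_meaningful(cfg):
    # per the table: cube refinements and conflict sharing matter only when cubing is on
    if cfg["never_cube"] and (cfg["frugal_cube_only"] or cfg["cube_initial_only"]
                              or cfg["share_conflicts"]):
        return False
    # unit-sharing refinements matter only when unit sharing is on
    if not cfg["share_units"] and (cfg["relevant_units_only"]
                                   or cfg["share_units_initial_only"]):
        return False
    return True

configs = []
for bits in itertools.product([False, True], repeat=len(BOOLS)):
    cfg = dict(zip(BOOLS, bits))
    if is_meaningful(cfg):
        configs.append(cfg)
print(len(configs), "of", 2**len(BOOLS), "combinations are meaningful")
</pre>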


@@ -41,4 +41,5 @@ void lp::lp_settings::updt_params(params_ref const& _p) {
m_dio_ignore_big_nums = lp_p.dio_ignore_big_nums();
m_dio_calls_period = lp_p.dio_calls_period();
m_dio_run_gcd = lp_p.dio_run_gcd();
m_max_conflicts = p.max_conflicts();
}


@@ -137,6 +137,7 @@ struct statistics {
unsigned m_bounds_tightening_conflicts = 0;
unsigned m_bounds_tightenings = 0;
unsigned m_nla_throttled_lemmas = 0;
::statistics m_st = {};
void reset() {
@@ -221,11 +222,13 @@ public:
unsigned percent_of_entering_to_check = 5; // we try to find a profitable column in a percentage of the columns
bool use_scaling = true;
unsigned max_number_of_iterations_with_no_improvements = 2000000;
double time_limit; // the maximum time limit of the total run time in seconds
// end of dual section
bool m_bound_propagation = true;
bool presolve_with_double_solver_for_lar = true;
simplex_strategy_enum m_simplex_strategy;
unsigned m_max_conflicts = 0;
int report_frequency = 1000;
bool print_statistics = false;


@@ -38,7 +38,7 @@ core::core(lp::lar_solver& s, params_ref const& p, reslimit & lim) :
m_grobner(this),
m_emons(m_evars),
m_use_nra_model(false),
m_nra(s, m_nra_lim, *this),
m_nra(s, m_nra_lim, *this, p),
m_throttle(lra.trail(),
lra.settings().stats()) {
m_nlsat_delay_bound = lp_settings().nlsat_delay();
@@ -1561,6 +1561,9 @@ lbool core::check() {
if (no_effect() && params().arith_nl_nra()) {
scoped_limits sl(m_reslim);
sl.push_child(&m_nra_lim);
params_ref p;
p.set_uint("max_conflicts", lp_settings().m_max_conflicts);
m_nra.updt_params(p);
ret = m_nra.check();
lp_settings().stats().m_nra_calls++;
}
@@ -1595,7 +1598,7 @@ lbool core::bounded_nlsat() {
scoped_rlimit sr(m_nra_lim, 100000);
ret = m_nra.check();
}
p.set_uint("max_conflicts", UINT_MAX);
p.set_uint("max_conflicts", lp_settings().m_max_conflicts);
m_nra.updt_params(p);
lp_settings().stats().m_nra_calls++;
if (ret == l_undef)


@@ -6,5 +6,8 @@ def_module_params('smt_parallel',
('share_conflicts', BOOL, True, 'share conflicts'),
('never_cube', BOOL, False, 'never cube'),
('frugal_cube_only', BOOL, False, 'only apply frugal cube strategy'),
('relevant_units_only', BOOL, False, 'only share relevant units')
('relevant_units_only', BOOL, True, 'only share relevant units'),
('max_conflict_mul', DOUBLE, 1.5, 'increment multiplier for max-conflicts'),
('share_units_initial_only', BOOL, False, 'share only initial Boolean atoms as units'),
('cube_initial_only', BOOL, False, 'cube only on initial Boolean atoms')
))


@@ -39,48 +39,50 @@ namespace smt {
#include <thread>
#include <cassert>
#define LOG_WORKER(lvl, s) IF_VERBOSE(lvl, verbose_stream() << "Worker " << id << s)
namespace smt {
void parallel::worker::run() {
ast_translation g2l(p.ctx.m, m); // global to local context -- MUST USE p.ctx.m, not ctx->m, AS GLOBAL MANAGER!!!
ast_translation l2g(m, p.ctx.m); // local to global context
while (m.inc()) { // m.inc() increases the limit and checks for cancellation; m.limit().is_canceled() is a read-only check. The .limit() call is unnecessary since m.inc() provides a convenience wrapper.
vector<expr_ref_vector> cubes;
b.get_cubes(g2l, cubes);
b.get_cubes(m_g2l, cubes);
if (cubes.empty())
return;
collect_shared_clauses(g2l);
collect_shared_clauses(m_g2l);
for (auto& cube : cubes) {
if (!m.inc()) {
b.set_exception("context cancelled");
return;
}
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " checking cube: " << mk_bounded_pp(mk_and(cube), m, 3) << "\n");
LOG_WORKER(1, " checking cube: " << mk_bounded_pp(mk_and(cube), m, 3) << " max-conflicts " << m_config.m_threads_max_conflicts << "\n");
lbool r = check_cube(cube);
if (m.limit().is_canceled()) {
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " context cancelled\n");
LOG_WORKER(1, " context cancelled\n");
return;
}
switch (r) {
case l_undef: {
if (m.limit().is_canceled())
break;
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " found undef cube\n");
LOG_WORKER(1, " found undef cube\n");
// return unprocessed cubes to the batch manager
// add a split literal to the batch manager.
// optionally process other cubes and delay sending back unprocessed cubes to batch manager.
vector<expr_ref_vector> returned_cubes;
returned_cubes.push_back(cube);
auto split_atoms = get_split_atoms();
b.return_cubes(l2g, returned_cubes, split_atoms);
if (!m_config.m_never_cube) {
vector<expr_ref_vector> returned_cubes;
returned_cubes.push_back(cube);
auto split_atoms = get_split_atoms();
b.return_cubes(m_l2g, returned_cubes, split_atoms);
}
update_max_thread_conflicts();
break;
}
case l_true: {
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " found sat cube\n");
LOG_WORKER(1, " found sat cube\n");
model_ref mdl;
ctx->get_model(mdl);
b.set_sat(l2g, *mdl);
b.set_sat(m_l2g, *mdl);
return;
}
case l_false: {
@@ -89,42 +91,58 @@ namespace smt {
// share with batch manager.
// process next cube.
expr_ref_vector const& unsat_core = ctx->unsat_core();
IF_VERBOSE(1, verbose_stream() << "unsat core: " << unsat_core << "\n");
LOG_WORKER(2, " unsat core:\n"; for (auto c : unsat_core) verbose_stream() << mk_bounded_pp(c, m, 3) << "\n");
// If the unsat core only contains assumptions,
// unsatisfiability does not depend on the current cube and the entire problem is unsat.
if (all_of(unsat_core, [&](expr* e) { return asms.contains(e); })) {
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " determined formula unsat\n");
b.set_unsat(l2g, unsat_core);
LOG_WORKER(1, " determined formula unsat\n");
b.set_unsat(m_l2g, unsat_core);
return;
}
for (expr* e : unsat_core)
if (asms.contains(e))
b.report_assumption_used(l2g, e); // report assumptions used in unsat core, so they can be used in final core
b.report_assumption_used(m_l2g, e); // report assumptions used in unsat core, so they can be used in final core
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " found unsat cube\n");
if (smt_parallel_params(p.ctx.m_params).share_conflicts())
b.collect_clause(l2g, id, mk_not(mk_and(unsat_core)));
LOG_WORKER(1, " found unsat cube\n");
if (m_config.m_share_conflicts)
b.collect_clause(m_l2g, id, mk_not(mk_and(unsat_core)));
break;
}
}
}
if (smt_parallel_params(p.ctx.m_params).share_units())
share_units(l2g);
if (m_config.m_share_units)
share_units(m_l2g);
}
}
parallel::worker::worker(unsigned id, parallel& p, expr_ref_vector const& _asms): id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m) {
ast_translation g2l(p.ctx.m, m);
parallel::worker::worker(unsigned id, parallel& p, expr_ref_vector const& _asms):
id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m),
m_g2l(p.ctx.m, m),
m_l2g(m, p.ctx.m) {
for (auto e : _asms)
asms.push_back(g2l(e));
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " created with " << asms.size() << " assumptions\n");
asms.push_back(m_g2l(e));
LOG_WORKER(1, " created with " << asms.size() << " assumptions\n");
m_smt_params.m_preprocess = false;
ctx = alloc(context, m, m_smt_params, p.ctx.get_params());
context::copy(p.ctx, *ctx, true);
ctx->set_random_seed(id + m_smt_params.m_random_seed);
m_max_thread_conflicts = ctx->get_fparams().m_threads_max_conflicts;
m_max_conflicts = ctx->get_fparams().m_max_conflicts;
smt_parallel_params pp(p.ctx.m_params);
m_config.m_threads_max_conflicts = ctx->get_fparams().m_threads_max_conflicts;
m_config.m_max_conflicts = ctx->get_fparams().m_max_conflicts;
m_config.m_relevant_units_only = pp.relevant_units_only();
m_config.m_never_cube = pp.never_cube();
m_config.m_share_conflicts = pp.share_conflicts();
m_config.m_share_units = pp.share_units();
m_config.m_share_units_initial_only = pp.share_units_initial_only();
m_config.m_cube_initial_only = pp.cube_initial_only();
m_config.m_max_conflict_mul = pp.max_conflict_mul();
// don't share initial units
ctx->pop_to_base_lvl();
m_num_shared_units = ctx->assigned_literals().size();
m_num_initial_atoms = ctx->get_num_bool_vars();
}
void parallel::worker::share_units(ast_translation& l2g) {
@@ -133,15 +151,18 @@ namespace smt {
unsigned sz = ctx->assigned_literals().size();
for (unsigned j = m_num_shared_units; j < sz; ++j) { // iterate only over new literals since last sync
literal lit = ctx->assigned_literals()[j];
if (!ctx->is_relevant(lit.var()) && smt_parallel_params(p.ctx.m_params).relevant_units_only()) {
// IF_VERBOSE(0, verbose_stream() << "non-relevant literal " << lit << "\n");
if (!ctx->is_relevant(lit.var()) && m_config.m_relevant_units_only)
continue;
if (m_config.m_share_units_initial_only && lit.var() >= m_num_initial_atoms) {
LOG_WORKER(2, " Skipping non-initial unit: " << lit.var() << "\n");
continue; // skip non-initial atoms if configured to do so
}
expr_ref e(ctx->bool_var2expr(lit.var()), ctx->m); // turn literal into a Boolean expression
if (m.is_and(e) || m.is_or(e)) {
//IF_VERBOSE(0, verbose_stream() << "skip Boolean\n");
if (m.is_and(e) || m.is_or(e))
continue;
}
if (lit.sign())
e = m.mk_not(e); // negate if literal is negative
@@ -150,6 +171,40 @@ namespace smt {
m_num_shared_units = sz;
}
void parallel::batch_manager::init_parameters_state() {
auto& smt_params = p.ctx.get_fparams();
std::function<std::function<void(void)>(unsigned&)> inc = [](unsigned& v) { return [&]() -> void { ++v; }; };
std::function<std::function<void(void)>(unsigned&)> dec = [](unsigned& v) { return [&]() -> void { if (v > 0) --v; }; };
std::function<std::function<void(void)>(bool&)> incb = [](bool& v) { return [&]() -> void { v = true; }; };
std::function<std::function<void(void)>(bool&)> decb = [](bool& v) { return [&]() -> void { v = false; }; };
std::function<parameter_state(unsigned&)> unsigned_parameter = [&](unsigned& p) -> parameter_state {
return { { { p , 1.0}},
{ { 1.0, inc(p) }, { 1.0, dec(p) }}
};
};
std::function<parameter_state(bool&)> bool_parameter = [&](bool& p) -> parameter_state {
return { { { p , 1.0}},
{ { 1.0, incb(p) }, { 1.0, decb(p) }}
};
};
parameter_state s1 = unsigned_parameter(smt_params.m_arith_branch_cut_ratio);
parameter_state s2 = bool_parameter(smt_params.m_arith_eager_eq_axioms);
// arith.enable_hnf(bool) (default: true)
// arith.greatest_error_pivot(bool) (default: false)
// arith.int_eq_branch(bool) (default: false)
// arith.min(bool) (default: false)
// arith.nl.branching(bool) (default: true)
// arith.nl.cross_nested(bool) (default: true)
// arith.nl.delay(unsigned int) (default: 10)
// arith.nl.expensive_patching(bool) (default: false)
// arith.nl.expp(bool) (default: false)
// arith.nl.gr_q(unsigned int) (default: 10)
// arith.nl.grobner(bool) (default: true)
// arith.nl.grobner_cnfl_to_report(unsigned int) (default: 1) };
}
void parallel::batch_manager::collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* clause) {
std::scoped_lock lock(mux);
expr* g_clause = l2g(clause);
@@ -166,7 +221,7 @@ namespace smt {
for (expr* e : new_clauses) {
expr_ref local_clause(e, g2l.to()); // e was already translated to the local context in the batch manager!!
ctx->assert_expr(local_clause); // assert the clause in the local context
IF_VERBOSE(2, verbose_stream() << "Worker " << id << " asserting shared clause: " << mk_bounded_pp(local_clause, m, 3) << "\n");
LOG_WORKER(2, " asserting shared clause: " << mk_bounded_pp(local_clause, m, 3) << "\n");
}
}
@@ -188,7 +243,7 @@ namespace smt {
asms.push_back(atom);
lbool r = l_undef;
ctx->get_fparams().m_max_conflicts = std::min(m_max_thread_conflicts, m_max_conflicts);
ctx->get_fparams().m_max_conflicts = std::min(m_config.m_threads_max_conflicts, m_config.m_max_conflicts);
try {
r = ctx->check(asms.size(), asms.data());
}
@@ -202,7 +257,7 @@ namespace smt {
b.set_exception("unknown exception");
}
asms.shrink(asms.size() - cube.size());
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " DONE checking cube " << r << "\n";);
LOG_WORKER(1, " DONE checking cube " << r << "\n";);
return r;
}
@@ -354,8 +409,7 @@ namespace smt {
// currently, the code just implements the greedy strategy
void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector>const& C_worker, expr_ref_vector const& A_worker) {
if (smt_parallel_params(p.ctx.m_params).never_cube())
return; // portfolio only
SASSERT(!smt_parallel_params(p.ctx.m_params).never_cube());
auto atom_in_cube = [&](expr_ref_vector const& cube, expr* atom) {
return any_of(cube, [&](expr* e) { return e == atom || (m.is_not(e, e) && e == atom); });
@@ -364,7 +418,9 @@ namespace smt {
auto add_split_atom = [&](expr* atom, unsigned start) {
unsigned stop = m_cubes.size();
for (unsigned i = start; i < stop; ++i) {
m_cubes.push_back(m_cubes[i]);
// copy the last cube so that expanding m_cubes doesn't invalidate reference.
auto cube = m_cubes[i];
m_cubes.push_back(cube);
m_cubes.back().push_back(m.mk_not(atom));
m_cubes[i].push_back(atom);
}
@@ -440,9 +496,15 @@ namespace smt {
expr_ref_vector top_lits(m);
for (const auto& node: candidates) {
if (ctx->get_assignment(node.key) != l_undef)
continue;
if (m_config.m_cube_initial_only && node.key >= m_num_initial_atoms) {
LOG_WORKER(2, " Skipping non-initial atom from cube: " << node.key << "\n");
continue; // skip non-initial atoms if configured to do so
}
expr* e = ctx->bool_var2expr(node.key);
if (!e)
continue;
@@ -451,7 +513,7 @@ namespace smt {
if (top_lits.size() >= k)
break;
}
IF_VERBOSE(2, verbose_stream() << "top literals " << top_lits << " head size " << ctx->m_pq_scores.size() << " num vars " << ctx->get_num_bool_vars() << "\n");
IF_VERBOSE(3, verbose_stream() << "top literals " << top_lits << " head size " << ctx->m_pq_scores.size() << " num vars " << ctx->get_num_bool_vars() << "\n");
return top_lits;
}


@@ -11,7 +11,7 @@ Abstract:
Author:
nbjorner 2020-01-31
Ilana 2025
Revision History:
@@ -32,6 +32,11 @@ namespace smt {
expr_ref clause;
};
struct parameter_state {
std::vector<std::pair<unsigned, double>> m_value_scores; // bounded number of values with scores.
std::vector<std::pair<double, std::function<void(void)>>> m_weighted_moves; // possible moves weighted by how well they did
};
class batch_manager {
enum state {
is_running,
@@ -52,6 +57,7 @@ namespace smt {
std::string m_exception_msg;
vector<shared_clause> shared_clause_trail; // store all shared clauses with worker IDs
obj_hashtable<expr> shared_clause_set; // for duplicate filtering on per-thread clause expressions
vector<parameter_state> m_parameters_state;
// called from batch manager to cancel other workers if we've reached a verdict
void cancel_workers() {
@@ -60,6 +66,8 @@ namespace smt {
w->cancel();
}
void init_parameters_state();
public:
batch_manager(ast_manager& m, parallel& p) : m(m), p(p), m_split_atoms(m) { }
@@ -89,21 +97,33 @@ namespace smt {
};
class worker {
struct config {
unsigned m_threads_max_conflicts = 1000;
unsigned m_max_conflicts = 10000000;
bool m_relevant_units_only = true;
bool m_never_cube = false;
bool m_share_conflicts = true;
bool m_share_units = true;
double m_max_conflict_mul = 1.5;
bool m_share_units_initial_only = false;
bool m_cube_initial_only = false;
};
unsigned id; // unique identifier for the worker
parallel& p;
batch_manager& b;
ast_manager m;
expr_ref_vector asms;
smt_params m_smt_params;
config m_config;
scoped_ptr<context> ctx;
unsigned m_max_conflicts; // the global budget for all work this worker can do across cubes in the current run. THIS GETS SET IN THE CPP FILE
unsigned m_max_thread_conflicts; // the per-cube limit for how many conflicts the worker can spend on a single cube before timing out on it and moving on. THIS GETS SET IN THE CPP FILE
ast_translation m_g2l, m_l2g;
unsigned m_num_shared_units = 0;
unsigned m_num_initial_atoms = 0;
unsigned m_shared_clause_limit = 0; // remembers the index into shared_clause_trail marking the boundary between "old" and "new" clauses to share
void share_units(ast_translation& l2g);
lbool check_cube(expr_ref_vector const& cube);
void update_max_thread_conflicts() {
m_max_thread_conflicts *= 2;
m_config.m_threads_max_conflicts = (unsigned)(m_config.m_max_conflict_mul * m_config.m_threads_max_conflicts);
} // allow for backoff scheme of conflicts within the thread for cube timeouts.
public:
worker(unsigned id, parallel& p, expr_ref_vector const& _asms);
@@ -116,7 +136,6 @@ namespace smt {
m.limit().cancel();
}
void collect_statistics(::statistics& st) const {
IF_VERBOSE(1, verbose_stream() << "Collecting statistics for worker " << id << "\n");
ctx->collect_statistics(st);
}
reslimit& limit() {


@@ -1427,6 +1427,8 @@ public:
void init_search_eh() {
m_arith_eq_adapter.init_search_eh();
m_num_conflicts = 0;
if (m_solver)
lp().settings().m_max_conflicts = ctx().get_fparams().m_max_conflicts;
}
bool can_get_value(theory_var v) const {