
Merge remote-tracking branch 'upstream/ilana' into parallel-solving

Ilana Shapiro 2025-08-18 20:51:40 -07:00
commit b1eb5b5612
4 changed files with 92 additions and 22 deletions


@@ -215,6 +215,38 @@ SMT parameters that could be tuned:
seq.split_w_len (bool) (default: true)
</pre>
### Probing parameter tuning
A first experiment with parameter tuning can be performed using the Python API.
The idea is to run the solver with one parameter changed at a time and record a progress measure.
The simplest proxy for progress is the number of decisions per conflict.
Finer-grained progress can be measured from the glue levels of conflicts and from the average depth at which conflicts occur.
Roughly,
<pre>
from z3 import *

max_conflicts = 5000
params = [("smt.arith.eager_eq_axioms", False), ("smt.restart_factor", 1.2),
          ("smt.restart_factor", 1.4), ("smt.relevancy", 0),
          ("smt.phase_caching_off", 200), ("smt.phase_caching_on", 600)]  # etc.

for benchmark in benchmarks:          # benchmarks: paths to SMT-LIB2 files
    scores = {}
    for n, v in params:
        s = SimpleSolver()
        s.from_file(benchmark)
        s.set("smt.auto_config", False)
        s.set(n, v)
        s.set("smt.max_conflicts", max_conflicts)
        r = s.check()
        st = s.statistics()
        # "conflicts" is reported by the SMT core; st.keys() lists all available counters
        conf = st.get_key_value("conflicts") if "conflicts" in st.keys() else 0
        scores[(n, v)] = conf
</pre>
The resulting scores can then be used to filter out parameter settings that do not make progress within the fixed conflict budget.
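A minimal sketch of such a filter, assuming the <b>scores</b> dictionary populated in the loop above and treating fewer conflicts within the budget as the (assumed) selection criterion:
<pre>
# would sit at the end of the per-benchmark loop above
ranked = sorted(scores.items(), key=lambda kv: kv[1])                    # fewest conflicts first
promising = [(n, v) for (n, v), conf in ranked if conf < max_conflicts]  # finished within the budget
print(benchmark, promising[:3])                                          # keep the most promising settings
</pre>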
# Benchmark setup
## Prepare benchmark data
@@ -275,17 +307,21 @@ threads-4-cube-shareconflicts
Ideas for other knobs that can be tested:
<ul>
<li> Only cube on literals that exist in the initial formula. Don't cube on literals created during search (such as by theory lemmas).</li>
<li> Only share units for literals that exist in the initial formula.</li>
<li> Vary the backoff scheme for <b>max_conflict_mul</b> from 1.5 to lower and higher.</li>
<li> Vary <b>smt.threads.max_conflicts</b>.</li>
<li> Replace the backoff scheme by a geometric scheme: add <b>conflict_inc</b> (a new parameter) every time and increment <b>conflict_inc</b> (see the sketch below).</li>
<li> Revert backoff if a cube is solved.</li>
<li>Delay lemma and unit sharing.</li>
<li>Vary <b>max_cube_size</b>, or add a parameter to grow <b>max_cube_size</b> if initial attempts to conquer reach <b>max_conflicts</b>.</li>
</ul>
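The difference between the current multiplicative backoff and the <b>conflict_inc</b> idea above can be sketched as follows; the starting values and the <b>inc_step</b> name are illustrative assumptions, not existing parameters:
<pre>
# current scheme: multiply the per-cube conflict budget by max_conflict_mul (default 1.5)
def backoff_multiplicative(max_conflicts, max_conflict_mul=1.5):
    while True:
        yield max_conflicts
        max_conflicts = int(max_conflicts * max_conflict_mul)

# proposed scheme: add conflict_inc to the budget each round and also grow conflict_inc
def backoff_incremental(max_conflicts, conflict_inc=100, inc_step=100):
    while True:
        yield max_conflicts
        max_conflicts += conflict_inc
        conflict_inc += inc_step
</pre>
For reference, the parallel-solver parameters touched by these ideas and their defaults: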
<pre>
cube_initial_only (bool) (default: false) only useful when never_cube=false
frugal_cube_only (bool) (default: false) only useful when never_cube=false
max_conflict_mul (double) (default: 1.5)
max_cube_size (unsigned int) (default: 20) only useful when never_cube=false
never_cube (bool) (default: false)
relevant_units_only (bool) (default: true) only useful when share_units=true
share_conflicts (bool) (default: true) only useful when never_cube=false


@@ -9,5 +9,6 @@ def_module_params('smt_parallel',
('relevant_units_only', BOOL, True, 'only share relevant units'),
('max_conflict_mul', DOUBLE, 1.5, 'increment multiplier for max-conflicts'),
('share_units_initial_only', BOOL, False, 'share only initial Boolean atoms as units'),
('cube_initial_only', BOOL, False, 'cube only on initial Boolean atoms'),
('max_cube_size', UINT, 20, 'maximum size of a cube to share'),
))


@@ -171,6 +171,15 @@ namespace smt {
m_num_shared_units = sz;
}
void parallel::worker::collect_statistics(::statistics& st) const {
ctx->collect_statistics(st);
}
void parallel::worker::cancel() {
LOG_WORKER(1, " canceling\n");
m.limit().cancel();
}
void parallel::batch_manager::init_parameters_state() {
auto& smt_params = p.ctx.get_fparams();
std::function<std::function<void(void)>(unsigned&)> inc = [](unsigned& v) { return [&]() -> void { ++v; }; };
@@ -409,7 +418,7 @@ namespace smt {
// currently, the code just implements the greedy strategy
void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector>const& C_worker, expr_ref_vector const& A_worker) {
SASSERT(!smt_parallel_params(p.ctx.m_params).never_cube());
SASSERT(!m_config.m_never_cube);
auto atom_in_cube = [&](expr_ref_vector const& cube, expr* atom) {
return any_of(cube, [&](expr* e) { return e == atom || (m.is_not(e, e) && e == atom); });
@@ -420,15 +429,19 @@ namespace smt {
for (unsigned i = start; i < stop; ++i) {
// copy the cube by value so that growing m_cubes doesn't invalidate the reference.
auto cube = m_cubes[i];
if (cube.size() >= m_config.m_max_cube_size)
continue;
m_cubes.push_back(cube);
m_cubes.back().push_back(m.mk_not(atom));
m_cubes[i].push_back(atom);
m_stats.m_max_cube_size = std::max(m_stats.m_max_cube_size, m_cubes.back().size());
m_stats.m_num_cubes += 2;
}
};
std::scoped_lock lock(mux);
unsigned max_cubes = 1000;
bool greedy_mode = (m_cubes.size() <= max_cubes) && !smt_parallel_params(p.ctx.m_params).frugal_cube_only();
bool greedy_mode = (m_cubes.size() <= max_cubes) && !m_config.m_frugal_cube_only;
unsigned a_worker_start_idx = 0;
//
@@ -522,6 +535,16 @@ namespace smt {
m_cubes.reset();
m_cubes.push_back(expr_ref_vector(m)); // push empty cube
m_split_atoms.reset();
smt_parallel_params sp(p.ctx.m_params);
m_config.m_max_cube_size = sp.max_cube_size();
m_config.m_frugal_cube_only = sp.frugal_cube_only();
m_config.m_never_cube = sp.never_cube();
}
void parallel::batch_manager::collect_statistics(::statistics& st) const {
//ctx->collect_statistics(st);
st.update("parallel-num_cubes", m_stats.m_num_cubes);
st.update("parallel-max-cube-size", m_stats.m_max_cube_size);
}
lbool parallel::operator()(expr_ref_vector const& asms) {
@@ -530,12 +553,12 @@ namespace smt {
if (m.has_trace_stream())
throw default_exception("trace streams have to be off in parallel mode");
struct scoped_clear_table {
obj_hashtable<expr>& ht;
scoped_clear_table(obj_hashtable<expr>& ht) : ht(ht) {} // Constructor: Takes a reference to a hash table when the object is created and saves it.
~scoped_clear_table() { ht.reset(); } // Destructor: When the scoped_clear_table object goes out of scope, it automatically calls reset() on that hash table, clearing it
struct scoped_clear {
parallel& p;
scoped_clear(parallel& p) : p(p) {}
~scoped_clear() { p.m_workers.clear(); p.m_assumptions_used.reset(); }
};
scoped_clear_table clear(m_assumptions_used); // creates a scoped_clear_table named clear, bound to m_assumptions_used
scoped_clear clear(*this);
{
m_batch_manager.initialize();
@@ -567,10 +590,10 @@ namespace smt {
th.join();
for (auto w : m_workers)
w->collect_statistics(ctx.m_aux_stats);
m_batch_manager.collect_statistics(ctx.m_aux_stats);
}
m_workers.clear();
return m_batch_manager.get_result(); // all threads have finished their cubes; if state::is_running still holds, the entire formula is unsat (otherwise a thread would have returned l_undef)
}


@@ -45,11 +45,22 @@ namespace smt {
is_exception_msg,
is_exception_code
};
struct config {
unsigned m_max_cube_size = 20;
bool m_frugal_cube_only = false;
bool m_never_cube = false;
};
struct stats {
unsigned m_max_cube_size = 0;
unsigned m_num_cubes = 0;
};
ast_manager& m;
parallel& p;
std::mutex mux;
state m_state = state::is_running;
config m_config;
stats m_stats;
expr_ref_vector m_split_atoms; // atoms to split on
vector<expr_ref_vector> m_cubes;
unsigned m_max_batch_size = 10;
@@ -93,6 +104,8 @@ namespace smt {
void report_assumption_used(ast_translation& l2g, expr* assumption);
void collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* e);
expr_ref_vector return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id);
void collect_statistics(::statistics& st) const;
lbool get_result() const;
};
@@ -108,6 +121,7 @@ namespace smt {
bool m_share_units_initial_only = false;
bool m_cube_initial_only = false;
};
unsigned id; // unique identifier for the worker
parallel& p;
batch_manager& b;
@@ -131,13 +145,9 @@ namespace smt {
expr_ref_vector get_split_atoms();
void collect_shared_clauses(ast_translation& g2l);
void cancel() {
IF_VERBOSE(1, verbose_stream() << "Worker " << id << " canceling\n");
m.limit().cancel();
}
void collect_statistics(::statistics& st) const {
ctx->collect_statistics(st);
}
void cancel();
void collect_statistics(::statistics& st) const;
reslimit& limit() {
return m.limit();
}