mirror of https://github.com/Z3Prover/z3

clean up code

Ilana Shapiro 2025-12-21 18:58:15 -08:00
parent 5bc60ce055
commit 26119a8815
2 changed files with 11 additions and 116 deletions


@@ -2,24 +2,5 @@ def_module_params('smt_parallel',
     export=True,
     description='Experimental parameters for parallel solving',
     params=(
-        ('share_units', BOOL, True, 'share units'),
-        ('share_conflicts', BOOL, True, 'share conflicts'),
-        ('never_cube', BOOL, False, 'never cube'),
-        ('frugal_cube_only', BOOL, False, 'only apply frugal cube strategy'),
-        ('relevant_units_only', BOOL, True, 'only share relevant units'),
-        ('max_conflict_mul', DOUBLE, 1.5, 'increment multiplier for max-conflicts'),
-        ('share_units_initial_only', BOOL, True, 'share only initial Boolean atoms as units'),
-        ('cube_initial_only', BOOL, False, 'cube only on initial Boolean atoms'),
-        ('max_cube_depth', UINT, 20, 'maximum depth (size) of a cube to share'),
-        ('max_greedy_cubes', UINT, 1000, 'maximum number of cubes to greedily share before switching to frugal'),
-        ('num_split_lits', UINT, 2, 'how many literals, k, we split on to create 2^k cubes'),
-        ('depth_splitting_only', BOOL, False, 'only apply frugal cube strategy, and only on deepest (biggest) cubes from the batch manager'),
-        ('backbone_detection', BOOL, False, 'apply backbone literal heuristic'),
-        ('iterative_deepening', BOOL, False, 'deepen cubes based on iterative hardness cutoff heuristic'),
-        ('beam_search', BOOL, False, 'use beam search with PQ to rank cubes given to threads'),
-        ('explicit_hardness', BOOL, False, 'use explicit hardness metric for cube'),
-        ('cubetree', BOOL, False, 'use cube tree data structure for storing cubes'),
-        ('searchtree', BOOL, False, 'use search tree implementation (parallel2)'),
-        ('inprocessing', BOOL, False, 'integrate in-processing as a heuristic simplification'),
-        ('inprocessing_delay', UINT, 0, 'number of undefs before invoking simplification'),
+        ('inprocessing', BOOL, True, 'integrate in-processing as a heuristic simplification'),
     ))
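
def_module_params generates a C++ wrapper class, smt_parallel_params, with one accessor per parameter tuple; the C++ code below reads its settings through that wrapper. A minimal sketch of reading the one surviving parameter, assuming the generated header name and a params_ref in scope (both modeled on the constructor code further below, not taken from this commit):

    #include "smt_parallel_params.hpp" // assumed name of the header generated from the .pyg above

    void configure(params_ref const& p) {
        smt_parallel_params pp(p);       // wrap the raw parameter set
        bool inproc = pp.inprocessing(); // true by default after this commit
        // exported modules are typically user-settable under the module name,
        // e.g. smt_parallel.inprocessing=false (naming assumed from 'smt_parallel')
        (void)inproc;
    }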


@@ -64,64 +64,6 @@ namespace smt {
-    static bool is_arith_atom(ast_manager& m, expr* e) {
-        // strip negation
-        if (m.is_not(e, e))
-            ; // e updated by reference
-        if (!m.is_bool(e))
-            return false;
-        if (m.is_eq(e)) {
-            app* ap = to_app(e);
-            arith_util a(m);
-            return a.is_int(ap->get_arg(0)) || a.is_real(ap->get_arg(0));
-        }
-        app* ap = is_app(e) ? to_app(e) : nullptr;
-        if (!ap)
-            return false;
-        arith_util a(m);
-        switch (ap->get_decl_kind()) {
-        case OP_LE:
-        case OP_GE:
-        case OP_LT:
-        case OP_GT:
-            return a.is_int(ap->get_arg(0)) || a.is_real(ap->get_arg(0));
-        default:
-            return false;
-        }
-    }
-
-    bool contains_arith(ast_manager& m, expr* e) {
-        return false; // disabled: everything below is unreachable
-        // strip not
-        if (m.is_not(e, e))
-            return contains_arith(m, e);
-        // direct arithmetic atom?
-        if (is_arith_atom(m, e))
-            return true;
-        // recurse into Boolean structure
-        if (m.is_or(e) || m.is_and(e)) {
-            app* ap = to_app(e);
-            for (expr* arg : *ap)
-                if (contains_arith(m, arg))
-                    return true;
-            return false;
-        }
-        // Boolean variable: unknown, treat as potentially arithmetic
-        if (m.is_bool(e) && is_app(e) && to_app(e)->get_num_args() == 0)
-            return true;
-        return false;
-    }
-
     void parallel::worker::run() {
         search_tree::node<cube_config> *node = nullptr;
         expr_ref_vector cube(m);
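
The deleted contains_arith above returns false immediately, so the traversal under it was already dead code; it was meant to decide whether a literal mentions arithmetic, gating the conflict sharing in the commented-out block of the third hunk below. A self-contained toy model of the intended recursion, with an invented Expr type standing in for Z3's ast_manager API:

    #include <vector>

    enum class Kind { ArithAtom, BoolVar, Not, And, Or };
    struct Expr { Kind kind; std::vector<const Expr*> args; };

    // Toy model of the deleted heuristic: does e contain an arithmetic atom?
    bool contains_arith(const Expr* e) {
        switch (e->kind) {
        case Kind::Not:       return contains_arith(e->args[0]); // strip negation
        case Kind::ArithAtom: return true;                       // direct arithmetic atom
        case Kind::And:
        case Kind::Or:
            for (const Expr* arg : e->args)                      // recurse into Boolean structure
                if (contains_arith(arg))
                    return true;
            return false;
        case Kind::BoolVar:   return true; // unknown, so conservatively treat as arithmetic
        }
        return false;
    }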
@@ -154,10 +96,6 @@ namespace smt {
                     goto check_cube_start;
                 b.split(m_l2g, id, node, atom);
                 simplify();
-                if (m_config.m_share_units) {
-                    IF_VERBOSE(1, verbose_stream() << " Sharing units\n");
-                    share_units();
-                }
                 break;
             }
             case l_true: {
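
b.split asks the batch manager to split the current node on atom: one child extends the cube with atom, the other with its negation. The num_split_lits parameter deleted above generalized this to k literals, giving 2^k child cubes. A self-contained sketch of that enumeration, with integer literals standing in for Z3 expressions:

    #include <vector>

    using Cube = std::vector<int>; // a cube as a list of literals; -x denotes negation

    // Extend a base cube with every sign assignment of k split literals,
    // yielding 2^k child cubes (the scheme the num_split_lits parameter controlled).
    std::vector<Cube> split(const Cube& base, const std::vector<int>& lits) {
        std::vector<Cube> out;
        for (unsigned mask = 0; mask < (1u << lits.size()); ++mask) {
            Cube c = base;
            for (size_t i = 0; i < lits.size(); ++i)
                c.push_back(((mask >> i) & 1) ? lits[i] : -lits[i]);
            out.push_back(c);
        }
        return out;
    }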
@@ -182,31 +120,14 @@ namespace smt {
                 LOG_WORKER(1, " found unsat cube\n");
                 b.backtrack(m_l2g, unsat_core, node);
-                // bool all_arith = true;
-                // for (expr* l : unsat_core) {
-                //     IF_VERBOSE(1, verbose_stream() << " Checking unsat core literal for arith: "
-                //                << mk_bounded_pp(l, m, 3) << "\n");
-                //     if (!contains_arith(m, l)) {
-                //         all_arith = false;
-                //         IF_VERBOSE(1, verbose_stream() << " Not sharing conflict due to non-arith atom: "
-                //                    << mk_bounded_pp(l, m, 3) << "\n");
-                //         break;
-                //     }
-                // }
-                // if (m_config.m_share_conflicts && all_arith && unsat_core.size() <= 3) {
-                //     IF_VERBOSE(1, verbose_stream() << " Sharing conflicts\n");
-                //     b.collect_clause(m_l2g, id, mk_not(mk_and(unsat_core)));
-                // }
-                if (m_config.m_share_conflicts) {
-                    IF_VERBOSE(1, verbose_stream() << " Sharing conflicts\n");
+                if (m_config.m_share_conflicts)
                     b.collect_clause(m_l2g, id, mk_not(mk_and(unsat_core)));
-                }
                 break;
             }
             }
             if (m_config.m_share_units)
                 share_units();
         }
     }
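
The surviving conflict-sharing line turns an unsat core {l1, ..., ln} into the lemma mk_not(mk_and(unsat_core)), i.e. the clause ¬l1 ∨ ... ∨ ¬ln, which is sound for every worker because the core literals are jointly contradictory. The same construction expressed with Z3's public C++ API (a sketch using z3++.h, not the internal ast_manager calls in this file):

    #include <z3++.h>

    // Turn an unsat core into a sharable lemma: not(and(core)),
    // equivalent to the disjunction of the negated core literals.
    z3::expr core_to_lemma(z3::expr_vector const& core) {
        return !z3::mk_and(core);
    }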
@@ -227,19 +148,17 @@ namespace smt {
         ctx->get_fparams().m_preprocess = false; // avoid preprocessing lemmas that are exchanged
         smt_parallel_params pp(p.ctx.m_params);
-        m_config.m_share_units = pp.share_units();
-        m_config.m_share_conflicts = pp.share_conflicts();
         m_config.m_inprocessing = pp.inprocessing();
     }

     void parallel::worker::share_units() {
-        // Collect new base-level units learned locally by this worker and send to batch manager
+        // Collect new units learned locally by this worker and send to batch manager
         unsigned sz = ctx->assigned_literals().size();
-        for (unsigned j = m_num_shared_units; j < sz; ++j) {
+        for (unsigned j = m_num_shared_units; j < sz; ++j) { // iterate only over new literals since last sync
             literal lit = ctx->assigned_literals()[j];
-            // NEW: only share base-level assignments
+            // filter by assign level: do not pop to base level as this destroys the current search state
             if (ctx->get_assign_level(lit) > ctx->m_base_lvl)
                 continue;
@@ -248,22 +167,17 @@ namespace smt {
             if (m_config.m_share_units_initial_only && lit.var() >= m_num_initial_atoms) {
                 LOG_WORKER(4, " Skipping non-initial unit: " << lit.var() << "\n");
-                continue;
+                continue; // skip non-initial atoms if configured to do so
             }
-            expr_ref e(ctx->bool_var2expr(lit.var()), ctx->m);
-            if (!e)
-                continue;
+            expr_ref e(ctx->bool_var2expr(lit.var()), ctx->m); // turn literal into a Boolean expression
             if (m.is_and(e) || m.is_or(e))
                 continue;
             if (lit.sign())
-                e = m.mk_not(e);
+                e = m.mk_not(e); // negate if literal is negative
             b.collect_clause(m_l2g, id, e);
         }
         m_num_shared_units = sz;
     }
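
share_units is incremental: m_num_shared_units is a cursor into ctx->assigned_literals(), so each call examines only literals assigned since the previous call, and the assign-level check keeps speculative (non-base-level) assignments from being shared. A self-contained model of the cursor pattern, with std types standing in for Z3's literal vector and batch manager:

    #include <cstddef>
    #include <vector>

    struct UnitSharer {
        size_t m_num_shared = 0; // cursor: index of the first not-yet-shared unit

        // Share only the units appended since the last call, then advance the
        // cursor (mirrors the m_num_shared_units bookkeeping in share_units above).
        template <typename Sink>
        void share_new_units(const std::vector<int>& assigned, Sink&& send) {
            size_t sz = assigned.size();
            for (size_t j = m_num_shared; j < sz; ++j)
                send(assigned[j]);
            m_num_shared = sz;
        }
    };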