update smt-parallel

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
Nikolaj Bjorner 2025-09-19 19:22:09 -07:00
parent f12d751007
commit 7eb6ef3161
3 changed files with 189 additions and 267 deletions

.clang-format

@@ -1,3 +1,4 @@
 # Z3 Theorem Prover clang-format configuration
 # Based on analysis of existing codebase style patterns
@@ -12,7 +13,6 @@ UseTab: Never
 ColumnLimit: 120
 # Braces
-BreakBeforeBraces: Linux
 Cpp11BracedListStyle: true
 # Classes and structs
@@ -25,7 +25,15 @@ AlwaysBreakAfterReturnType: None
 AllowShortFunctionsOnASingleLine: Empty
 AllowShortIfStatementsOnASingleLine: false
 AllowShortLoopsOnASingleLine: false
+# Ensure function-opening brace is attached to the signature
+BreakBeforeBraces: Custom
+# Explicitly ensure function brace is not placed on a new line
+BraceWrapping:
+  AfterFunction: false
+  AfterClass: false
+  AfterControlStatement: false
+  AfterNamespace: false
+  AfterStruct: false
 # Spacing
 SpaceAfterCStyleCast: false
 SpaceAfterLogicalNot: false
@@ -52,7 +60,7 @@ BreakBeforeTernaryOperators: true
 SortIncludes: false # Z3 has specific include ordering conventions
 # Namespaces
-NamespaceIndentation: None
+NamespaceIndentation: All
 # Comments and documentation
 ReflowComments: true
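
The net effect of the brace changes: `BreakBeforeBraces: Linux` (which put function, class, and namespace braces on their own line) gives way to `Custom` with a `BraceWrapping` map that keeps every listed opening brace attached, and `NamespaceIndentation: All` now indents namespace bodies. A hand-formatted illustration of the intended style (not actual clang-format output; the names are made up):

    namespace smt {                // AfterNamespace: false keeps this brace attached
        class worker {             // AfterClass: false
        public:
            void run() {           // AfterFunction: false
                if (m_cancel) {    // AfterControlStatement: false
                    return;
                }
            }
        };
    }                              // NamespaceIndentation: All indents the body above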

src/smt/smt_parallel.cpp

@@ -12,12 +12,10 @@ Abstract:
 Author:

     nbjorner 2020-01-31
-    Ilana Shapiro 2025

 --*/

 #include "util/scoped_ptr_vector.h"
 #include "ast/ast_util.h"
 #include "ast/ast_pp.h"
@@ -26,26 +24,25 @@ Author:
 #include "ast/simplifiers/then_simplifier.h"
 #include "smt/smt_parallel.h"
 #include "smt/smt_lookahead.h"
-// #include "params/smt_parallel_params.hpp"
 #include "solver/solver_preprocess.h"
 #include <cmath>
-#include <condition_variable>
 #include <mutex>

 class bounded_pp_exprs {
-    expr_ref_vector const& es;
-public:
-    bounded_pp_exprs(expr_ref_vector const& es): es(es) {}
-    std::ostream& display(std::ostream& out) const {
+    expr_ref_vector const &es;
+
+public:
+    bounded_pp_exprs(expr_ref_vector const &es) : es(es) {}
+    std::ostream &display(std::ostream &out) const {
         for (auto e : es)
             out << mk_bounded_pp(e, es.get_manager()) << "\n";
         return out;
     }
 };

-inline std::ostream& operator<<(std::ostream& out, bounded_pp_exprs const& pp) {
+inline std::ostream &operator<<(std::ostream &out, bounded_pp_exprs const &pp) {
     return pp.display(out);
 }
@@ -53,16 +50,14 @@ inline std::ostream& operator<<(std::ostream& out, bounded_pp_exprs const& pp) {
 namespace smt {
-    lbool parallel::operator()(expr_ref_vector const& asms) {
+    lbool parallel::operator()(expr_ref_vector const &asms) {
         return l_undef;
     }
-}
+} // namespace smt

 #else

 #include <thread>
-#include <mutex>
-#include <condition_variable>

 #define SHARE_SEARCH_TREE 1
@@ -70,23 +65,15 @@ namespace smt {
 namespace smt {

     void parallel::worker::run() {
-        search_tree::node<cube_config>* node = nullptr;
+        search_tree::node<cube_config> *node = nullptr;
         expr_ref_vector cube(m);
         while (true) {
-#if SHARE_SEARCH_TREE
             if (!b.get_cube(m_g2l, id, cube, node)) {
                 LOG_WORKER(1, " no more cubes\n");
                 return;
             }
-#else
-            if (!get_cube(cube, node))
-                return;
-#endif
             collect_shared_clauses(m_g2l);
         check_cube_start:
@@ -99,98 +86,58 @@ namespace smt {
             }
             switch (r) {
             case l_undef: {
                 update_max_thread_conflicts();
                 LOG_WORKER(1, " found undef cube\n");
                 // return unprocessed cubes to the batch manager
                 // add a split literal to the batch manager.
                 // optionally process other cubes and delay sending back unprocessed cubes to batch manager.
                 if (m_config.m_max_cube_depth <= cube.size())
                     goto check_cube_start;
                 auto atom = get_split_atom();
                 if (!atom)
                     goto check_cube_start;
-#if SHARE_SEARCH_TREE
                 b.split(m_l2g, id, node, atom);
-#else
-                split(node, atom);
-#endif
                 simplify();
                 break;
             }
             case l_true: {
                 LOG_WORKER(1, " found sat cube\n");
                 model_ref mdl;
                 ctx->get_model(mdl);
                 b.set_sat(m_l2g, *mdl);
                 return;
             }
             case l_false: {
-                expr_ref_vector const& unsat_core = ctx->unsat_core();
-                LOG_WORKER(2, " unsat core:\n"; for (auto c : unsat_core) verbose_stream() << mk_bounded_pp(c, m, 3) << "\n");
+                expr_ref_vector const &unsat_core = ctx->unsat_core();
+                LOG_WORKER(2, " unsat core:\n";
+                           for (auto c : unsat_core) verbose_stream() << mk_bounded_pp(c, m, 3) << "\n");
                 // If the unsat core only contains external assumptions,
                 // unsatisfiability does not depend on the current cube and the entire problem is unsat.
-                if (all_of(unsat_core, [&](expr* e) { return asms.contains(e); })) {
+                if (all_of(unsat_core, [&](expr *e) { return asms.contains(e); })) {
                     LOG_WORKER(1, " determined formula unsat\n");
                     b.set_unsat(m_l2g, unsat_core);
                     return;
                 }
-                for (expr* e : unsat_core)
-                    if (asms.contains(e))
-                        b.report_assumption_used(m_l2g, e); // report assumptions used in unsat core, so they can be used in final core
+                for (expr *e : unsat_core)
+                    if (asms.contains(e))
+                        b.report_assumption_used(
+                            m_l2g, e); // report assumptions used in unsat core, so they can be used in final core
                 LOG_WORKER(1, " found unsat cube\n");
-#if SHARE_SEARCH_TREE
                 b.backtrack(m_l2g, unsat_core, node);
-#else
-                backtrack(unsat_core, node);
-#endif
                 break;
             }
             }
             if (m_config.m_share_units)
                 share_units(m_l2g);
         }
     }

-    bool parallel::worker::get_cube(expr_ref_vector& cube, node*& n) {
-        node* t = m_search_tree.activate_node(n);
-        cube.reset();
-        if (!t) {
-            b.set_unsat(m_l2g, cube);
-            return false;
-        }
-        n = t;
-        while (t) {
-            if (cube_config::literal_is_null(t->get_literal()))
-                break;
-            cube.push_back(t->get_literal());
-            t = t->parent();
-        }
-        return true;
-    }
-
-    void parallel::worker::backtrack(expr_ref_vector const& core, node* n) {
-        vector<expr_ref> core_copy;
-        for (auto c : core)
-            core_copy.push_back(expr_ref(c, m));
-        m_search_tree.backtrack(n, core_copy);
-        //LOG_WORKER(1, m_search_tree.display(verbose_stream() << bounded_pp_exprs(core) << "\n"););
-    }
-
-    void parallel::worker::split(node* n, expr* atom) {
-        expr_ref lit(atom, m), nlit(m);
-        nlit = mk_not(m, lit);
-        IF_VERBOSE(1, verbose_stream() << "Batch manager splitting on literal: " << mk_bounded_pp(lit, m, 3) << "\n");
-        m_search_tree.split(n, lit, nlit);
-    }
-
-    parallel::worker::worker(unsigned id, parallel& p, expr_ref_vector const& _asms):
-        id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m),
-        m_g2l(p.ctx.m, m),
-        m_l2g(m, p.ctx.m),
-        m_search_tree(expr_ref(m)) {
+    parallel::worker::worker(unsigned id, parallel &p, expr_ref_vector const &_asms)
+        : id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m), m_g2l(p.ctx.m, m),
+          m_l2g(m, p.ctx.m), m_search_tree(expr_ref(m)) {
         for (auto e : _asms)
             asms.push_back(m_g2l(e));
         LOG_WORKER(1, " created with " << asms.size() << " assumptions\n");
@@ -198,38 +145,13 @@ namespace smt {
         ctx = alloc(context, m, m_smt_params, p.ctx.get_params());
         context::copy(p.ctx, *ctx, true);
         ctx->set_random_seed(id + m_smt_params.m_random_seed);
-#if 0
-        smt_parallel_params pp(p.ctx.m_params);
-        m_config.m_threads_max_conflicts = ctx->get_fparams().m_threads_max_conflicts;
-        m_config.m_max_conflicts = ctx->get_fparams().m_max_conflicts;
-        m_config.m_relevant_units_only = pp.relevant_units_only();
-        m_config.m_never_cube = pp.never_cube();
-        m_config.m_share_conflicts = pp.share_conflicts();
-        m_config.m_share_units = pp.share_units();
-        m_config.m_share_units_initial_only = pp.share_units_initial_only();
-        m_config.m_cube_initial_only = pp.cube_initial_only();
-        m_config.m_max_conflict_mul = pp.max_conflict_mul();
-        m_config.m_max_greedy_cubes = pp.max_greedy_cubes();
-        m_config.m_num_split_lits = pp.num_split_lits();
-        m_config.m_backbone_detection = pp.backbone_detection();
-        m_config.m_iterative_deepening = pp.iterative_deepening();
-        m_config.m_beam_search = pp.beam_search();
-        m_config.m_explicit_hardness = pp.explicit_hardness();
-        m_config.m_cubetree = pp.cubetree();
-        m_config.m_max_cube_depth = pp.max_cube_depth();
-        m_config.m_inprocessing = pp.inprocessing();
-        m_config.m_inprocessing_delay = pp.inprocessing_delay();
-#endif
         // don't share initial units
         ctx->pop_to_base_lvl();
         m_num_shared_units = ctx->assigned_literals().size();
         m_num_initial_atoms = ctx->get_num_bool_vars();
     }

-    void parallel::worker::share_units(ast_translation& l2g) {
+    void parallel::worker::share_units(ast_translation &l2g) {
         // Collect new units learned locally by this worker and send to batch manager
         ctx->pop_to_base_lvl();
         unsigned sz = ctx->assigned_literals().size();
@@ -240,22 +162,20 @@ namespace smt {
             if (m_config.m_share_units_initial_only && lit.var() >= m_num_initial_atoms) {
                 LOG_WORKER(2, " Skipping non-initial unit: " << lit.var() << "\n");
                 continue; // skip non-initial atoms if configured to do so
             }
             expr_ref e(ctx->bool_var2expr(lit.var()), ctx->m); // turn literal into a Boolean expression
             if (m.is_and(e) || m.is_or(e))
                 continue;
             if (lit.sign())
                 e = m.mk_not(e); // negate if literal is negative
             b.collect_clause(l2g, id, e);
         }
         m_num_shared_units = sz;
     }

     void parallel::worker::simplify() {
         if (!m.inc())
             return;
@@ -274,8 +194,11 @@ namespace smt {
             --m_config.m_inprocessing_delay;
             return;
         }
-        m_config.m_inprocessing = false; // initial strategy is to immediately disable inprocessing for future calls.
-        dependent_expr_simplifier* s = ctx->m_simplifier.get();
+        ctx->pop_to_base_lvl();
+        if (ctx->m_base_lvl > 0)
+            return; // simplification only at base level
+        m_config.m_inprocessing = false; // initial strategy is to immediately disable inprocessing for future calls.
+        dependent_expr_simplifier *s = ctx->m_simplifier.get();
         if (!s) {
             // create a simplifier if none exists
             // initialize it to a default pre-processing simplifier.
@@ -286,8 +209,7 @@ namespace smt {
             init_preprocess(m, ctx->get_params(), *then_s, *ctx->m_fmls);
         }
-
-        dependent_expr_state& fmls = *ctx->m_fmls.get();
+        dependent_expr_state &fmls = *ctx->m_fmls.get();
         // extract assertions from ctx.
         // it is possible to track proof objects here if wanted.
         // feed them to the simplifier
@@ -295,40 +217,37 @@ namespace smt {
         expr_ref_vector units(m);
         ctx->get_assertions(assertions);
         ctx->get_units(units);
-        for (expr* e : assertions)
+        for (expr *e : assertions)
             fmls.add(dependent_expr(m, e, nullptr, nullptr));
-        for (expr* e : units)
+        for (expr *e : units)
             fmls.add(dependent_expr(m, e, nullptr, nullptr));
         // run in-processing on the assertions
         s->reduce();
-        // LOG_WORKER(1, ""; fmls.display(verbose_stream() << " inprocess result\n"););
         scoped_ptr<context> new_ctx = alloc(context, m, m_smt_params, p.ctx.get_params());
         // extract simplified assertions from the simplifier
         // create a new context with the simplified assertions
         // update ctx with the new context.
         for (unsigned i = 0; i < fmls.qtail(); ++i) {
-            auto const& de = fmls[i];
+            auto const &de = fmls[i];
             new_ctx->assert_expr(de.fml());
         }
-        ctx = new_ctx.detach();
+        asserted_formulas &src_af = ctx->m_asserted_formulas;
+        asserted_formulas &dst_af = new_ctx->m_asserted_formulas;
+        src_af.get_macro_manager().copy_to(dst_af.get_macro_manager());
+        new_ctx->copy_user_propagator(*ctx, true);
+        ctx = new_ctx.detach();
         ctx->setup_context(true);
         ctx->internalize_assertions();
         auto old_atoms = m_num_initial_atoms;
         m_num_shared_units = ctx->assigned_literals().size();
         m_num_initial_atoms = ctx->get_num_bool_vars();
         LOG_WORKER(1, " inprocess " << old_atoms << " -> " << m_num_initial_atoms << "\n");
-        // TODO: copy user-propagators similar to context::copy.
     }

-    void parallel::worker::collect_statistics(::statistics& st) const {
+    void parallel::worker::collect_statistics(::statistics &st) const {
         ctx->collect_statistics(st);
     }
@@ -337,8 +256,8 @@ namespace smt {
         m.limit().cancel();
     }

-    void parallel::batch_manager::backtrack(ast_translation& l2g, expr_ref_vector const& core,
-                                            search_tree::node<cube_config>* node) {
+    void parallel::batch_manager::backtrack(ast_translation &l2g, expr_ref_vector const &core,
+                                            search_tree::node<cube_config> *node) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager backtracking.\n");
         if (m_state != state::is_running)
@@ -359,8 +278,8 @@ namespace smt {
         }
     }

-    void parallel::batch_manager::split(ast_translation& l2g, unsigned source_worker_id,
-                                        search_tree::node<cube_config>* node, expr* atom) {
+    void parallel::batch_manager::split(ast_translation &l2g, unsigned source_worker_id,
+                                        search_tree::node<cube_config> *node, expr *atom) {
         std::scoped_lock lock(mux);
         expr_ref lit(m), nlit(m);
         lit = l2g(atom);
@@ -375,9 +294,9 @@ namespace smt {
         m_search_tree.split(node, lit, nlit);
     }

-    void parallel::batch_manager::collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* clause) {
+    void parallel::batch_manager::collect_clause(ast_translation &l2g, unsigned source_worker_id, expr *clause) {
         std::scoped_lock lock(mux);
-        expr* g_clause = l2g(clause);
+        expr *g_clause = l2g(clause);
         if (!shared_clause_set.contains(g_clause)) {
             shared_clause_set.insert(g_clause);
             shared_clause sc{source_worker_id, expr_ref(g_clause, m)};
@@ -385,45 +304,45 @@ namespace smt {
         }
     }

-    void parallel::worker::collect_shared_clauses(ast_translation& g2l) {
+    void parallel::worker::collect_shared_clauses(ast_translation &g2l) {
         expr_ref_vector new_clauses = b.return_shared_clauses(g2l, m_shared_clause_limit, id);
         // iterate over new clauses and assert them in the local context
-        for (expr* e : new_clauses) {
+        for (expr *e : new_clauses) {
             expr_ref local_clause(e, g2l.to());
             ctx->assert_expr(local_clause);
             LOG_WORKER(2, " asserting shared clause: " << mk_bounded_pp(local_clause, m, 3) << "\n");
         }
     }

-    expr_ref_vector parallel::batch_manager::return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id) {
+    expr_ref_vector parallel::batch_manager::return_shared_clauses(ast_translation &g2l, unsigned &worker_limit,
+                                                                   unsigned worker_id) {
         std::scoped_lock lock(mux);
         expr_ref_vector result(g2l.to());
         for (unsigned i = worker_limit; i < shared_clause_trail.size(); ++i) {
             if (shared_clause_trail[i].source_worker_id == worker_id)
                 continue; // skip clauses from the requesting worker
             result.push_back(g2l(shared_clause_trail[i].clause.get()));
         }
         worker_limit = shared_clause_trail.size(); // update the worker limit to the end of the current trail
         return result;
     }

-    lbool parallel::worker::check_cube(expr_ref_vector const& cube) {
-        for (auto& atom : cube)
+    lbool parallel::worker::check_cube(expr_ref_vector const &cube) {
+        for (auto &atom : cube)
             asms.push_back(atom);
         lbool r = l_undef;
         ctx->get_fparams().m_max_conflicts = std::min(m_config.m_threads_max_conflicts, m_config.m_max_conflicts);
-        IF_VERBOSE(1, verbose_stream() << " Checking cube\n" << bounded_pp_exprs(cube) << "with max_conflicts: " << ctx->get_fparams().m_max_conflicts << "\n";);
+        IF_VERBOSE(1, verbose_stream() << " Checking cube\n"
+                                       << bounded_pp_exprs(cube)
+                                       << "with max_conflicts: " << ctx->get_fparams().m_max_conflicts << "\n";);
         try {
             r = ctx->check(asms.size(), asms.data());
-        }
-        catch (z3_error& err) {
+        } catch (z3_error &err) {
             b.set_exception(err.error_code());
-        }
-        catch (z3_exception& ex) {
+        } catch (z3_exception &ex) {
             b.set_exception(ex.what());
-        }
-        catch (...) {
+        } catch (...) {
             b.set_exception("unknown exception");
         }
         asms.shrink(asms.size() - cube.size());
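
The clause sharing in `collect_clause` and `return_shared_clauses` above is a watermark scheme: the trail only ever grows under the lock, each worker remembers how far into it it has read, and a worker's own clauses are skipped on the way back. A stripped-down, std-only sketch of the same idea (illustrative types and names such as `clause_board`, not Z3 code):

    #include <iostream>
    #include <mutex>
    #include <string>
    #include <vector>

    // Illustrative stand-in for the diff's shared clause trail.
    struct shared_clause {
        unsigned source_worker_id;
        std::string clause;
    };

    class clause_board {
        std::mutex mux;
        std::vector<shared_clause> trail; // append-only, mirrors shared_clause_trail
    public:
        void collect(unsigned worker_id, std::string clause) {
            std::scoped_lock lock(mux);
            trail.push_back({worker_id, std::move(clause)});
        }
        // worker_limit is the caller's private watermark into the trail:
        // it receives every clause it has not yet seen, minus its own.
        std::vector<std::string> fetch(unsigned &worker_limit, unsigned worker_id) {
            std::scoped_lock lock(mux);
            std::vector<std::string> result;
            for (unsigned i = worker_limit; i < trail.size(); ++i)
                if (trail[i].source_worker_id != worker_id)
                    result.push_back(trail[i].clause);
            worker_limit = (unsigned)trail.size(); // advance the watermark
            return result;
        }
    };

    int main() {
        clause_board board;
        unsigned w0_limit = 0, w1_limit = 0;
        board.collect(0, "(or p q)");
        std::cout << board.fetch(w1_limit, 1).size() << "\n"; // 1: worker 1 sees worker 0's clause
        std::cout << board.fetch(w0_limit, 0).size() << "\n"; // 0: worker 0 skips its own clause
    }

Because the trail never shrinks, each fetch does work proportional only to the unseen suffix, and no clause is delivered twice to the same worker.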
@@ -439,7 +358,7 @@ namespace smt {
         for (bool_var v = 0; v < ctx->get_num_bool_vars(); ++v) {
             if (ctx->get_assignment(v) != l_undef)
                 continue;
-            expr* e = ctx->bool_var2expr(v);
+            expr *e = ctx->bool_var2expr(v);
             if (!e)
                 continue;
@@ -456,7 +375,7 @@ namespace smt {
         return result;
     }

-    void parallel::batch_manager::set_sat(ast_translation& l2g, model& m) {
+    void parallel::batch_manager::set_sat(ast_translation &l2g, model &m) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager setting SAT.\n");
         if (m_state != state::is_running)
@@ -466,7 +385,7 @@ namespace smt {
         cancel_workers();
     }

-    void parallel::batch_manager::set_unsat(ast_translation& l2g, expr_ref_vector const& unsat_core) {
+    void parallel::batch_manager::set_unsat(ast_translation &l2g, expr_ref_vector const &unsat_core) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager setting UNSAT.\n");
         if (m_state != state::is_running)
@@ -475,7 +394,7 @@ namespace smt {
         // each call to check_sat needs to have a fresh unsat core
         SASSERT(p.ctx.m_unsat_core.empty());
-        for (expr* e : unsat_core)
+        for (expr *e : unsat_core)
             p.ctx.m_unsat_core.push_back(l2g(e));
         cancel_workers();
     }
@@ -490,7 +409,7 @@ namespace smt {
         cancel_workers();
     }

-    void parallel::batch_manager::set_exception(std::string const& msg) {
+    void parallel::batch_manager::set_exception(std::string const &msg) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager setting exception msg: " << msg << ".\n");
         if (m_state != state::is_running)
@@ -500,40 +419,42 @@ namespace smt {
         cancel_workers();
     }

-    void parallel::batch_manager::report_assumption_used(ast_translation& l2g, expr* assumption) {
+    void parallel::batch_manager::report_assumption_used(ast_translation &l2g, expr *assumption) {
         std::scoped_lock lock(mux);
         p.m_assumptions_used.insert(l2g(assumption));
     }

     lbool parallel::batch_manager::get_result() const {
         if (m.limit().is_canceled())
             return l_undef; // the main context was cancelled, so we return undef.
         switch (m_state) {
-        case state::is_running: // batch manager is still running, but all threads have processed their cubes, which means all cubes were unsat
+        case state::is_running: // batch manager is still running, but all threads have processed their cubes, which
+                                // means all cubes were unsat
             if (!m_search_tree.is_closed())
                 throw default_exception("inconsistent end state");
             if (!p.m_assumptions_used.empty()) {
-                // collect unsat core from assumptions used, if any --> case when all cubes were unsat, but depend on nonempty asms, so we need to add these asms to final unsat core
+                // collect unsat core from assumptions used, if any --> case when all cubes were unsat, but depend on
+                // nonempty asms, so we need to add these asms to final unsat core
                 SASSERT(p.ctx.m_unsat_core.empty());
                 for (auto a : p.m_assumptions_used)
                     p.ctx.m_unsat_core.push_back(a);
             }
             return l_false;
         case state::is_unsat:
             return l_false;
         case state::is_sat:
             return l_true;
         case state::is_exception_msg:
             throw default_exception(m_exception_msg.c_str());
         case state::is_exception_code:
             throw z3_error(m_exception_code);
         default:
             UNREACHABLE();
             return l_undef;
         }
     }

-    bool parallel::batch_manager::get_cube(ast_translation& g2l, unsigned id, expr_ref_vector& cube, node*& n) {
+    bool parallel::batch_manager::get_cube(ast_translation &g2l, unsigned id, expr_ref_vector &cube, node *&n) {
         cube.reset();
         std::unique_lock<std::mutex> lock(mux);
         if (m_search_tree.is_closed()) {
@@ -565,26 +486,25 @@ namespace smt {
         return false;
     }

     void parallel::batch_manager::initialize() {
         m_state = state::is_running;
         m_search_tree.reset();
     }

-    void parallel::batch_manager::collect_statistics(::statistics& st) const {
+    void parallel::batch_manager::collect_statistics(::statistics &st) const {
         st.update("parallel-num_cubes", m_stats.m_num_cubes);
         st.update("parallel-max-cube-size", m_stats.m_max_cube_depth);
     }

-    lbool parallel::operator()(expr_ref_vector const& asms) {
-        ast_manager& m = ctx.m;
+    lbool parallel::operator()(expr_ref_vector const &asms) {
+        ast_manager &m = ctx.m;
         if (m.has_trace_stream())
             throw default_exception("trace streams have to be off in parallel mode");
         struct scoped_clear {
-            parallel& p;
-            scoped_clear(parallel& p) : p(p) {}
+            parallel &p;
+            scoped_clear(parallel &p) : p(p) {}
             ~scoped_clear() {
                 p.m_workers.reset();
                 p.m_assumptions_used.reset();
@@ -593,39 +513,35 @@ namespace smt {
         };
         scoped_clear clear(*this);
-        {
-            m_batch_manager.initialize();
-            m_workers.reset();
-            for (auto e : asms)
-                m_assumptions.insert(e);
-            scoped_limits sl(m.limit());
-            flet<unsigned> _nt(ctx.m_fparams.m_threads, 1);
-            SASSERT(num_threads > 1);
-            for (unsigned i = 0; i < num_threads; ++i)
-                m_workers.push_back(alloc(worker, i, *this, asms));
-            for (auto w : m_workers)
-                sl.push_child(&(w->limit()));
-
-            // Launch threads
-            vector<std::thread> threads(num_threads);
-            for (unsigned i = 0; i < num_threads; ++i) {
-                threads[i] = std::thread([&, i]() {
-                    m_workers[i]->run();
-                });
-            }
-
-            // Wait for all threads to finish
-            for (auto& th : threads)
-                th.join();
-
-            for (auto w : m_workers)
-                w->collect_statistics(ctx.m_aux_stats);
-            m_batch_manager.collect_statistics(ctx.m_aux_stats);
-        }
+        m_batch_manager.initialize();
+        m_workers.reset();
+        for (auto e : asms)
+            m_assumptions.insert(e);
+        scoped_limits sl(m.limit());
+        flet<unsigned> _nt(ctx.m_fparams.m_threads, 1);
+        SASSERT(num_threads > 1);
+        for (unsigned i = 0; i < num_threads; ++i)
+            m_workers.push_back(alloc(worker, i, *this, asms));
+        for (auto w : m_workers)
+            sl.push_child(&(w->limit()));
+
+        // Launch threads
+        vector<std::thread> threads(num_threads);
+        for (unsigned i = 0; i < num_threads; ++i) {
+            threads[i] = std::thread([&, i]() { m_workers[i]->run(); });
+        }
+
+        // Wait for all threads to finish
+        for (auto &th : threads)
+            th.join();
+
+        for (auto w : m_workers)
+            w->collect_statistics(ctx.m_aux_stats);
+        m_batch_manager.collect_statistics(ctx.m_aux_stats);
         return m_batch_manager.get_result();
     }
-}
+} // namespace smt

 #endif
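
Stripped of Z3 specifics, the control flow this file now commits to is: every worker pulls a cube from the shared batch manager, checks it, and either closes it (unsat), reports a model (sat), or splits it and returns both children to the shared search state (undef). A self-contained toy sketch of that loop (std-only; `batch_manager` and the worker loop echo the names above, everything else is illustrative):

    #include <deque>
    #include <iostream>
    #include <mutex>
    #include <optional>
    #include <thread>
    #include <vector>

    // Illustrative stand-ins for the diff's types; none of this is Z3 code.
    enum class lbool { l_false, l_undef, l_true };
    using cube = std::vector<int>; // a cube is a conjunction of decision literals

    class batch_manager { // one mutex guards all shared state, as in the diff
        std::mutex mux;
        std::deque<cube> todo;
        bool done = false;
    public:
        batch_manager() { todo.push_back({}); } // start from the empty cube (whole problem)
        std::optional<cube> get_cube() {
            std::scoped_lock lock(mux);
            if (done || todo.empty())
                return std::nullopt;
            cube c = std::move(todo.front());
            todo.pop_front();
            return c;
        }
        void split(cube const &c, int atom) { // undef: replace the cube by its two children
            std::scoped_lock lock(mux);
            cube pos = c, neg = c;
            pos.push_back(atom);
            neg.push_back(-atom);
            todo.push_back(std::move(pos));
            todo.push_back(std::move(neg));
        }
        void set_sat() { // first worker with a model stops everyone
            std::scoped_lock lock(mux);
            done = true;
        }
    };

    // Toy stand-in for ctx->check(): pretend cubes of depth >= 3 are refuted.
    lbool check(cube const &c) { return c.size() >= 3 ? lbool::l_false : lbool::l_undef; }

    void worker_run(batch_manager &b) {
        while (auto c = b.get_cube()) {
            switch (check(*c)) {
            case lbool::l_undef: b.split(*c, (int)c->size() + 1); break; // deepen the cube
            case lbool::l_true:  b.set_sat(); return;                    // found a model
            case lbool::l_false: break;                                  // cube refuted; fetch next
            }
        }
    }

    int main() {
        batch_manager b;
        std::vector<std::thread> threads;
        for (int i = 0; i < 4; ++i)
            threads.emplace_back(worker_run, std::ref(b));
        for (auto &t : threads)
            t.join(); // all cubes refuted => whole problem unsat
        std::cout << "search space exhausted\n";
    }

The real implementation keeps a search tree rather than a deque so that an unsat core can close whole subtrees at once (`backtrack`), which is why this commit can delete the per-worker `get_cube`/`split`/`backtrack` variants in favor of the shared batch-manager ones.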

src/smt/smt_parallel.h

@@ -42,6 +42,7 @@ namespace smt {
     };

     class batch_manager {
         enum state {
             is_running,
             is_sat,
@@ -49,6 +50,7 @@ namespace smt {
             is_exception_msg,
             is_exception_code
         };
         struct stats {
             unsigned m_max_cube_depth = 0;
             unsigned m_num_cubes = 0;
@@ -114,7 +116,7 @@ namespace smt {
             bool m_share_units_initial_only = true;
             bool m_cube_initial_only = true;
             bool m_inprocessing = true;
-            unsigned m_inprocessing_delay = 0;
+            unsigned m_inprocessing_delay = 1;
             unsigned m_max_cube_depth = 20;
         };
@@ -145,10 +147,6 @@ namespace smt {
             m_config.m_threads_max_conflicts = (unsigned)(m_config.m_max_conflict_mul * m_config.m_threads_max_conflicts);
         } // allow for backoff scheme of conflicts within the thread for cube timeouts.

-        bool get_cube(expr_ref_vector& cube, node*& n);
-        void backtrack(expr_ref_vector const& core, node* n);
-        void split(node* n, expr* atom);
         void simplify();

     public:
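
Two of the knobs above interact: `check_cube` clamps the per-cube budget with `std::min(m_threads_max_conflicts, m_max_conflicts)`, and the backoff in the last hunk multiplies `m_threads_max_conflicts` by `m_max_conflict_mul` whenever a cube times out, so budgets grow geometrically until the global cap takes over. A tiny worked example of that schedule (the constants 1.5 / 100 / 1000 are assumed for illustration; the diff does not show the actual defaults):

    #include <algorithm>
    #include <cstdio>

    int main() {
        // Restates the header's backoff: after each undef cube the per-thread
        // conflict budget is multiplied by m_max_conflict_mul, while check_cube
        // clamps it against the global m_max_conflicts.
        // Constants are assumed for illustration only.
        double max_conflict_mul = 1.5;
        unsigned threads_max_conflicts = 100, max_conflicts = 1000;
        for (int round = 0; round < 8; ++round) {
            unsigned budget = std::min(threads_max_conflicts, max_conflicts);
            std::printf("round %d: per-cube conflict budget = %u\n", round, budget);
            threads_max_conflicts = (unsigned)(max_conflict_mul * threads_max_conflicts);
        }
    }

Separately, `m_inprocessing_delay` now defaults to 1 instead of 0, so the first call to `simplify()` decrements the delay and returns; inprocessing is first attempted on the second call.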