diff --git a/.clang-format b/.clang-format
index 0a879bb95..7ef241e3d 100644
--- a/.clang-format
+++ b/.clang-format
@@ -1,3 +1,4 @@
+
 # Z3 Theorem Prover clang-format configuration
 # Based on analysis of existing codebase style patterns
 
@@ -12,7 +13,6 @@ UseTab: Never
 ColumnLimit: 120
 
 # Braces
-BreakBeforeBraces: Linux
 Cpp11BracedListStyle: true
 
 # Classes and structs
@@ -25,7 +25,15 @@ AlwaysBreakAfterReturnType: None
 AllowShortFunctionsOnASingleLine: Empty
 AllowShortIfStatementsOnASingleLine: false
 AllowShortLoopsOnASingleLine: false
-
+# Ensure function-opening brace is attached to the signature
+BreakBeforeBraces: Custom
+# Explicitly ensure function brace is not placed on a new line
+BraceWrapping:
+  AfterFunction: false
+  AfterClass: false
+  AfterControlStatement: false
+  AfterNamespace: false
+  AfterStruct: false
 # Spacing
 SpaceAfterCStyleCast: false
 SpaceAfterLogicalNot: false
@@ -52,7 +60,7 @@ BreakBeforeTernaryOperators: true
 SortIncludes: false # Z3 has specific include ordering conventions
 
 # Namespaces
-NamespaceIndentation: None
+NamespaceIndentation: All
 
 # Comments and documentation
 ReflowComments: true
diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp
index ece523a51..96daf4ca1 100644
--- a/src/smt/smt_parallel.cpp
+++ b/src/smt/smt_parallel.cpp
@@ -12,12 +12,10 @@ Abstract:
 Author:
 
     nbjorner 2020-01-31
+    Ilana Shapiro 2025
 
 --*/
-
-
-
 #include "util/scoped_ptr_vector.h"
 #include "ast/ast_util.h"
 #include "ast/ast_pp.h"
@@ -26,43 +24,40 @@ Author:
 #include "ast/simplifiers/then_simplifier.h"
 #include "smt/smt_parallel.h"
 #include "smt/smt_lookahead.h"
-// #include "params/smt_parallel_params.hpp"
 #include "solver/solver_preprocess.h"
 #include 
-#include 
 #include 
 
 class bounded_pp_exprs {
-    expr_ref_vector const& es;
-public:
-    bounded_pp_exprs(expr_ref_vector const& es): es(es) {}
+    expr_ref_vector const &es;
 
-    std::ostream& display(std::ostream& out) const {
-        for (auto e : es)
+public:
+    bounded_pp_exprs(expr_ref_vector const &es) : es(es) {}
+
+    std::ostream &display(std::ostream &out) const {
+        for (auto e : es)
             out << mk_bounded_pp(e, es.get_manager()) << "\n";
         return out;
     }
 };
 
-inline std::ostream& operator<<(std::ostream& out, bounded_pp_exprs const& pp) {
+inline std::ostream &operator<<(std::ostream &out, bounded_pp_exprs const &pp) {
     return pp.display(out);
 }
 
 #ifdef SINGLE_THREAD
 
 namespace smt {
-
-    lbool parallel::operator()(expr_ref_vector const& asms) {
+
+    lbool parallel::operator()(expr_ref_vector const &asms) {
         return l_undef;
     }
-}
+} // namespace smt
 
 #else
 
 #include <thread>
-#include 
-#include 
 
 #define SHARE_SEARCH_TREE 1
 
@@ -70,23 +65,15 @@ namespace smt {
 
 namespace smt {
-
-
     void parallel::worker::run() {
-        search_tree::node* node = nullptr;
+        search_tree::node *node = nullptr;
         expr_ref_vector cube(m);
-        while (true) {
+        while (true) {
-
-#if SHARE_SEARCH_TREE
             if (!b.get_cube(m_g2l, id, cube, node)) {
                 LOG_WORKER(1, " no more cubes\n");
                 return;
             }
-#else
-            if (!get_cube(cube, node))
-                return;
-#endif
             collect_shared_clauses(m_g2l);
 
         check_cube_start:
@@ -97,165 +84,98 @@ namespace smt {
             lbool r = check_cube(cube);
             if (m.limit().is_canceled()) {
                 b.set_exception("context cancelled");
                 return;
             }
-
-            switch (r) {
-            case l_undef: {
-                update_max_thread_conflicts();
-                LOG_WORKER(1, " found undef cube\n");
-                // return unprocessed cubes to the batch manager
-                // add a split literal to the batch manager.
-                // optionally process other cubes and delay sending back unprocessed cubes to batch manager.
-                if (m_config.m_max_cube_depth <= cube.size())
-                    goto check_cube_start;
-                auto atom = get_split_atom();
-                if (!atom)
-                    goto check_cube_start;
-#if SHARE_SEARCH_TREE
-                b.split(m_l2g, id, node, atom);
-#else
-                split(node, atom);
-#endif
-                simplify();
-                break;
-            }
-            case l_true: {
-                LOG_WORKER(1, " found sat cube\n");
-                model_ref mdl;
-                ctx->get_model(mdl);
-                b.set_sat(m_l2g, *mdl);
+            switch (r) {
+            case l_undef: {
+                update_max_thread_conflicts();
+                LOG_WORKER(1, " found undef cube\n");
+                // return unprocessed cubes to the batch manager
+                // add a split literal to the batch manager.
+                // optionally process other cubes and delay sending back unprocessed cubes to batch manager.
+                if (m_config.m_max_cube_depth <= cube.size())
+                    goto check_cube_start;
+
+                auto atom = get_split_atom();
+                if (!atom)
+                    goto check_cube_start;
+                b.split(m_l2g, id, node, atom);
+                simplify();
+                break;
+            }
+            case l_true: {
+                LOG_WORKER(1, " found sat cube\n");
+                model_ref mdl;
+                ctx->get_model(mdl);
+                b.set_sat(m_l2g, *mdl);
+                return;
+            }
+            case l_false: {
+                expr_ref_vector const &unsat_core = ctx->unsat_core();
+                LOG_WORKER(2, " unsat core:\n";
+                           for (auto c : unsat_core) verbose_stream() << mk_bounded_pp(c, m, 3) << "\n");
+                // If the unsat core only contains external assumptions,
+                // unsatisfiability does not depend on the current cube and the entire problem is unsat.
+                if (all_of(unsat_core, [&](expr *e) { return asms.contains(e); })) {
+                    LOG_WORKER(1, " determined formula unsat\n");
+                    b.set_unsat(m_l2g, unsat_core);
                     return;
                 }
-                if (all_of(unsat_core, [&](expr* e) { return asms.contains(e); })) {
-                    LOG_WORKER(1, " determined formula unsat\n");
-                    b.set_unsat(m_l2g, unsat_core);
-                    return;
-                }
-                for (expr* e : unsat_core)
-                    if (asms.contains(e))
-                        b.report_assumption_used(m_l2g, e); // report assumptions used in unsat core, so they can be used in final core
+                for (expr *e : unsat_core)
+                    if (asms.contains(e))
+                        b.report_assumption_used(
+                            m_l2g, e); // report assumptions used in unsat core, so they can be used in final core
 
-                LOG_WORKER(1, " found unsat cube\n");
-#if SHARE_SEARCH_TREE
-                b.backtrack(m_l2g, unsat_core, node);
-#else
-                backtrack(unsat_core, node);
-#endif
-                break;
-            }
-            }
+                LOG_WORKER(1, " found unsat cube\n");
+                b.backtrack(m_l2g, unsat_core, node);
+                break;
+            }
+            }
 
             if (m_config.m_share_units)
                 share_units(m_l2g);
         }
     }
 
-    bool parallel::worker::get_cube(expr_ref_vector& cube, node*& n) {
-        node* t = m_search_tree.activate_node(n);
-        cube.reset();
-        if (!t) {
-            b.set_unsat(m_l2g, cube);
-            return false;
-        }
-        n = t;
-        while (t) {
-            if (cube_config::literal_is_null(t->get_literal()))
-                break;
-            cube.push_back(t->get_literal());
-            t = t->parent();
-        }
-        return true;
-    }
-
-    void parallel::worker::backtrack(expr_ref_vector const& core, node* n) {
-        vector<expr_ref> core_copy;
-        for (auto c : core)
-            core_copy.push_back(expr_ref(c, m));
-        m_search_tree.backtrack(n, core_copy);
-        //LOG_WORKER(1, m_search_tree.display(verbose_stream() << bounded_pp_exprs(core) << "\n"););
-    }
-
-    void parallel::worker::split(node* n, expr* atom) {
-        expr_ref lit(atom, m), nlit(m);
-        nlit = mk_not(m, lit);
-        IF_VERBOSE(1, verbose_stream() << "Batch manager splitting on literal: " << mk_bounded_pp(lit, m, 3) << "\n");
-        m_search_tree.split(n, lit, nlit);
-    }
-
-    parallel::worker::worker(unsigned id, parallel& p, expr_ref_vector const& _asms):
-        id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m),
-        m_g2l(p.ctx.m, m),
-        m_l2g(m, p.ctx.m),
-        m_search_tree(expr_ref(m)) {
-        for (auto e : _asms)
+    parallel::worker::worker(unsigned id, parallel &p, expr_ref_vector const &_asms)
+        : id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m), m_g2l(p.ctx.m, m),
+          m_l2g(m, p.ctx.m), m_search_tree(expr_ref(m)) {
+        for (auto e : _asms)
             asms.push_back(m_g2l(e));
-        LOG_WORKER(1, " created with " << asms.size() << " assumptions\n");
+        LOG_WORKER(1, " created with " << asms.size() << " assumptions\n");
         m_smt_params.m_preprocess = false;
         ctx = alloc(context, m, m_smt_params, p.ctx.get_params());
         context::copy(p.ctx, *ctx, true);
         ctx->set_random_seed(id + m_smt_params.m_random_seed);
-
-        #if 0
-        smt_parallel_params pp(p.ctx.m_params);
-        m_config.m_threads_max_conflicts = ctx->get_fparams().m_threads_max_conflicts;
-        m_config.m_max_conflicts = ctx->get_fparams().m_max_conflicts;
-        m_config.m_relevant_units_only = pp.relevant_units_only();
-        m_config.m_never_cube = pp.never_cube();
-        m_config.m_share_conflicts = pp.share_conflicts();
-        m_config.m_share_units = pp.share_units();
-        m_config.m_share_units_initial_only = pp.share_units_initial_only();
-        m_config.m_cube_initial_only = pp.cube_initial_only();
-        m_config.m_max_conflict_mul = pp.max_conflict_mul();
-        m_config.m_max_greedy_cubes = pp.max_greedy_cubes();
-        m_config.m_num_split_lits = pp.num_split_lits();
-        m_config.m_backbone_detection = pp.backbone_detection();
-        m_config.m_iterative_deepening = pp.iterative_deepening();
-        m_config.m_beam_search = pp.beam_search();
-        m_config.m_explicit_hardness = pp.explicit_hardness();
-        m_config.m_cubetree = pp.cubetree();
-        m_config.m_max_cube_depth = pp.max_cube_depth();
-        m_config.m_inprocessing = pp.inprocessing();
-        m_config.m_inprocessing_delay = pp.inprocessing_delay();
-        #endif
-
         // don't share initial units
         ctx->pop_to_base_lvl();
         m_num_shared_units = ctx->assigned_literals().size();
-
         m_num_initial_atoms = ctx->get_num_bool_vars();
     }
 
-    void parallel::worker::share_units(ast_translation& l2g) {
+    void parallel::worker::share_units(ast_translation &l2g) {
         // Collect new units learned locally by this worker and send to batch manager
         ctx->pop_to_base_lvl();
         unsigned sz = ctx->assigned_literals().size();
         for (unsigned j = m_num_shared_units; j < sz; ++j) { // iterate only over new literals since last sync
             literal lit = ctx->assigned_literals()[j];
-            if (!ctx->is_relevant(lit.var()) && m_config.m_relevant_units_only)
+            if (!ctx->is_relevant(lit.var()) && m_config.m_relevant_units_only)
                 continue;
             if (m_config.m_share_units_initial_only && lit.var() >= m_num_initial_atoms) {
                 LOG_WORKER(2, " Skipping non-initial unit: " << lit.var() << "\n");
-                continue; // skip non-iniial atoms if configured to do so
+                continue; // skip non-initial atoms if configured to do so
             }
-
-            expr_ref e(ctx->bool_var2expr(lit.var()), ctx->m); // turn literal into a Boolean expression
-            if (m.is_and(e) || m.is_or(e))
+
+            expr_ref e(ctx->bool_var2expr(lit.var()), ctx->m); // turn literal into a Boolean expression
+            if (m.is_and(e) || m.is_or(e))
                 continue;
             if (lit.sign())
-                e = m.mk_not(e); // negate if literal is negative
+                e = m.mk_not(e); // negate if literal is negative
             b.collect_clause(l2g, id, e);
         }
         m_num_shared_units = sz;
     }
-
-
     void parallel::worker::simplify() {
         if (!m.inc())
             return;
@@ -263,8 +183,8 @@
         // a precise schedule of repeated simplification is TBD.
         // also, the in-processing simplifier should be applied to
         // a current set of irredundant clauses that may be reduced by
-        // unit propagation. By including the units we are effectively
-        // repeating unit propagation, but potentially not subsumption or
+        // unit propagation. By including the units we are effectively
+        // repeating unit propagation, but potentially not subsumption or
        // Boolean simplifications that a solver could perform (smt_context doesn't really)
        // Integration of inprocessing simplification here or in sat/smt solver could
        // be based on taking the current clause set instead of the asserted formulas.
@@ -273,9 +193,12 @@
         if (m_config.m_inprocessing_delay > 0) {
             --m_config.m_inprocessing_delay;
             return;
-        }
-        m_config.m_inprocessing = false; // initial strategy is to immediately disable inprocessing for future calls.
-        dependent_expr_simplifier* s = ctx->m_simplifier.get();
+        }
+        ctx->pop_to_base_lvl();
+        if (ctx->m_base_lvl > 0)
+            return; // simplification only at base level
+        m_config.m_inprocessing = false; // initial strategy is to immediately disable inprocessing for future calls.
+        dependent_expr_simplifier *s = ctx->m_simplifier.get();
         if (!s) {
             // create a simplifier if none exists
             // initialize it to a default pre-processing simplifier.
@@ -285,9 +208,8 @@
             ctx->m_simplifier = s;
             init_preprocess(m, ctx->get_params(), *then_s, *ctx->m_fmls);
         }
-
-        dependent_expr_state& fmls = *ctx->m_fmls.get();
+        dependent_expr_state &fmls = *ctx->m_fmls.get();
 
         // extract assertions from ctx.
         // it is possible to track proof objects here if wanted.
         // feed them to the simplifier
@@ -295,40 +217,37 @@
         expr_ref_vector units(m);
         ctx->get_assertions(assertions);
         ctx->get_units(units);
-        for (expr* e : assertions)
+        for (expr *e : assertions)
             fmls.add(dependent_expr(m, e, nullptr, nullptr));
-        for (expr* e : units)
+        for (expr *e : units)
             fmls.add(dependent_expr(m, e, nullptr, nullptr));
 
         // run in-processing on the assertions
         s->reduce();
-        // LOG_WORKER(1, ""; fmls.display(verbose_stream() << " inprocess result\n"););
-
         scoped_ptr<context> new_ctx = alloc(context, m, m_smt_params, p.ctx.get_params());
         // extract simplified assertions from the simplifier
         // create a new context with the simplified assertions
         // update ctx with the new context.
         for (unsigned i = 0; i < fmls.qtail(); ++i) {
-            auto const& de = fmls[i];
+            auto const &de = fmls[i];
             new_ctx->assert_expr(de.fml());
         }
-        ctx = new_ctx.detach();
+        asserted_formulas &src_af = ctx->m_asserted_formulas;
+        asserted_formulas &dst_af = new_ctx->m_asserted_formulas;
+        src_af.get_macro_manager().copy_to(dst_af.get_macro_manager());
+        new_ctx->copy_user_propagator(*ctx, true);
+        ctx = new_ctx.detach();
         ctx->setup_context(true);
         ctx->internalize_assertions();
-        auto old_atoms = m_num_initial_atoms;
         m_num_shared_units = ctx->assigned_literals().size();
         m_num_initial_atoms = ctx->get_num_bool_vars();
-
-        LOG_WORKER(1, " inprocess " << old_atoms << " -> " << m_num_initial_atoms << "\n");
-
-        // TODO: copy user-propagators similar to context::copy.
     }
 
-    void parallel::worker::collect_statistics(::statistics& st) const {
+    void parallel::worker::collect_statistics(::statistics &st) const {
         ctx->collect_statistics(st);
     }
 
@@ -337,8 +256,8 @@
         m.limit().cancel();
     }
 
-    void parallel::batch_manager::backtrack(ast_translation& l2g, expr_ref_vector const& core,
-                                            search_tree::node* node) {
+    void parallel::batch_manager::backtrack(ast_translation &l2g, expr_ref_vector const &core,
+                                            search_tree::node *node) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager backtracking.\n");
         if (m_state != state::is_running)
@@ -359,8 +278,8 @@
         }
     }
 
-    void parallel::batch_manager::split(ast_translation& l2g, unsigned source_worker_id,
-                                        search_tree::node* node, expr* atom) {
+    void parallel::batch_manager::split(ast_translation &l2g, unsigned source_worker_id,
+                                        search_tree::node *node, expr *atom) {
         std::scoped_lock lock(mux);
         expr_ref lit(m), nlit(m);
         lit = l2g(atom);
@@ -375,9 +294,9 @@
         m_search_tree.split(node, lit, nlit);
     }
 
-    void parallel::batch_manager::collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* clause) {
+    void parallel::batch_manager::collect_clause(ast_translation &l2g, unsigned source_worker_id, expr *clause) {
         std::scoped_lock lock(mux);
-        expr* g_clause = l2g(clause);
+        expr *g_clause = l2g(clause);
         if (!shared_clause_set.contains(g_clause)) {
             shared_clause_set.insert(g_clause);
             shared_clause sc{source_worker_id, expr_ref(g_clause, m)};
@@ -385,45 +304,45 @@
         }
     }
 
-    void parallel::worker::collect_shared_clauses(ast_translation& g2l) {
-        expr_ref_vector new_clauses = b.return_shared_clauses(g2l, m_shared_clause_limit, id);
+    void parallel::worker::collect_shared_clauses(ast_translation &g2l) {
+        expr_ref_vector new_clauses = b.return_shared_clauses(g2l, m_shared_clause_limit, id);
         // iterate over new clauses and assert them in the local context
-        for (expr* e : new_clauses) {
+        for (expr *e : new_clauses) {
             expr_ref local_clause(e, g2l.to());
-            ctx->assert_expr(local_clause);
+            ctx->assert_expr(local_clause);
             LOG_WORKER(2, " asserting shared clause: " << mk_bounded_pp(local_clause, m, 3) << "\n");
         }
     }
 
-    expr_ref_vector parallel::batch_manager::return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id) {
+    expr_ref_vector parallel::batch_manager::return_shared_clauses(ast_translation &g2l, unsigned &worker_limit,
+                                                                   unsigned worker_id) {
         std::scoped_lock lock(mux);
         expr_ref_vector result(g2l.to());
         for (unsigned i = worker_limit; i < shared_clause_trail.size(); ++i) {
             if (shared_clause_trail[i].source_worker_id == worker_id)
-                continue; // skip clauses from the requesting worker
+                continue; // skip clauses from the requesting worker
             result.push_back(g2l(shared_clause_trail[i].clause.get()));
         }
-        worker_limit = shared_clause_trail.size(); // update the worker limit to the end of the current trail
+        worker_limit = shared_clause_trail.size(); // update the worker limit to the end of the current trail
         return result;
     }
 
-    lbool parallel::worker::check_cube(expr_ref_vector const& cube) {
-        for (auto& atom : cube)
-            asms.push_back(atom);
+    lbool parallel::worker::check_cube(expr_ref_vector const &cube) {
+        for (auto &atom : cube)
+            asms.push_back(atom);
         lbool r = l_undef;
         ctx->get_fparams().m_max_conflicts = std::min(m_config.m_threads_max_conflicts, m_config.m_max_conflicts);
-        IF_VERBOSE(1, verbose_stream() << " Checking cube\n" << bounded_pp_exprs(cube) << "with max_conflicts: " << ctx->get_fparams().m_max_conflicts << "\n";);
+        IF_VERBOSE(1, verbose_stream() << " Checking cube\n"
+                                       << bounded_pp_exprs(cube)
+                                       << "with max_conflicts: " << ctx->get_fparams().m_max_conflicts << "\n";);
         try {
             r = ctx->check(asms.size(), asms.data());
-        }
-        catch (z3_error& err) {
+        } catch (z3_error &err) {
             b.set_exception(err.error_code());
-        }
-        catch (z3_exception& ex) {
+        } catch (z3_exception &ex) {
             b.set_exception(ex.what());
-        }
-        catch (...) {
+        } catch (...) {
             b.set_exception("unknown exception");
         }
         asms.shrink(asms.size() - cube.size());
@@ -439,7 +358,7 @@
         for (bool_var v = 0; v < ctx->get_num_bool_vars(); ++v) {
             if (ctx->get_assignment(v) != l_undef)
                 continue;
-            expr* e = ctx->bool_var2expr(v);
+            expr *e = ctx->bool_var2expr(v);
             if (!e)
                 continue;
 
@@ -456,7 +375,7 @@
         return result;
     }
 
-    void parallel::batch_manager::set_sat(ast_translation& l2g, model& m) {
+    void parallel::batch_manager::set_sat(ast_translation &l2g, model &m) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager setting SAT.\n");
         if (m_state != state::is_running)
@@ -466,16 +385,16 @@
         cancel_workers();
     }
 
-    void parallel::batch_manager::set_unsat(ast_translation& l2g, expr_ref_vector const& unsat_core) {
+    void parallel::batch_manager::set_unsat(ast_translation &l2g, expr_ref_vector const &unsat_core) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager setting UNSAT.\n");
         if (m_state != state::is_running)
             return;
-        m_state = state::is_unsat;
+        m_state = state::is_unsat;
 
         // each call to check_sat needs to have a fresh unsat core
         SASSERT(p.ctx.m_unsat_core.empty());
-        for (expr* e : unsat_core)
+        for (expr *e : unsat_core)
             p.ctx.m_unsat_core.push_back(l2g(e));
         cancel_workers();
     }
@@ -490,7 +409,7 @@
         cancel_workers();
     }
 
-    void parallel::batch_manager::set_exception(std::string const& msg) {
+    void parallel::batch_manager::set_exception(std::string const &msg) {
         std::scoped_lock lock(mux);
         IF_VERBOSE(1, verbose_stream() << "Batch manager setting exception msg: " << msg << ".\n");
         if (m_state != state::is_running)
             return;
@@ -500,40 +419,42 @@
         cancel_workers();
     }
 
-    void parallel::batch_manager::report_assumption_used(ast_translation& l2g, expr* assumption) {
+    void parallel::batch_manager::report_assumption_used(ast_translation &l2g, expr *assumption) {
         std::scoped_lock lock(mux);
         p.m_assumptions_used.insert(l2g(assumption));
     }
 
     lbool parallel::batch_manager::get_result() const {
         if (m.limit().is_canceled())
-            return l_undef; // the main context was cancelled, so we return undef.
+            return l_undef; // the main context was cancelled, so we return undef.
         switch (m_state) {
-            case state::is_running: // batch manager is still running, but all threads have processed their cubes, which means all cubes were unsat
-                if (!m_search_tree.is_closed())
-                    throw default_exception("inconsistent end state");
-                if (!p.m_assumptions_used.empty()) {
-                    // collect unsat core from assumptions used, if any --> case when all cubes were unsat, but depend on nonempty asms, so we need to add these asms to final unsat core
-                    SASSERT(p.ctx.m_unsat_core.empty());
-                    for (auto a : p.m_assumptions_used)
-                        p.ctx.m_unsat_core.push_back(a);
-                }
-                return l_false;
-            case state::is_unsat:
-                return l_false;
-            case state::is_sat:
-                return l_true;
-            case state::is_exception_msg:
-                throw default_exception(m_exception_msg.c_str());
-            case state::is_exception_code:
-                throw z3_error(m_exception_code);
-            default:
-                UNREACHABLE();
-                return l_undef;
+        case state::is_running: // batch manager is still running, but all threads have processed their cubes, which
+                                // means all cubes were unsat
+            if (!m_search_tree.is_closed())
+                throw default_exception("inconsistent end state");
+            if (!p.m_assumptions_used.empty()) {
+                // collect unsat core from assumptions used, if any --> case when all cubes were unsat, but depend on
+                // nonempty asms, so we need to add these asms to final unsat core
+                SASSERT(p.ctx.m_unsat_core.empty());
+                for (auto a : p.m_assumptions_used)
+                    p.ctx.m_unsat_core.push_back(a);
+            }
+            return l_false;
+        case state::is_unsat:
+            return l_false;
+        case state::is_sat:
+            return l_true;
+        case state::is_exception_msg:
+            throw default_exception(m_exception_msg.c_str());
+        case state::is_exception_code:
+            throw z3_error(m_exception_code);
+        default:
+            UNREACHABLE();
+            return l_undef;
         }
     }
 
-    bool parallel::batch_manager::get_cube(ast_translation& g2l, unsigned id, expr_ref_vector& cube, node*& n) {
+    bool parallel::batch_manager::get_cube(ast_translation &g2l, unsigned id, expr_ref_vector &cube, node *&n) {
         cube.reset();
         std::unique_lock lock(mux);
         if (m_search_tree.is_closed()) {
@@ -565,67 +486,62 @@
         return false;
     }
 
     void parallel::batch_manager::initialize() {
-        m_state = state::is_running;
+        m_state = state::is_running;
         m_search_tree.reset();
     }
 
-    void parallel::batch_manager::collect_statistics(::statistics& st) const {
+    void parallel::batch_manager::collect_statistics(::statistics &st) const {
         st.update("parallel-num_cubes", m_stats.m_num_cubes);
         st.update("parallel-max-cube-size", m_stats.m_max_cube_depth);
     }
 
-    lbool parallel::operator()(expr_ref_vector const& asms) {
-        ast_manager& m = ctx.m;
+    lbool parallel::operator()(expr_ref_vector const &asms) {
+        ast_manager &m = ctx.m;
         if (m.has_trace_stream())
             throw default_exception("trace streams have to be off in parallel mode");
-
+
         struct scoped_clear {
-            parallel& p;
-            scoped_clear(parallel& p) : p(p) {}
-            ~scoped_clear() {
-                p.m_workers.reset();
-                p.m_assumptions_used.reset();
+            parallel &p;
+            scoped_clear(parallel &p) : p(p) {}
+            ~scoped_clear() {
+                p.m_workers.reset();
+                p.m_assumptions_used.reset();
                 p.m_assumptions.reset();
             }
         };
         scoped_clear clear(*this);
 
-        {
-            m_batch_manager.initialize();
-            m_workers.reset();
-            for (auto e : asms)
-                m_assumptions.insert(e);
-            scoped_limits sl(m.limit());
-            flet<unsigned> _nt(ctx.m_fparams.m_threads, 1);
-            SASSERT(num_threads > 1);
-            for (unsigned i = 0; i < num_threads; ++i)
-                m_workers.push_back(alloc(worker, i, *this, asms));
+        m_batch_manager.initialize();
+        m_workers.reset();
+        for (auto e : asms)
+            m_assumptions.insert(e);
+        scoped_limits sl(m.limit());
+        flet<unsigned> _nt(ctx.m_fparams.m_threads, 1);
+        SASSERT(num_threads > 1);
+        for (unsigned i = 0; i < num_threads; ++i)
+            m_workers.push_back(alloc(worker, i, *this, asms));
 
-            for (auto w : m_workers)
-                sl.push_child(&(w->limit()));
+        for (auto w : m_workers)
+            sl.push_child(&(w->limit()));
 
-            // Launch threads
-            vector<std::thread> threads(num_threads);
-            for (unsigned i = 0; i < num_threads; ++i) {
-                threads[i] = std::thread([&, i]() {
-                    m_workers[i]->run();
-                });
-            }
-
-            // Wait for all threads to finish
-            for (auto& th : threads)
-                th.join();
-
-            for (auto w : m_workers)
-                w->collect_statistics(ctx.m_aux_stats);
-            m_batch_manager.collect_statistics(ctx.m_aux_stats);
+        // Launch threads
+        vector<std::thread> threads(num_threads);
+        for (unsigned i = 0; i < num_threads; ++i) {
+            threads[i] = std::thread([&, i]() { m_workers[i]->run(); });
         }
 
-        return m_batch_manager.get_result();
+        // Wait for all threads to finish
+        for (auto &th : threads)
+            th.join();
+
+        for (auto w : m_workers)
+            w->collect_statistics(ctx.m_aux_stats);
+        m_batch_manager.collect_statistics(ctx.m_aux_stats);
+
+        return m_batch_manager.get_result();
     }
-}
+} // namespace smt
 
 #endif
diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h
index 16532e668..f7dfd4456 100644
--- a/src/smt/smt_parallel.h
+++ b/src/smt/smt_parallel.h
@@ -42,6 +42,7 @@ namespace smt {
     };
 
     class batch_manager {
+
         enum state {
             is_running,
             is_sat,
@@ -49,6 +50,7 @@ namespace smt {
             is_exception_msg,
             is_exception_code
         };
+
         struct stats {
             unsigned m_max_cube_depth = 0;
             unsigned m_num_cubes = 0;
@@ -114,7 +116,7 @@ namespace smt {
             bool m_share_units_initial_only = true;
             bool m_cube_initial_only = true;
             bool m_inprocessing = true;
-            unsigned m_inprocessing_delay = 0;
+            unsigned m_inprocessing_delay = 1;
             unsigned m_max_cube_depth = 20;
         };
 
@@ -145,10 +147,7 @@ namespace smt {
                 m_config.m_threads_max_conflicts = (unsigned)(m_config.m_max_conflict_mul * m_config.m_threads_max_conflicts);
             } // allow for backoff scheme of conflicts within the thread for cube timeouts.
 
-            bool get_cube(expr_ref_vector& cube, node*& n);
-            void backtrack(expr_ref_vector const& core, node* n);
-            void split(node* n, expr* atom);
             void simplify();
 
         public:
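
Note on the `.clang-format` hunks above: `BreakBeforeBraces: Custom` with `AfterFunction: false` keeps a function's opening brace on the signature line, and `NamespaceIndentation: All` indents the body of `namespace smt`. A small before/after sketch of the intended effect (illustrative only; not the output of running this exact configuration):

```cpp
// Before: BreakBeforeBraces: Linux, NamespaceIndentation: None
namespace smt_before {
int count_cubes(int depth)
{
    return 1 << depth; // function brace on its own line, namespace body not indented
}
}

// After: BreakBeforeBraces: Custom with AfterFunction: false, NamespaceIndentation: All
namespace smt_after {
    int count_cubes(int depth) {
        return 1 << depth; // brace attached to the signature, namespace body indented
    }
} // namespace smt_after
```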
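The reworked `worker::run` loop is easier to review with the coordination protocol in mind: each worker pulls a cube from the shared search tree, checks it under the cube's literals, splits on `l_undef`, and closes the branch on `l_false`. The sketch below distills that protocol into a self-contained program; `BatchManager`, `Cube`, and `fake_check` are invented stand-ins for Z3's `batch_manager`, cube representation, and `check_cube`, not the actual API:

```cpp
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

using Cube = std::vector<int>;  // a cube is a conjunction of literals, encoded as ints here
enum class Result { sat, unsat, undef };

// Stand-in for worker::check_cube: give up (undef) on shallow cubes, refute deep ones.
static Result fake_check(Cube const &c, unsigned max_depth) {
    return c.size() < max_depth ? Result::undef : Result::unsat;
}

class BatchManager {
    std::mutex mux;
    std::condition_variable cv;
    std::vector<Cube> open_cubes{Cube{}};  // root of the search tree: the empty cube
    unsigned active = 0;                   // workers currently holding a cube
public:
    // Blocks until a cube is available or the tree is closed (no open cubes, no active workers).
    bool get_cube(Cube &out) {
        std::unique_lock<std::mutex> lock(mux);
        cv.wait(lock, [&] { return !open_cubes.empty() || active == 0; });
        if (open_cubes.empty())
            return false;  // tree closed: every branch has been refuted
        out = std::move(open_cubes.back());
        open_cubes.pop_back();
        ++active;
        return true;
    }
    // l_undef: replace the cube by its two children (cube & atom, cube & !atom).
    void split(Cube const &c, int atom) {
        std::unique_lock<std::mutex> lock(mux);
        Cube pos = c, neg = c;
        pos.push_back(atom);
        neg.push_back(-atom);
        open_cubes.push_back(std::move(pos));
        open_cubes.push_back(std::move(neg));
        --active;
        cv.notify_all();
    }
    // l_false: the branch is closed, nothing is re-enqueued.
    void backtrack() {
        std::unique_lock<std::mutex> lock(mux);
        --active;
        cv.notify_all();
    }
};

static void worker_loop(BatchManager &b, unsigned max_depth) {
    Cube cube;
    while (b.get_cube(cube)) {
        switch (fake_check(cube, max_depth)) {
        case Result::undef:
            b.split(cube, static_cast<int>(cube.size()) + 1);  // real code picks a split atom heuristically
            break;
        case Result::unsat:
            b.backtrack();  // cf. b.backtrack(m_l2g, unsat_core, node)
            break;
        case Result::sat:
            b.backtrack();  // real code records the model and cancels the other workers
            break;
        }
    }
}

int main() {
    BatchManager b;
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < 4; ++i)
        workers.emplace_back(worker_loop, std::ref(b), 5u);
    for (auto &t : workers)
        t.join();
    std::cout << "search tree closed: unsat\n";
    return 0;
}
```

The termination condition mirrors `m_search_tree.is_closed()`: once no open cubes remain and no worker still holds one, every branch has been refuted and the overall result is unsat.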