From d593bb89f3952577a992daf2bbc23c6624f94639 Mon Sep 17 00:00:00 2001 From: Ilana Shapiro Date: Mon, 4 Aug 2025 09:48:10 -0700 Subject: [PATCH 1/4] Parallel solving (#7756) * very basic setup * ensure solve_eqs is fully disabled when smt.solve_eqs=false, #7743 Signed-off-by: Nikolaj Bjorner * respect smt configuration parameter in elim_unconstrained simplifier Signed-off-by: Nikolaj Bjorner * indentation * add bash files for test runs * add option to selectively disable variable solving for only ground expressions Signed-off-by: Nikolaj Bjorner * remove verbose output Signed-off-by: Nikolaj Bjorner * fix #7745 axioms for len(substr(...)) escaped due to nested rewriting * ensure atomic constraints are processed by arithmetic solver * #7739 optimization add simplification rule for at(x, offset) = "" Introducing j just postpones some rewrites that prevent useful simplifications. Z3 already uses common sub-expressions. The example highlights some opportunities for simplification, noteworthy at(..) = "". The example is solved in both versions after adding this simplification. * fix unsound len(substr) axiom Signed-off-by: Nikolaj Bjorner * FreshConst is_sort (#7748) * #7750 add pre-processing simplification * Add parameter validation for selected API functions * updates to ac-plugin fix incrementality bugs by allowing destructive updates during saturation at the cost of redoing saturation after a pop. * enable passive, add check for bloom up-to-date * add top-k fixed-sized min-heap priority queue for top scoring literals * set up worker thread batch manager for multithreaded batch cubes paradigm, need to debug as I am getting segfault still * fix bug in parallel solving batch setup * fix bug * allow for internalize implies * disable pre-processing during cubing * debugging * process cubes as lists of individual lits --------- Signed-off-by: Nikolaj Bjorner Co-authored-by: Nikolaj Bjorner Co-authored-by: humnrdble <83878671+humnrdble@users.noreply.github.com> --- src/smt/smt_context.h | 12 ++ src/smt/smt_internalizer.cpp | 32 ++++- src/smt/smt_lookahead.cpp | 11 +- src/smt/smt_parallel.cpp | 228 ++++++++++++++++++++++++++++++----- src/smt/smt_relevancy.cpp | 54 ++++++++- src/smt/smt_relevancy.h | 1 + 6 files changed, 299 insertions(+), 39 deletions(-) diff --git a/src/smt/smt_context.h b/src/smt/smt_context.h index 08ee9800f..8e548e678 100644 --- a/src/smt/smt_context.h +++ b/src/smt/smt_context.h @@ -51,6 +51,7 @@ Revision History: #include "solver/progress_callback.h" #include "solver/assertions/asserted_formulas.h" #include "smt/priority_queue.h" +#include "util/dlist.h" #include // there is a significant space overhead with allocating 1000+ contexts in @@ -191,6 +192,13 @@ namespace smt { svector m_bdata; //!< mapping bool_var -> data svector m_activity; updatable_priority_queue::priority_queue m_pq_scores; + + struct lit_node : dll_base { + literal lit; + lit_node(literal l) : lit(l) { init(this); } + }; + lit_node* m_dll_lits; + svector> m_lit_scores; clause_vector m_aux_clauses; @@ -908,6 +916,8 @@ namespace smt { void add_or_rel_watches(app * n); + void add_implies_rel_watches(app* n); + void add_ite_rel_watches(app * n); void mk_not_cnstr(app * n); @@ -916,6 +926,8 @@ namespace smt { void mk_or_cnstr(app * n); + void mk_implies_cnstr(app* n); + void mk_iff_cnstr(app * n, bool sign); void mk_ite_cnstr(app * n); diff --git a/src/smt/smt_internalizer.cpp b/src/smt/smt_internalizer.cpp index fdccea4e7..ecb56e516 100644 --- a/src/smt/smt_internalizer.cpp +++ b/src/smt/smt_internalizer.cpp 
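The two members added to smt_context.h above, m_lit_scores and the m_pq_scores priority queue, hold the per-literal occurrence scores that the parallel cuber reads later in this patch. Below is a minimal, self-contained sketch of that bookkeeping using plain std:: containers; the type and member names are illustrative only (the patch itself uses svector and updatable_priority_queue), but the 1/n-per-clause update and the product-of-polarities ranking match the add_scores hunk further down.

// Sketch only: approximates the scoring behind m_lit_scores / m_pq_scores.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct lit_scores {
    // score[sign][var]: accumulated weight of literal (var, sign) over all clauses seen
    std::vector<double> score[2];

    void reserve(std::size_t v) {
        if (score[0].size() <= v) {
            score[0].resize(v + 1, 0.0);
            score[1].resize(v + 1, 0.0);
        }
    }

    // every literal of a clause of size n contributes 1/n to its polarity
    void add_clause(std::vector<std::pair<std::size_t, bool>> const& lits) {
        for (auto const& lit : lits) {
            reserve(lit.first);
            score[lit.second ? 1 : 0][lit.first] += 1.0 / lits.size();
        }
    }

    // ranking key: product of both polarities, so variables that occur often in
    // both phases are preferred as split candidates
    double combined(std::size_t v) const { return score[0][v] * score[1][v]; }

    // top-k variables by combined score; the patch keeps a fixed-size heap
    // instead of sorting the whole range
    std::vector<std::size_t> top_k(unsigned k) const {
        std::vector<std::size_t> vars(score[0].size());
        for (std::size_t v = 0; v < vars.size(); ++v)
            vars[v] = v;
        std::sort(vars.begin(), vars.end(),
                  [this](std::size_t a, std::size_t b) { return combined(a) > combined(b); });
        if (vars.size() > k)
            vars.resize(k);
        return vars;
    }
};
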
@@ -696,6 +696,10 @@ namespace smt { mk_or_cnstr(to_app(n)); add_or_rel_watches(to_app(n)); break; + case OP_IMPLIES: + mk_implies_cnstr(to_app(n)); + add_implies_rel_watches(to_app(n)); + break; case OP_EQ: if (m.is_iff(n)) mk_iff_cnstr(to_app(n), false); @@ -711,8 +715,7 @@ namespace smt { mk_iff_cnstr(to_app(n), true); break; case OP_DISTINCT: - case OP_IMPLIES: - throw default_exception("formula has not been simplified"); + throw default_exception(std::string("formula has not been simplified") + " : " + mk_pp(n, m)); case OP_OEQ: UNREACHABLE(); default: @@ -1712,6 +1715,14 @@ namespace smt { } } + void context::add_implies_rel_watches(app* n) { + if (relevancy()) { + relevancy_eh* eh = m_relevancy_propagator->mk_implies_relevancy_eh(n); + add_rel_watch(~get_literal(n->get_arg(0)), eh); + add_rel_watch(get_literal(n->get_arg(1)), eh); + } + } + void context::add_ite_rel_watches(app * n) { if (relevancy()) { relevancy_eh * eh = m_relevancy_propagator->mk_ite_relevancy_eh(n); @@ -1758,9 +1769,24 @@ namespace smt { mk_gate_clause(buffer.size(), buffer.data()); } + void context::mk_implies_cnstr(app* n) { + literal l = get_literal(n); + literal_buffer buffer; + buffer.push_back(~l); + auto arg1 = n->get_arg(0); + literal l_arg1 = get_literal(arg1); + mk_gate_clause(l, l_arg1); + buffer.push_back(~l_arg1); + auto arg2 = n->get_arg(1); + literal l_arg2 = get_literal(arg2); + mk_gate_clause(l, ~l_arg2); + buffer.push_back(l_arg2); + mk_gate_clause(buffer.size(), buffer.data()); + } + void context::mk_iff_cnstr(app * n, bool sign) { if (n->get_num_args() != 2) - throw default_exception("formula has not been simplified"); + throw default_exception(std::string("formula has not been simplified") + " : " + mk_pp(n, m)); literal l = get_literal(n); literal l1 = get_literal(n->get_arg(0)); literal l2 = get_literal(n->get_arg(1)); diff --git a/src/smt/smt_lookahead.cpp b/src/smt/smt_lookahead.cpp index 221c2d0ea..eb4f96320 100644 --- a/src/smt/smt_lookahead.cpp +++ b/src/smt/smt_lookahead.cpp @@ -72,9 +72,14 @@ namespace smt { svector vars; for (bool_var v = 0; v < static_cast(sz); ++v) { expr* b = ctx.bool_var2expr(v); - if (b && ctx.get_assignment(v) == l_undef) { - vars.push_back(v); - } + if (!b) + continue; + if (ctx.get_assignment(v) != l_undef) + continue; + if (m.is_and(b) || m.is_or(b) || m.is_not(b) || m.is_ite(b) || m.is_implies(b) || m.is_iff(b) || m.is_xor(b)) + continue; // do not choose connectives + vars.push_back(v); + } compare comp(ctx); std::sort(vars.begin(), vars.end(), comp); diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp index feea6fc17..ce8b699aa 100644 --- a/src/smt/smt_parallel.cpp +++ b/src/smt/smt_parallel.cpp @@ -36,6 +36,7 @@ namespace smt { #else #include +#include namespace smt { @@ -77,13 +78,16 @@ namespace smt { throw default_exception("trace streams have to be off in parallel mode"); + params_ref params = ctx.get_params(); for (unsigned i = 0; i < num_threads; ++i) { smt_params.push_back(ctx.get_fparams()); + smt_params.back().m_preprocess = false; } + for (unsigned i = 0; i < num_threads; ++i) { ast_manager* new_m = alloc(ast_manager, m, true); pms.push_back(new_m); - pctxs.push_back(alloc(context, *new_m, smt_params[i], ctx.get_params())); + pctxs.push_back(alloc(context, *new_m, smt_params[i], params)); context& new_ctx = *pctxs.back(); context::copy(ctx, new_ctx, true); new_ctx.set_random_seed(i + ctx.get_fparams().m_random_seed); @@ -103,8 +107,9 @@ namespace smt { } }; - auto cube_batch_pq = [&](context& ctx, expr_ref_vector& lasms, 
expr_ref& c) { - unsigned k = 4; // Number of top literals you want + auto cube_pq = [&](context& ctx, expr_ref_vector& lasms, expr_ref& c) { + unsigned k = 3; // Number of top literals you want + ast_manager& m = ctx.get_manager(); // Get the entire fixed-size priority queue (it's not that big) @@ -134,12 +139,11 @@ namespace smt { lasms.push_back(c); }; - auto cube_batch = [&](context& ctx, expr_ref_vector& lasms, expr_ref& c) { - std::vector> candidates; + auto cube_score = [&](context& ctx, expr_ref_vector& lasms, expr_ref& c) { + vector> candidates; unsigned k = 4; // Get top-k scoring literals ast_manager& m = ctx.get_manager(); - // std::cout << ctx.m_bool_var2expr.size() << std::endl; // Prints the size of m_bool_var2expr // Loop over first 100 Boolean vars for (bool_var v = 0; v < 100; ++v) { if (ctx.get_assignment(v) != l_undef) continue; @@ -151,7 +155,7 @@ namespace smt { double score = ctx.get_score(lit); if (score == 0.0) continue; - candidates.emplace_back(expr_ref(e, m), score); + candidates.push_back(std::make_pair(expr_ref(e, m), score)); } // Sort all candidate literals descending by score @@ -179,6 +183,7 @@ namespace smt { for (unsigned i = 0; i < num_threads; ++i) unit_lim.push_back(0); std::function collect_units = [&,this]() { + //return; -- has overhead for (unsigned i = 0; i < num_threads; ++i) { context& pctx = *pctxs[i]; pctx.pop_to_base_lvl(); @@ -203,7 +208,7 @@ namespace smt { for (unsigned j = unit_lim[i]; j < sz; ++j) { expr_ref src(ctx.m), dst(pctx.m); dst = tr(unit_trail.get(j)); - pctx.assert_expr(dst); + pctx.assert_expr(dst); // Assert that the conjunction of the assumptions in this unsat core is not satisfiable — prune it from future search } unit_lim[i] = pctx.assigned_literals().size(); } @@ -213,7 +218,7 @@ namespace smt { std::mutex mux; // Lambda defining the work each SMT thread performs - auto worker_thread = [&](int i) { + auto worker_thread = [&](int i, vector& cube_batch) { try { // Get thread-specific context and AST manager context& pctx = *pctxs[i]; @@ -221,36 +226,68 @@ namespace smt { // Initialize local assumptions and cube expr_ref_vector lasms(pasms[i]); - expr_ref c(pm); - // Set the max conflict limit for this thread - pctx.get_fparams().m_max_conflicts = std::min(thread_max_conflicts, max_conflicts); + vector results; + for (expr_ref_vector& cube : cube_batch) { + expr_ref_vector lasms_copy(lasms); - // Periodically generate cubes based on frequency - if (num_rounds > 0 && (num_rounds % pctx.get_fparams().m_threads_cube_frequency) == 0) - cube_batch(pctx, lasms, c); + if (&cube.get_manager() != &pm) { + std::cerr << "Manager mismatch on cube: " << mk_bounded_pp(mk_and(cube), pm, 3) << "\n"; + UNREACHABLE(); // or throw + } - // Optional verbose logging - IF_VERBOSE(1, verbose_stream() << "(smt.thread " << i; - if (num_rounds > 0) verbose_stream() << " :round " << num_rounds; - if (c) verbose_stream() << " :cube " << mk_bounded_pp(c, pm, 3); - verbose_stream() << ")\n";); + for (expr* cube_lit : cube) { + lasms_copy.push_back(expr_ref(cube_lit, pm)); + } - // Check satisfiability of assumptions - lbool r = pctx.check(lasms.size(), lasms.data()); + // Set the max conflict limit for this thread + pctx.get_fparams().m_max_conflicts = std::min(thread_max_conflicts, max_conflicts); + + // Optional verbose logging + IF_VERBOSE(1, verbose_stream() << "(smt.thread " << i; + if (num_rounds > 0) verbose_stream() << " :round " << num_rounds; + verbose_stream() << " :cube " << mk_bounded_pp(mk_and(cube), pm, 3); + verbose_stream() << ")\n";); 
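+                        // The check below runs this thread's context on the shared assumptions plus
+                        // the literals of the current cube. The per-cube results are folded into a
+                        // single status further down: any l_true makes the whole batch SAT, otherwise
+                        // a single l_undef downgrades an overall l_false.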
+ + lbool r = pctx.check(lasms_copy.size(), lasms_copy.data()); + std::cout << "Thread " << i << " finished cube " << mk_bounded_pp(mk_and(cube), pm, 3) << " with result: " << r << "\n"; + results.push_back(r); + } + + lbool r = l_false; + for (lbool res : results) { + if (res == l_true) { + r = l_true; + } else if (res == l_undef) { + if (r == l_false) + r = l_undef; + } + } + + auto cube_intersects_core = [&](expr* cube, const expr_ref_vector &core) { + expr_ref_vector cube_lits(pctx.m); + flatten_and(cube, cube_lits); + for (expr* lit : cube_lits) + if (core.contains(lit)) + return true; + return false; + }; // Handle results based on outcome and conflict count if (r == l_undef && pctx.m_num_conflicts >= max_conflicts) ; // no-op, allow loop to continue else if (r == l_undef && pctx.m_num_conflicts >= thread_max_conflicts) return; // quit thread early - - // If cube was unsat and it's in the core, learn from it - else if (r == l_false && pctx.unsat_core().contains(c)) { - IF_VERBOSE(1, verbose_stream() << "(smt.thread " << i << " :learn " << mk_bounded_pp(c, pm, 3) << ")"); - pctx.assert_expr(mk_not(mk_and(pctx.unsat_core()))); - return; - } + // If cube was unsat and it's in the core, learn from it. i.e. a thread can be UNSAT because the cube c contradicted F. In this case learn the negation of the cube ¬c + // else if (r == l_false) { + // // IF_VERBOSE(1, verbose_stream() << "(smt.thread " << i << " :learn cube batch " << mk_bounded_pp(cube, pm, 3) << ")" << " unsat_core: " << pctx.unsat_core() << ")"); + // for (expr* cube : cube_batch) { // iterate over each cube in the batch + // if (cube_intersects_core(cube, pctx.unsat_core())) { + // // IF_VERBOSE(1, verbose_stream() << "(pruning cube: " << mk_bounded_pp(cube, pm, 3) << " given unsat core: " << pctx.unsat_core() << ")"); + // pctx.assert_expr(mk_not(mk_and(pctx.unsat_core()))); + // } + // } + // } // Begin thread-safe update of shared result state bool first = false; @@ -273,7 +310,6 @@ namespace smt { for (ast_manager* m : pms) { if (m != &pm) m->limit().cancel(); } - } catch (z3_error & err) { if (finished_id == UINT_MAX) { error_code = err.error_code(); @@ -295,14 +331,142 @@ namespace smt { } }; + struct BatchManager { + std::mutex mtx; + vector> batches; + unsigned batch_idx = 0; + unsigned batch_size = 1; + + BatchManager(unsigned batch_size) : batch_size(batch_size) {} + + // translate the next SINGLE batch of batch_size cubes to the thread + vector get_next_batch( + ast_manager &main_ctx_m, + ast_manager &thread_m + ) { + std::lock_guard lock(mtx); + vector cube_batch; // ensure bound to thread manager + if (batch_idx >= batches.size()) return cube_batch; + + vector next_batch = batches[batch_idx]; + + for (const expr_ref_vector& cube : next_batch) { + expr_ref_vector translated_cube_lits(thread_m); + for (expr* lit : cube) { + // Translate each literal to the thread's manager + translated_cube_lits.push_back(translate(lit, main_ctx_m, thread_m)); + } + cube_batch.push_back(translated_cube_lits); + } + + ++batch_idx; + + return cube_batch; + } + + // returns a list (vector) of cubes, where each cube is an expr_ref_vector of literals + vector cube_batch_pq(context& ctx) { + unsigned k = 1; // generates 2^k cubes in the batch + ast_manager& m = ctx.get_manager(); + + auto candidates = ctx.m_pq_scores.get_heap(); + std::sort(candidates.begin(), candidates.end(), + [](const auto& a, const auto& b) { return a.priority > b.priority; }); + + expr_ref_vector top_lits(m); + for (const auto& node : candidates) { + if 
(ctx.get_assignment(node.key) != l_undef) continue; + + expr* e = ctx.bool_var2expr(node.key); + if (!e) continue; + + top_lits.push_back(expr_ref(e, m)); + if (top_lits.size() >= k) break; + } + + // std::cout << "Top lits:\n"; + // for (unsigned j = 0; j < top_lits.size(); ++j) { + // std::cout << " [" << j << "] " << mk_pp(top_lits[j].get(), m) << "\n"; + // } + + unsigned num_lits = top_lits.size(); + unsigned num_cubes = 1 << num_lits; // 2^num_lits combinations + + vector cube_batch; + + for (unsigned mask = 0; mask < num_cubes; ++mask) { + expr_ref_vector cube_lits(m); + for (unsigned i = 0; i < num_lits; ++i) { + expr_ref lit(top_lits[i].get(), m); + if ((mask >> i) & 1) + cube_lits.push_back(mk_not(lit)); + else + cube_lits.push_back(lit); + } + cube_batch.push_back(cube_lits); + } + + std::cout << "Cubes out:\n"; + for (size_t j = 0; j < cube_batch.size(); ++j) { + std::cout << " [" << j << "]\n"; + for (size_t k = 0; k < cube_batch[j].size(); ++k) { + std::cout << " [" << k << "] " << mk_pp(cube_batch[j][k].get(), m) << "\n"; + } + } + + return cube_batch; + }; + + // returns a vector of new cubes batches. each cube batch is a vector of expr_ref_vector cubes + vector> gen_new_batches(context& main_ctx) { + vector> cube_batches; + + // Get all cubes in the main context's manager + vector all_cubes = cube_batch_pq(main_ctx); + + ast_manager &m = main_ctx.get_manager(); + + // Partition into batches + for (unsigned start = 0; start < all_cubes.size(); start += batch_size) { + vector batch; + + unsigned end = std::min(start + batch_size, all_cubes.size()); + for (unsigned j = start; j < end; ++j) { + batch.push_back(all_cubes[j]); + } + + cube_batches.push_back(batch); + } + batch_idx = 0; // Reset index for next round + return cube_batches; + } + + void check_for_new_batches(context& main_ctx) { + std::lock_guard lock(mtx); + if (batch_idx >= batches.size()) { + batches = gen_new_batches(main_ctx); + } + } + }; + + BatchManager batch_manager(1); + // Thread scheduling loop while (true) { vector threads(num_threads); + batch_manager.check_for_new_batches(ctx); // Launch threads for (unsigned i = 0; i < num_threads; ++i) { // [&, i] is the lambda's capture clause: capture all variables by reference (&) except i, which is captured by value. - threads[i] = std::thread([&, i]() { worker_thread(i); }); + threads[i] = std::thread([&, i]() { + while (!done) { + auto next_batch = batch_manager.get_next_batch(ctx.m, *pms[i]); + if (next_batch.empty()) break; // No more work + + worker_thread(i, next_batch); + } + }); } // Wait for all threads to finish @@ -317,7 +481,7 @@ namespace smt { collect_units(); ++num_rounds; max_conflicts = (max_conflicts < thread_max_conflicts) ? 
0 : (max_conflicts - thread_max_conflicts); - thread_max_conflicts *= 2; + thread_max_conflicts *= 2; } // Gather statistics from all solver contexts diff --git a/src/smt/smt_relevancy.cpp b/src/smt/smt_relevancy.cpp index f7ba3dcce..48fa3657d 100644 --- a/src/smt/smt_relevancy.cpp +++ b/src/smt/smt_relevancy.cpp @@ -62,6 +62,13 @@ namespace smt { void operator()(relevancy_propagator & rp) override; }; + class implies_relevancy_eh : public relevancy_eh { + app* m_parent; + public: + implies_relevancy_eh(app* p) :m_parent(p) {} + void operator()(relevancy_propagator& rp) override; + }; + class ite_relevancy_eh : public relevancy_eh { app * m_parent; public: @@ -108,6 +115,11 @@ namespace smt { return mk_relevancy_eh(or_relevancy_eh(n)); } + relevancy_eh* relevancy_propagator::mk_implies_relevancy_eh(app* n) { + SASSERT(get_manager().is_implies(n)); + return mk_relevancy_eh(implies_relevancy_eh(n)); + } + relevancy_eh * relevancy_propagator::mk_and_relevancy_eh(app * n) { SASSERT(get_manager().is_and(n)); return mk_relevancy_eh(and_relevancy_eh(n)); @@ -357,8 +369,38 @@ namespace smt { --j; mark_as_relevant(n->get_arg(j)); } - } + } + void propagate_relevant_implies(app* n) { + SASSERT(get_manager().is_implies(n)); + lbool val = m_context.find_assignment(n); + // If val is l_undef, then the expression + // is a root, and no boolean variable was created for it. + if (val == l_undef) + val = l_true; + switch (val) { + case l_false: + propagate_relevant_app(n); + break; + case l_undef: + break; + case l_true: { + expr* true_arg = nullptr; + auto arg0 = n->get_arg(0); + auto arg1 = n->get_arg(1); + if (m_context.find_assignment(arg0) == l_false) { + if (!is_relevant_core(arg0)) + mark_as_relevant(arg0); + return; + } + if (m_context.find_assignment(arg1) == l_true) { + if (!is_relevant_core(arg1)) + mark_as_relevant(arg1); + return; + } + } + } + } /** \brief Propagate relevancy for an or-application. 
*/ @@ -470,6 +512,9 @@ namespace smt { case OP_AND: propagate_relevant_and(to_app(n)); break; + case OP_IMPLIES: + propagate_relevant_implies(to_app(n)); + break; case OP_ITE: propagate_relevant_ite(to_app(n)); break; @@ -505,6 +550,8 @@ namespace smt { propagate_relevant_or(to_app(n)); else if (m.is_and(n)) propagate_relevant_and(to_app(n)); + else if (m.is_implies(n)) + propagate_relevant_implies(to_app(n)); } relevancy_ehs * ehs = get_watches(n, val); while (ehs != nullptr) { @@ -644,6 +691,11 @@ namespace smt { static_cast(rp).propagate_relevant_or(m_parent); } + void implies_relevancy_eh::operator()(relevancy_propagator& rp) { + if (rp.is_relevant(m_parent)) + static_cast(rp).propagate_relevant_implies(m_parent); + } + void ite_relevancy_eh::operator()(relevancy_propagator & rp) { if (rp.is_relevant(m_parent)) { static_cast(rp).propagate_relevant_ite(m_parent); diff --git a/src/smt/smt_relevancy.h b/src/smt/smt_relevancy.h index 8dea2842f..4827fffcb 100644 --- a/src/smt/smt_relevancy.h +++ b/src/smt/smt_relevancy.h @@ -188,6 +188,7 @@ namespace smt { void add_dependency(expr * src, expr * target); relevancy_eh * mk_or_relevancy_eh(app * n); + relevancy_eh* mk_implies_relevancy_eh(app* n); relevancy_eh * mk_and_relevancy_eh(app * n); relevancy_eh * mk_ite_relevancy_eh(app * n); relevancy_eh * mk_term_ite_relevancy_eh(app * c, app * t, app * e); From d190c83984cef35a86a25086b7b08d82ad68fd3a Mon Sep 17 00:00:00 2001 From: Nikolaj Bjorner Date: Mon, 4 Aug 2025 09:44:32 -0700 Subject: [PATCH 2/4] snapshot Signed-off-by: Nikolaj Bjorner --- src/smt/smt_context.h | 10 +++++++--- src/smt/smt_internalizer.cpp | 11 ++++++----- src/smt/smt_parallel.cpp | 10 ---------- src/smt/smt_parallel.h | 16 ++++++++++++++++ 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/src/smt/smt_context.h b/src/smt/smt_context.h index 8e548e678..6eb070b7c 100644 --- a/src/smt/smt_context.h +++ b/src/smt/smt_context.h @@ -199,7 +199,8 @@ namespace smt { }; lit_node* m_dll_lits; - svector> m_lit_scores; + + svector m_lit_scores[2]; clause_vector m_aux_clauses; clause_vector m_lemmas; @@ -947,11 +948,14 @@ namespace smt { void dump_axiom(unsigned n, literal const* lits); void add_scores(unsigned n, literal const* lits); void reset_scores() { - for (auto& s : m_lit_scores) s[0] = s[1] = 0.0; + for (auto& e : m_lit_scores[0]) + e = 0; + for (auto& e : m_lit_scores[1]) + e = 0; m_pq_scores.clear(); // Clear the priority queue heap as well } double get_score(literal l) const { - return m_lit_scores[l.var()][l.sign()]; + return m_lit_scores[l.sign()][l.var()]; } public: diff --git a/src/smt/smt_internalizer.cpp b/src/smt/smt_internalizer.cpp index ecb56e516..c7e257fac 100644 --- a/src/smt/smt_internalizer.cpp +++ b/src/smt/smt_internalizer.cpp @@ -931,8 +931,10 @@ namespace smt { set_bool_var(id, v); m_bdata.reserve(v+1); m_activity.reserve(v+1); - m_lit_scores.reserve(v + 1); - m_lit_scores[v][0] = m_lit_scores[v][1] = 0.0; + m_lit_scores[0].reserve(v + 1); + m_lit_scores[1].reserve(v + 1); + + m_lit_scores[0][v] = m_lit_scores[1][v] = 0.0; m_bool_var2expr.reserve(v+1); m_bool_var2expr[v] = n; literal l(v, false); @@ -1543,10 +1545,9 @@ namespace smt { auto lit = lits[i]; unsigned v = lit.var(); // unique key per literal - auto curr_score = m_lit_scores[v][0] * m_lit_scores[v][1]; - m_lit_scores[v][lit.sign()] += 1.0 / n; + m_lit_scores[lit.sign()][v] += 1.0 / n; - auto new_score = m_lit_scores[v][0] * m_lit_scores[v][1]; + auto new_score = m_lit_scores[0][v] * m_lit_scores[1][v]; m_pq_scores.set(v, new_score); 
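+            // Each literal of an n-ary clause adds 1/n to its polarity's counter; the ranking
+            // key is the product of both polarities, so variables that occur frequently in both
+            // phases rise to the top of the fixed-size priority queue consulted when cubes are
+            // generated.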
} diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp index ce8b699aa..40954437f 100644 --- a/src/smt/smt_parallel.cpp +++ b/src/smt/smt_parallel.cpp @@ -97,16 +97,6 @@ namespace smt { } - auto cube = [](context& ctx, expr_ref_vector& lasms, expr_ref& c) { - lookahead lh(ctx); - c = lh.choose(); - if (c) { - if ((ctx.get_random_value() % 2) == 0) - c = c.get_manager().mk_not(c); - lasms.push_back(c); - } - }; - auto cube_pq = [&](context& ctx, expr_ref_vector& lasms, expr_ref& c) { unsigned k = 3; // Number of top literals you want diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h index 07b04019d..b1c5aa6b5 100644 --- a/src/smt/smt_parallel.h +++ b/src/smt/smt_parallel.h @@ -24,6 +24,22 @@ namespace smt { class parallel { context& ctx; + + class worker { + ast_manager m; + context ctx; + expr_ref_vector asms; + public: + worker(context& ctx, expr_ref_vector const& asms); + void run(); + void cancel(); + }; + + std::mutex mux; + void set_unsat(); + void set_sat(ast_translation& tr, model& m); + void get_cubes(ast_translation& tr, expr_ref_vector& cubes); + public: parallel(context& ctx): ctx(ctx) {} From 03f2e6775cd404fcad53047a75f9705bcf5ecb76 Mon Sep 17 00:00:00 2001 From: Nikolaj Bjorner Date: Mon, 4 Aug 2025 11:45:56 -0700 Subject: [PATCH 3/4] pair programming Signed-off-by: Nikolaj Bjorner --- src/smt/smt_parallel.cpp | 130 ++++++++++++++++++++++++++++++++++++++- src/smt/smt_parallel.h | 71 +++++++++++++++++---- 2 files changed, 187 insertions(+), 14 deletions(-) diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp index 40954437f..7b2bc72d4 100644 --- a/src/smt/smt_parallel.cpp +++ b/src/smt/smt_parallel.cpp @@ -39,6 +39,132 @@ namespace smt { #include namespace smt { + + + void parallel::worker::run() { + ast_translation tr(ctx->m, m); + while (m.inc()) { + vector cubes; + b.get_cubes(tr, cubes); + if (cubes.empty()) + return; + for (auto& cube : cubes) { + if (!m.inc()) + return; // stop if the main context is cancelled + switch (check_cube(cube)) { + case l_undef: + // return unprocessed cubes to the batch manager + // add a split literal to the batch manager. + // optionally process other cubes and delay sending back unprocessed cubes to batch manager. + break; + case l_true: { + model_ref mdl; + ctx->get_model(mdl); + b.set_sat(tr, *mdl); + return; + } + case l_false: + // if unsat core only contains assumptions, then unsat + // otherwise, extract lemmas that can be shared (units (and unsat core?)). + // share with batch manager. + // process next cube. + break; + } + } + } + } + + parallel::worker::worker(parallel& p, context& _ctx, expr_ref_vector const& _asms): p(p), b(p.m_batch_manager), m_smt_params(_ctx.get_fparams()), asms(m) { + + ast_translation g2l(_ctx.m, m); + for (auto e : _asms) + asms.push_back(g2l(e)); + m_smt_params.m_preprocess = false; + ctx = alloc(context, m, m_smt_params, _ctx.get_params()); + } + + + lbool parallel::worker::check_cube(expr_ref_vector const& cube) { + + return l_undef; + } + + void parallel::batch_manager::get_cubes(ast_translation& g2l, vector& cubes) { + std::scoped_lock lock(mux); + if (m_cubes.size() == 1 && m_cubes[0].size() == 0) { + // special initialization: the first cube is emtpy, have the worker work on an empty cube. + cubes.push_back(expr_ref_vector(g2l.to())); + return; + } + // TODO adjust to number of worker threads runnin. + // if the size of m_cubes is less than m_max_batch_size/ num_threads, then return fewer cubes. 
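+            // Pop up to m_max_batch_size cubes off the shared queue, translating every literal
+            // into the requesting worker's ast_manager via g2l so the worker never touches
+            // expressions owned by the global context.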
+ for (unsigned i = 0; i < m_max_batch_size && !m_cubes.empty(); ++i) { + auto& cube = m_cubes.back(); + expr_ref_vector l_cube(g2l.to()); + for (auto& e : cube) { + l_cube.push_back(g2l(e)); + } + cubes.push_back(l_cube); + m_cubes.pop_back(); + } + } + + + void parallel::batch_manager::return_cubes(ast_translation& l2g, vectorconst& cubes, expr_ref_vector const& split_atoms) { + std::scoped_lock lock(mux); + for (auto & c : cubes) { + expr_ref_vector g_cube(l2g.to()); + for (auto& e : c) { + g_cube.push_back(l2g(e)); + } + // TODO: split this g_cube on m_split_atoms that are not already in g_cube as literals. + m_cubes.push_back(g_cube); + } + // TODO: avoid making m_cubes too large. + for (auto& atom : split_atoms) { + expr_ref g_atom(l2g.from()); + g_atom = l2g(atom); + if (m_split_atoms.contains(g_atom)) + continue; + m_split_atoms.push_back(g_atom); + unsigned sz = m_cubes.size(); + for (unsigned i = 0; i < sz; ++i) { + m_cubes.push_back(m_cubes[i]); // copy the existing cubes + m_cubes.back().push_back(m.mk_not(g_atom)); // add the negation of the split atom to each cube + m_cubes[i].push_back(g_atom); + } + } + } + + + lbool parallel::new_check(expr_ref_vector const& asms) { + + ast_manager& m = ctx.m; + { + scoped_limits sl(m.limit()); + unsigned num_threads = std::min((unsigned)std::thread::hardware_concurrency(), ctx.get_fparams().m_threads); + SASSERT(num_threads > 1); + for (unsigned i = 0; i < num_threads; ++i) + m_workers.push_back(alloc(worker, *this, ctx, asms)); + for (auto w : m_workers) + sl.push_child(&(w->limit())); + + // Launch threads + vector threads(num_threads); + for (unsigned i = 0; i < num_threads; ++i) { + threads[i] = std::thread([&, i]() { + m_workers[i]->run(); + } + ); + } + + // Wait for all threads to finish + for (auto& th : threads) + th.join(); + } + m_workers.clear(); + return m_batch_manager.get_result(); + } lbool parallel::operator()(expr_ref_vector const& asms) { @@ -397,9 +523,9 @@ namespace smt { } std::cout << "Cubes out:\n"; - for (size_t j = 0; j < cube_batch.size(); ++j) { + for (unsigned j = 0; j < cube_batch.size(); ++j) { std::cout << " [" << j << "]\n"; - for (size_t k = 0; k < cube_batch[j].size(); ++k) { + for (unsigned k = 0; k < cube_batch[j].size(); ++k) { std::cout << " [" << k << "] " << mk_pp(cube_batch[j][k].get(), m) << "\n"; } } diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h index b1c5aa6b5..7bdea79e4 100644 --- a/src/smt/smt_parallel.h +++ b/src/smt/smt_parallel.h @@ -25,23 +25,70 @@ namespace smt { class parallel { context& ctx; - class worker { - ast_manager m; - context ctx; - expr_ref_vector asms; + class batch_manager { + ast_manager& m; + parallel& p; + std::mutex mux; + expr_ref_vector m_split_atoms; // atoms to split on + vector m_cubes; + lbool m_result = l_false; + unsigned m_max_batch_size = 10; + public: - worker(context& ctx, expr_ref_vector const& asms); - void run(); - void cancel(); + batch_manager(ast_manager& m, parallel& p) : m(m), p(p), m_split_atoms(m) { m_cubes.push_back(expr_ref_vector(m)); } + void set_unsat(); + void set_sat(ast_translation& l2g, model& m); + void set_exception(std::string const& msg); + void set_exception(unsigned error_code); + + // + // worker threads ask the batch manager for a supply of cubes to check. + // they pass in a translation function from the global context to local context (ast-manager). It is called g2l. + // The batch manager returns a list of cubes to solve. 
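+            // Intended round trip (see worker::run): a worker repeatedly calls get_cubes,
+            // checks each cube in its local context, reports a model through set_sat, and is
+            // meant to hand unfinished cubes plus split-atom candidates back via return_cubes.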
+            //
+            void get_cubes(ast_translation& g2l, vector<expr_ref_vector>& cubes);
+
+            //
+            // worker threads return unprocessed cubes to the batch manager together with split literal candidates.
+            // the batch manager re-enqueues unprocessed cubes and optionally splits them using the split_atoms returned by this and workers.
+            //
+            void return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms);
+            void share_lemma(ast_translation& l2g, expr* lemma);
+            lbool get_result() const { return m.limit().is_canceled() ? l_undef : m_result; }
+        };
+
+        class worker {
+            parallel& p;
+            batch_manager& b;
+            ast_manager m;
+            expr_ref_vector asms;
+            smt_params m_smt_params;
+            scoped_ptr<context> ctx;
+            unsigned m_max_conflicts = 100;
+            unsigned m_num_shared_units = 0;
+            void share_units();
+            lbool check_cube(expr_ref_vector const& cube);
+        public:
+            worker(parallel& p, context& _ctx, expr_ref_vector const& _asms);
+            void run();
+            void cancel() {
+                m.limit().cancel();
+            }
+            void collect_statistics(::statistics& st) const {
+                ctx->collect_statistics(st);
+            }
+            reslimit& limit() {
+                return m.limit();
+            }
+        };
+
+        batch_manager m_batch_manager;
+        ptr_vector<worker> m_workers;
+
+        lbool new_check(expr_ref_vector const& asms);
 
     public:
-        parallel(context& ctx): ctx(ctx) {}
+        parallel(context& ctx): ctx(ctx), m_batch_manager(ctx.m, *this) {}
 
         lbool operator()(expr_ref_vector const& asms);
 

From 0ac6abf3a8071f15a557e1bacc221c1085369c6d Mon Sep 17 00:00:00 2001
From: Nikolaj Bjorner
Date: Mon, 4 Aug 2025 11:46:28 -0700
Subject: [PATCH 4/4] pair programming

Signed-off-by: Nikolaj Bjorner
---
 src/smt/smt_parallel.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp
index 7b2bc72d4..967b95773 100644
--- a/src/smt/smt_parallel.cpp
+++ b/src/smt/smt_parallel.cpp
@@ -60,7 +60,7 @@ namespace smt {
                 case l_true: {
                     model_ref mdl;
                     ctx->get_model(mdl);
-                    b.set_sat(tr, *mdl);
+                    //b.set_sat(tr, *mdl);
                     return;
                 }
                 case l_false:
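The hunk above comments out the set_sat call in worker::run's l_true case; the adjacent l_false case is still only described by the comments earlier in worker::run ("if unsat core only contains assumptions, then unsat; otherwise, extract lemmas that can be shared"). The following is a rough sketch of one way that branch could be realized, not the authors' implementation: set_unsat and share_lemma are taken from the declarations in smt_parallel.h, and the learned clause mirrors how the pre-batching code handled unsat cores.

// Sketch only, assuming the batch_manager helpers declared in smt_parallel.h.
case l_false: {
    expr_ref_vector const& core = ctx->unsat_core();
    // if the core mentions nothing beyond the original assumptions, the problem is unsat
    bool only_assumptions = true;
    for (expr* e : core)
        if (!asms.contains(e))
            only_assumptions = false;
    if (only_assumptions) {
        b.set_unsat();
        return;
    }
    // otherwise this cube is refuted: block the negated core globally (the earlier
    // implementation asserted mk_not(mk_and(unsat_core())) in the same situation)
    // and move on to the next cube in the batch
    b.share_lemma(tr, m.mk_not(mk_and(core)));
    break;
}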