diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp index 10f10356c..fb4125b20 100644 --- a/src/smt/smt_parallel.cpp +++ b/src/smt/smt_parallel.cpp @@ -45,12 +45,20 @@ namespace smt { namespace smt { - double parallel::worker::eval_hardness() { + double parallel::worker::explicit_hardness(expr_ref_vector const& cube) { double total = 0.0; unsigned clause_count = 0; + // Get the scope level before the cube is assumed unsigned scope_lvl = ctx->get_scope_level(); + // Build a set of bool_vars corresponding to the cube literals + svector<bool_var> cube_vars; + for (expr* e : cube) { + bool_var v = ctx->get_bool_var(e); + if (v != null_bool_var) cube_vars.push_back(v); + } + for (auto* cp : ctx->m_aux_clauses) { auto& clause = *cp; unsigned sz = 0; @@ -58,19 +66,21 @@ namespace smt { bool satisfied = false; for (literal l : clause) { - expr* e = ctx->bool_var2expr(l.var()); - if (asms.contains(e)) continue; + bool_var v = l.var(); + + // Only consider literals that are part of the cube + if (!cube_vars.contains(v)) continue; lbool val = ctx->get_assignment(l); unsigned lvl = ctx->get_assign_level(l); - if (lvl <= scope_lvl) continue; // ignore pre-existing assignments + // Only include assignments made after the scope level + if (lvl <= scope_lvl) continue; sz++; if (val == l_true) { satisfied = true; break; } if (val == l_false) n_false++; } - // LOG_WORKER(1, " n_false: " << n_false << " satisfied: " << satisfied << "\n"); if (satisfied || sz == 0) continue; @@ -82,6 +92,73 @@ namespace smt { return clause_count ? 
total / clause_count : 0.0; } + double parallel::worker::heule_schur_hardness(expr_ref_vector const& cube) { + double total = 0.0; + unsigned n = 0; + + for (expr* e : cube) { + double literal_score = 0.0; + + for (auto* cp : ctx->m_aux_clauses) { + auto& clause = *cp; + unsigned sz = 0; + bool occurs = false; + + for (literal l : clause) { + expr* lit_expr = ctx->bool_var2expr(l.var()); + if (asms.contains(lit_expr)) continue; // ignore assumptions + sz++; + if (lit_expr == e) occurs = true; + } + + if (occurs && sz > 0) { + literal_score += 1.0 / pow(2.0, sz); // weight inversely by clause size + } + } + + total += literal_score; + n++; + } + + return n ? total / n : 0.0; + } + + double parallel::worker::march_cu_hardness(expr_ref_vector const& cube) { + double total = 0.0; + unsigned n = 0; + + for (expr* e : cube) { + double score = 1.0; // start with 1 to avoid zero + + for (auto* cp : ctx->m_aux_clauses) { + auto& clause = *cp; + bool occurs = false; + + unsigned sz = 0; // clause size counting only non-assumption literals + + for (literal l : clause) { + expr* lit_expr = ctx->bool_var2expr(l.var()); + + if (asms.contains(lit_expr)) continue; // ignore assumptions + sz++; + + if (lit_expr == e) { + occurs = true; + } + } + + if (occurs && sz > 0) { + score += pow(0.5, static_cast<double>(sz)); // approximate March weight + } + } + + total += score; + n++; + } + + return n ? total / n : 0.0; + } + void parallel::worker::run() { while (m.inc()) { // inc: increase the limit and check if it is canceled, vs m.limit().is_canceled() is readonly. 
the .limit() is also not necessary (m.inc() etc provides a convenience wrapper) vector<expr_ref_vector> cubes; @@ -95,11 +172,6 @@ namespace smt { return; } - unsigned conflicts_before = ctx->m_stats.m_num_conflicts; - unsigned propagations_before = ctx->m_stats.m_num_propagations + ctx->m_stats.m_num_bin_propagations; - unsigned decisions_before = ctx->m_stats.m_num_decisions; - unsigned restarts_before = ctx->m_stats.m_num_restarts; - LOG_WORKER(1, " checking cube\n"); lbool r = check_cube(cube); @@ -118,26 +190,8 @@ namespace smt { if (m_config.m_iterative_deepening || m_config.m_beam_search) { // let's automatically do iterative deepening for beam search LOG_WORKER(1, " applying iterative deepening\n"); - if (false) { - // per-cube deltas - unsigned conflicts_delta = ctx->m_stats.m_num_conflicts - conflicts_before; - unsigned propagations_delta = (ctx->m_stats.m_num_propagations + ctx->m_stats.m_num_bin_propagations) - propagations_before; - unsigned decisions_delta = ctx->m_stats.m_num_decisions - decisions_before; - unsigned restarts_delta = ctx->m_stats.m_num_restarts - restarts_before; - LOG_WORKER(1, " cube deltas: conflicts: " << conflicts_delta << " propagations: " << propagations_delta << " decisions: " << decisions_delta << " restarts: " << restarts_delta << "\n"); - - // tuned experimentally - const double w_conflicts = 1.0; - const double w_propagations = 0.001; - const double w_decisions = 0.1; - const double w_restarts = 0.5; - - const double cube_hardness = w_conflicts * conflicts_delta + - w_propagations * propagations_delta + - w_decisions * decisions_delta + - w_restarts * restarts_delta; - } - const double cube_hardness = eval_hardness(); + // const double cube_hardness = ctx->m_stats.m_num_decisions / std::max(1u, ctx->m_stats.m_num_conflicts); + const double cube_hardness = explicit_hardness(cube); // heule_schur_hardness(cube); march_cu_hardness(cube); const double avg_hardness = b.update_avg_cube_hardness(cube_hardness); const double factor = 1; // can 
tune for multiple of avg hardness later @@ -538,7 +592,7 @@ namespace smt { ------------------------------------------------------------------------------------------------------------------------------------------------------------ Final thought (do this!): use greedy strategy by a policy when C_batch, A_batch, A_worker are "small". -- want to do this. switch to frugal strategy after reaching size limit */ - void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& C_worker, expr_ref_vector const& A_worker, const bool should_split) { + void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& C_worker, expr_ref_vector const& A_worker, const bool should_split, const double hardness) { // SASSERT(!m_config.never_cube()); auto atom_in_cube = [&](expr_ref_vector const& cube, expr* atom) { @@ -607,12 +661,12 @@ namespace smt { // Positive atom branch expr_ref_vector cube_pos = g_cube; cube_pos.push_back(atom); - m_cubes_pq.push(ScoredCube(d, cube_pos)); + m_cubes_pq.push(ScoredCube(d / hardness, cube_pos)); // Negative atom branch expr_ref_vector cube_neg = g_cube; cube_neg.push_back(m.mk_not(atom)); - m_cubes_pq.push(ScoredCube(d, cube_neg)); + m_cubes_pq.push(ScoredCube(d / hardness, cube_neg)); m_stats.m_num_cubes += 2; m_stats.m_max_cube_depth = std::max(m_stats.m_max_cube_depth, d + 1); @@ -664,7 +718,7 @@ namespace smt { if ((c.size() >= m_config.m_max_cube_depth || !should_split) && (m_config.m_depth_splitting_only || m_config.m_iterative_deepening || m_config.m_beam_search)) { if (m_config.m_beam_search) { - m_cubes_pq.push(ScoredCube(g_cube.size(), g_cube)); // re-enqueue the cube as is + m_cubes_pq.push(ScoredCube(g_cube.size() / hardness, g_cube)); // re-enqueue the cube as is } else { // need to add the depth set if it doesn't exist yet if (m_cubes_depth_sets.find(g_cube.size()) == m_cubes_depth_sets.end()) { diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h index 44c57c6e8..cb5a4bad1 100644 --- 
a/src/smt/smt_parallel.h +++ b/src/smt/smt_parallel.h @@ -133,7 +133,7 @@ namespace smt { // worker threads return unprocessed cubes to the batch manager together with split literal candidates. // the batch manager re-enqueues unprocessed cubes and optionally splits them using the split_atoms returned by this and workers. // - void return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms, const bool should_split=true); + void return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms, const bool should_split=true, const double hardness=1.0); void report_assumption_used(ast_translation& l2g, expr* assumption); void collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* e); expr_ref_vector return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id); @@ -187,7 +187,9 @@ namespace smt { expr_ref_vector find_backbone_candidates(); expr_ref_vector get_backbones_from_candidates(expr_ref_vector const& candidates); - double eval_hardness(); + double explicit_hardness(expr_ref_vector const& cube); + double heule_schur_hardness(expr_ref_vector const& cube); + double march_cu_hardness(expr_ref_vector const& cube); public: worker(unsigned id, parallel& p, expr_ref_vector const& _asms); void run();