diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp
index 3243ba5bd..fb4125b20 100644
--- a/src/smt/smt_parallel.cpp
+++ b/src/smt/smt_parallel.cpp
@@ -45,6 +45,120 @@ namespace smt {
 namespace smt {
+    double parallel::worker::explicit_hardness(expr_ref_vector const& cube) {
+        double total = 0.0;
+        unsigned clause_count = 0;
+
+        // Get the scope level before the cube is assumed
+        unsigned scope_lvl = ctx->get_scope_level();
+
+        // Build a set of bool_vars corresponding to the cube literals
+        svector<bool_var> cube_vars;
+        for (expr* e : cube) {
+            bool_var v = ctx->get_bool_var(e);
+            if (v != null_bool_var) cube_vars.push_back(v);
+        }
+
+        for (auto* cp : ctx->m_aux_clauses) {
+            auto& clause = *cp;
+            unsigned sz = 0;
+            unsigned n_false = 0;
+            bool satisfied = false;
+
+            for (literal l : clause) {
+                bool_var v = l.var();
+
+                // Only consider literals that are part of the cube
+                if (!cube_vars.contains(v)) continue;
+
+                lbool val = ctx->get_assignment(l);
+                unsigned lvl = ctx->get_assign_level(l);
+
+                // Only include assignments made after the scope level
+                if (lvl <= scope_lvl) continue;
+
+                sz++;
+                if (val == l_true) { satisfied = true; break; }
+                if (val == l_false) n_false++;
+            }
+
+            if (satisfied || sz == 0) continue;
+
+            double m = static_cast<double>(sz) / std::max(1u, sz - n_false);
+            total += m;
+            clause_count++;
+        }
+
+        return clause_count ? total / clause_count : 0.0;
+    }
+
+    double parallel::worker::heule_schur_hardness(expr_ref_vector const& cube) {
+        double total = 0.0;
+        unsigned n = 0;
+
+        for (expr* e : cube) {
+            double literal_score = 0.0;
+
+            for (auto* cp : ctx->m_aux_clauses) {
+                auto& clause = *cp;
+                unsigned sz = 0;
+                bool occurs = false;
+
+                for (literal l : clause) {
+                    expr* lit_expr = ctx->bool_var2expr(l.var());
+                    if (asms.contains(lit_expr)) continue; // ignore assumptions
+                    sz++;
+                    if (lit_expr == e) occurs = true;
+                }
+
+                if (occurs && sz > 0) {
+                    literal_score += 1.0 / pow(2.0, sz); // weight inversely by clause size
+                }
+            }
+
+            total += literal_score;
+            n++;
+        }
+
+        return n ? total / n : 0.0;
+    }
+
+    double parallel::worker::march_cu_hardness(expr_ref_vector const& cube) {
+        double total = 0.0;
+        unsigned n = 0;
+
+        for (expr* e : cube) {
+            double score = 1.0; // start with 1 to avoid zero
+
+            for (auto* cp : ctx->m_aux_clauses) {
+                auto& clause = *cp;
+                bool occurs = false;
+
+                unsigned sz = 0; // clause size counting only non-assumption literals
+
+                for (literal l : clause) {
+                    expr* lit_expr = ctx->bool_var2expr(l.var());
+
+                    if (asms.contains(lit_expr)) continue; // ignore assumptions
+                    sz++;
+
+                    if (lit_expr == e) {
+                        occurs = true;
+                    }
+                }
+
+                if (occurs && sz > 0) {
+                    score += pow(0.5, static_cast<double>(sz)); // approximate March weight
+                }
+            }
+
+            total += score;
+            n++;
+        }
+
+        return n ? total / n : 0.0;
+    }
+
     void parallel::worker::run() {
         while (m.inc()) { // inc: increase the limit and check if it is canceled, vs m.limit().is_canceled() is readonly. the .limit() is also not necessary (m.inc() etc provides a convenience wrapper)
             vector<expr_ref_vector> cubes;
@@ -57,16 +171,12 @@ namespace smt {
                     b.set_exception("context cancelled");
                     return;
                 }
-                LOG_WORKER(1, " checking cube: " << mk_bounded_pp(mk_and(cube), m, 3) << " max-conflicts " << m_config.m_threads_max_conflicts << "\n");
+
+                LOG_WORKER(1, " checking cube\n");
                 lbool r = check_cube(cube);
-                if (m.limit().is_canceled()) {
-                    LOG_WORKER(1, " context cancelled\n");
-                    return;
-                }
+
                 switch (r) {
                     case l_undef: {
-                        if (m.limit().is_canceled())
-                            break;
                         LOG_WORKER(1, " found undef cube\n");
                         // return unprocessed cubes to the batch manager
                         // add a split literal to the batch manager.
@@ -76,40 +186,17 @@ namespace smt {
                         returned_cubes.push_back(cube);
                         auto split_atoms = get_split_atoms();
-                        if (m_config.m_iterative_deepening) {
-                            unsigned conflicts_before = ctx->m_stats.m_num_conflicts;
-                            unsigned propagations_before = ctx->m_stats.m_num_propagations + ctx->m_stats.m_num_bin_propagations;
-                            unsigned decisions_before = ctx->m_stats.m_num_decisions;
-                            unsigned restarts_before = ctx->m_stats.m_num_restarts;
+                        LOG_WORKER(1, " CUBING\n");
+                        if (m_config.m_iterative_deepening || m_config.m_beam_search) { // let's automatically do iterative deepening for beam search
+                            LOG_WORKER(1, " applying iterative deepening\n");
-                            lbool r = check_cube(cube);
-
-                            unsigned conflicts_after = ctx->m_stats.m_num_conflicts;
-                            unsigned propagations_after = ctx->m_stats.m_num_propagations + ctx->m_stats.m_num_bin_propagations;
-                            unsigned decisions_after = ctx->m_stats.m_num_decisions;
-                            unsigned restarts_after = ctx->m_stats.m_num_restarts;
-
-                            // per-cube deltas
-                            unsigned conflicts_delta = conflicts_after - conflicts_before;
-                            unsigned propagations_delta = propagations_after - propagations_before;
-                            unsigned decisions_delta = decisions_after - decisions_before;
-                            unsigned restarts_delta = restarts_after - restarts_before;
-                            LOG_WORKER(1, " cube deltas: conflicts: " << conflicts_delta << " propagations: " << propagations_delta << " decisions: " << decisions_delta << " restarts: " << restarts_delta << "\n");
-
-                            // tuned experimentally
-                            const double w_conflicts = 1.0;
-                            const double w_propagations = 0.001;
-                            const double w_decisions = 0.1;
-                            const double w_restarts = 0.5;
-
-                            const double cube_hardness = w_conflicts * conflicts_delta +
-                                w_propagations * propagations_delta +
-                                w_decisions * decisions_delta +
-                                w_restarts * restarts_delta;
+                            // const double cube_hardness = ctx->m_stats.m_num_decisions / std::max(1u, ctx->m_stats.m_num_conflicts);
+                            const double cube_hardness = explicit_hardness(cube); // heule_schur_hardness(cube); march_cu_hardness(cube);
                             const double avg_hardness = b.update_avg_cube_hardness(cube_hardness);
                             const double factor = 1; // can tune for multiple of avg hardness later
-                            bool should_split = cube_hardness >= avg_hardness * factor;
+                            bool should_split = cube_hardness > avg_hardness * factor;
+                            LOG_WORKER(1, " cube hardness: " << cube_hardness << " avg: " << avg_hardness << " avg*factor: " << avg_hardness * factor << " should-split: " << should_split << "\n");
                             // we still need to call return_cubes, even if we don't split, since we need to re-enqueue the current unsolved cube to the batch manager!
                             // should_split tells return_cubes whether to further split the unsolved cube.
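Aside, not part of the patch: a minimal standalone sketch of the clause-based measure that explicit_hardness computes above, using plain integer literals instead of z3's literal/bool_var machinery and ignoring the scope-level filtering the real code performs. The names toy_clause and cube_hardness_sketch are illustrative only. Each clause, restricted to literals over cube variables, contributes size / max(1, size - #falsified), so clauses whose cube literals are mostly falsified weigh more, and the result is averaged over the contributing clauses.

// Standalone illustration (hypothetical names, not z3 API): the same ratio as
// explicit_hardness, over plain ints.  A literal is a signed int; an assignment
// maps a variable to +1 (true) or -1 (false); unassigned variables are absent.
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <map>
#include <vector>

using toy_clause = std::vector<int>;

double cube_hardness_sketch(std::vector<toy_clause> const& clauses,
                            std::map<int, int> const& assignment) {
    double total = 0.0;
    unsigned clause_count = 0;
    for (auto const& c : clauses) {
        unsigned sz = 0, n_false = 0;
        bool satisfied = false;
        for (int lit : c) {
            auto it = assignment.find(std::abs(lit));
            if (it == assignment.end()) continue;   // variable not in the cube
            int value = it->second * (lit > 0 ? 1 : -1);
            ++sz;
            if (value > 0) { satisfied = true; break; }
            if (value < 0) ++n_false;
        }
        if (satisfied || sz == 0) continue;         // satisfied clauses don't count
        total += static_cast<double>(sz) / std::max(1u, sz - n_false);
        ++clause_count;
    }
    return clause_count ? total / clause_count : 0.0;
}

int main() {
    // Clauses over variables 1..4; the cube assigns 1 := true, 2 := false.
    std::vector<toy_clause> clauses = { {-1, 2, 3}, {-1, -2, 4}, {1, 2} };
    std::map<int, int> cube = { {1, +1}, {2, -1} };
    // {-1, 2, 3}: both cube literals false -> contributes 2 / max(1, 0) = 2
    // {-1, -2, 4}: -2 is true under the cube -> satisfied, skipped
    // {1, 2}: satisfied by 1 -> skipped
    std::cout << cube_hardness_sketch(clauses, cube) << "\n";  // prints 2
    return 0;
}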
@@ -194,6 +281,7 @@ namespace smt {
         m_config.m_num_split_lits = pp.num_split_lits();
         m_config.m_backbone_detection = pp.backbone_detection();
         m_config.m_iterative_deepening = pp.iterative_deepening();
+        m_config.m_beam_search = pp.beam_search();

         // don't share initial units
         ctx->pop_to_base_lvl();
@@ -504,7 +592,7 @@ namespace smt {
     ------------------------------------------------------------------------------------------------------------------------------------------------------------
     Final thought (do this!): use greedy strategy by a policy when C_batch, A_batch, A_worker are "small". -- want to do this. switch to frugal strategy after reaching size limit
     */
-    void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& C_worker, expr_ref_vector const& A_worker, const bool should_split) {
+    void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& C_worker, expr_ref_vector const& A_worker, const bool should_split, const double hardness) {
         // SASSERT(!m_config.never_cube());

         auto atom_in_cube = [&](expr_ref_vector const& cube, expr* atom) {
@@ -573,12 +661,12 @@ namespace smt {
                 // Positive atom branch
                 expr_ref_vector cube_pos = g_cube;
                 cube_pos.push_back(atom);
-                m_cubes_pq.push(ScoredCube(d, cube_pos));
+                m_cubes_pq.push(ScoredCube(d / hardness, cube_pos));

                 // Negative atom branch
                 expr_ref_vector cube_neg = g_cube;
                 cube_neg.push_back(m.mk_not(atom));
-                m_cubes_pq.push(ScoredCube(d, cube_neg));
+                m_cubes_pq.push(ScoredCube(d / hardness, cube_neg));

                 m_stats.m_num_cubes += 2;
                 m_stats.m_max_cube_depth = std::max(m_stats.m_max_cube_depth, d + 1);
@@ -630,7 +718,7 @@ namespace smt {
             if ((c.size() >= m_config.m_max_cube_depth || !should_split) && (m_config.m_depth_splitting_only || m_config.m_iterative_deepening || m_config.m_beam_search)) {
                 if (m_config.m_beam_search) {
-                    m_cubes_pq.push(ScoredCube(g_cube.size(), g_cube)); // re-enqueue the cube as is
+                    m_cubes_pq.push(ScoredCube(g_cube.size() / hardness, g_cube)); // re-enqueue the cube as is
                 } else {
                     // need to add the depth set if it doesn't exist yet
                     if (m_cubes_depth_sets.find(g_cube.size()) == m_cubes_depth_sets.end()) {
diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h
index 129271551..cb5a4bad1 100644
--- a/src/smt/smt_parallel.h
+++ b/src/smt/smt_parallel.h
@@ -133,7 +133,7 @@ namespace smt {
         // worker threads return unprocessed cubes to the batch manager together with split literal candidates.
         // the batch manager re-enqueues unprocessed cubes and optionally splits them using the split_atoms returned by this and workers.
         //
-        void return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms, const bool should_split=true);
+        void return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms, const bool should_split=true, const double hardness=1.0);
         void report_assumption_used(ast_translation& l2g, expr* assumption);
         void collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* e);
         expr_ref_vector return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id);
@@ -164,6 +164,7 @@ namespace smt {
             unsigned m_num_split_lits = 2;
             bool m_backbone_detection = false;
             bool m_iterative_deepening = false;
+            bool m_beam_search = false;
         };

         unsigned id; // unique identifier for the worker
@@ -186,6 +187,9 @@ namespace smt {
         expr_ref_vector find_backbone_candidates();
         expr_ref_vector get_backbones_from_candidates(expr_ref_vector const& candidates);

+        double explicit_hardness(expr_ref_vector const& cube);
+        double heule_schur_hardness(expr_ref_vector const& cube);
+        double march_cu_hardness(expr_ref_vector const& cube);
     public:
         worker(unsigned id, parallel& p, expr_ref_vector const& _asms);
         void run();
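Aside, not part of the patch: a standalone sketch of the occurrence-based scores behind heule_schur_hardness and march_cu_hardness above, again on plain integer clauses rather than z3 clauses; names are illustrative only, and unlike the patch it does not discount assumption literals from the clause size. Both heuristics weight every clause that contains a cube literal by 2^-(clause size), so short clauses dominate; the Heule-Schur variant averages per-literal sums starting from 0, while the March-style variant starts each literal's score at 1.

// Standalone illustration (hypothetical names, not z3 API) of the
// Heule-Schur-style cube score: each clause containing the cube literal
// contributes 2^-(clause size), and the per-literal sums are averaged.
#include <cmath>
#include <iostream>
#include <vector>

using toy_clause = std::vector<int>;

double heule_schur_sketch(std::vector<toy_clause> const& clauses,
                          std::vector<int> const& cube) {
    double total = 0.0;
    for (int cube_lit : cube) {
        double literal_score = 0.0;
        for (auto const& c : clauses) {
            bool occurs = false;
            for (int lit : c)
                if (lit == cube_lit) occurs = true;
            if (occurs && !c.empty())
                literal_score += std::pow(0.5, static_cast<double>(c.size()));
        }
        total += literal_score;
    }
    return cube.empty() ? 0.0 : total / cube.size();
}

int main() {
    std::vector<toy_clause> clauses = { {1, -2}, {1, 3, 4}, {-1, 2, 3} };
    std::vector<int> cube = { 1, 3 };
    // literal 1 occurs in {1,-2} and {1,3,4}: 2^-2 + 2^-3 = 0.375
    // literal 3 occurs in {1,3,4} and {-1,2,3}: 2^-3 + 2^-3 = 0.25
    std::cout << heule_schur_sketch(clauses, cube) << "\n";  // prints 0.3125
    return 0;
}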