diff --git a/src/smt/CubeTree.h b/src/smt/CubeTree.h
index af8bd0b3b..dc3b6b415 100644
--- a/src/smt/CubeTree.h
+++ b/src/smt/CubeTree.h
@@ -10,14 +10,30 @@ struct CubeNode;
 typedef expr_ref_vector Cube; // shorthand
 
+enum State {
+    open,
+    closed,
+    active
+};
+
+inline const char* to_string(State s) {
+    switch (s) {
+    case open: return "open";
+    case closed: return "closed";
+    case active: return "active";
+    default: return "unknown";
+    }
+}
+
 struct CubeNode {
     Cube cube;
     CubeNode* parent;
     std::vector<CubeNode*> children;
-    bool active = true;
+
+    State state;
 
     CubeNode(const Cube& c, CubeNode* p = 0)
-        : cube(c), parent(p) {}
+        : cube(c), parent(p), state(open) {}
 
     bool is_leaf() const { return children.empty(); }
 };
 
@@ -25,8 +41,8 @@ struct CubeNode {
 class CubeTree {
 public:
     CubeTree(ast_manager& m) {
-        Cube root_cube(m); // empty cube
-        root = nullptr;
+        Cube empty_cube(m);
+        root = new CubeNode(empty_cube); // root is allocated properly
         std::srand((unsigned)std::time(0)); // is seeding the pseudo-random number generator used by std::rand()
     }
 
@@ -72,163 +88,138 @@ public:
         }
     }
 
-    // mark node as inactive and propagate upward if parent becomes a leaf (all children inactive)
+    // mark node as closed and propagate upward if its polarity pair is also closed (a closed pair is a tautology, so the parent is closed, and thus all its siblings are closed)
     // return pointer to last affected ancestor (or nullptr if none) so we can select one of its siblings as the next cube
-    CubeNode* remove_node_and_propagate(CubeNode* node) {
-        if (!node || node == root || !node->is_leaf()) return nullptr; // error, root, or not a leaf
+    CubeNode* remove_node_and_propagate(CubeNode* node, ast_manager& m) {
+        if (!node) return nullptr;
 
         CubeNode* parent = node->parent;
-        CubeNode* last_marked = node;
+        CubeNode* last_closed = node;
 
-        // mark this node as inactive
-        node->active = false;
+        // helper: recursively mark a subtree closed
+        std::function<void(CubeNode*)> close_subtree = [&](CubeNode* n) {
+            if (!n)
+                return;
+            n->state = closed;
+            for (CubeNode* child : n->children)
+                close_subtree(child);
+        };
 
-        // propagate upward if parent became a "leaf" (all children inactive)
-        while (parent && parent != root) {
-            bool all_inactive = true;
-            for (CubeNode* child : parent->children) {
-                if (child->active) {
-                    all_inactive = false;
-                    break;
-                }
+        // mark this node as closed
+        close_subtree(node);
+
+        // propagate upward if the parent becomes UNSAT because one of its child polarity pairs (i.e. a tautology) is closed
+        while (parent) {
+            // get the index of the node in its parent's children
+            auto it = std::find(parent->children.begin(), parent->children.end(), last_closed);
+            SASSERT(it != parent->children.end());
+            unsigned idx = std::distance(parent->children.begin(), it);
+
+            CubeNode* polarity_pair = nullptr;
+            if (idx % 2 == 0 && idx + 1 < parent->children.size()) {
+                polarity_pair = parent->children[idx + 1]; // even index -> polarity pair is right sibling
+            } else if (idx % 2 == 1) {
+                polarity_pair = parent->children[idx - 1]; // odd index -> polarity pair is left sibling
             }
-            if (!all_inactive) break; // stop propagating
 
+            // print the cube and its polarity pair contents; we have to loop through each cube
+            IF_VERBOSE(1, {
+                verbose_stream() << "CubeTree: checking if parent node can be closed. Current node cube size: " << last_closed->cube.size() << " State: " << to_string(last_closed->state) << " Cube: ";
+                for (auto* e : last_closed->cube) {
+                    verbose_stream() << mk_bounded_pp(e, m, 3) << " ";
+                }
+                verbose_stream() << "\n";
+                if (polarity_pair) {
+                    verbose_stream() << "CubeTree: polarity pair cube size: " << polarity_pair->cube.size() << " State: " << to_string(polarity_pair->state) << " Cube: ";
+                    for (auto* e : polarity_pair->cube) {
+                        verbose_stream() << mk_bounded_pp(e, m, 3) << " ";
+                    }
+                    verbose_stream() << "\n";
+                } else {
+                    verbose_stream() << "CubeTree: no polarity pair found for current node\n";
+                }
+            });
 
-            SASSERT(parent->active); // parent must not be currently worked on
-            last_marked = parent; // track the last ancestor we mark
-            parent->active = false; // mark parent inactive
-            parent = parent->parent;
+            // node and its polarity pair are closed: this is a tautology, so the parent is closed, and so is the parent's entire subtree
+            if (polarity_pair && polarity_pair->state == closed) {
+                SASSERT(parent->state != active); // parent must not be currently worked on
+                close_subtree(parent);  // mark parent and its subtree as closed
+                last_closed = parent;   // track the last ancestor we mark
+                parent = parent->parent;
+            } else {
+                break; // stop propagation
+            }
         }
 
-        return last_marked;
+        return last_closed;
     }
 
-
-    // get closest cube to current by getting a random sibling of current (if current was UNSAT and we removed it from the tree)
-    // or by descending randomly to a leaf (if we split the current node) to get the newest cube split fromthe current
+    // or by descending randomly to a leaf (if we split the current node) to get the newest cube split from the current
     // we descend randomly to a leaf instead of just taking a random child because it's possible another thread made more descendants
-
-    CubeNode* get_next_cube(CubeNode* current, std::vector<CubeNode*>& frontier_roots, ast_manager& m, unsigned worker_id) {
+    CubeNode* get_next_cube(CubeNode* current, ast_manager& m, unsigned worker_id) {
         print_tree(m);
-
+        IF_VERBOSE(1, verbose_stream() << "CubeTree: current cube is null: " << (current == nullptr) << "\n");
         if (!current) return nullptr;
 
         IF_VERBOSE(1, verbose_stream() << "CubeTree: getting next cube from current of size " << current->cube.size() << "\n");
 
-        // lambda to find any active leaf in the subtree (explore all branches)
-        std::function<CubeNode*(CubeNode*)> find_active_leaf = [&](CubeNode* node) -> CubeNode* {
+        // lambda to find any open leaf in the subtree (explore all branches)
+        std::function<CubeNode*(CubeNode*)> find_open_leaf = [&](CubeNode* node) -> CubeNode* {
             if (!node) return nullptr;
-            if (node->is_leaf() && node->active) return node;
+            if (node->is_leaf() && node->state == open) return node;
             for (CubeNode* child : node->children) {
-                CubeNode* active_leaf = find_active_leaf(child);
-                if (active_leaf) return active_leaf;
+                CubeNode* open_leaf = find_open_leaf(child);
+                if (open_leaf) return open_leaf;
             }
             return nullptr;
        };
 
        CubeNode* node = current;
-        std::vector<CubeNode*> remaining_frontier_roots = frontier_roots;
-        bool is_unexplored_frontier = frontier_roots.size() > 0 && current->cube.size() < frontier_roots[0]->cube.size(); // i.e. current is above the frontier (which always happens when we start with the empty cube!!)
-        IF_VERBOSE(1, verbose_stream() << "CubeTree: current cube is " << (is_unexplored_frontier ? "above" : "within") << " the frontier. Current cube has the following children: \n");
+
+        IF_VERBOSE(1, verbose_stream() << "CubeTree: Current cube has the following children: \n");
         for (auto* child : current->children) {
-            IF_VERBOSE(1, verbose_stream() << "  Child cube size: " << child->cube.size() << " Active: " << child->active << " Cube: ");
+            IF_VERBOSE(1, verbose_stream() << "  Child cube size: " << child->cube.size() << " State: " << to_string(child->state) << " Cube: ");
             for (auto* e : child->cube) {
                 IF_VERBOSE(1, verbose_stream() << mk_bounded_pp(e, m, 3) << " ");
             }
             IF_VERBOSE(1, verbose_stream() << "\n");
         }
 
-        // if current is above the frontier, start searching from the first frontier root
-        if (is_unexplored_frontier && !frontier_roots.empty()) {
-            IF_VERBOSE(1, verbose_stream() << "CubeTree: Worker " << worker_id << " starting search from first frontier root. Frontier roots are:\n");
-            for (auto* x : frontier_roots) {
-                IF_VERBOSE(1, verbose_stream() << "  Cube size: " << x->cube.size() << " Active: " << x->active << " Cube: ");
-                for (auto* e : x->cube) {
-                    IF_VERBOSE(1, verbose_stream() << mk_bounded_pp(e, m, 3) << " ");
-                }
-                IF_VERBOSE(1, verbose_stream() << "\n");
-            }
-            node = frontier_roots[0];
-            IF_VERBOSE(1, verbose_stream() << "CubeTree: Worker " << worker_id << " selected frontier root: ");
-            for (auto* e : node->cube) {
-                IF_VERBOSE(1, verbose_stream() << mk_bounded_pp(e, m, 3) << " ");
-            }
-            IF_VERBOSE(1, verbose_stream() << "\n");
-            IF_VERBOSE(1, verbose_stream() << "This frontier root has children:\n");
-            for (auto* child : node->children) {
-                IF_VERBOSE(1, verbose_stream() << "  Child cube size: " << child->cube.size() << " Active: " << child->active << " Cube: ");
-                for (auto* e : child->cube) {
-                    IF_VERBOSE(1, verbose_stream() << mk_bounded_pp(e, m, 3) << " ");
-                }
-                IF_VERBOSE(1, verbose_stream() << "\n");
-            }
-        }
-
         while (node) {
-            // check active leaf descendants
-            CubeNode* leaf_descendant = nullptr;
-            leaf_descendant = find_active_leaf(node);
-
-            if (leaf_descendant) {
-                IF_VERBOSE(1, {verbose_stream() << "CubeTree: Worker " << worker_id << " found active leaf descendant under node (which could be the node itself): ";
+            // check open leaf descendants
+            CubeNode* nearest_open_leaf = nullptr;
+            nearest_open_leaf = find_open_leaf(node); // find an open leaf descendant
+
+            if (nearest_open_leaf) {
+                IF_VERBOSE(1, {verbose_stream() << "CubeTree: Worker " << worker_id << " found open leaf descendant under node (which could be the node itself): ";
                     for (auto* e : node->cube) {
                         verbose_stream() << mk_bounded_pp(e, m, 3) << " ";
                     }
-                    verbose_stream() << "\n Active leaf descendant is: ";
-                    for (auto* e : leaf_descendant->cube) {
+                    verbose_stream() << "\n Open leaf descendant is: ";
+                    for (auto* e : nearest_open_leaf->cube) {
                        verbose_stream() << mk_bounded_pp(e, m, 3) << " ";
                    }
                    verbose_stream() << "\n";
                });
-                return leaf_descendant;
+                return nearest_open_leaf;
            }
 
-            IF_VERBOSE(1, {verbose_stream() << "CubeTree: Worker " << worker_id << " found no active leaf descendants found under node: ";
+            IF_VERBOSE(1, {verbose_stream() << "CubeTree: Worker " << worker_id << " found no open leaf descendants under node: ";
                 for (auto* e : node->cube) {
                     verbose_stream() << mk_bounded_pp(e, m, 3) << " ";
                 }
                 verbose_stream() << "\n";
            });
 
+            // We do NOT need to check for active leaves: that could only happen if we were in another thread's subtree and that thread
+            // were working on some leaf. But this never happens, because once we exhaust our own subtree the problem must be UNSAT
+            // due to polarity-pair tautologies, so we only need to check for open leaves.
             // DO NOT NEED to check siblings and their active leaf descendants
             // since this is handled by the recursion up the tree!!
-            // and checking siblings here is unsafe if we adhere to thread frontier optimizations
 
-            // see if we're at a boundary of the frontier (i.e. we hit one of the frontier roots)
-            auto it = std::find(remaining_frontier_roots.begin(), remaining_frontier_roots.end(), node);
-            // get the index of the node in remaining_frontier_roots
-            unsigned curr_root_idx = std::distance(remaining_frontier_roots.begin(), it);
-            if (it != remaining_frontier_roots.end()) { // i.e. the node is in the list of remaining_frontier_roots
-                IF_VERBOSE(1, verbose_stream() << "CubeTree: hit frontier root " << node << "\n");
-
-                if (!remaining_frontier_roots.empty()) {
-                    IF_VERBOSE(1, verbose_stream() << "CubeTree: picking next frontier root to search from.\n");
-                    // pick the next frontier root (wrap around if at end)
-                    // we do this so we either move onto the next split atom in the frontier (if we just processed neg(atom))
-                    // or we get the negation of the atom we just processed (if we just processed pos(atom))
-                    // since the other the splits are added is [pos, neg, ...] for each split atom
-                    node = remaining_frontier_roots[curr_root_idx + 1 < remaining_frontier_roots.size() ? curr_root_idx + 1 : 0];
-
-                    // Remove exhausted frontier root
-                    remaining_frontier_roots.erase(it);
-                } else {
-                    IF_VERBOSE(1, verbose_stream() << "CubeTree: resetting frontier after exhausting\n");
-                    // Frontier exhausted: reset frontier_roots for next iteration
-                    frontier_roots.clear();
-
-                    // Start "global" search from current node
-                    node = node->parent;
-                }
-
-                continue;
-            }
-
-            // Move up in the current frontier
             node = node->parent;
         }
 
@@ -246,17 +237,6 @@ public:
 private:
     CubeNode* root;
 
-    // mark leaf as inactive instead of deleting it
-    void mark_leaf_inactive(CubeNode* node) {
-        if (!node || !node->active) return;
-
-        // must be a leaf
-        SASSERT(node->children.empty());
-
-        // just mark inactive
-        node->active = false;
-    }
-
     void delete_subtree(CubeNode* node) {
         if (!node) return;
         for (CubeNode* child : node->children) {
@@ -273,7 +253,7 @@ private:
         IF_VERBOSE(1, verbose_stream()
             << "Node@" << node
             << " size=" << node->cube.size()
-            << " active=" << node->active
+            << " state=" << to_string(node->state)
             << " cube={ ");
 
         for (expr* e : node->cube) {
diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp
index 591bb8a65..e0cf84dc1 100644
--- a/src/smt/smt_parallel.cpp
+++ b/src/smt/smt_parallel.cpp
@@ -104,12 +104,12 @@ namespace smt {
                 LOG_WORKER(1, " Curr cube node is null: " << (m_curr_cube_node == nullptr) << "\n");
                 if (m_config.m_cubetree) {
                     // use std::tie so we don't overshadow cube_node!!!
-
-                    std::tie(cube_node, cube) = b.get_cube_from_tree(m_g2l, frontier_roots, id, m_curr_cube_node); // cube node is the reference to the node in the tree, tells us how to get the next cube. "cube" is the translated cube we need for the solver
-
+                    std::tie(cube_node, cube) = get_cube_from_tree(id, m_curr_cube_node); // cube node is the reference to the node in the tree, tells us how to get the next cube. "cube" is the translated cube we need for the solver
+                    LOG_WORKER(1, " Got cube node from CubeTree. Is null: " << (cube_node == nullptr) << "\n");
                     if (!cube_node) { // i.e. no more cubes
-                        LOG_WORKER(1, " No more cubes from CubeTree, exiting\n");
+                        LOG_WORKER(1, " CubeTree ran out of nodes, problem is UNSAT\n");
+                        b.set_unsat(m_g2l, cube);
                         return;
                     }
                     m_curr_cube_node = cube_node; // store the current cube so we know how to get the next closest cube from the tree
@@ -125,7 +125,7 @@ namespace smt {
             }
 
             collect_shared_clauses(m_g2l);
-            if (!m.inc()) {
+            if (m.limit().is_canceled()) {
                 b.set_exception("context cancelled");
                 return;
             }
@@ -149,7 +149,7 @@ namespace smt {
                 // let's automatically do iterative deepening for beam search.
                 // when using more advanced metrics like explicit_hardness etc: need one of two things: (1) split if greater than or equal to the avg hardness, or (2) enter this branch only when cube.size() > 0, or else we get stuck in a loop of never deepening.
-                if (m_config.m_iterative_deepening || m_config.m_beam_search) {
+                if (!m_config.m_cubetree && (m_config.m_iterative_deepening || m_config.m_beam_search)) {
                     LOG_WORKER(1, " applying iterative deepening\n");
 
                     double cube_hardness;
@@ -171,30 +171,28 @@ namespace smt {
                     b.return_cubes(m_l2g, cube, split_atoms, should_split, cube_hardness);
                 } else if (m_config.m_cubetree) {
                     IF_VERBOSE(1, verbose_stream() << " returning undef cube to CubeTree. Cube node is null: " << (cube_node == nullptr) << "\n");
+                    bool should_split = true;
 
-                    bool is_new_frontier = frontier_roots.empty();
-                    b.return_cubes_tree(m_l2g, cube_node, cube, split_atoms, frontier_roots);
+                    if (m_config.m_iterative_deepening) {
+                        LOG_WORKER(1, " applying iterative deepening\n");
+
+                        double cube_hardness;
+                        if (m_config.m_explicit_hardness) {
+                            cube_hardness = explicit_hardness(cube);
+                            // LOG_WORKER(1, " explicit hardness: " << cube_hardness << "\n");
+                        } else { // default to naive hardness
+                            cube_hardness = naive_hardness();
+                            // LOG_WORKER(1, " naive hardness: " << cube_hardness << "\n");
+                        }
 
-                    if (is_new_frontier) {
-                        IF_VERBOSE(1, {
-                            verbose_stream() << " Worker " << id << " has new frontier roots, with the following children: \n";
-                            for (auto* node : frontier_roots) {
-                                verbose_stream() << "  Cube size: " << node->cube.size() << " Active: " << node->active << " Cube: ";
-                                for (auto* e : node->cube) {
-                                    verbose_stream() << mk_bounded_pp(e, m, 3) << " ";
-                                }
-                                verbose_stream() << " Children: ";
-                                for (auto* child : node->children) {
-                                    verbose_stream() << "[";
-                                    for (auto* e : child->cube) {
-                                        verbose_stream() << mk_bounded_pp(e, m, 3) << " ";
-                                    }
-                                    verbose_stream() << "] ";
-                                }
-                                verbose_stream() << "\n";
-                            }
-                        });
+                        const double avg_hardness = update_avg_cube_hardness_worker(cube_hardness); // let's only compare to hardness on the same thread
+                        const double factor = 1; // can tune for multiple of avg hardness later
+                        should_split = cube_hardness >= avg_hardness * factor; // must be >= otherwise we never deepen
+
+                        LOG_WORKER(1, " cube hardness: " << cube_hardness << " avg: " << avg_hardness << " avg*factor: " << avg_hardness * factor << " should-split: " << should_split << "\n");
                     }
+
+                    return_cubes_tree(cube_node, split_atoms, should_split);
                 } else {
                     b.return_cubes(m_l2g, cube, split_atoms);
                 }
@@ -244,7 +242,7 @@ namespace smt {
                 // prune the tree now that we know the cube is unsat
                 if (m_config.m_cubetree) {
                     IF_VERBOSE(1, verbose_stream() << " removing cube node from CubeTree and propagate deletion\n");
-                    b.remove_node_and_propagate(m_curr_cube_node);
+                    remove_node_and_propagate(m_curr_cube_node, m);
                 }
                 break;
             }
@@ -257,7 +255,9 @@ namespace smt {
     parallel::worker::worker(unsigned id, parallel& p, expr_ref_vector const& _asms): id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m), m_g2l(p.ctx.m, m),
-        m_l2g(m, p.ctx.m) {
+        m_l2g(m, p.ctx.m),
+        m_cubes_tree(m),
+        m_split_atoms(m) {
         for (auto e : _asms)
             asms.push_back(m_g2l(e));
         LOG_WORKER(1, " created with " << asms.size() << " assumptions\n");
@@ -319,6 +319,8 @@ namespace smt {
 
     void parallel::worker::collect_statistics(::statistics& st) const {
         ctx->collect_statistics(st);
+        st.update("parallel-num_cubes", m_stats.m_num_cubes);
+        st.update("parallel-max-cube-size", m_stats.m_max_cube_depth);
     }
 
     void parallel::worker::cancel() {
@@ -417,18 +419,14 @@ namespace smt {
         return r;
     }
 
-
-    std::pair<CubeNode*, expr_ref_vector> parallel::batch_manager::get_cube_from_tree(ast_translation& g2l, std::vector<CubeNode*>& frontier_roots, unsigned worker_id, CubeNode* prev_cube) {
-        std::scoped_lock lock(mux);
-        expr_ref_vector l_cube(g2l.to());
+    std::pair<CubeNode*, expr_ref_vector> parallel::worker::get_cube_from_tree(unsigned worker_id, CubeNode* prev_cube) {
+        expr_ref_vector l_cube(m);
         SASSERT(m_config.m_cubetree);
-
         if (m_cubes_tree.empty()) {
             // special initialization: the first cube is empty, have the worker work on an empty cube.
             IF_VERBOSE(1, verbose_stream() << "Batch manager giving out empty cube.\n");
-            expr_ref_vector g_cube(g2l.from());
-            CubeNode* new_cube_node = new CubeNode(g_cube, nullptr);
+            CubeNode* new_cube_node = new CubeNode(l_cube, nullptr);
             m_cubes_tree.add_node(new_cube_node, nullptr);
             return {new_cube_node, l_cube}; // return empty cube
         } else if (!prev_cube) {
@@ -437,21 +435,20 @@ namespace smt {
 
         // get a cube from the CubeTree
         SASSERT(!m_cubes_tree.empty());
+        CubeNode* next_cube_node = m_cubes_tree.get_next_cube(prev_cube, m, worker_id); // get the next cube in the tree closest to the prev cube (i.e. longest common prefix)
 
-        CubeNode* next_cube_node = m_cubes_tree.get_next_cube(prev_cube, frontier_roots, g2l.to(), worker_id); // get the next cube in the tree closest to the prev cube (i.e. longest common prefix)
-
-        IF_VERBOSE(1, verbose_stream() << "Batch manager giving out cube from CubeTree. Is null: " << (next_cube_node==nullptr) << "\n");
+        LOG_WORKER(1, " giving out cube from CubeTree. Is null: " << (next_cube_node==nullptr) << "\n");
 
         if (!next_cube_node) { // i.e. no more cubes
-            IF_VERBOSE(1, verbose_stream() << " No more cubes from CubeTree, exiting\n");
-            return {nullptr, l_cube}; // return nullptr and empty cube
+            LOG_WORKER(1, " No more cubes from CubeTree, exiting\n");
+            return {nullptr, l_cube}; // return nullptr and empty cube
         }
 
         for (auto& e : next_cube_node->cube) {
-            l_cube.push_back(g2l(e));
+            l_cube.push_back(e);
         }
 
-        next_cube_node->active = false; // mark the cube as inactive (i.e. being processed by a worker)
+        next_cube_node->state = active; // mark the cube as active (i.e. being processed by a worker)
         return {next_cube_node, l_cube};
     }
 
@@ -463,8 +460,7 @@ namespace smt {
         if (m_cubes.size() == 1 && m_cubes[0].empty()
             || m_config.m_beam_search && m_cubes_pq.empty()
-            || m_config.m_depth_splitting_only && m_cubes_depth_sets.empty()
-            || m_config.m_cubetree && m_cubes_tree.empty()) {
+            || m_config.m_depth_splitting_only && m_cubes_depth_sets.empty()) {
             // special initialization: the first cube is empty, have the worker work on an empty cube.
IF_VERBOSE(1, verbose_stream() << "Batch manager giving out empty cube.\n"); return l_cube; // return empty cube @@ -821,76 +817,49 @@ namespace smt { } } - void parallel::batch_manager::return_cubes_tree(ast_translation& l2g, CubeNode* cube_node, expr_ref_vector const& l_cube, expr_ref_vector const& A_worker, std::vector& frontier_roots) { - IF_VERBOSE(1, verbose_stream() << " Returning cube to batch manager's cube tree. Cube node null: " << (cube_node == nullptr) << " PROCESSING CUBE of size: " << l_cube.size() << "\n"); - - - bool is_new_frontier = frontier_roots.empty(); // need to save this as a bool here, bc otherwise the frontier stops being populated after a single split atom - IF_VERBOSE(1, verbose_stream() << " Is new frontier: " << is_new_frontier << "\n"); + void parallel::worker::return_cubes_tree(CubeNode* cube_node, expr_ref_vector const& A_worker, bool should_split) { + LOG_WORKER(1, " Returning cube to batch manager's cube tree. Cube node null: " << (cube_node == nullptr) << " PROCESSING CUBE of size: " << cube_node->cube.size() << "\n"); auto atom_in_cube = [&](expr_ref_vector const& cube, expr* atom) { return any_of(cube, [&](expr* e) { return e == atom || (m.is_not(e, e) && e == atom); }); }; - // apply the frugal strategy to ALL incoming worker cubes, but save in the PQ data structure for beam search - auto add_split_atom_tree = [&](expr* atom) { - IF_VERBOSE(1, verbose_stream() << " Adding split atom to tree: " << mk_bounded_pp(atom, m, 3) << "\n"); - expr_ref_vector g_cube(l2g.to()); - for (auto& atom : l_cube) - g_cube.push_back(l2g(atom)); - - // Positive atom branch - expr_ref_vector g_cube_pos = g_cube; - g_cube_pos.push_back(atom); - - // Negative atom branch - expr_ref_vector g_cube_neg = g_cube; - g_cube_neg.push_back(m.mk_not(atom)); - - - IF_VERBOSE(1, verbose_stream() << " Splitting positive and negative atoms: " << mk_pp(atom, m) << "," << mk_pp(m.mk_not(atom), m) << " from root: "); - //print the cube + LOG_WORKER(1, " Adding split atom to tree: " << mk_bounded_pp(atom, m, 3) << " from root: "); for (auto& e : cube_node->cube) - IF_VERBOSE(1, verbose_stream() << mk_bounded_pp(e, m, 3) << " "); - IF_VERBOSE(1, verbose_stream() << "\n"); + LOG_WORKER(1, mk_bounded_pp(e, m, 3) << " "); + LOG_WORKER(1, "\n"); - auto [left_child, right_child] = m_cubes_tree.add_children(cube_node, g_cube_pos, g_cube_neg); // note: default is active + expr_ref_vector l_cube_pos = cube_node->cube; + l_cube_pos.push_back(atom); + expr_ref_vector l_cube_neg = cube_node->cube; + l_cube_neg.push_back(m.mk_not(atom)); - if (is_new_frontier) { - frontier_roots.push_back(left_child); - frontier_roots.push_back(right_child); - IF_VERBOSE(1, verbose_stream() << " Added split to frontier roots. 
Size now: " - << frontier_roots.size() << "\n"); - - } + auto [left_child, right_child] = m_cubes_tree.add_children(cube_node, l_cube_pos, l_cube_neg); // note: default is open m_stats.m_num_cubes += 2; - m_stats.m_max_cube_depth = std::max(m_stats.m_max_cube_depth, g_cube.size() + 1); + m_stats.m_max_cube_depth = std::max(m_stats.m_max_cube_depth, cube_node->cube.size() + 1); }; - std::scoped_lock lock(mux); - - if (l_cube.size() >= m_config.m_max_cube_depth) { - IF_VERBOSE(1, verbose_stream() << " Skipping split of cube at max depth " << m_config.m_max_cube_depth << "\n";); - cube_node->active = true; // mark the cube as active again since we didn't split it + if (cube_node->cube.size() >= m_config.m_max_cube_depth || !should_split) { + LOG_WORKER(1, " Skipping split of cube at max depth " << m_config.m_max_cube_depth << "\n"); + cube_node->state = open; // mark the cube as open again since we didn't split it return; } // --- Frugal approach: only process NEW worker cubes with NEW atoms --- for (unsigned i = 0; i < A_worker.size(); ++i) { - IF_VERBOSE(1, verbose_stream() << " Processing worker atom " << mk_bounded_pp(A_worker[i], m, 3) << "\n"); - expr_ref g_atom(l2g(A_worker[i]), l2g.to()); - if (!m_split_atoms.contains(g_atom)) - m_split_atoms.push_back(g_atom); - - IF_VERBOSE(1, verbose_stream() << " splitting worker cube on new atom " << mk_bounded_pp(g_atom, m, 3) << "\n"); - add_split_atom_tree(g_atom); + LOG_WORKER(1, " Processing worker atom " << mk_bounded_pp(A_worker[i], m, 3) << "\n"); + if (!m_split_atoms.contains(A_worker[i])) + m_split_atoms.push_back(A_worker[i]); + + LOG_WORKER(1, " splitting worker cube on new atom " << mk_bounded_pp(A_worker[i], m, 3) << "\n"); + add_split_atom_tree(A_worker[i]); } - IF_VERBOSE(1, verbose_stream() << " The returned cube:"); - for (auto& e : l_cube) + LOG_WORKER(1, " The returned cube:"); + for (auto& e : cube_node->cube) IF_VERBOSE(1, verbose_stream() << mk_bounded_pp(e, m, 3) << " "); IF_VERBOSE(1, verbose_stream() << " now has the following children:\n"); for (auto child : cube_node->children) { @@ -1017,7 +986,7 @@ namespace smt { } expr_ref_vector parallel::worker::get_split_atoms() { - unsigned k = 2; + unsigned k = 1; // auto candidates = ctx->m_pq_scores.get_heap(); std::vector> top_k; // will hold at most k elements @@ -1067,8 +1036,6 @@ namespace smt { m_cubes_depth_sets.clear(); } else if (m_config.m_beam_search) { m_cubes_pq = CubePQ(); - } else if (m_config.m_cubetree) { - m_cubes_tree = CubeTree(m); } else { m_cubes.reset(); m_cubes.push_back(expr_ref_vector(m)); // push empty cube diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h index db3aebdd4..321fffba8 100644 --- a/src/smt/smt_parallel.h +++ b/src/smt/smt_parallel.h @@ -72,7 +72,6 @@ namespace smt { stats m_stats; expr_ref_vector m_split_atoms; // atoms to split on vector m_cubes; - CubeTree m_cubes_tree; struct ScoredCube { double score; @@ -117,7 +116,7 @@ namespace smt { void init_parameters_state(); public: - batch_manager(ast_manager& m, parallel& p) : m(m), p(p), m_split_atoms(m), m_cubes_tree(m) { } + batch_manager(ast_manager& m, parallel& p) : m(m), p(p), m_split_atoms(m) { } void initialize(); @@ -132,34 +131,17 @@ namespace smt { // The batch manager returns the next cube to // expr_ref_vector get_cube(ast_translation& g2l); // FOR ALL NON-TREE VERSIONS - - std::pair get_cube_from_tree(ast_translation& g2l, std::vector& frontier_roots, unsigned worker_id, CubeNode* prev_cube = nullptr); - - + // // worker threads return unprocessed cubes to the 
             // the batch manager re-enqueues unprocessed cubes and optionally splits them using the split_atoms returned by this and workers.
             //
-
-            void return_cubes_tree(ast_translation& l2g, CubeNode* cube_node, expr_ref_vector const& cube, expr_ref_vector const& split_atoms, std::vector<CubeNode*>& frontier_roots);
-
             // FOR ALL NON-TREE VERSIONS
             void return_cubes(ast_translation& l2g, expr_ref_vector const& cube, expr_ref_vector const& split_atoms, const bool should_split=true, const double hardness=1.0);
             void report_assumption_used(ast_translation& l2g, expr* assumption);
             void collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* e);
             expr_ref_vector return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id);
 
-            void remove_node_and_propagate(CubeNode* node) {
-                std::scoped_lock lock(mux);
-                SASSERT(m_config.m_cubetree);
-                CubeNode* last_removed = m_cubes_tree.remove_node_and_propagate(node);
-                if (last_removed) {
-                    IF_VERBOSE(1, verbose_stream() << "Cube tree: removed node and propagated up to depth " << last_removed->cube.size() << "\n");
-                } else {
-                    IF_VERBOSE(1, verbose_stream() << "Cube tree: ERROR removing node with no propagation\n");
-                }
-            }
-
             double update_avg_cube_hardness(double hardness) {
                 std::scoped_lock lock(mux);
                 IF_VERBOSE(1, verbose_stream() << "Cube hardness: " << hardness << ", previous avg: " << m_avg_cube_hardness << ", solved cubes: " << m_solved_cube_count << "\n";);
@@ -183,6 +165,7 @@ namespace smt {
                 double m_max_conflict_mul = 1.5;
                 bool m_share_units_initial_only = false;
                 bool m_cube_initial_only = false;
+                unsigned m_max_cube_depth = 20;
                 unsigned m_max_greedy_cubes = 1000;
                 unsigned m_num_split_lits = 2;
                 bool m_backbone_detection = false;
@@ -192,6 +175,11 @@ namespace smt {
                 bool m_cubetree = false;
             };
 
+            struct stats {
+                unsigned m_max_cube_depth = 0;
+                unsigned m_num_cubes = 0;
+            };
+
             unsigned id; // unique identifier for the worker
             parallel& p;
             batch_manager& b;
@@ -202,12 +190,33 @@ namespace smt {
             scoped_ptr<context> ctx;
             ast_translation m_g2l, m_l2g;
             CubeNode* m_curr_cube_node = nullptr;
-            std::vector<CubeNode*> frontier_roots;
+            CubeTree m_cubes_tree;
+            stats m_stats;
+            expr_ref_vector m_split_atoms;
             unsigned m_num_shared_units = 0;
             unsigned m_num_initial_atoms = 0;
             unsigned m_shared_clause_limit = 0; // remembers the index into shared_clause_trail marking the boundary between "old" and "new" clauses to share
+            double m_avg_cube_hardness = 0.0;
+            unsigned m_solved_cube_count = 0;
+
+            void remove_node_and_propagate(CubeNode* node, ast_manager& m) {
+                SASSERT(m_config.m_cubetree);
+                // CubeNode* last_removed = m_cubes_tree.remove_node_and_propagate(node, m);
+                // if (last_removed) {
+                //     IF_VERBOSE(1, verbose_stream() << "Cube tree: removed node and propagated up to depth " << last_removed->cube.size() << "\n");
+                // } else {
+                //     IF_VERBOSE(1, verbose_stream() << "Cube tree: ERROR removing node with no propagation\n");
+                // }
+            }
+
+            double update_avg_cube_hardness_worker(double hardness) {
+                IF_VERBOSE(1, verbose_stream() << "Cube hardness: " << hardness << ", previous avg: " << m_avg_cube_hardness << ", solved cubes: " << m_solved_cube_count << "\n";);
+                m_avg_cube_hardness = (m_avg_cube_hardness * m_solved_cube_count + hardness) / (m_solved_cube_count + 1);
+                m_solved_cube_count++;
+                return m_avg_cube_hardness;
+            }
 
             void share_units(ast_translation& l2g);
             lbool check_cube(expr_ref_vector const& cube);
@@ -217,7 +226,9 @@ namespace smt {
 
             expr_ref_vector find_backbone_candidates();
             expr_ref_vector get_backbones_from_candidates(expr_ref_vector const& candidates);
-
+            std::pair<CubeNode*, expr_ref_vector> get_cube_from_tree(unsigned worker_id, CubeNode* prev_cube = nullptr);
+            void return_cubes_tree(CubeNode* cube_node, expr_ref_vector const& split_atoms, const bool should_split=true);
+
             double naive_hardness();
             double explicit_hardness(expr_ref_vector const& cube);
 
         public:
diff --git a/src/smt/smt_parallel2.cpp b/src/smt/smt_parallel2.cpp
index 7aafcc5e1..f12f5ed7a 100644
--- a/src/smt/smt_parallel2.cpp
+++ b/src/smt/smt_parallel2.cpp
@@ -28,6 +28,8 @@ Author:
 #include "params/smt_parallel_params.hpp"
 #include
+#include
+#include
 
 class bounded_pp_exprs {
     expr_ref_vector const& es;
@@ -117,13 +119,9 @@ namespace smt {
                 return;
             }
             case l_false: {
-                // if unsat core only contains (external) assumptions (i.e. all the unsat core are asms), then unsat and return as this does NOT depend on cubes
-                // otherwise, extract lemmas that can be shared (units (and unsat core?)).
-                // share with batch manager.
-                // process next cube.
                 expr_ref_vector const& unsat_core = ctx->unsat_core();
                 LOG_WORKER(2, " unsat core:\n"; for (auto c : unsat_core) verbose_stream() << mk_bounded_pp(c, m, 3) << "\n");
-                // If the unsat core only contains assumptions,
+                // If the unsat core only contains external assumptions,
                 // unsatisfiability does not depend on the current cube and the entire problem is unsat.
                 if (all_of(unsat_core, [&](expr* e) { return asms.contains(e); })) {
                     LOG_WORKER(1, " determined formula unsat\n");
@@ -303,16 +301,15 @@ namespace smt {
     }
 
     void parallel2::worker::collect_shared_clauses(ast_translation& g2l) {
-        expr_ref_vector new_clauses = b.return_shared_clauses(g2l, m_shared_clause_limit, id); // get new clauses from the batch manager
+        expr_ref_vector new_clauses = b.return_shared_clauses(g2l, m_shared_clause_limit, id);
         // iterate over new clauses and assert them in the local context
         for (expr* e : new_clauses) {
-            expr_ref local_clause(e, g2l.to()); // e was already translated to the local context in the batch manager!!
-            ctx->assert_expr(local_clause); // assert the clause in the local context
+            expr_ref local_clause(e, g2l.to());
+            ctx->assert_expr(local_clause);
             LOG_WORKER(2, " asserting shared clause: " << mk_bounded_pp(local_clause, m, 3) << "\n");
         }
     }
 
-    // get new clauses from the batch manager and assert them in the local context
     expr_ref_vector parallel2::batch_manager::return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id) {
         std::scoped_lock lock(mux);
         expr_ref_vector result(g2l.to());
@@ -363,7 +360,6 @@ namespace smt {
 
         double new_score = ctx->m_lit_scores[0][v] * ctx->m_lit_scores[1][v];
 
-        // decay the scores
         ctx->m_lit_scores[0][v] /= 2;
         ctx->m_lit_scores[1][v] /= 2;
@@ -392,9 +388,7 @@ namespace smt {
             return;
         m_state = state::is_unsat;
 
-        // every time we do a check_sat call, don't want to have old info coming from a prev check_sat call
-        // the unsat core gets reset internally in the context after each check_sat, so we assert this property here
-        // takeaway: each call to check_sat needs to have a fresh unsat core
+        // each call to check_sat needs to have a fresh unsat core
         SASSERT(p.ctx.m_unsat_core.empty());
         for (expr* e : unsat_core)
             p.ctx.m_unsat_core.push_back(l2g(e));
@@ -509,7 +503,6 @@ namespace smt {
     }
 
     void parallel2::batch_manager::collect_statistics(::statistics& st) const {
-        //ctx->collect_statistics(st);
         st.update("parallel-num_cubes", m_stats.m_num_cubes);
         st.update("parallel-max-cube-size", m_stats.m_max_cube_depth);
     }
@@ -536,11 +529,6 @@ namespace smt {
         for (unsigned i = 0; i < num_threads; ++i)
             m_workers.push_back(alloc(worker, i, *this, asms)); // i.e. "new worker(i, *this, asms)"
 
-        // THIS WILL ALLOW YOU TO CANCEL ALL THE CHILD THREADS
-        // within the lexical scope of the code block, creates a data structure that allows you to push children
-        // objects to the limit object, so if someone cancels the parent object, the cancellation propagates to the children
-        // and that cancellation has the lifetime of the scope
-        // even if this code doesn't expliclty kill the main thread, still applies bc if you e.g. Ctrl+C the main thread, the children threads need to be cancelled
         for (auto w : m_workers)
             sl.push_child(&(w->limit()));
@@ -561,7 +549,7 @@ namespace smt {
             m_batch_manager.collect_statistics(ctx.m_aux_stats);
         }
 
-        return m_batch_manager.get_result(); // i.e. all threads have finished all of their cubes -- so if state::is_running is still true, means the entire formula is unsat (otherwise a thread would have returned l_undef)
+        return m_batch_manager.get_result();
     }
 }
diff --git a/src/smt/smt_parallel2.h b/src/smt/smt_parallel2.h
index 345a398fa..1d387ea82 100644
--- a/src/smt/smt_parallel2.h
+++ b/src/smt/smt_parallel2.h
@@ -21,6 +21,7 @@ Revision History:
 #include "smt/smt_context.h"
 #include "util/search_tree.h"
 #include
+#include
 #include
 #include
 
diff --git a/src/test/search_tree.cpp b/src/test/search_tree.cpp
index 25bad2150..00a8087d8 100644
--- a/src/test/search_tree.cpp
+++ b/src/test/search_tree.cpp
@@ -4,6 +4,7 @@
 #include
 #include
 #include
+#include
 
 // Initially there are no cubes.