mirror of
https://github.com/Z3Prover/z3
synced 2026-05-09 11:52:21 +00:00
fix(parallel-smt): gate split/backtrack by lease epoch
What it changes:
- util/search_tree.h
- bumps node epoch on split
- threads epoch through should_split(...) and try_split(...)
- always records effort, but only splits/reopens if the lease epoch still matches
- smt/smt_parallel.cpp
- requires is_lease_valid(..., lease.epoch) before backtrack(...)
- passes lease.epoch into m_search_tree.try_split(...)
This commit is contained in:
parent
b341c61fbc
commit
013f1db739
2 changed files with 11 additions and 9 deletions
|
|
@ -358,9 +358,9 @@ namespace smt {
|
|||
g_core.push_back(expr_ref(l2g(c), m));
|
||||
}
|
||||
release_lease_unlocked(worker_id, lease.node, lease.epoch);
|
||||
if (lease.node && !m_search_tree.is_lease_canceled(lease.node, lease.cancel_epoch))
|
||||
m_search_tree.backtrack(lease.node, g_core);
|
||||
else if (lease.node && lease.node->get_status() == search_tree::status::closed)
|
||||
if (lease.node &&
|
||||
m_search_tree.is_lease_valid(lease.node, lease.epoch) &&
|
||||
!m_search_tree.is_lease_canceled(lease.node, lease.cancel_epoch))
|
||||
m_search_tree.backtrack(lease.node, g_core);
|
||||
|
||||
cancel_closed_leases_unlocked(worker_id);
|
||||
|
|
@ -390,7 +390,7 @@ namespace smt {
|
|||
if (!lease.node || m_search_tree.is_lease_canceled(lease.node, lease.cancel_epoch))
|
||||
return;
|
||||
|
||||
bool did_split = m_search_tree.try_split(lease.node, lit, nlit, effort);
|
||||
bool did_split = m_search_tree.try_split(lease.node, lit, nlit, effort, lease.epoch);
|
||||
|
||||
if (did_split) {
|
||||
++m_stats.m_num_cubes;
|
||||
|
|
|
|||
|
|
@ -80,6 +80,7 @@ namespace search_tree {
|
|||
SASSERT(!m_right);
|
||||
m_left = alloc(node<Config>, a, this);
|
||||
m_right = alloc(node<Config>, b, this);
|
||||
bump_epoch();
|
||||
}
|
||||
|
||||
node* left() const { return m_left; }
|
||||
|
|
@ -277,8 +278,8 @@ namespace search_tree {
|
|||
find_shallowest_timed_out_leaf_depth(cur->right(), best_depth);
|
||||
}
|
||||
|
||||
bool should_split(node<Config>* n) {
|
||||
if (!n || n->get_status() != status::active || !n->is_leaf())
|
||||
bool should_split(node<Config>* n, unsigned epoch) {
|
||||
if (!n || n->epoch() != epoch || n->get_status() != status::active || !n->is_leaf())
|
||||
return false;
|
||||
|
||||
unsigned num_active_nodes = count_active_nodes(m_root.get());
|
||||
|
|
@ -474,7 +475,7 @@ namespace search_tree {
|
|||
|
||||
// On timeout, either expand the current leaf or reopen the node for a
|
||||
// later revisit, depending on the tree-expansion heuristic.
|
||||
bool try_split(node<Config> *n, literal const &a, literal const &b, unsigned effort) {
|
||||
bool try_split(node<Config> *n, literal const &a, literal const &b, unsigned effort, unsigned epoch) {
|
||||
// the node could have been marked open by another thread that finished first and split
|
||||
// we still want to add the current thread's effort in this case, but not if the node was closed
|
||||
if (!n || n->get_status() == status::closed)
|
||||
|
|
@ -483,7 +484,7 @@ namespace search_tree {
|
|||
n->add_effort(effort);
|
||||
bool did_split = false;
|
||||
|
||||
if (should_split(n)) {
|
||||
if (should_split(n, epoch)) {
|
||||
n->split(a, b);
|
||||
did_split = true;
|
||||
}
|
||||
|
|
@ -497,7 +498,8 @@ namespace search_tree {
|
|||
//
|
||||
// Waiting for all workers would introduce per-node synchronization, delay
|
||||
// diversification, and let a slow worker stall progress.
|
||||
n->set_status(status::open);
|
||||
if (n->epoch() == epoch)
|
||||
n->set_status(status::open);
|
||||
|
||||
return did_split;
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue