Mirror of https://github.com/Z3Prover/z3, synced 2025-09-13 05:01:28 +00:00

commit 58dc54e840 (parent 9bf74b5d2b)

Remove the priority queue for top-k literals and replace it with a simple linear scan. The priority-queue implementation backend still remains in case we want to switch back.

3 changed files with 33 additions and 42 deletions
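For context on the trade-off: the priority queue had to be updated on every score change, while the top-k literals are only materialized when a worker extracts split atoms, and k is tiny (k = 2 in get_split_atoms below). Scanning all Boolean variables once at extraction time costs O(V * k) and removes the per-update heap maintenance. A minimal, self-contained sketch of the top-k-by-linear-scan pattern; the helper name top_k_insert is illustrative and not part of the Z3 sources:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

// Keep the k highest-scoring items seen so far. For a small, fixed k this
// replacement scan is simpler than maintaining a heap, and it moves all the
// work to the single point where the top-k elements are actually requested.
template <typename T>
void top_k_insert(std::vector<std::pair<double, T>>& top_k, std::size_t k,
                  double score, T const& item) {
    if (top_k.size() < k) {             // still room: just append
        top_k.push_back({score, item});
        return;
    }
    std::size_t min_idx = 0;            // find the current minimum
    for (std::size_t i = 1; i < top_k.size(); ++i)
        if (top_k[i].first < top_k[min_idx].first)
            min_idx = i;
    if (score > top_k[min_idx].first)   // replace it if the new score is larger
        top_k[min_idx] = {score, item};
}
```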
@@ -191,7 +191,7 @@ namespace smt {
         unsigned_vector             m_lit_occs;     //!< occurrence count of literals
         svector<bool_var_data>      m_bdata;        //!< mapping bool_var -> data
         svector<double>             m_activity;
-        updatable_priority_queue::priority_queue<bool_var, double> m_pq_scores;
+        // updatable_priority_queue::priority_queue<bool_var, double> m_pq_scores;

         struct lit_node : dll_base<lit_node> {
             literal lit;
@@ -199,7 +199,6 @@ namespace smt {
         };
         lit_node* m_dll_lits;

-        // svector<std::array<double, 2>> m_lit_scores;
         svector<double> m_lit_scores[2];

         clause_vector               m_aux_clauses;
@@ -952,7 +951,7 @@ namespace smt {
                 e = 0;
             for (auto& e : m_lit_scores[1])
                 e = 0;
-            m_pq_scores.clear(); // Clear the priority queue heap as well
+            // m_pq_scores.clear(); // Clear the priority queue heap as well
         }
         double get_score(literal l) const {
             return m_lit_scores[l.sign()][l.var()];
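What remains after this change is the pair of per-polarity score vectors: m_lit_scores is indexed first by literal sign and then by Boolean variable, so get_score(l) is just m_lit_scores[l.sign()][l.var()], and a variable's split score is the product of its two polarity entries, the value the commented-out priority queue used to cache. A small standalone sketch of that layout, with std::vector<double> standing in for Z3's svector<double> and the struct name purely illustrative:

```cpp
#include <vector>

// Illustrative stand-in for the two per-polarity score arrays
// (Z3 keeps them as: svector<double> m_lit_scores[2], indexed [sign][var]).
struct lit_score_table {
    std::vector<double> scores[2];        // [literal sign][boolean variable]

    explicit lit_score_table(unsigned num_vars) {
        scores[0].resize(num_vars, 0.0);
        scores[1].resize(num_vars, 0.0);
    }

    // Mirrors context::get_score: look up by sign, then by variable.
    double get_score(unsigned var, bool sign) const {
        return scores[sign ? 1 : 0][var];
    }

    // Ranking value used when picking split atoms: the product of both
    // polarities, i.e. the value the removed priority queue used to cache.
    double split_score(unsigned var) const {
        return scores[0][var] * scores[1][var];
    }
};
```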
@@ -1532,14 +1532,6 @@ namespace smt {
         }}
     }

-    // void context::add_scores(unsigned n, literal const* lits) {
-    //     for (unsigned i = 0; i < n; ++i) {
-    //         auto lit = lits[i];
-    //         unsigned v = lit.var();
-    //         m_lit_scores[v][lit.sign()] += 1.0 / n;
-    //     }
-    // }
-
     void context::add_scores(unsigned n, literal const* lits) {
         for (unsigned i = 0; i < n; ++i) {
             auto lit = lits[i];
@@ -1547,8 +1539,8 @@ namespace smt {

             m_lit_scores[lit.sign()][v] += 1.0 / n;

-            auto new_score = m_lit_scores[0][v] * m_lit_scores[1][v];
-            m_pq_scores.set(v, new_score);
+            // auto new_score = m_lit_scores[0][v] * m_lit_scores[1][v];
+            // m_pq_scores.set(v, new_score);

         }
     }
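With the queue gone, add_scores only bumps the per-polarity arrays: every literal of an n-literal clause receives 1/n, so literals from short clauses weigh more, and the product score no longer has to be recomputed and pushed into the heap for each literal. A hedged, standalone sketch of the same update rule; the types and names here are illustrative, not the Z3 declarations:

```cpp
#include <vector>

// Illustrative literal: a Boolean variable plus a sign (true = negated).
struct lit { unsigned var; bool sign; };

// Each literal of an n-literal clause contributes 1.0 / n to its polarity
// bucket. This is the whole update now; the per-literal priority-queue
// update that used to follow it has been dropped.
void add_clause_scores(std::vector<double> (&scores)[2],
                       const std::vector<lit>& clause) {
    const double w = 1.0 / clause.size();
    for (const lit& l : clause)
        scores[l.sign ? 1 : 0][l.var] += w;
}
```

For example, a hypothetical clause a OR (not b) OR c adds 1/3 to the positive bucket of a and c and 1/3 to the negative bucket of b; a variable's split score, the product of its two buckets, only becomes non-zero once both of its polarities have been seen.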
@@ -560,47 +560,47 @@ namespace smt {
     }

     expr_ref_vector parallel::worker::get_split_atoms() {
-#if false
+        unsigned k = 2;
+
+        // auto candidates = ctx->m_pq_scores.get_heap();
+        auto candidates = ctx->m_lit_scores;
+        std::vector<std::pair<double, expr*>> top_k; // will hold at most k elements
+
         for (bool_var v = 0; v < ctx->get_num_bool_vars(); ++v) {
             if (ctx->get_assignment(v) != l_undef)
                 continue;
             expr* e = ctx->bool_var2expr(v);
             if (!e)
                 continue;
-            auto v_score = ctx->m_lit_scores[0][v] * ctx->m_lit_scores[1][v];
-            // if v_score is maximal then v is our split atom..
+
+            double score = ctx->m_lit_scores[0][v] * ctx->m_lit_scores[1][v];
+
+            // decay the scores
             ctx->m_lit_scores[0][v] /= 2;
             ctx->m_lit_scores[1][v] /= 2;
-        }
-#endif
-        unsigned k = 2;
-
-        auto candidates = ctx->m_pq_scores.get_heap();
-
-        std::sort(candidates.begin(), candidates.end(),
-            [](const auto& a, const auto& b) { return a.priority > b.priority; });
+
+            // insert into top_k. linear scan since k is very small
+            if (top_k.size() < k) {
+                top_k.push_back({score, e});
+            } else {
+                // find the smallest in top_k and replace if we found a new min
+                size_t min_idx = 0;
+                for (size_t i = 1; i < k; ++i)
+                    if (top_k[i].first < top_k[min_idx].first)
+                        min_idx = i;
+
+                if (score > top_k[min_idx].first) {
+                    top_k[min_idx] = {score, e};
+                }
+            }
+        }

         expr_ref_vector top_lits(m);
-        for (const auto& node: candidates) {
-            if (ctx->get_assignment(node.key) != l_undef)
-                continue;
-
-            if (m_config.m_cube_initial_only && node.key >= m_num_initial_atoms) {
-                LOG_WORKER(2, " Skipping non-initial atom from cube: " << node.key << "\n");
-                continue; // skip non-initial atoms if configured to do so
-            }
-
-            expr* e = ctx->bool_var2expr(node.key);
-            if (!e)
-                continue;
-
-            top_lits.push_back(expr_ref(e, m));
-            if (top_lits.size() >= k)
-                break;
-        }
-
-        IF_VERBOSE(3, verbose_stream() << "top literals " << top_lits << " head size " << ctx->m_pq_scores.size() << " num vars " << ctx->get_num_bool_vars() << "\n");
+        for (auto& p : top_k)
+            top_lits.push_back(expr_ref(p.second, m));
+
+        IF_VERBOSE(3, verbose_stream() << "top literals " << top_lits << " head size " << ctx->m_lit_scores->size() << " num vars " << ctx->get_num_bool_vars() << "\n");

         return top_lits;
     }
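A brief worked example of the scoring in the new loop, with numbers invented for illustration: suppose a variable v has accumulated polarity scores 0.8 and 0.5. At extraction time its split score is 0.8 * 0.5 = 0.4, and both entries are then halved to 0.4 and 0.25, so a variable that stops appearing in new clauses sees its influence decay geometrically across successive get_split_atoms calls, while k = 2 means only the two highest products survive the linear scan.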