Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
Nikolaj Bjorner 2020-02-18 01:06:00 -10:00
parent 98bd437e46
commit 559c3ca012
5 changed files with 43 additions and 21 deletions

View file

@@ -252,7 +252,6 @@ namespace sat {
break;
}
}
unsigned nbits = 1u << vars.size();
SASSERT(i < vars.size());
v = vars[i];
vars.erase(v);

View file

@@ -165,6 +165,9 @@ namespace smt {
void model_checker::assert_neg_q_m(quantifier * q, expr_ref_vector & sks) {
expr_ref tmp(m);
TRACE("model_checker", tout << "curr_model:\n"; model_pp(tout, *m_curr_model););
if (!m_curr_model->eval(q->get_expr(), tmp, true)) {
return;
}
@@ -395,6 +398,7 @@ namespace smt {
args[i] = e->get_arg(i);
}
tmp = sub(q->get_expr(), num_decls, args.c_ptr());
TRACE("model_checker", tout << "curr_model:\n"; model_pp(tout, *m_curr_model););
m_curr_model->eval(tmp, result, true);
if (strict_rec_fun ? !m.is_true(result) : m.is_false(result)) {
add_instance(q, args, 0, nullptr);
@@ -438,6 +442,9 @@ namespace smt {
m_curr_model = md;
m_value2expr.reset();
TRACE("model_checker", tout << "MODEL_CHECKER INVOKED\n";
tout << "model:\n"; model_pp(tout, *m_curr_model););
md->compress();
TRACE("model_checker", tout << "MODEL_CHECKER INVOKED\n";

View file

@@ -106,6 +106,7 @@ namespace smt {
SASSERT(m_elems.contains(n));
SASSERT(m_inv.empty());
m_elems.erase(n);
TRACE("model_finder", tout << mk_pp(n, m) << "\n";);
m.dec_ref(n);
}
@@ -361,7 +362,7 @@ namespace smt {
void set_else(expr * e) {
SASSERT(!is_mono_proj());
SASSERT(get_root()->m_else == 0);
SASSERT(get_root()->m_else == nullptr);
get_root()->m_else = e;
}
@@ -370,7 +371,7 @@ namespace smt {
}
void set_proj(func_decl * f) {
SASSERT(get_root()->m_proj == 0);
SASSERT(get_root()->m_proj == nullptr);
get_root()->m_proj = f;
}
@@ -539,8 +540,9 @@ namespace smt {
}
// For each instantiation_set, remove entries that do not evaluate to values.
ptr_vector<expr> to_delete;
void cleanup_instantiation_sets() {
ptr_vector<expr> to_delete;
for (node * curr : m_nodes) {
if (curr->is_root()) {
instantiation_set * s = curr->get_instantiation_set();
@@ -549,8 +551,10 @@ namespace smt {
for (auto const& kv : elems) {
expr * n = kv.m_key;
expr * n_val = eval(n, true);
if (!n_val || !m.is_value(n_val))
// if (!n_val || (!m.is_value(n_val) && !m_array.is_array(n_val))) {
if (!n_val || (!m.is_value(n_val))) {
to_delete.push_back(n);
}
}
for (expr* e : to_delete) {
s->remove(e);
@@ -592,19 +596,17 @@ namespace smt {
and the interpretations of the m_else of nodes in n->get_avoid_set()
*/
void collect_exceptions_values(node * n, ptr_buffer<expr> & r) {
ptr_vector<expr> const & exceptions = n->get_exceptions();
ptr_vector<node> const & avoid_set = n->get_avoid_set();
for (expr* e : exceptions) {
for (expr* e : n->get_exceptions()) {
expr * val = eval(e, true);
SASSERT(val != nullptr);
r.push_back(val);
}
for (node* a : avoid_set) {
node * n = a->get_root();
if (!n->is_mono_proj() && n->get_else() != nullptr) {
expr * val = eval(n->get_else(), true);
for (node* a : n->get_avoid_set()) {
node * p = a->get_root();
if (!p->is_mono_proj() && p->get_else() != nullptr) {
expr * val = eval(p->get_else(), true);
SASSERT(val != nullptr);
r.push_back(val);
}

View file

@@ -332,7 +332,7 @@ namespace smt {
}
bool check_quantifier(quantifier* q) {
return m_context.is_relevant(q) && m_context.get_assignment(q) == l_true; // && !m().is_rec_fun_def(q);
return m_context.is_relevant(q) && m_context.get_assignment(q) == l_true;
}
bool quick_check_quantifiers() {

View file

@@ -1143,7 +1143,7 @@ namespace smt {
void theory_bv::new_eq_eh(theory_var v1, theory_var v2) {
TRACE("bv_eq", tout << "new_eq: " << mk_pp(get_enode(v1)->get_owner(), get_manager()) << " = " << mk_pp(get_enode(v2)->get_owner(), get_manager()) << "\n";);
TRACE("bv", tout << "new_eq_eh v" << v1 << " = v" << v2 <<
TRACE("bv", tout << "new_eq_eh v" << v1 << " = v" << v2 << " @ " << get_context().get_scope_level() <<
" relevant1: " << get_context().is_relevant(get_enode(v1)) <<
" relevant2: " << get_context().is_relevant(get_enode(v2)) << "\n";);
m_find.merge(v1, v2);
@@ -1236,7 +1236,7 @@ namespace smt {
void theory_bv::assign_eh(bool_var v, bool is_true) {
atom * a = get_bv2a(v);
TRACE("bv", tout << "assert: v" << v << " #" << get_context().bool_var2expr(v)->get_id() << " is_true: " << is_true << "\n";);
TRACE("bv", tout << "assert: b" << v << " #" << get_context().bool_var2expr(v)->get_id() << " is_true: " << is_true << "\n";);
if (a->is_bit()) {
// The following optimization is not correct.
// Boolean variables created for performing bit-blasting are reused.
@@ -1298,7 +1298,7 @@ namespace smt {
SASSERT(bit != ~bit2);
lbool val2 = ctx.get_assignment(bit2);
TRACE("bv_bit_prop", tout << "propagating #" << get_enode(v2)->get_owner_id() << "[" << idx << "] = " << val2 << "\n";);
TRACE("bv", tout << bit << " " << bit2 << "\n";);
TRACE("bv", tout << bit << " " << bit2 << " " << val << " " << val2 << "\n";);
if (val != val2) {
literal consequent = bit2;
@@ -1327,7 +1327,7 @@ namespace smt {
SASSERT(ctx.get_assignment(antecedent) == l_true);
SASSERT(m_bits[v2][idx].var() == consequent.var());
SASSERT(consequent.var() != antecedent.var());
TRACE("bv_bit_prop", tout << "assigning: "; ctx.display_literal(tout, consequent);
TRACE("bv_bit_prop", tout << "assigning: " << consequent << " @ " << ctx.get_scope_level();
tout << " using "; ctx.display_literal(tout, antecedent);
tout << " #" << get_enode(v1)->get_owner_id() << " #" << get_enode(v2)->get_owner_id() << " idx: " << idx << "\n";
tout << "propagate_eqc: " << propagate_eqc << "\n";);
@@ -1347,6 +1347,18 @@ namespace smt {
body = m.mk_implies(ctx.bool_var2expr(eq.var()), m.mk_implies(ctx.bool_var2expr(consequent.var()), ctx.bool_var2expr(antecedent.var())));
log_axiom_instantiation(body);
}
//
// Issue #3035:
// merge_eh invokes assign_bit, which updates the propagation queue and includes the
// theory axiom for the propagated equality. When relevancy is non-zero, propagation may get
// lost on backtracking because the propagation queue is reset on conflicts.
// An alternative approach is to ensure the propagation queue is chronological with
// backtracking scopes (i.e., it does not get reset, but is shrunk to a previous level,
// with a corresponding qhead indicator).
//
ctx.mark_as_relevant(lits[0]);
ctx.mark_as_relevant(lits[1]);
ctx.mark_as_relevant(lits[2]);
ctx.mk_th_axiom(get_id(), lits.size(), lits.c_ptr());
if (m.has_trace_stream()) m.trace_stream() << "[end-of-instance]\n";
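
Illustrative note (not part of this commit): the issue #3035 comment above suggests keeping the propagation queue chronological with backtracking scopes instead of resetting it on conflicts. A minimal standalone sketch of that idea follows; all names here (scoped_propagation_queue, literal_t, push_scope, pop_scope) are hypothetical and do not correspond to Z3's actual smt::context API.

// Sketch only: a propagation queue that is shrunk to a recorded scope level on
// backtracking rather than cleared, with a qhead marking the next unprocessed entry.
#include <vector>
#include <cstddef>

typedef int literal_t; // stand-in for a propagated literal

class scoped_propagation_queue {
    std::vector<literal_t>   m_queue;   // propagations in chronological order
    std::vector<std::size_t> m_limits;  // queue size recorded at each scope push
    std::size_t              m_qhead = 0; // index of the next entry to process

public:
    void push_scope() { m_limits.push_back(m_queue.size()); }

    // Trim the queue to the size it had at the target level; entries queued
    // below that level are kept, so pending propagations are not lost.
    void pop_scope(unsigned num_scopes) {
        std::size_t old_sz = m_limits[m_limits.size() - num_scopes];
        m_limits.resize(m_limits.size() - num_scopes);
        m_queue.resize(old_sz);
        if (m_qhead > old_sz) m_qhead = old_sz;
    }

    void push(literal_t l) { m_queue.push_back(l); }

    // Process everything between m_qhead and the end of the queue.
    template <typename AssignFn>
    void propagate(AssignFn&& assign) {
        for (; m_qhead < m_queue.size(); ++m_qhead)
            assign(m_queue[m_qhead]);
    }
};
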
@@ -1419,8 +1431,8 @@ namespace smt {
}
void theory_bv::pop_scope_eh(unsigned num_scopes) {
TRACE("bv",tout << num_scopes << "\n";);
m_trail_stack.pop_scope(num_scopes);
TRACE("bv", m_find.display(tout << num_scopes << " @ " << get_context().get_scope_level() << "\n"););
unsigned num_old_vars = get_old_num_vars(num_scopes);
m_bits.shrink(num_old_vars);
m_wpos.shrink(num_old_vars);
@@ -1778,7 +1790,7 @@ namespace smt {
}
bool theory_bv::check_assignment(theory_var v) {
context & ctx = get_context();
context & ctx = get_context();
if (!is_root(v))
return true;
if (!ctx.is_relevant(get_enode(v))) {
@@ -1792,6 +1804,7 @@ namespace smt {
literal_vector const & bits1 = m_bits[v1];
SASSERT(bits1.size() == bits2.size());
unsigned sz = bits1.size();
VERIFY(ctx.is_relevant(get_enode(v1)));
for (unsigned i = 0; i < sz; i++) {
literal bit1 = bits1[i];
literal bit2 = bits2[i];
@@ -1803,10 +1816,11 @@ namespace smt {
display_var(tout, v2);
tout << "relevant: " << ctx.is_relevant(bit1) << " " << ctx.is_relevant(bit2) << "\n";
tout << "val1: " << val1 << " lvl: " << ctx.get_assign_level(bit1.var()) << " bit " << bit1 << "\n";
tout << "val2: " << val2 << " lvl: " << ctx.get_assign_level(bit2.var()) << " bit " << bit2 << "\n";);
tout << "val2: " << val2 << " lvl: " << ctx.get_assign_level(bit2.var()) << " bit " << bit2 << "\n";
tout << "level: " << ctx.get_scope_level() << "\n";
);
VERIFY(val1 == val2);
}
SASSERT(ctx.is_relevant(get_enode(v1)));
v1 = next(v1);
}
while (v1 != v);