From fcfa6baeca651c11ef9aa1892753350cab6139b3 Mon Sep 17 00:00:00 2001
From: Arie Gurfinkel
Date: Wed, 20 Jun 2018 21:15:13 -0400
Subject: [PATCH 01/10] Refactor mk_th_lemma

---
 src/muz/spacer/spacer_proof_utils.cpp | 33 +++++++++++++++------------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp
index 082cc4b5d..995a4b8b3 100644
--- a/src/muz/spacer/spacer_proof_utils.cpp
+++ b/src/muz/spacer/spacer_proof_utils.cpp
@@ -71,6 +71,21 @@ void theory_axiom_reducer::reset() {
     m_pinned.reset();
 }
 
+static proof* mk_th_lemma(ast_manager &m, ptr_buffer<proof> const &parents,
+                          unsigned num_params, parameter const *params) {
+    buffer<parameter> v;
+    for (unsigned i = 1; i < num_params; ++i)
+        v.push_back(params[i]);
+
+    SASSERT(params[0].is_symbol());
+    family_id tid = m.mk_family_id(params[0].get_symbol());
+    SASSERT(tid != null_family_id);
+
+    return m.mk_th_lemma(tid, m.mk_false(),
+                         parents.size(), parents.c_ptr(),
+                         num_params-1, v.c_ptr());
+}
+
 // -- rewrite theory axioms into theory lemmas
 proof_ref theory_axiom_reducer::reduce(proof* pr) {
     proof_post_order pit(pr, m);
     while (pit.hasNext()) {
         proof* p = pit.next();
@@ -108,20 +123,10 @@ proof_ref theory_axiom_reducer::reduce(proof* pr) {
             hyps.push_back(hyp);
         }
 
-        // (b) create farkas lemma. Rebuild parameters since
-        // mk_th_lemma() adds tid as first parameter
-        unsigned num_params = p->get_decl()->get_num_parameters();
-        parameter const* params = p->get_decl()->get_parameters();
-        vector<parameter> parameters;
-        for (unsigned i = 1; i < num_params; ++i) parameters.push_back(params[i]);
-
-        SASSERT(params[0].is_symbol());
-        family_id tid = m.mk_family_id(params[0].get_symbol());
-        SASSERT(tid != null_family_id);
-
-        proof* th_lemma = m.mk_th_lemma(tid, m.mk_false(),
-                                        hyps.size(), hyps.c_ptr(),
-                                        num_params-1, parameters.c_ptr());
+        // (b) Create a theory lemma
+        proof *th_lemma;
+        func_decl *d = p->get_decl();
+        th_lemma = mk_th_lemma(m, hyps, d->get_num_parameters(), d->get_parameters());
         m_pinned.push_back(th_lemma);
         SASSERT(is_arith_lemma(m, th_lemma));
 

From 4ed6783aff22015b1e93a9b9751757b068595344 Mon Sep 17 00:00:00 2001
From: Arie Gurfinkel
Date: Wed, 20 Jun 2018 21:15:59 -0400
Subject: [PATCH 02/10] Formatting only.
No change to code --- src/muz/spacer/spacer_proof_utils.cpp | 890 +++++++++++++------------- 1 file changed, 445 insertions(+), 445 deletions(-) diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp index 995a4b8b3..8dee2ad44 100644 --- a/src/muz/spacer/spacer_proof_utils.cpp +++ b/src/muz/spacer/spacer_proof_utils.cpp @@ -29,35 +29,35 @@ Revision History: namespace spacer { -// arithmetic lemma recognizer -bool is_arith_lemma(ast_manager& m, proof* pr) -{ - // arith lemmas: second parameter specifies exact type of lemma, - // could be "farkas", "triangle-eq", "eq-propagate", - // "assign-bounds", maybe also something else - if (pr->get_decl_kind() == PR_TH_LEMMA) { - func_decl* d = pr->get_decl(); - symbol sym; - return d->get_num_parameters() >= 1 && - d->get_parameter(0).is_symbol(sym) && - sym == "arith"; + // arithmetic lemma recognizer + bool is_arith_lemma(ast_manager& m, proof* pr) + { + // arith lemmas: second parameter specifies exact type of lemma, + // could be "farkas", "triangle-eq", "eq-propagate", + // "assign-bounds", maybe also something else + if (pr->get_decl_kind() == PR_TH_LEMMA) { + func_decl* d = pr->get_decl(); + symbol sym; + return d->get_num_parameters() >= 1 && + d->get_parameter(0).is_symbol(sym) && + sym == "arith"; + } + return false; } - return false; -} // farkas lemma recognizer -bool is_farkas_lemma(ast_manager& m, proof* pr) -{ - if (pr->get_decl_kind() == PR_TH_LEMMA) + bool is_farkas_lemma(ast_manager& m, proof* pr) { - func_decl* d = pr->get_decl(); - symbol sym; - return d->get_num_parameters() >= 2 && - d->get_parameter(0).is_symbol(sym) && sym == "arith" && - d->get_parameter(1).is_symbol(sym) && sym == "farkas"; + if (pr->get_decl_kind() == PR_TH_LEMMA) + { + func_decl* d = pr->get_decl(); + symbol sym; + return d->get_num_parameters() >= 2 && + d->get_parameter(0).is_symbol(sym) && sym == "arith" && + d->get_parameter(1).is_symbol(sym) && sym == "farkas"; + } + return false; } - return false; -} /* @@ -66,495 +66,495 @@ bool is_farkas_lemma(ast_manager& m, proof* pr) * ==================================== */ -void theory_axiom_reducer::reset() { - m_cache.reset(); - m_pinned.reset(); -} - -static proof* mk_th_lemma(ast_manager &m, ptr_buffer const &parents, - unsigned num_params, parameter const *params) { - buffer v; - for (unsigned i = 1; i < num_params; ++i) - v.push_back(params[i]); - - SASSERT(params[0].is_symbol()); - family_id tid = m.mk_family_id(params[0].get_symbol()); - SASSERT(tid != null_family_id); - - return m.mk_th_lemma(tid, m.mk_false(), - parents.size(), parents.c_ptr(), - num_params-1, v.c_ptr()); -} - -// -- rewrite theory axioms into theory lemmas -proof_ref theory_axiom_reducer::reduce(proof* pr) { - proof_post_order pit(pr, m); - while (pit.hasNext()) { - proof* p = pit.next(); - - if (m.get_num_parents(p) == 0 && is_arith_lemma(m, p)) { - // we have an arith-theory-axiom and want to get rid of it - // we need to replace the axiom with - // (a) corresponding hypothesis, - // (b) a theory lemma, and - // (c) a lemma. 
- // Furthermore update data-structures - app *fact = to_app(m.get_fact(p)); - ptr_buffer cls; - if (m.is_or(fact)) { - for (unsigned i = 0, sz = fact->get_num_args(); i < sz; ++i) - cls.push_back(fact->get_arg(i)); - } - else - cls.push_back(fact); - - // (a) create hypothesis - ptr_buffer hyps; - for (unsigned i = 0, sz = cls.size(); i < sz; ++i) { - expr *c; - expr_ref hyp_fact(m); - if (m.is_not(cls[i], c)) - hyp_fact = c; - else - hyp_fact = m.mk_not (cls[i]); - - proof* hyp = m.mk_hypothesis(hyp_fact); - m_pinned.push_back(hyp); - hyps.push_back(hyp); - } - - // (b) Create a theory lemma - proof *th_lemma; - func_decl *d = p->get_decl(); - th_lemma = mk_th_lemma(m, hyps, d->get_num_parameters(), d->get_parameters()); - m_pinned.push_back(th_lemma); - SASSERT(is_arith_lemma(m, th_lemma)); - - // (c) create lemma - proof* res = m.mk_lemma(th_lemma, fact); - m_pinned.push_back(res); - m_cache.insert(p, res); - - SASSERT(m.get_fact(res) == m.get_fact(p)); - } - else { - // proof is dirty, if a sub-proof of one of its premises - // has been transformed - bool dirty = false; - - ptr_buffer args; - for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { - proof *pp, *tmp; - pp = m.get_parent(p, i); - VERIFY(m_cache.find(pp, tmp)); - args.push_back(tmp); - dirty |= (pp != tmp); - } - // if not dirty just use the old step - if (!dirty) m_cache.insert(p, p); - // otherwise create new proof with the corresponding proofs - // of the premises - else { - if (m.has_fact(p)) args.push_back(m.get_fact(p)); - - SASSERT(p->get_decl()->get_arity() == args.size()); - - proof* res = m.mk_app(p->get_decl(), - args.size(), (expr * const*)args.c_ptr()); - m_pinned.push_back(res); - m_cache.insert(p, res); - } - } + void theory_axiom_reducer::reset() { + m_cache.reset(); + m_pinned.reset(); } - proof* res; - VERIFY(m_cache.find(pr,res)); - DEBUG_CODE( - proof_checker pc(m); - expr_ref_vector side(m); - SASSERT(pc.check(res, side)); - ); + static proof* mk_th_lemma(ast_manager &m, ptr_buffer const &parents, + unsigned num_params, parameter const *params) { + buffer v; + for (unsigned i = 1; i < num_params; ++i) + v.push_back(params[i]); - return proof_ref(res, m); -} + SASSERT(params[0].is_symbol()); + family_id tid = m.mk_family_id(params[0].get_symbol()); + SASSERT(tid != null_family_id); + + return m.mk_th_lemma(tid, m.mk_false(), + parents.size(), parents.c_ptr(), + num_params-1, v.c_ptr()); + } + +// -- rewrite theory axioms into theory lemmas + proof_ref theory_axiom_reducer::reduce(proof* pr) { + proof_post_order pit(pr, m); + while (pit.hasNext()) { + proof* p = pit.next(); + + if (m.get_num_parents(p) == 0 && is_arith_lemma(m, p)) { + // we have an arith-theory-axiom and want to get rid of it + // we need to replace the axiom with + // (a) corresponding hypothesis, + // (b) a theory lemma, and + // (c) a lemma. 
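+            // (editor's illustration, derived from the code below: an axiom
+            //  with fact (or C1 ... Cn) becomes
+            //  lemma(th-lemma(hyp(!C1) ... hyp(!Cn) |- false), (or C1 ... Cn)))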
+ // Furthermore update data-structures + app *fact = to_app(m.get_fact(p)); + ptr_buffer cls; + if (m.is_or(fact)) { + for (unsigned i = 0, sz = fact->get_num_args(); i < sz; ++i) + cls.push_back(fact->get_arg(i)); + } + else + cls.push_back(fact); + + // (a) create hypothesis + ptr_buffer hyps; + for (unsigned i = 0, sz = cls.size(); i < sz; ++i) { + expr *c; + expr_ref hyp_fact(m); + if (m.is_not(cls[i], c)) + hyp_fact = c; + else + hyp_fact = m.mk_not (cls[i]); + + proof* hyp = m.mk_hypothesis(hyp_fact); + m_pinned.push_back(hyp); + hyps.push_back(hyp); + } + + // (b) Create a theory lemma + proof *th_lemma; + func_decl *d = p->get_decl(); + th_lemma = mk_th_lemma(m, hyps, d->get_num_parameters(), d->get_parameters()); + m_pinned.push_back(th_lemma); + SASSERT(is_arith_lemma(m, th_lemma)); + + // (c) create lemma + proof* res = m.mk_lemma(th_lemma, fact); + m_pinned.push_back(res); + m_cache.insert(p, res); + + SASSERT(m.get_fact(res) == m.get_fact(p)); + } + else { + // proof is dirty, if a sub-proof of one of its premises + // has been transformed + bool dirty = false; + + ptr_buffer args; + for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { + proof *pp, *tmp; + pp = m.get_parent(p, i); + VERIFY(m_cache.find(pp, tmp)); + args.push_back(tmp); + dirty |= (pp != tmp); + } + // if not dirty just use the old step + if (!dirty) m_cache.insert(p, p); + // otherwise create new proof with the corresponding proofs + // of the premises + else { + if (m.has_fact(p)) args.push_back(m.get_fact(p)); + + SASSERT(p->get_decl()->get_arity() == args.size()); + + proof* res = m.mk_app(p->get_decl(), + args.size(), (expr * const*)args.c_ptr()); + m_pinned.push_back(res); + m_cache.insert(p, res); + } + } + } + + proof* res; + VERIFY(m_cache.find(pr,res)); + DEBUG_CODE( + proof_checker pc(m); + expr_ref_vector side(m); + SASSERT(pc.check(res, side)); + ); + + return proof_ref(res, m); + } /* ------------------------------------------------------------------------- */ /* hypothesis_reducer */ /* ------------------------------------------------------------------------- */ -proof_ref hypothesis_reducer::reduce(proof* pr) { - compute_hypsets(pr); - collect_units(pr); + proof_ref hypothesis_reducer::reduce(proof* pr) { + compute_hypsets(pr); + collect_units(pr); - proof_ref res(reduce_core(pr), m); - SASSERT(res); - reset(); + proof_ref res(reduce_core(pr), m); + SASSERT(res); + reset(); - DEBUG_CODE(proof_checker pc(m); - expr_ref_vector side(m); - SASSERT(pc.check(res, side));); - return res; -} + DEBUG_CODE(proof_checker pc(m); + expr_ref_vector side(m); + SASSERT(pc.check(res, side));); + return res; + } -void hypothesis_reducer::reset() { - m_active_hyps.reset(); - m_units.reset(); - m_cache.reset(); - for (auto t : m_pinned_active_hyps) dealloc(t); - m_pinned_active_hyps.reset(); - m_pinned.reset(); - m_hyp_mark.reset(); - m_open_mark.reset(); - m_visited.reset(); -} + void hypothesis_reducer::reset() { + m_active_hyps.reset(); + m_units.reset(); + m_cache.reset(); + for (auto t : m_pinned_active_hyps) dealloc(t); + m_pinned_active_hyps.reset(); + m_pinned.reset(); + m_hyp_mark.reset(); + m_open_mark.reset(); + m_visited.reset(); + } -void hypothesis_reducer::compute_hypsets(proof *pr) { - ptr_buffer todo; - todo.push_back(pr); + void hypothesis_reducer::compute_hypsets(proof *pr) { + ptr_buffer todo; + todo.push_back(pr); - while (!todo.empty()) { - proof* p = todo.back(); + while (!todo.empty()) { + proof* p = todo.back(); + + if (m_visited.is_marked(p)) { + todo.pop_back(); + continue; + } 
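+            // iterative post-order traversal: unvisited parents are pushed
+            // below and p is re-examined; p is processed only once all of
+            // its parents are visited, so their hypothesis sets exist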
+ + unsigned todo_sz = todo.size(); + for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { + SASSERT(m.is_proof(p->get_arg(i))); + proof *parent = to_app(p->get_arg(i)); + + if (!m_visited.is_marked(parent)) + todo.push_back(parent); + } + if (todo.size() > todo_sz) continue; - if (m_visited.is_marked(p)) { todo.pop_back(); - continue; - } - unsigned todo_sz = todo.size(); - for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { - SASSERT(m.is_proof(p->get_arg(i))); - proof *parent = to_app(p->get_arg(i)); - - if (!m_visited.is_marked(parent)) - todo.push_back(parent); - } - if (todo.size() > todo_sz) continue; - - todo.pop_back(); - - m_visited.mark(p); + m_visited.mark(p); - proof_ptr_vector* active_hyps = nullptr; - // fill both sets - if (m.is_hypothesis(p)) { - // create active_hyps-set for step p - proof_ptr_vector* active_hyps = alloc(proof_ptr_vector); - m_pinned_active_hyps.insert(active_hyps); - m_active_hyps.insert(p, active_hyps); - active_hyps->push_back(p); - m_open_mark.mark(p); - m_hyp_mark.mark(m.get_fact(p)); - continue; - } + proof_ptr_vector* active_hyps = nullptr; + // fill both sets + if (m.is_hypothesis(p)) { + // create active_hyps-set for step p + proof_ptr_vector* active_hyps = alloc(proof_ptr_vector); + m_pinned_active_hyps.insert(active_hyps); + m_active_hyps.insert(p, active_hyps); + active_hyps->push_back(p); + m_open_mark.mark(p); + m_hyp_mark.mark(m.get_fact(p)); + continue; + } - ast_fast_mark1 seen; + ast_fast_mark1 seen; - active_hyps = alloc(proof_ptr_vector); - for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { - proof* parent = m.get_parent(p, i); - // lemmas clear all hypotheses above them - if (m.is_lemma(p)) continue; - for (auto *x : *m_active_hyps.find(parent)) { - if (!seen.is_marked(x)) { - seen.mark(x); - active_hyps->push_back(x); - m_open_mark.mark(p); + active_hyps = alloc(proof_ptr_vector); + for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { + proof* parent = m.get_parent(p, i); + // lemmas clear all hypotheses above them + if (m.is_lemma(p)) continue; + for (auto *x : *m_active_hyps.find(parent)) { + if (!seen.is_marked(x)) { + seen.mark(x); + active_hyps->push_back(x); + m_open_mark.mark(p); + } } } - } - if (active_hyps->empty()) { - dealloc(active_hyps); - m_active_hyps.insert(p, &m_empty_vector); - } - else { - m_pinned_active_hyps.push_back(active_hyps); - m_active_hyps.insert(p, active_hyps); + if (active_hyps->empty()) { + dealloc(active_hyps); + m_active_hyps.insert(p, &m_empty_vector); + } + else { + m_pinned_active_hyps.push_back(active_hyps); + m_active_hyps.insert(p, active_hyps); + } } } -} // collect all units that are hyp-free and are used as hypotheses somewhere // requires that m_active_hyps has been computed -void hypothesis_reducer::collect_units(proof* pr) { + void hypothesis_reducer::collect_units(proof* pr) { - proof_post_order pit(pr, m); - while (pit.hasNext()) { - proof* p = pit.next(); - if (!m.is_hypothesis(p)) { - // collect units that are hyp-free and are used as - // hypotheses in the proof pr - if (!m_open_mark.is_marked(p) && m.has_fact(p) && - m_hyp_mark.is_marked(m.get_fact(p))) - m_units.insert(m.get_fact(p), p); + proof_post_order pit(pr, m); + while (pit.hasNext()) { + proof* p = pit.next(); + if (!m.is_hypothesis(p)) { + // collect units that are hyp-free and are used as + // hypotheses in the proof pr + if (!m_open_mark.is_marked(p) && m.has_fact(p) && + m_hyp_mark.is_marked(m.get_fact(p))) + m_units.insert(m.get_fact(p), p); + } } } -} /** \brief returns 
true if p is an ancestor of q - */ -bool hypothesis_reducer::is_ancestor(proof *p, proof *q) { - if (p == q) return true; - ptr_vector todo; - todo.push_back(q); +*/ + bool hypothesis_reducer::is_ancestor(proof *p, proof *q) { + if (p == q) return true; + ptr_vector todo; + todo.push_back(q); - expr_mark visited; - while (!todo.empty()) { - proof *cur; - cur = todo.back(); - todo.pop_back(); - - if (visited.is_marked(cur)) continue; - - if (cur == p) return true; - visited.mark(cur); - - for (unsigned i = 0, sz = m.get_num_parents(cur); i < sz; ++i) { - todo.push_back(m.get_parent(cur, i)); - } - } - return false; -} - -proof* hypothesis_reducer::reduce_core(proof* pf) { - SASSERT(m.is_false(m.get_fact(pf))); - - proof *res = NULL; - - ptr_vector todo; - todo.push_back(pf); - ptr_buffer args; - bool dirty = false; - - while (true) { - proof *p, *tmp, *pp; - unsigned todo_sz; - - p = todo.back(); - if (m_cache.find(p, tmp)) { + expr_mark visited; + while (!todo.empty()) { + proof *cur; + cur = todo.back(); todo.pop_back(); - continue; - } - dirty = false; - args.reset(); - todo_sz = todo.size(); - for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { - pp = m.get_parent(p, i); - if (m_cache.find(pp, tmp)) { - args.push_back(tmp); - dirty |= pp != tmp; - } else { - todo.push_back(pp); + if (visited.is_marked(cur)) continue; + + if (cur == p) return true; + visited.mark(cur); + + for (unsigned i = 0, sz = m.get_num_parents(cur); i < sz; ++i) { + todo.push_back(m.get_parent(cur, i)); } } + return false; + } - if (todo_sz < todo.size()) continue; + proof* hypothesis_reducer::reduce_core(proof* pf) { + SASSERT(m.is_false(m.get_fact(pf))); - todo.pop_back(); + proof *res = NULL; - // transform the current proof node + ptr_vector todo; + todo.push_back(pf); + ptr_buffer args; + bool dirty = false; - if (m.is_hypothesis(p)) { - // if possible, replace a hypothesis by a unit derivation - if (m_units.find(m.get_fact(p), tmp)) { - // use already transformed proof of the unit if it is available - proof* proof_of_unit; - if (!m_cache.find(tmp, proof_of_unit)) { - proof_of_unit = tmp; + while (true) { + proof *p, *tmp, *pp; + unsigned todo_sz; + + p = todo.back(); + if (m_cache.find(p, tmp)) { + todo.pop_back(); + continue; + } + + dirty = false; + args.reset(); + todo_sz = todo.size(); + for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { + pp = m.get_parent(p, i); + if (m_cache.find(pp, tmp)) { + args.push_back(tmp); + dirty |= pp != tmp; + } else { + todo.push_back(pp); } + } - // make sure hypsets for the unit are computed - // AG: is this needed? - compute_hypsets(proof_of_unit); + if (todo_sz < todo.size()) continue; - // if the transformation doesn't create a cycle, perform it - if (!is_ancestor(p, proof_of_unit)) { - res = proof_of_unit; + todo.pop_back(); + + // transform the current proof node + + if (m.is_hypothesis(p)) { + // if possible, replace a hypothesis by a unit derivation + if (m_units.find(m.get_fact(p), tmp)) { + // use already transformed proof of the unit if it is available + proof* proof_of_unit; + if (!m_cache.find(tmp, proof_of_unit)) { + proof_of_unit = tmp; + } + + // make sure hypsets for the unit are computed + // AG: is this needed? 
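+                    // (presumably yes when proof_of_unit is a node freshly
+                    // created by this reducer: m_open_mark and m_active_hyps
+                    // for it are consulted below and in mk_lemma_core)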
+ compute_hypsets(proof_of_unit); + + // if the transformation doesn't create a cycle, perform it + if (!is_ancestor(p, proof_of_unit)) { + res = proof_of_unit; + } + else { + // -- failed to transform the proof, perhaps bad + // -- choice of the proof of unit + res = p; + } } else { - // -- failed to transform the proof, perhaps bad - // -- choice of the proof of unit + // -- no unit found to replace the hypothesis res = p; } } + + else if (!dirty) {res = p;} + + else if (m.is_lemma(p)) { + // lemma: reduce the premise; remove reduced consequences + // from conclusion + SASSERT(args.size() == 1); + res = mk_lemma_core(args[0], m.get_fact(p)); + // -- re-compute hypsets + compute_hypsets(res); + } + else if (m.is_unit_resolution(p)) { + // unit: reduce untis; reduce the first premise; rebuild + // unit resolution + res = mk_unit_resolution_core(p, args); + // -- re-compute hypsets + compute_hypsets(res); + } else { - // -- no unit found to replace the hypothesis - res = p; + res = mk_proof_core(p, args); + // -- re-compute hypsets + compute_hypsets(res); + } + + SASSERT(res); + m_cache.insert(p, res); + + // bail out as soon as found a sub-proof of false + if (!m_open_mark.is_marked(res) && m.has_fact(res) && m.is_false(m.get_fact(res))) + return res; + } + UNREACHABLE(); + return nullptr; + } + + proof* hypothesis_reducer::mk_lemma_core(proof* premise, expr *fact) { + SASSERT(m.is_false(m.get_fact(premise))); + SASSERT(m_active_hyps.contains(premise)); + + proof_ptr_vector* active_hyps = m_active_hyps.find(premise); + + // if there is no active hypothesis return the premise + if (!m_open_mark.is_marked(premise)) { + // XXX just in case premise might go away + m_pinned.push_back(premise); + return premise; + } + + // add some stability + std::stable_sort(active_hyps->begin(), active_hyps->end(), ast_lt_proc()); + // otherwise, build a disjunction of the negated active hypotheses + // and add a lemma proof step + expr_ref_buffer args(m); + for (auto hyp : *active_hyps) { + expr *hyp_fact, *t; + hyp_fact = m.get_fact(hyp); + if (m.is_not(hyp_fact, t)) + args.push_back(t); + else + args.push_back(m.mk_not(hyp_fact)); + } + + expr_ref lemma(m); + lemma = mk_or(m, args.size(), args.c_ptr()); + + proof* res; + res = m.mk_lemma(premise, lemma); + m_pinned.push_back(res); + return res; + } + + proof* hypothesis_reducer::mk_unit_resolution_core(proof *ures, + ptr_buffer& args) { + // if any literal is false, we don't need a unit resolution step + // This can be the case due to some previous transformations + for (unsigned i = 1, sz = args.size(); i < sz; ++i) { + if (m.is_false(m.get_fact(args[i]))) { + // XXX pin just in case + m_pinned.push_back(args[i]); + return args[i]; } } - else if (!dirty) {res = p;} + proof* arg0 = args[0]; + app *fact0 = to_app(m.get_fact(arg0)); - else if (m.is_lemma(p)) { - // lemma: reduce the premise; remove reduced consequences - // from conclusion - SASSERT(args.size() == 1); - res = mk_lemma_core(args[0], m.get_fact(p)); - // -- re-compute hypsets - compute_hypsets(res); + + ptr_buffer pf_args; + ptr_buffer pf_fact; + pf_args.push_back(arg0); + + // compute literals to be resolved + ptr_buffer lits; + + // fact0 is a literal whenever the original resolution was a + // binary resolution to an empty clause + if (m.get_num_parents(ures) == 2 && m.is_false(m.get_fact(ures))) { + lits.push_back(fact0); } - else if (m.is_unit_resolution(p)) { - // unit: reduce untis; reduce the first premise; rebuild - // unit resolution - res = mk_unit_resolution_core(p, args); - // -- 
re-compute hypsets - compute_hypsets(res); + // fact0 is a literal unless it is a dijsunction + else if (!m.is_or(fact0)) { + lits.push_back(fact0); } + // fact0 is a literal only if it appears as a literal in the + // original resolution else { - res = mk_proof_core(p, args); - // -- re-compute hypsets - compute_hypsets(res); + lits.reset(); + app* ures_fact = to_app(m.get_fact(m.get_parent(ures, 0))); + for (unsigned i = 0, sz = ures_fact->get_num_args(); i < sz; ++i) { + if (ures_fact->get_arg(i) == fact0) { + lits.push_back(fact0); + break; + } + } + if (lits.empty()) { + lits.append(fact0->get_num_args(), fact0->get_args()); + } + } - SASSERT(res); - m_cache.insert(p, res); + // -- find all literals that are resolved on + for (unsigned i = 0, sz = lits.size(); i < sz; ++i) { + bool found = false; + for (unsigned j = 1; j < args.size(); ++j) { + if (m.is_complement(lits.get(i), m.get_fact(args[j]))) { + found = true; + pf_args.push_back(args[j]); + break; + } + } + if (!found) {pf_fact.push_back(lits.get(i));} + } - // bail out as soon as found a sub-proof of false - if (!m_open_mark.is_marked(res) && m.has_fact(res) && m.is_false(m.get_fact(res))) - return res; - } - UNREACHABLE(); - return nullptr; -} - -proof* hypothesis_reducer::mk_lemma_core(proof* premise, expr *fact) { - SASSERT(m.is_false(m.get_fact(premise))); - SASSERT(m_active_hyps.contains(premise)); - - proof_ptr_vector* active_hyps = m_active_hyps.find(premise); - - // if there is no active hypothesis return the premise - if (!m_open_mark.is_marked(premise)) { - // XXX just in case premise might go away - m_pinned.push_back(premise); - return premise; - } - - // add some stability - std::stable_sort(active_hyps->begin(), active_hyps->end(), ast_lt_proc()); - // otherwise, build a disjunction of the negated active hypotheses - // and add a lemma proof step - expr_ref_buffer args(m); - for (auto hyp : *active_hyps) { - expr *hyp_fact, *t; - hyp_fact = m.get_fact(hyp); - if (m.is_not(hyp_fact, t)) - args.push_back(t); - else - args.push_back(m.mk_not(hyp_fact)); - } - - expr_ref lemma(m); - lemma = mk_or(m, args.size(), args.c_ptr()); - - proof* res; - res = m.mk_lemma(premise, lemma); - m_pinned.push_back(res); - return res; -} - -proof* hypothesis_reducer::mk_unit_resolution_core(proof *ures, - ptr_buffer& args) { - // if any literal is false, we don't need a unit resolution step - // This can be the case due to some previous transformations - for (unsigned i = 1, sz = args.size(); i < sz; ++i) { - if (m.is_false(m.get_fact(args[i]))) { + // unit resolution got reduced to noop + if (pf_args.size() == 1) { // XXX pin just in case - m_pinned.push_back(args[i]); - return args[i]; + m_pinned.push_back(arg0); + + return arg0; } + + // make unit resolution proof step + // expr_ref tmp(m); + // tmp = mk_or(m, pf_fact.size(), pf_fact.c_ptr()); + // proof* res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr(), tmp); + proof *res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr()); + m_pinned.push_back(res); + + return res; } - proof* arg0 = args[0]; - app *fact0 = to_app(m.get_fact(arg0)); - - - ptr_buffer pf_args; - ptr_buffer pf_fact; - pf_args.push_back(arg0); - - // compute literals to be resolved - ptr_buffer lits; - - // fact0 is a literal whenever the original resolution was a - // binary resolution to an empty clause - if (m.get_num_parents(ures) == 2 && m.is_false(m.get_fact(ures))) { - lits.push_back(fact0); - } - // fact0 is a literal unless it is a dijsunction - else if (!m.is_or(fact0)) { - 
lits.push_back(fact0); - } - // fact0 is a literal only if it appears as a literal in the - // original resolution - else { - lits.reset(); - app* ures_fact = to_app(m.get_fact(m.get_parent(ures, 0))); - for (unsigned i = 0, sz = ures_fact->get_num_args(); i < sz; ++i) { - if (ures_fact->get_arg(i) == fact0) { - lits.push_back(fact0); - break; + proof* hypothesis_reducer::mk_proof_core(proof* old, ptr_buffer& args) { + // if any of the literals are false, we don't need a step + for (unsigned i = 0; i < args.size(); ++i) { + if (m.is_false(m.get_fact(args[i]))) { + // XXX just in case + m_pinned.push_back(args[i]); + return args[i]; } } - if (lits.empty()) { - lits.append(fact0->get_num_args(), fact0->get_args()); - } + // otherwise build step + // BUG: I guess this doesn't work with quantifiers (since they are no apps) + args.push_back(to_app(m.get_fact(old))); + + SASSERT(old->get_decl()->get_arity() == args.size()); + + proof* res = m.mk_app(old->get_decl(), args.size(), + (expr * const*)args.c_ptr()); + m_pinned.push_back(res); + return res; } - // -- find all literals that are resolved on - for (unsigned i = 0, sz = lits.size(); i < sz; ++i) { - bool found = false; - for (unsigned j = 1; j < args.size(); ++j) { - if (m.is_complement(lits.get(i), m.get_fact(args[j]))) { - found = true; - pf_args.push_back(args[j]); - break; - } - } - if (!found) {pf_fact.push_back(lits.get(i));} - } - - // unit resolution got reduced to noop - if (pf_args.size() == 1) { - // XXX pin just in case - m_pinned.push_back(arg0); - - return arg0; - } - - // make unit resolution proof step - // expr_ref tmp(m); - // tmp = mk_or(m, pf_fact.size(), pf_fact.c_ptr()); - // proof* res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr(), tmp); - proof *res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr()); - m_pinned.push_back(res); - - return res; -} - -proof* hypothesis_reducer::mk_proof_core(proof* old, ptr_buffer& args) { - // if any of the literals are false, we don't need a step - for (unsigned i = 0; i < args.size(); ++i) { - if (m.is_false(m.get_fact(args[i]))) { - // XXX just in case - m_pinned.push_back(args[i]); - return args[i]; - } - } - - // otherwise build step - // BUG: I guess this doesn't work with quantifiers (since they are no apps) - args.push_back(to_app(m.get_fact(old))); - - SASSERT(old->get_decl()->get_arity() == args.size()); - - proof* res = m.mk_app(old->get_decl(), args.size(), - (expr * const*)args.c_ptr()); - m_pinned.push_back(res); - return res; -} - }; From ac23002dce7ca21e7504d911738bf9e7f68a5c3a Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Wed, 20 Jun 2018 23:04:44 -0400 Subject: [PATCH 03/10] Fix bugs in iuc generation --- src/muz/spacer/spacer_iuc_proof.h | 2 +- src/muz/spacer/spacer_unsat_core_plugin.cpp | 83 +++++++++++---------- 2 files changed, 43 insertions(+), 42 deletions(-) diff --git a/src/muz/spacer/spacer_iuc_proof.h b/src/muz/spacer/spacer_iuc_proof.h index a3044ca53..ed36dbf5b 100644 --- a/src/muz/spacer/spacer_iuc_proof.h +++ b/src/muz/spacer/spacer_iuc_proof.h @@ -33,7 +33,7 @@ public: bool is_h_marked(proof* p) {return m_h_mark.is_marked(p);} bool is_b_pure (proof *p) { - return !is_h_marked (p) && is_core_pure(m.get_fact (p)); + return !is_h_marked(p) && !this->is_a_marked(p) && is_core_pure(m.get_fact(p)); } void display_dot(std::ostream &out); diff --git a/src/muz/spacer/spacer_unsat_core_plugin.cpp b/src/muz/spacer/spacer_unsat_core_plugin.cpp index 8fb5feeef..79bafdfeb 100644 --- a/src/muz/spacer/spacer_unsat_core_plugin.cpp +++ 
b/src/muz/spacer/spacer_unsat_core_plugin.cpp @@ -34,15 +34,15 @@ Revision History: namespace spacer { - unsat_core_plugin::unsat_core_plugin(unsat_core_learner& learner): + unsat_core_plugin::unsat_core_plugin(unsat_core_learner& learner): m(learner.m), m_learner(learner) {}; - + void unsat_core_plugin_lemma::compute_partial_core(proof* step) { SASSERT(m_learner.m_pr.is_a_marked(step)); SASSERT(m_learner.m_pr.is_b_marked(step)); - + for (proof* premise : m.get_parents(step)) { - + if (m_learner.is_b_open (premise)) { // by IH, premises that are AB marked are already closed SASSERT(!m_learner.m_pr.is_a_marked(premise)); @@ -51,18 +51,18 @@ namespace spacer { } m_learner.set_closed(step, true); } - + void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const { SASSERT(m_learner.is_b_open(step)); - + ptr_buffer todo; todo.push_back(step); - + while (!todo.empty()) { proof* pf = todo.back(); todo.pop_back(); - + // if current step hasn't been processed, if (!m_learner.is_closed(pf)) { m_learner.set_closed(pf, true); @@ -71,7 +71,7 @@ namespace spacer { // so if it is also a-marked, it must be closed SASSERT(m_learner.m_pr.is_b_marked(pf)); SASSERT(!m_learner.m_pr.is_a_marked(pf)); - + // the current step needs to be interpolated: expr* fact = m.get_fact(pf); // if we trust the current step and we are able to use it @@ -82,11 +82,11 @@ namespace spacer { } // otherwise recurse on premises else { - for (proof* premise : m.get_parents(pf)) - if (m_learner.is_b_open(premise)) + for (proof* premise : m.get_parents(pf)) + if (m_learner.is_b_open(premise)) todo.push_back(premise); } - + } } } @@ -101,15 +101,15 @@ namespace spacer { func_decl* d = step->get_decl(); symbol sym; if (!m_learner.is_closed(step) && // if step is not already interpolated - is_farkas_lemma(m, step)) { - // weaker check: d->get_num_parameters() >= m.get_num_parents(step) + 2 - + is_farkas_lemma(m, step)) { + // weaker check : d->get_num_parameters() >= m.get_num_parents(step) + 2 + SASSERT(d->get_num_parameters() == m.get_num_parents(step) + 2); SASSERT(m.has_fact(step)); - + coeff_lits_t coeff_lits; expr_ref_vector pinned(m); - + /* The farkas lemma represents a subproof starting from premise(-set)s A, BNP and BP(ure) and * ending in a disjunction D. We need to compute the contribution of BP, i.e. a formula, which * is entailed by BP and together with A and BNP entails D. @@ -134,34 +134,35 @@ namespace spacer { * as workaround we take the absolute value of the provided coefficients. 
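             * Illustration (hypothetical numbers): if the B-pure premises are
             * x <= 0 and y <= 0 with Farkas coefficients 1 and 2, the
             * contribution of BP is the consequence x + 2y <= 0: it is entailed
             * by BP and, together with A and BNP, still entails D.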
*/ parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient - - STRACE("spacer.farkas", - verbose_stream() << "Farkas input: "<< "\n"; + + TRACE("spacer.farkas", + tout << "Farkas input: "<< "\n"; for (unsigned i = 0; i < m.get_num_parents(step); ++i) { - proof * prem = m.get_parent(step, i); - rational coef = params[i].get_rational(); + proof * prem = m.get_parent(step, i); + rational coef = params[i].get_rational(); bool b_pure = m_learner.m_pr.is_b_pure (prem); - verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n"; + tout << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n"; } ); - + bool can_be_closed = true; - + for (unsigned i = 0; i < m.get_num_parents(step); ++i) { proof * premise = m.get_parent(step, i); - + if (m_learner.is_b_open (premise)) { SASSERT(!m_learner.m_pr.is_a_marked(premise)); - - if (m_learner.m_pr.is_b_pure (step)) { + + if (m_learner.m_pr.is_b_pure (premise)) { if (!m_use_constant_from_a) { rational coefficient = params[i].get_rational(); coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise))); } } else { + // -- mixed premise, won't be able to close this proof step can_be_closed = false; - + if (m_use_constant_from_a) { rational coefficient = params[i].get_rational(); coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise))); @@ -175,10 +176,10 @@ namespace spacer { } } } - + if (m_use_constant_from_a) { params += m.get_num_parents(step); // point to the first Farkas coefficient, which corresponds to a formula in the conclusion - + // the conclusion can either be a single formula or a disjunction of several formulas, we have to deal with both situations if (m.get_num_parents(step) + 2 < d->get_num_parameters()) { unsigned num_args = 1; @@ -190,7 +191,7 @@ namespace spacer { args = _or->get_args(); } SASSERT(m.get_num_parents(step) + 2 + num_args == d->get_num_parameters()); - + bool_rewriter brw(m); for (unsigned i = 0; i < num_args; ++i) { expr* premise = args[i]; @@ -205,11 +206,11 @@ namespace spacer { } // only if all b-premises can be used directly, add the farkas core and close the step + // AG: this decision needs to be re-evaluated. If the proof cannot be closed, literals above + // AG: it will go into the core. However, it does not mean that this literal should/could not be added. if (can_be_closed) { m_learner.set_closed(step, true); - expr_ref res = compute_linear_combination(coeff_lits); - m_learner.add_lemma_to_core(res); } } @@ -253,7 +254,7 @@ namespace spacer { verbose_stream() << "Farkas input: "<< "\n"; for (unsigned i = 0; i < m.get_num_parents(step); ++i) { proof * prem = m.get_parent(step, i); - rational coef = params[i].get_rational(); + rational coef = params[i].get_rational(); bool b_pure = m_learner.m_pr.is_b_pure (prem); verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m_learner.m) << "\n"; } @@ -340,7 +341,7 @@ namespace spacer { // 4. 
extract linear combinations from matrix and add result to core for (unsigned k = 0; k < i; ++k)// i points to the row after the last row which is non-zero { - coeff_lits_t coeff_lits; + coeff_lits_t coeff_lits; for (unsigned l = 0; l < matrix.num_cols(); ++l) { if (!matrix.get(k,l).is_zero()) { coeff_lits.push_back(std::make_pair(matrix.get(k, l), ordered_basis[l])); @@ -354,14 +355,14 @@ namespace spacer { } - expr_ref unsat_core_plugin_farkas_lemma_optimized::compute_linear_combination(const coeff_lits_t& coeff_lits) { + expr_ref unsat_core_plugin_farkas_lemma_optimized::compute_linear_combination(const coeff_lits_t& coeff_lits) { smt::farkas_util util(m); for (auto const & p : coeff_lits) { util.add(p.first, p.second); } expr_ref negated_linear_combination = util.get(); SASSERT(m.is_not(negated_linear_combination)); - return expr_ref(mk_not(m, negated_linear_combination), m); + return expr_ref(mk_not(m, negated_linear_combination), m); //TODO: rewrite the get-method to return nonnegated stuff? } @@ -449,7 +450,7 @@ namespace spacer { for (unsigned j = 0; j < matrix.num_cols(); ++j) { SASSERT(matrix.get(i, j).is_int()); app_ref a_ij(util.mk_numeral(matrix.get(i,j), true), m); - + app_ref sum(util.mk_int(0), m); for (unsigned k = 0; k < n; ++k) { sum = util.mk_add(sum, util.mk_mul(coeffs[i][k].get(), bounded_vectors[j][k].get())); @@ -458,7 +459,7 @@ namespace spacer { s->assert_expr(eq); } } - + // check result lbool res = s->check_sat(0, nullptr); @@ -682,7 +683,7 @@ namespace spacer { void unsat_core_plugin_min_cut::finalize() { unsigned_vector cut_nodes; m_min_cut.compute_min_cut(cut_nodes); - + for (unsigned cut_node : cut_nodes) { m_learner.add_lemma_to_core(m_node_to_formula[cut_node]); } From 9c9d0d084007d804610029ccbce7b802ed34bb8c Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Wed, 20 Jun 2018 23:07:01 -0400 Subject: [PATCH 04/10] convert assign-bounds axioms to farkas lemmas --- src/muz/spacer/spacer_proof_utils.cpp | 74 +++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 9 deletions(-) diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp index 8dee2ad44..c7d7ffb54 100644 --- a/src/muz/spacer/spacer_proof_utils.cpp +++ b/src/muz/spacer/spacer_proof_utils.cpp @@ -45,7 +45,7 @@ namespace spacer { return false; } -// farkas lemma recognizer + // farkas lemma recognizer bool is_farkas_lemma(ast_manager& m, proof* pr) { if (pr->get_decl_kind() == PR_TH_LEMMA) @@ -59,6 +59,19 @@ namespace spacer { return false; } + static bool is_assign_bounds_lemma(ast_manager &m, proof *pr) { + if (pr->get_decl_kind() == PR_TH_LEMMA) + { + func_decl* d = pr->get_decl(); + symbol sym; + return d->get_num_parameters() >= 2 && + d->get_parameter(0).is_symbol(sym) && sym == "arith" && + d->get_parameter(1).is_symbol(sym) && sym == "assign-bounds"; + } + return false; + } + + /* * ==================================== @@ -71,8 +84,8 @@ namespace spacer { m_pinned.reset(); } - static proof* mk_th_lemma(ast_manager &m, ptr_buffer const &parents, - unsigned num_params, parameter const *params) { + static proof_ref mk_th_lemma(ast_manager &m, ptr_buffer const &parents, + unsigned num_params, parameter const *params) { buffer v; for (unsigned i = 1; i < num_params; ++i) v.push_back(params[i]); @@ -81,12 +94,45 @@ namespace spacer { family_id tid = m.mk_family_id(params[0].get_symbol()); SASSERT(tid != null_family_id); - return m.mk_th_lemma(tid, m.mk_false(), - parents.size(), parents.c_ptr(), - num_params-1, v.c_ptr()); + proof *pf = m.mk_th_lemma(tid, 
m.mk_false(), + parents.size(), parents.c_ptr(), + v.size(), v.c_ptr()); + return proof_ref(pf, m); } -// -- rewrite theory axioms into theory lemmas + // convert assign-bounds lemma to a farkas lemma by adding missing coeff + // assume that missing coeff is for premise at position 0 + static proof_ref mk_fk_from_ab(ast_manager &m, + ptr_buffer const &parents, + unsigned num_params, + parameter const *params) { + SASSERT(num_params == parents.size() + 1 /* one param is missing */); + buffer v; + v.push_back(parameter(symbol("farkas"))); + v.push_back(parameter(rational(1))); + for (unsigned i = 2; i < num_params; ++i) + v.push_back(params[i]); + + SASSERT(params[0].is_symbol()); + family_id tid = m.mk_family_id(params[0].get_symbol()); + SASSERT(tid != null_family_id); + + proof_ref pf(m); + pf = m.mk_th_lemma(tid, m.mk_false(), + parents.size(), parents.c_ptr(), + v.size(), v.c_ptr()); + + SASSERT(is_arith_lemma(m, pf)); + DEBUG_CODE( + proof_checker pc(m); + expr_ref_vector side(m); + SASSERT(pc.check(pf, side)); + ); + return pf; + + } + + /// -- rewrite theory axioms into theory lemmas proof_ref theory_axiom_reducer::reduce(proof* pr) { proof_post_order pit(pr, m); while (pit.hasNext()) { @@ -124,9 +170,19 @@ namespace spacer { } // (b) Create a theory lemma - proof *th_lemma; + proof_ref th_lemma(m); func_decl *d = p->get_decl(); - th_lemma = mk_th_lemma(m, hyps, d->get_num_parameters(), d->get_parameters()); + if (is_assign_bounds_lemma(m, p)) { + + th_lemma = mk_fk_from_ab(m, hyps, + d->get_num_parameters(), + d->get_parameters()); + } + else { + th_lemma = mk_th_lemma(m, hyps, + d->get_num_parameters(), + d->get_parameters()); + } m_pinned.push_back(th_lemma); SASSERT(is_arith_lemma(m, th_lemma)); From 58dc5451e159866f4655184837de7fbf233d5dc2 Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Thu, 21 Jun 2018 17:16:23 -0400 Subject: [PATCH 05/10] iuc code cleanup --- src/muz/spacer/spacer_unsat_core_learner.cpp | 33 ++-- src/muz/spacer/spacer_unsat_core_learner.h | 25 +-- src/muz/spacer/spacer_unsat_core_plugin.cpp | 152 +++++++++---------- src/muz/spacer/spacer_unsat_core_plugin.h | 16 +- 4 files changed, 114 insertions(+), 112 deletions(-) diff --git a/src/muz/spacer/spacer_unsat_core_learner.cpp b/src/muz/spacer/spacer_unsat_core_learner.cpp index da0d6ec34..08f7f7ce8 100644 --- a/src/muz/spacer/spacer_unsat_core_learner.cpp +++ b/src/muz/spacer/spacer_unsat_core_learner.cpp @@ -36,28 +36,27 @@ void unsat_core_learner::register_plugin(unsat_core_plugin* plugin) { } void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) { - // traverse proof proof_post_order it(m_pr.get(), m); while (it.hasNext()) { - proof* currentNode = it.next(); + proof* curr = it.next(); - if (m.get_num_parents(currentNode) > 0) { - bool need_to_mark_closed = true; + bool done = is_closed(curr); + if (done) continue; - for (proof* premise : m.get_parents(currentNode)) { - need_to_mark_closed &= (!m_pr.is_b_marked(premise) || m_closed.is_marked(premise)); - } - - // save result - m_closed.mark(currentNode, need_to_mark_closed); + if (m.get_num_parents(curr) > 0) { + done = true; + for (proof* p : m.get_parents(curr)) done &= !is_b_open(p); + set_closed(curr, done); } - // we have now collected all necessary information, so we can visit the node - // if the node mixes A-reasoning and B-reasoning and contains non-closed premises - if (m_pr.is_a_marked(currentNode) && - m_pr.is_b_marked(currentNode) && - !m_closed.is_marked(currentNode)) { - compute_partial_core(currentNode); // then we need 
to compute a partial core + // we have now collected all necessary information, + // so we can visit the node + // if the node mixes A-reasoning and B-reasoning + // and contains non-closed premises + if (!done) { + if (m_pr.is_a_marked(curr) && m_pr.is_b_marked(curr)) { + compute_partial_core(curr); + } } } @@ -74,7 +73,7 @@ void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) { void unsat_core_learner::compute_partial_core(proof* step) { for (unsat_core_plugin* plugin : m_plugins) { - if (m_closed.is_marked(step)) break; + if (is_closed(step)) break; plugin->compute_partial_core(step); } } diff --git a/src/muz/spacer/spacer_unsat_core_learner.h b/src/muz/spacer/spacer_unsat_core_learner.h index 327b12eb6..42ad71501 100644 --- a/src/muz/spacer/spacer_unsat_core_learner.h +++ b/src/muz/spacer/spacer_unsat_core_learner.h @@ -22,6 +22,7 @@ Revision History: #include "ast/ast.h" #include "muz/spacer/spacer_util.h" #include "muz/spacer/spacer_proof_utils.h" +#include "muz/spacer/spacer_iuc_proof.h" namespace spacer { @@ -31,13 +32,25 @@ namespace spacer { class unsat_core_learner { typedef obj_hashtable expr_set; + ast_manager& m; + iuc_proof& m_pr; + + ptr_vector m_plugins; + ast_mark m_closed; + + expr_ref_vector m_unsat_core; + public: unsat_core_learner(ast_manager& m, iuc_proof& pr) : m(m), m_pr(pr), m_unsat_core(m) {}; virtual ~unsat_core_learner(); - ast_manager& m; - iuc_proof& m_pr; + ast_manager& get_manager() {return m;} + + bool is_a(proof *pr) {return m_pr.is_a_marked(pr);} + bool is_b(proof *pr) {return m_pr.is_b_marked(pr);} + bool is_h(proof *pr) {return m_pr.is_h_marked(pr);} + bool is_b_pure(proof *pr) { return m_pr.is_b_pure(pr);} /* * register a plugin for computation of partial unsat cores @@ -67,14 +80,6 @@ namespace spacer { void add_lemma_to_core(expr* lemma); private: - ptr_vector m_plugins; - ast_mark m_closed; - - /* - * collects the lemmas of the unsat-core - * will at the end be inserted into unsat_core. 
- */ - expr_ref_vector m_unsat_core; /* * computes partial core for step by delegating computation to plugins diff --git a/src/muz/spacer/spacer_unsat_core_plugin.cpp b/src/muz/spacer/spacer_unsat_core_plugin.cpp index 79bafdfeb..ef97d81df 100644 --- a/src/muz/spacer/spacer_unsat_core_plugin.cpp +++ b/src/muz/spacer/spacer_unsat_core_plugin.cpp @@ -34,27 +34,25 @@ Revision History: namespace spacer { - unsat_core_plugin::unsat_core_plugin(unsat_core_learner& learner): - m(learner.m), m_learner(learner) {}; + unsat_core_plugin::unsat_core_plugin(unsat_core_learner& ctx): + m(ctx.get_manager()), m_ctx(ctx) {}; void unsat_core_plugin_lemma::compute_partial_core(proof* step) { - SASSERT(m_learner.m_pr.is_a_marked(step)); - SASSERT(m_learner.m_pr.is_b_marked(step)); + SASSERT(m_ctx.is_a(step)); + SASSERT(m_ctx.is_b(step)); - for (proof* premise : m.get_parents(step)) { - - if (m_learner.is_b_open (premise)) { + for (auto* p : m.get_parents(step)) { + if (m_ctx.is_b_open (p)) { // by IH, premises that are AB marked are already closed - SASSERT(!m_learner.m_pr.is_a_marked(premise)); - add_lowest_split_to_core(premise); + SASSERT(!m_ctx.is_a(p)); + add_lowest_split_to_core(p); } } - m_learner.set_closed(step, true); + m_ctx.set_closed(step, true); } - void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const - { - SASSERT(m_learner.is_b_open(step)); + void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const { + SASSERT(m_ctx.is_b_open(step)); ptr_buffer todo; todo.push_back(step); @@ -64,44 +62,45 @@ namespace spacer { todo.pop_back(); // if current step hasn't been processed, - if (!m_learner.is_closed(pf)) { - m_learner.set_closed(pf, true); + if (!m_ctx.is_closed(pf)) { + m_ctx.set_closed(pf, true); // the step is b-marked and not closed. // by I.H. 
the step must be already visited // so if it is also a-marked, it must be closed - SASSERT(m_learner.m_pr.is_b_marked(pf)); - SASSERT(!m_learner.m_pr.is_a_marked(pf)); + SASSERT(m_ctx.is_b(pf)); + SASSERT(!m_ctx.is_a(pf)); // the current step needs to be interpolated: expr* fact = m.get_fact(pf); // if we trust the current step and we are able to use it - if (m_learner.m_pr.is_b_pure (pf) && - (m.is_asserted(pf) || is_literal(m, fact))) { + if (m_ctx.is_b_pure (pf) && (m.is_asserted(pf) || is_literal(m, fact))) { // just add it to the core - m_learner.add_lemma_to_core(fact); + m_ctx.add_lemma_to_core(fact); } // otherwise recurse on premises else { for (proof* premise : m.get_parents(pf)) - if (m_learner.is_b_open(premise)) + if (m_ctx.is_b_open(premise)) todo.push_back(premise); } - } } } - void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step) - { - SASSERT(m_learner.m_pr.is_a_marked(step)); - SASSERT(m_learner.m_pr.is_b_marked(step)); + /*** + * FARKAS + */ + void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step) { + SASSERT(m_ctx.is_a(step)); + SASSERT(m_ctx.is_b(step)); // XXX this assertion should be true so there is no need to check for it - SASSERT (!m_learner.is_closed (step)); + SASSERT (!m_ctx.is_closed (step)); func_decl* d = step->get_decl(); symbol sym; - if (!m_learner.is_closed(step) && // if step is not already interpolated - is_farkas_lemma(m, step)) { + TRACE("spacer.farkas", + tout << "looking at: " << mk_pp(step, m) << "\n";); + if (!m_ctx.is_closed(step) && is_farkas_lemma(m, step)) { // weaker check : d->get_num_parameters() >= m.get_num_parents(step) + 2 SASSERT(d->get_num_parameters() == m.get_num_parents(step) + 2); @@ -136,24 +135,24 @@ namespace spacer { parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient TRACE("spacer.farkas", - tout << "Farkas input: "<< "\n"; - for (unsigned i = 0; i < m.get_num_parents(step); ++i) { - proof * prem = m.get_parent(step, i); - rational coef = params[i].get_rational(); - bool b_pure = m_learner.m_pr.is_b_pure (prem); - tout << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n"; - } - ); + tout << "Farkas input: "<< "\n"; + for (unsigned i = 0; i < m.get_num_parents(step); ++i) { + proof * prem = m.get_parent(step, i); + rational coef = params[i].get_rational(); + bool b_pure = m_ctx.is_b_pure (prem); + tout << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n"; + } + ); - bool can_be_closed = true; + bool done = true; for (unsigned i = 0; i < m.get_num_parents(step); ++i) { proof * premise = m.get_parent(step, i); - if (m_learner.is_b_open (premise)) { - SASSERT(!m_learner.m_pr.is_a_marked(premise)); + if (m_ctx.is_b_open (premise)) { + SASSERT(!m_ctx.is_a(premise)); - if (m_learner.m_pr.is_b_pure (premise)) { + if (m_ctx.is_b_pure (premise)) { if (!m_use_constant_from_a) { rational coefficient = params[i].get_rational(); coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise))); @@ -161,7 +160,7 @@ namespace spacer { } else { // -- mixed premise, won't be able to close this proof step - can_be_closed = false; + done = false; if (m_use_constant_from_a) { rational coefficient = params[i].get_rational(); @@ -177,6 +176,7 @@ namespace spacer { } } + // TBD: factor into another method if (m_use_constant_from_a) { params += m.get_num_parents(step); // point to the first Farkas coefficient, which corresponds to a formula in the conclusion @@ -208,11 +208,9 @@ namespace spacer { // only if 
all b-premises can be used directly, add the farkas core and close the step // AG: this decision needs to be re-evaluated. If the proof cannot be closed, literals above // AG: it will go into the core. However, it does not mean that this literal should/could not be added. - if (can_be_closed) { - m_learner.set_closed(step, true); - expr_ref res = compute_linear_combination(coeff_lits); - m_learner.add_lemma_to_core(res); - } + m_ctx.set_closed(step, done); + expr_ref res = compute_linear_combination(coeff_lits); + m_ctx.add_lemma_to_core(res); } } @@ -236,12 +234,12 @@ namespace spacer { void unsat_core_plugin_farkas_lemma_optimized::compute_partial_core(proof* step) { - SASSERT(m_learner.m_pr.is_a_marked(step)); - SASSERT(m_learner.m_pr.is_b_marked(step)); + SASSERT(m_ctx.is_a(step)); + SASSERT(m_ctx.is_b(step)); func_decl* d = step->get_decl(); symbol sym; - if (!m_learner.is_closed(step) && // if step is not already interpolated + if (!m_ctx.is_closed(step) && // if step is not already interpolated is_farkas_lemma(m, step)) { SASSERT(d->get_num_parameters() == m.get_num_parents(step) + 2); SASSERT(m.has_fact(step)); @@ -250,25 +248,25 @@ namespace spacer { parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient - STRACE("spacer.farkas", - verbose_stream() << "Farkas input: "<< "\n"; - for (unsigned i = 0; i < m.get_num_parents(step); ++i) { - proof * prem = m.get_parent(step, i); - rational coef = params[i].get_rational(); - bool b_pure = m_learner.m_pr.is_b_pure (prem); - verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m_learner.m) << "\n"; - } - ); + TRACE("spacer.farkas", + tout << "Farkas input: "<< "\n"; + for (unsigned i = 0; i < m.get_num_parents(step); ++i) { + proof * prem = m.get_parent(step, i); + rational coef = params[i].get_rational(); + bool b_pure = m_ctx.is_b_pure (prem); + tout << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n"; + } + ); bool can_be_closed = true; for (unsigned i = 0; i < m.get_num_parents(step); ++i) { proof * premise = m.get_parent(step, i); - if (m_learner.m_pr.is_b_marked(premise) && !m_learner.is_closed(premise)) + if (m_ctx.is_b(premise) && !m_ctx.is_closed(premise)) { - SASSERT(!m_learner.m_pr.is_a_marked(premise)); + SASSERT(!m_ctx.is_a(premise)); - if (m_learner.m_pr.is_b_pure(premise)) + if (m_ctx.is_b_pure(premise)) { rational coefficient = params[i].get_rational(); linear_combination.push_back @@ -284,7 +282,7 @@ namespace spacer { // only if all b-premises can be used directly, close the step and add linear combinations for later processing if (can_be_closed) { - m_learner.set_closed(step, true); + m_ctx.set_closed(step, true); if (!linear_combination.empty()) { m_linear_combinations.push_back(linear_combination); @@ -350,7 +348,7 @@ namespace spacer { SASSERT(!coeff_lits.empty()); expr_ref linear_combination = compute_linear_combination(coeff_lits); - m_learner.add_lemma_to_core(linear_combination); + m_ctx.add_lemma_to_core(linear_combination); } } @@ -481,7 +479,7 @@ namespace spacer { SASSERT(!coeff_lits.empty()); // since then previous outer loop would have found solution already expr_ref linear_combination = compute_linear_combination(coeff_lits); - m_learner.add_lemma_to_core(linear_combination); + m_ctx.add_lemma_to_core(linear_combination); } return; } @@ -505,10 +503,10 @@ namespace spacer { { ptr_vector todo; - SASSERT(m_learner.m_pr.is_a_marked(step)); - SASSERT(m_learner.m_pr.is_b_marked(step)); + SASSERT(m_ctx.is_a(step)); + 
SASSERT(m_ctx.is_b(step)); SASSERT(m.get_num_parents(step) > 0); - SASSERT(!m_learner.is_closed(step)); + SASSERT(!m_ctx.is_closed(step)); todo.push_back(step); while (!todo.empty()) @@ -517,7 +515,7 @@ namespace spacer { todo.pop_back(); // if we need to deal with the node and if we haven't added the corresponding edges so far - if (!m_learner.is_closed(current) && !m_visited.is_marked(current)) + if (!m_ctx.is_closed(current) && !m_visited.is_marked(current)) { // compute smallest subproof rooted in current, which has only good edges // add an edge from current to each leaf of that subproof @@ -528,7 +526,7 @@ namespace spacer { } } - m_learner.set_closed(step, true); + m_ctx.set_closed(step, true); } @@ -539,7 +537,7 @@ namespace spacer { ptr_buffer todo_subproof; for (proof* premise : m.get_parents(step)) { - if (m_learner.m_pr.is_b_marked(premise)) { + if (m_ctx.is_b(premise)) { todo_subproof.push_back(premise); } } @@ -549,21 +547,21 @@ namespace spacer { todo_subproof.pop_back(); // if we need to deal with the node - if (!m_learner.is_closed(current)) + if (!m_ctx.is_closed(current)) { - SASSERT(!m_learner.m_pr.is_a_marked(current)); // by I.H. the step must be already visited + SASSERT(!m_ctx.is_a(current)); // by I.H. the step must be already visited // and the current step needs to be interpolated: - if (m_learner.m_pr.is_b_marked(current)) + if (m_ctx.is_b(current)) { // if we trust the current step and we are able to use it - if (m_learner.m_pr.is_b_pure (current) && + if (m_ctx.is_b_pure (current) && (m.is_asserted(current) || is_literal(m, m.get_fact(current)))) { // we found a leaf of the subproof, so // 1) we add corresponding edges - if (m_learner.m_pr.is_a_marked(step)) + if (m_ctx.is_a(step)) { add_edge(nullptr, current); // current is sink } @@ -685,7 +683,7 @@ namespace spacer { m_min_cut.compute_min_cut(cut_nodes); for (unsigned cut_node : cut_nodes) { - m_learner.add_lemma_to_core(m_node_to_formula[cut_node]); + m_ctx.add_lemma_to_core(m_node_to_formula[cut_node]); } } } diff --git a/src/muz/spacer/spacer_unsat_core_plugin.h b/src/muz/spacer/spacer_unsat_core_plugin.h index c05bcc5b9..746f21b19 100644 --- a/src/muz/spacer/spacer_unsat_core_plugin.h +++ b/src/muz/spacer/spacer_unsat_core_plugin.h @@ -35,14 +35,14 @@ namespace spacer { virtual ~unsat_core_plugin() {}; virtual void compute_partial_core(proof* step) = 0; virtual void finalize(){}; - - unsat_core_learner& m_learner; + + unsat_core_learner& m_ctx; }; - class unsat_core_plugin_lemma : public unsat_core_plugin { + class unsat_core_plugin_lemma : public unsat_core_plugin { public: - unsat_core_plugin_lemma(unsat_core_learner& learner) : unsat_core_plugin(learner){}; - void compute_partial_core(proof* step) override; + unsat_core_plugin_lemma(unsat_core_learner& learner) : unsat_core_plugin(learner){}; + void compute_partial_core(proof* step) override; private: void add_lowest_split_to_core(proof* step) const; }; @@ -54,7 +54,7 @@ namespace spacer { bool use_constant_from_a=true) : unsat_core_plugin(learner), m_split_literals(split_literals), - m_use_constant_from_a(use_constant_from_a) {}; + m_use_constant_from_a(use_constant_from_a) {}; void compute_partial_core(proof* step) override; private: bool m_split_literals; @@ -64,8 +64,8 @@ namespace spacer { */ expr_ref compute_linear_combination(const coeff_lits_t& coeff_lits); }; - - class unsat_core_plugin_farkas_lemma_optimized : public unsat_core_plugin { + + class unsat_core_plugin_farkas_lemma_optimized : public unsat_core_plugin { public: 
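        // collects one Farkas linear combination per closable proof step and
        // post-processes all of them in finalize() (matrix-based reduction)
        // before adding the results to the unsat core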
unsat_core_plugin_farkas_lemma_optimized(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner) {}; void compute_partial_core(proof* step) override; From 7b2ca769ef75b46a5fff7fa4fd32f5673f7f8b61 Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Thu, 21 Jun 2018 23:36:37 -0400 Subject: [PATCH 06/10] Cleanup --- src/muz/spacer/spacer_proof_utils.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp index c7d7ffb54..aa7f611c8 100644 --- a/src/muz/spacer/spacer_proof_utils.cpp +++ b/src/muz/spacer/spacer_proof_utils.cpp @@ -94,10 +94,11 @@ namespace spacer { family_id tid = m.mk_family_id(params[0].get_symbol()); SASSERT(tid != null_family_id); - proof *pf = m.mk_th_lemma(tid, m.mk_false(), - parents.size(), parents.c_ptr(), - v.size(), v.c_ptr()); - return proof_ref(pf, m); + proof_ref pf(m); + pf = m.mk_th_lemma(tid, m.mk_false(), + parents.size(), parents.c_ptr(), + v.size(), v.c_ptr()); + return pf; } // convert assign-bounds lemma to a farkas lemma by adding missing coeff From 1764bb8785ed7b1c9262236198672b7042a42d9f Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Sat, 23 Jun 2018 00:05:14 -0400 Subject: [PATCH 07/10] Cleaning up unsat_core_learner --- src/muz/spacer/spacer_unsat_core_learner.cpp | 5 +---- src/muz/spacer/spacer_unsat_core_learner.h | 10 ++++++++-- src/muz/spacer/spacer_unsat_core_plugin.cpp | 1 + 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/muz/spacer/spacer_unsat_core_learner.cpp b/src/muz/spacer/spacer_unsat_core_learner.cpp index 08f7f7ce8..730ef1629 100644 --- a/src/muz/spacer/spacer_unsat_core_learner.cpp +++ b/src/muz/spacer/spacer_unsat_core_learner.cpp @@ -54,7 +54,7 @@ void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) { // if the node mixes A-reasoning and B-reasoning // and contains non-closed premises if (!done) { - if (m_pr.is_a_marked(curr) && m_pr.is_b_marked(curr)) { + if (is_a(curr) && is_b(curr)) { compute_partial_core(curr); } } @@ -92,9 +92,6 @@ void unsat_core_learner::set_closed(proof* p, bool value) { m_closed.mark(p, value); } -bool unsat_core_learner::is_b_open(proof *p) { - return m_pr.is_b_marked(p) && !is_closed (p); -} void unsat_core_learner::add_lemma_to_core(expr* lemma) { m_unsat_core.push_back(lemma); diff --git a/src/muz/spacer/spacer_unsat_core_learner.h b/src/muz/spacer/spacer_unsat_core_learner.h index 42ad71501..ead430ca0 100644 --- a/src/muz/spacer/spacer_unsat_core_learner.h +++ b/src/muz/spacer/spacer_unsat_core_learner.h @@ -47,10 +47,17 @@ namespace spacer { ast_manager& get_manager() {return m;} - bool is_a(proof *pr) {return m_pr.is_a_marked(pr);} + + bool is_a(proof *pr) { + // AG: treat hypotheses as A + // AG: assume that all B-hyp have been eliminated + // AG: this is not yet true in case of arithmetic eq_prop + return m_pr.is_a_marked(pr) || is_h(pr); + } bool is_b(proof *pr) {return m_pr.is_b_marked(pr);} bool is_h(proof *pr) {return m_pr.is_h_marked(pr);} bool is_b_pure(proof *pr) { return m_pr.is_b_pure(pr);} + bool is_b_open(proof *p) {return m_pr.is_b_marked(p) && !is_closed (p);} /* * register a plugin for computation of partial unsat cores @@ -72,7 +79,6 @@ namespace spacer { bool is_closed(proof* p); void set_closed(proof* p, bool value); - bool is_b_open (proof *p); /* * adds a lemma to the unsat core diff --git a/src/muz/spacer/spacer_unsat_core_plugin.cpp b/src/muz/spacer/spacer_unsat_core_plugin.cpp index ef97d81df..aeb509c2e 100644 --- 
a/src/muz/spacer/spacer_unsat_core_plugin.cpp +++ b/src/muz/spacer/spacer_unsat_core_plugin.cpp @@ -210,6 +210,7 @@ namespace spacer { // AG: it will go into the core. However, it does not mean that this literal should/could not be added. m_ctx.set_closed(step, done); expr_ref res = compute_linear_combination(coeff_lits); + TRACE("spacer.farkas", tout << "Farkas core: " << res << "\n";); m_ctx.add_lemma_to_core(res); } } From 8e57ab5d9753f4eb1b650e25656dde694efdd74d Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Sat, 23 Jun 2018 00:05:53 -0400 Subject: [PATCH 08/10] Computing missing coeff for assign-bounds lemma --- src/muz/spacer/spacer_proof_utils.cpp | 222 +++++++++++++++++++++++++- 1 file changed, 218 insertions(+), 4 deletions(-) diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp index aa7f611c8..ea91a7d04 100644 --- a/src/muz/spacer/spacer_proof_utils.cpp +++ b/src/muz/spacer/spacer_proof_utils.cpp @@ -73,6 +73,129 @@ namespace spacer { + class linear_combinator { + struct scaled_lit { + bool is_pos; + app *lit; + rational coeff; + scaled_lit(bool is_pos, app *lit, const rational &coeff) : + is_pos(is_pos), lit(lit), coeff(coeff) {} + }; + ast_manager &m; + th_rewriter m_rw; + arith_util m_arith; + expr_ref m_sum; + bool m_is_strict; + rational m_lc; + vector<scaled_lit> m_lits; + public: + linear_combinator(ast_manager &m) : m(m), m_rw(m), m_arith(m), + m_sum(m), m_is_strict(false), + m_lc(1) {} + + void add_lit(app* lit, rational const &coeff, bool is_pos = true) { + m_lits.push_back(scaled_lit(is_pos, lit, coeff)); + } + + void normalize_coeff() { + for (auto &lit : m_lits) + m_lc = lcm(m_lc, denominator(lit.coeff)); + if (!m_lc.is_one()) { + for (auto &lit : m_lits) + lit.coeff *= m_lc; + } + } + + rational const &lc() const {return m_lc;} + + bool process_lit(scaled_lit &lit0) { + arith_util a(m); + app* lit = lit0.lit; + rational &coeff = lit0.coeff; + bool is_pos = lit0.is_pos; + + + if (m.is_not(lit)) { + lit = to_app(lit->get_arg(0)); + is_pos = !is_pos; + } + if (!m_arith.is_le(lit) && !m_arith.is_lt(lit) && + !m_arith.is_ge(lit) && !m_arith.is_gt(lit) && !m.is_eq(lit)) { + return false; + } + SASSERT(lit->get_num_args() == 2); + sort* s = m.get_sort(lit->get_arg(0)); + bool is_int = m_arith.is_int(s); + if (!is_int && m_arith.is_int_expr(lit->get_arg(0))) { + is_int = true; + s = m_arith.mk_int(); + } + + if (!is_int && is_pos && (m_arith.is_gt(lit) || m_arith.is_lt(lit))) { + m_is_strict = true; + } + if (!is_int && !is_pos && (m_arith.is_ge(lit) || m_arith.is_le(lit))) { + m_is_strict = true; + } + + + SASSERT(m_arith.is_int(s) || m_arith.is_real(s)); + expr_ref sign1(m), sign2(m), term(m); + sign1 = m_arith.mk_numeral(m.is_eq(lit)?coeff:abs(coeff), s); + sign2 = m_arith.mk_numeral(m.is_eq(lit)?-coeff:-abs(coeff), s); + if (!m_sum.get()) { + m_sum = m_arith.mk_numeral(rational(0), s); + } + + expr* a0 = lit->get_arg(0); + expr* a1 = lit->get_arg(1); + + if (is_pos && (m_arith.is_ge(lit) || m_arith.is_gt(lit))) { + std::swap(a0, a1); + } + if (!is_pos && (m_arith.is_le(lit) || m_arith.is_lt(lit))) { + std::swap(a0, a1); + } + + // + // Multiplying by coefficients over strict + // and non-strict inequalities: + // + // (a <= b) * 2 + // (a - b <= 0) * 2 + // (2a - 2b <= 0) + + // (a < b) * 2 <=> + // (a +1 <= b) * 2 <=> + // 2a + 2 <= 2b <=> + // 2a+2-2b <= 0 + + bool strict_ineq = + is_pos?(m_arith.is_gt(lit) || m_arith.is_lt(lit)):(m_arith.is_ge(lit) || m_arith.is_le(lit)); + + if (is_int && strict_ineq) { + m_sum = m_arith.mk_add(m_sum,
sign1); + } + + term = m_arith.mk_mul(sign1, a0); + m_sum = m_arith.mk_add(m_sum, term); + term = m_arith.mk_mul(sign2, a1); + m_sum = m_arith.mk_add(m_sum, term); + + m_rw(m_sum); + return true; + } + + expr_ref operator()(){ + if (!m_sum) normalize_coeff(); + m_sum.reset(); + for (auto &lit : m_lits) { + if (!process_lit(lit)) return expr_ref(m); + } + return m_sum; + } + }; + /* * ==================================== * methods for transforming proofs @@ -101,6 +224,60 @@ namespace spacer { return pf; } + static bool match_mul(expr *e, expr_ref &var, expr_ref &val, arith_util &a) { + expr *e1 = nullptr, *e2 = nullptr; + if (!a.is_mul(e, e1, e2)) { + if (a.is_numeral(e)) return false; + if (!var || var == e) { + var = e; + val = a.mk_numeral(rational(1), get_sort(e)); + return true; + } + return false; + } + + if (!a.is_numeral(e1)) std::swap(e1, e2); + if (!a.is_numeral(e1)) return false; + + // if variable is given, match it as well + if (!var || var == e2) { + var = e2; + val = e1; + return true; + } + return false; + } + + static expr_ref get_coeff(expr *lit0, expr_ref &var) { + ast_manager &m = var.m(); + arith_util a(m); + + expr *lit = nullptr; + if (!m.is_not(lit0, lit)) lit = lit0; + + expr *e1 = nullptr, *e2 = nullptr; + // assume e2 is numeral and ignore it + if ((a.is_le(lit, e1, e2) || a.is_lt(lit, e1, e2) || + a.is_ge(lit, e1, e2) || a.is_gt(lit, e1, e2) || + m.is_eq(lit, e1, e2))) { + if (a.is_numeral(e1)) std::swap(e1, e2); + } + else { + e1 = lit; + } + + expr_ref val(m); + if (!a.is_add(e1)) { + if (match_mul(e1, var, val, a)) return val; + } + else { + for (auto *arg : *to_app(e1)) { + if (match_mul(arg, var, val, a)) return val; + } + } + return expr_ref(m); + } + // convert assign-bounds lemma to a farkas lemma by adding missing coeff // assume that missing coeff is for premise at position 0 static proof_ref mk_fk_from_ab(ast_manager &m, @@ -108,9 +285,44 @@ unsigned num_params, parameter const *params) { SASSERT(num_params == parents.size() + 1 /* one param is missing */); + + // compute missing coefficient + linear_combinator lcb(m); + for (unsigned i = 1, sz = parents.size(); i < sz; ++i) { + app *p = to_app(m.get_fact(parents.get(i))); + rational const &r = params[i+1].get_rational(); + lcb.add_lit(p, r); + } + + TRACE("spacer.fkab", + tout << "lit0 is: " << mk_pp(m.get_fact(parents.get(0)), m) << "\n" + << "LCB is: " << lcb() << "\n";); + + expr_ref var(m), val1(m), val2(m); + val1 = get_coeff(m.get_fact(parents.get(0)), var); + val2 = get_coeff(lcb(), var); + TRACE("spacer.fkab", + tout << "var: " << var + << " val1: " << val1 << " val2: " << val2 << "\n";); + + rational rat1, rat2, coeff0; + arith_util a(m); + if (a.is_numeral(val1, rat1) && a.is_numeral(val2, rat2)) { + coeff0 = abs(rat2/rat1); + coeff0 = coeff0 / lcb.lc(); + TRACE("spacer.fkab", tout << "coeff0: " << coeff0 << "\n";); + } + else { + IF_VERBOSE(1, verbose_stream() + << "\n\n\nFAILED TO FIND COEFFICIENT\n\n\n";); + // failed to find a coefficient + return proof_ref(m); + } + + buffer<parameter> v; v.push_back(parameter(symbol("farkas"))); - v.push_back(parameter(rational(1))); + v.push_back(parameter(coeff0)); for (unsigned i = 2; i < num_params; ++i) v.push_back(params[i]); @@ -124,11 +336,11 @@ v.size(), v.c_ptr()); SASSERT(is_arith_lemma(m, pf)); + DEBUG_CODE( proof_checker pc(m); expr_ref_vector side(m); - SASSERT(pc.check(pf, side)); - ); + ENSURE(pc.check(pf, side));); return pf; } @@ -179,7 +391,9 @@ namespace spacer { d->get_num_parameters(),
d->get_parameters()); } - else { + + // fall back to th-lemma + if (!th_lemma) { th_lemma = mk_th_lemma(m, hyps, d->get_num_parameters(), d->get_parameters()); From e9069309229259f9c13b38d01f4ca00104351772 Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Sat, 23 Jun 2018 00:06:27 -0400 Subject: [PATCH 09/10] Debug code --- src/muz/spacer/spacer_iuc_solver.cpp | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/src/muz/spacer/spacer_iuc_solver.cpp b/src/muz/spacer/spacer_iuc_solver.cpp index a68db4c0a..27b8ee357 100644 --- a/src/muz/spacer/spacer_iuc_solver.cpp +++ b/src/muz/spacer/spacer_iuc_solver.cpp @@ -322,6 +322,24 @@ void iuc_solver::get_iuc(expr_ref_vector &core) // -- new hypothesis reducer else { +#if 0 + static unsigned bcnt = 0; + { + bcnt++; + TRACE("spacer", tout << "Dumping pf bcnt: " << bcnt << "\n";); + if (bcnt == 123) { + std::ofstream ofs; + ofs.open("/tmp/bpf_" + std::to_string(bcnt) + ".dot"); + iuc_proof iuc_pf_before(m, res.get(), core_lits); + iuc_pf_before.display_dot(ofs); + ofs.close(); + + proof_checker pc(m); + expr_ref_vector side(m); + ENSURE(pc.check(res, side)); + } + } +#endif scoped_watch _t_ (m_hyp_reduce2_sw); // pre-process proof for better iuc extraction @@ -356,6 +374,22 @@ void iuc_solver::get_iuc(expr_ref_vector &core) iuc_proof iuc_pf(m, res, core_lits); +#if 0 + static unsigned cnt = 0; + { + cnt++; + TRACE("spacer", tout << "Dumping pf cnt: " << cnt << "\n";); + if (cnt == 123) { + std::ofstream ofs; + ofs.open("/tmp/pf_" + std::to_string(cnt) + ".dot"); + iuc_pf.display_dot(ofs); + ofs.close(); + proof_checker pc(m); + expr_ref_vector side(m); + ENSURE(pc.check(res, side)); + } + } +#endif unsat_core_learner learner(m, iuc_pf); unsat_core_plugin* plugin; From f330b96a3555b485a12acfc5c3a2f597b4c4313a Mon Sep 17 00:00:00 2001 From: Arie Gurfinkel Date: Sun, 24 Jun 2018 21:03:09 -0400 Subject: [PATCH 10/10] Gracefully failing in assign-bounds to farkas --- src/muz/spacer/spacer_proof_utils.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp index ea91a7d04..dc3bbf30c 100644 --- a/src/muz/spacer/spacer_proof_utils.cpp +++ b/src/muz/spacer/spacer_proof_utils.cpp @@ -307,7 +307,10 @@ namespace spacer { rational rat1, rat2, coeff0; arith_util a(m); - if (a.is_numeral(val1, rat1) && a.is_numeral(val2, rat2)) { + CTRACE("spacer.fkab", !(val1 && val2), + tout << "Failed to match variables\n";); + if (val1 && val2 && + a.is_numeral(val1, rat1) && a.is_numeral(val2, rat2)) { coeff0 = abs(rat2/rat1); coeff0 = coeff0 / lcb.lc(); TRACE("spacer.fkab", tout << "coeff0: " << coeff0 << "\n";);
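A note for readers following PATCH 08: the condition behind mk_fk_from_ab can be stated compactly. This is an informal summary, using notation that does not appear in the patch itself. A farkas step over premises p_0, ..., p_n is valid when non-negative coefficients scale the premises into a contradiction:

\[ \sum_{i=0}^{n} c_i \, p_i \;\vdash\; \bot, \qquad c_i \ge 0. \]

An assign-bounds lemma carries c_1, ..., c_n but omits c_0. Let a_0 be the coefficient of the pivot variable x in p_0 (extracted by get_coeff), let s be its coefficient in the combination of the remaining premises built by linear_combinator, and let \ell be the lcm that normalize_coeff uses to clear denominators. Forcing x to cancel gives

\[ c_0 \;=\; \frac{\lvert s / a_0 \rvert}{\ell}, \]

which is the computation coeff0 = abs(rat2/rat1) / lcb.lc() in the patch; the rebuilt lemma is then re-validated with proof_checker rather than trusted.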
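The same computation on a concrete toy lemma, as a self-contained sketch. The struct, the numbers, and the integer-only arithmetic are assumptions made for the example; none of this is z3 code (the patch works over rationals via the rational class).

#include <cstdint>
#include <iostream>
#include <vector>

// One premise of a hypothetical assign-bounds lemma: the coefficient of the
// pivot variable x inside the literal, and the farkas coefficient the lemma
// already carries for that premise.
struct premise { std::int64_t pivot_coeff; std::int64_t farkas_coeff; };

int main() {
    // Premise 0 is (x <= 2): pivot coefficient 1, farkas coefficient unknown.
    // Premises 1 and 2 are, say, (3x >= y) and (y >= 6) with coefficients 1, 1.
    std::vector<premise> rest = { {3, 1}, {0, 1} };

    // Mirror of linear_combinator: only the pivot coefficient of the scaled
    // sum matters here (get_coeff extracts it from the symbolic sum).
    std::int64_t s = 0;
    for (const premise &p : rest) s += p.pivot_coeff * p.farkas_coeff;

    // Scale premise 0 so the x-terms cancel. With rational coefficients the
    // patch also divides by lcb.lc(), the lcm clearing denominators; over
    // the integers of this toy example that factor is 1.
    std::int64_t a0 = 1;            // coefficient of x in premise 0
    std::int64_t coeff0 = s / a0;   // |3/1| = 3
    if (coeff0 < 0) coeff0 = -coeff0;
    std::cout << "recovered farkas coefficient: " << coeff0 << "\n";
    return 0;
}

Whether the scaled sum is actually contradictory is not decided here: as in the patch, that check belongs to proof_checker, whose DEBUG_CODE/ENSURE guard makes a wrongly recovered coefficient fail fast, and PATCH 10 turns the remaining failure mode (no matchable coefficient at all) into a graceful return of a null proof_ref.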