diff --git a/src/muz/spacer/spacer_iuc_proof.cpp b/src/muz/spacer/spacer_iuc_proof.cpp index 1123a7c76..6033c744f 100644 --- a/src/muz/spacer/spacer_iuc_proof.cpp +++ b/src/muz/spacer/spacer_iuc_proof.cpp @@ -1,9 +1,12 @@ +#include <unordered_map> +#include "ast/ast_pp_dot.h" + #include "muz/spacer/spacer_iuc_proof.h" #include "ast/for_each_expr.h" #include "ast/array_decl_plugin.h" #include "ast/proofs/proof_utils.h" #include "muz/spacer/spacer_proof_utils.h" - +#include "muz/spacer/spacer_util.h" namespace spacer { /* @@ -191,4 +194,87 @@ void iuc_proof::dump_farkas_stats() << "\n total farkas lemmas " << fl_total << " farkas lemmas in lowest cut " << fl_lowcut << "\n";); } + +void iuc_proof::display_dot(std::ostream& out) { + out << "digraph proof { \n"; + + std::unordered_map<unsigned, unsigned> ids; + unsigned last_id = 0; + + proof_post_order it(m_pr, m); + while (it.hasNext()) + { + proof* curr = it.next(); + + SASSERT(ids.count(curr->get_id()) == 0); + ids.insert(std::make_pair(curr->get_id(), last_id)); + + std::string color = "white"; + if (this->is_a_marked(curr) && !this->is_b_marked(curr)) + color = "red"; + else if(!this->is_a_marked(curr) && this->is_b_marked(curr)) + color = "blue"; + else if(this->is_a_marked(curr) && this->is_b_marked(curr) ) + color = "purple"; + + // compute node label + std::ostringstream label_ostream; + label_ostream << mk_epp(m.get_fact(curr), m) << "\n"; + std::string label = escape_dot(label_ostream.str()); + + // compute edge-label + std::string edge_label = ""; + if (m.get_num_parents(curr) == 0) { + switch (curr->get_decl_kind()) + { + case PR_ASSERTED: + edge_label = "asserted:"; + break; + case PR_HYPOTHESIS: + edge_label = "hyp:"; + color = "grey"; + break; + case PR_TH_LEMMA: + if (is_farkas_lemma(m, curr)) + edge_label = "th_axiom(farkas):"; + else if (is_arith_lemma(m, curr)) + edge_label = "th_axiom(arith):"; + else + edge_label = "th_axiom:"; + break; + default: + edge_label = "unknown axiom:"; + } + } + else { + if (curr->get_decl_kind() == PR_LEMMA) + edge_label = "lemma:"; + else if (curr->get_decl_kind() == PR_TH_LEMMA) { + if (is_farkas_lemma(m, curr)) + edge_label = "th_lemma(farkas):"; + else if (is_arith_lemma(m, curr)) + edge_label = "th_lemma(arith):"; + else + edge_label = "th_lemma(other):"; + } + } + + // generate entry for node in dot-file + out << "node_" << last_id << " " << "[" + << "shape=box,style=\"filled\"," + << "label=\"" << edge_label << " " << label << "\", " + << "fillcolor=\"" << color << "\"" << "]\n"; + + // add entry for each edge to that node + for (unsigned i = m.get_num_parents(curr); i > 0 ; --i) + { + proof* premise = to_app(curr->get_arg(i-1)); + unsigned pid = ids.at(premise->get_id()); + out << "node_" << pid << " -> " << "node_" << last_id << ";\n"; + } + + ++last_id; + } + out << "\n}" << std::endl; +} } diff --git a/src/muz/spacer/spacer_iuc_proof.h b/src/muz/spacer/spacer_iuc_proof.h index aeb18ef37..a3044ca53 100644 --- a/src/muz/spacer/spacer_iuc_proof.h +++ b/src/muz/spacer/spacer_iuc_proof.h @@ -1,6 +1,7 @@ #ifndef IUC_PROOF_H_ #define IUC_PROOF_H_ +#include <ostream> #include "ast/ast.h" namespace spacer { @@ -35,6 +36,7 @@ public: return !is_h_marked (p) && is_core_pure(m.get_fact (p)); } + void display_dot(std::ostream &out); // debug method void dump_farkas_stats(); private: diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp index 32391db91..146436db9 100644 --- a/src/muz/spacer/spacer_proof_utils.cpp +++ b/src/muz/spacer/spacer_proof_utils.cpp @@ -16,7 +16,6 @@ Revision History: --*/
-#include #include "util/params.h" #include "ast/ast_pp.h" #include "ast/ast_util.h" @@ -26,680 +25,498 @@ Revision History: #include "ast/proofs/proof_utils.h" #include "muz/spacer/spacer_proof_utils.h" +#include "muz/spacer/spacer_util.h" namespace spacer { +// arithmetic lemma recognizer +bool is_arith_lemma(ast_manager& m, proof* pr) +{ // arith lemmas: second parameter specifies exact type of lemma, // could be "farkas", "triangle-eq", "eq-propagate", // "assign-bounds", maybe also something else - bool is_arith_lemma(ast_manager& m, proof* pr) + if (pr->get_decl_kind() == PR_TH_LEMMA) { + func_decl* d = pr->get_decl(); + symbol sym; + return d->get_num_parameters() >= 1 && + d->get_parameter(0).is_symbol(sym) && + sym == "arith"; + } + return false; +} + +// farkas lemma recognizer +bool is_farkas_lemma(ast_manager& m, proof* pr) +{ + if (pr->get_decl_kind() == PR_TH_LEMMA) { - if (pr->get_decl_kind() == PR_TH_LEMMA) + func_decl* d = pr->get_decl(); + symbol sym; + return d->get_num_parameters() >= 2 && + d->get_parameter(0).is_symbol(sym) && sym == "arith" && + d->get_parameter(1).is_symbol(sym) && sym == "farkas"; + } + return false; +} + + +/* + * ==================================== + * methods for transforming proofs + * ==================================== + */ + +void theory_axiom_reducer::reset() +{ + m_cache.reset(); + m_pinned.reset(); +} + +proof_ref theory_axiom_reducer::reduce(proof* pr) +{ + proof_post_order pit(pr, m); + while (pit.hasNext()) + { + proof* p = pit.next(); + + if (m.get_num_parents(p) == 0 && is_arith_lemma(m, p)) { - func_decl* d = pr->get_decl(); - symbol sym; - if (d->get_num_parameters() >= 1 && - d->get_parameter(0).is_symbol(sym) && sym == "arith") - { - return true; - } - } - return false; - } + // we have an arith-theory-axiom and want to get rid of it + // we need to replace the axiom with 1a) corresponding hypothesis', 1b) a theory lemma and a 1c) a lemma. Furthermore update datastructures + app *cls_fact = to_app(m.get_fact(p)); + ptr_buffer cls; + if (m.is_or(cls_fact)) { + for (unsigned i = 0, sz = cls_fact->get_num_args(); i < sz; ++i) + { cls.push_back(cls_fact->get_arg(i)); } + } else { cls.push_back(cls_fact); } - bool is_farkas_lemma(ast_manager& m, proof* pr) - { - if (pr->get_decl_kind() == PR_TH_LEMMA) + // 1a) create hypothesis' + ptr_buffer hyps; + for (unsigned i=0; i < cls.size(); ++i) + { + expr* hyp_fact = m.is_not(cls[i]) ? 
to_app(cls[i])->get_arg(0) : m.mk_not(cls[i]); + proof* hyp = m.mk_hypothesis(hyp_fact); + m_pinned.push_back(hyp); + hyps.push_back(hyp); + } + + // 1b) create farkas lemma: need to rebuild parameters since mk_th_lemma adds tid as first parameter + unsigned num_params = p->get_decl()->get_num_parameters(); + parameter const* params = p->get_decl()->get_parameters(); + vector<parameter> parameters; + for (unsigned i = 1; i < num_params; ++i) { + parameters.push_back(params[i]); + } + + SASSERT(params[0].is_symbol()); + family_id tid = m.mk_family_id(params[0].get_symbol()); + SASSERT(tid != null_family_id); + + proof* th_lemma = m.mk_th_lemma(tid, m.mk_false(),hyps.size(), hyps.c_ptr(), num_params-1, parameters.c_ptr()); + SASSERT(is_arith_lemma(m, th_lemma)); + + // 1c) create lemma + proof* res = m.mk_lemma(th_lemma, cls_fact); + SASSERT(m.get_fact(res) == m.get_fact(p)); + m_pinned.push_back(res); + m_cache.insert(p, res); + } + else { - func_decl* d = pr->get_decl(); - symbol sym; - if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step - d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas", - d->get_parameter(1).is_symbol(sym) && sym == "farkas") + bool dirty = false; // proof is dirty, if a subproof of one of its premises has been transformed + + ptr_buffer<proof> args; + for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { - return true; - } - } - return false; - } - - /* - * ==================================== - * methods for dot printing - * ==================================== - */ - void pp_proof_dot_to_stream(ast_manager& m, std::ofstream& dotstream, proof* pr, iuc_proof* iuc_pr = nullptr); - std::string escape_dot(const std::string &s); - void pp_proof_post_process_dot(std::string dot_filepath, std::ofstream &dotstream); - - void pp_proof_dot(ast_manager& m, proof* pr, iuc_proof* iuc_pr) - { - // open temporary dot-file - std::string dotfile_path = "proof.dot"; - std::ofstream dotstream(dotfile_path); - - // dump dot representation to stream - pp_proof_dot_to_stream(m, dotstream, pr, iuc_pr); - - // post process dot-file, TODO: factor this out to a different place - pp_proof_post_process_dot(dotfile_path,dotstream); - } - - void pp_proof_dot_to_stream(ast_manager& m, std::ofstream& dotstream, proof* pr, iuc_proof* iuc_pr) - { - dotstream << "digraph proof { \n"; - std::unordered_map<unsigned, unsigned> id_to_small_id; - unsigned counter = 0; - - proof_post_order it2(pr, m); - while (it2.hasNext()) - { - proof* currentNode = it2.next(); - - SASSERT(id_to_small_id.find(currentNode->get_id()) == id_to_small_id.end()); - id_to_small_id.insert(std::make_pair(currentNode->get_id(), counter)); - - std::string color = "white"; - if (iuc_pr != nullptr) - { - if (iuc_pr->is_a_marked(currentNode) && !iuc_pr->is_b_marked(currentNode)) + proof* pp = m.get_parent(p, i); + proof* tmp; + if (m_cache.find(pp, tmp)) { - color = "red"; + args.push_back(tmp); + dirty = dirty || pp != tmp; } - else if(iuc_pr->is_b_marked(currentNode) && !iuc_pr->is_a_marked(currentNode)) + else { - color = "blue"; - } - else if(iuc_pr->is_b_marked(currentNode) && iuc_pr->is_a_marked(currentNode)) - { - color = "purple"; + SASSERT(false); } } - - // compute label - params_ref p; - p.set_uint("max_depth", 4294967295u); - p.set_uint("min_alias_size", 4294967295u); - - std::ostringstream label_ostream; - label_ostream << mk_pp(m.get_fact(currentNode),m,p) << "\n"; - std::string label = escape_dot(label_ostream.str()); - - // compute edge-label -
std::string edge_label = ""; - if (m.get_num_parents(currentNode) == 0) + if (!dirty) // if not dirty just use the old step { - switch (currentNode->get_decl_kind()) - { - case PR_ASSERTED: - edge_label = "asserted:"; - break; - case PR_HYPOTHESIS: - edge_label = "hyp:"; - color = "grey"; - break; - case PR_TH_LEMMA: - if (is_farkas_lemma(m, currentNode)) - { - edge_label = "th_axiom(farkas):"; - } - else - { - edge_label = "th_axiom:"; - } - break; - default: - edge_label = "unknown axiom-type:"; - } + m_cache.insert(p, p); } - else + else // otherwise create new step with the corresponding proofs of the premises { - if (currentNode->get_decl_kind() == PR_LEMMA) - { - edge_label = "lemma:"; - } - else if (currentNode->get_decl_kind() == PR_TH_LEMMA) - { - func_decl* d = currentNode->get_decl(); - symbol sym; - if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step - d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas", - d->get_parameter(1).is_symbol(sym) && sym == "farkas") - { - edge_label = "th_lemma(farkas):"; - } - else - { - edge_label = "th_lemma(other):"; - } - } - } - - // generate entry for node in dot-file - dotstream << "node_" << counter << " " - << "[" - << "shape=box,style=\"filled\"," - << "label=\"" << edge_label << " " << label << "\", " - << "fillcolor=\"" << color << "\"" - << "]\n"; - - // add entry for each edge to that node - for (unsigned i = m.get_num_parents(currentNode); i > 0 ; --i) - { - proof* premise = to_app(currentNode->get_arg(i-1)); - unsigned premise_small_id = id_to_small_id[premise->get_id()]; - dotstream << "node_" << premise_small_id - << " -> " - << "node_" << counter - << ";\n"; - } - - ++counter; - } - dotstream << "\n}" << std::endl; - } - - std::string escape_dot(const std::string &s) - { - std::string res; - res.reserve(s.size()); // preallocate - for (auto c : s) { - if (c == '\n') - res.append("\\l"); - else - res.push_back(c); - } - return res; - } - - void pp_proof_post_process_dot(std::string dot_filepath, std::ofstream &dotstream) - { - // replace variables in the dotfiles with nicer descriptions (hack: hard coded replacement for now) - std::vector > predicates; - std::vector l1 = {"L1","i","n","A"}; - predicates.push_back(l1); - std::vector l2 = {"L2","j","m","B"}; - predicates.push_back(l2); - - for(auto& predicate : predicates) - { - std::string predicate_name = predicate[0]; - for (unsigned i=0; i+1 < predicate.size(); ++i) - { - std::string new_name = predicate[i+1]; - std::string substring0 = predicate_name + "_" + std::to_string(i) + "_0"; - std::string substringN = predicate_name + "_" + std::to_string(i) + "_n"; - - std::string command0 = "sed -i '.bak' 's/" + substring0 + "/" + new_name + "/g' " + dot_filepath; - verbose_stream() << command0 << std::endl; - system(command0.c_str()); - - std::string commandN = "sed -i '.bak' s/" + substringN + "/" + new_name + "\\'" + "/g " + dot_filepath; - verbose_stream() << commandN << std::endl; - system(commandN.c_str()); - } - } - - verbose_stream() << "end of postprocessing"; - } - - /* - * ==================================== - * methods for transforming proofs - * ==================================== - */ - - void theory_axiom_reducer::reset() - { - m_cache.reset(); - m_pinned.reset(); - } - - proof_ref theory_axiom_reducer::reduce(proof* pr) - { - proof_post_order pit(pr, m); - while (pit.hasNext()) - { - proof* p = pit.next(); - - if (m.get_num_parents(p) == 0 && is_arith_lemma(m, p)) - { 
- // we have an arith-theory-axiom and want to get rid of it - // we need to replace the axiom with 1a) corresponding hypothesis', 1b) a theory lemma and a 1c) a lemma. Furthermore update datastructures - app *cls_fact = to_app(m.get_fact(p)); - ptr_buffer<expr> cls; - if (m.is_or(cls_fact)) { - for (unsigned i = 0, sz = cls_fact->get_num_args(); i < sz; ++i) - { cls.push_back(cls_fact->get_arg(i)); } - } else { cls.push_back(cls_fact); } - - // 1a) create hypothesis' - ptr_buffer<proof> hyps; - for (unsigned i=0; i < cls.size(); ++i) - { - expr* hyp_fact = m.is_not(cls[i]) ? to_app(cls[i])->get_arg(0) : m.mk_not(cls[i]); - proof* hyp = m.mk_hypothesis(hyp_fact); - m_pinned.push_back(hyp); - hyps.push_back(hyp); - } - - // 1b) create farkas lemma: need to rebuild parameters since mk_th_lemma adds tid as first parameter - unsigned num_params = p->get_decl()->get_num_parameters(); - parameter const* params = p->get_decl()->get_parameters(); - vector<parameter> parameters; - for (unsigned i = 1; i < num_params; ++i) { - parameters.push_back(params[i]); - } - - SASSERT(params[0].is_symbol()); - family_id tid = m.mk_family_id(params[0].get_symbol()); - SASSERT(tid != null_family_id); - - proof* th_lemma = m.mk_th_lemma(tid, m.mk_false(),hyps.size(), hyps.c_ptr(), num_params-1, parameters.c_ptr()); - SASSERT(is_arith_lemma(m, th_lemma)); - - // 1c) create lemma - proof* res = m.mk_lemma(th_lemma, cls_fact); - SASSERT(m.get_fact(res) == m.get_fact(p)); + if (m.has_fact(p)) { args.push_back(m.get_fact(p)); } + SASSERT(p->get_decl()->get_arity() == args.size()); + proof* res = m.mk_app(p->get_decl(), args.size(), (expr * const*)args.c_ptr()); m_pinned.push_back(res); m_cache.insert(p, res); } - else - { - bool dirty = false; // proof is dirty, if a subproof of one of its premises has been transformed - - ptr_buffer<proof> args; - for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) - { - proof* pp = m.get_parent(p, i); - proof* tmp; - if (m_cache.find(pp, tmp)) - { - args.push_back(tmp); - dirty = dirty || pp != tmp; - } - else - { - SASSERT(false); - } - } - if (!dirty) // if not dirty just use the old step - { - m_cache.insert(p, p); - } - else // otherwise create new step with the corresponding proofs of the premises - { - if (m.has_fact(p)) { args.push_back(m.get_fact(p)); } - SASSERT(p->get_decl()->get_arity() == args.size()); - proof* res = m.mk_app(p->get_decl(), args.size(), (expr * const*)args.c_ptr()); - m_pinned.push_back(res); - m_cache.insert(p, res); - } - } } - - proof* res; - VERIFY(m_cache.find(pr,res)); - DEBUG_CODE(proof_checker pc(m); - expr_ref_vector side(m); - SASSERT(pc.check(res, side)); - ); - - return proof_ref(res,m); } - void hypothesis_reducer::reset() - { - m_cache.reset(); - m_units.reset(); - m_active_hyps.reset(); - m_parent_hyps.reset(); - m_pinned_active_hyps.reset(); - m_pinned_parent_hyps.reset(); - m_pinned.reset(); - } + proof* res; + VERIFY(m_cache.find(pr,res)); + DEBUG_CODE(proof_checker pc(m); + expr_ref_vector side(m); + SASSERT(pc.check(res, side)); + ); - void hypothesis_reducer::compute_hypsets(proof* pr) - { - ptr_vector<proof> todo; - todo.push_back(pr); + return proof_ref(res,m); +} - while (!todo.empty()) +void hypothesis_reducer::reset() +{ + m_cache.reset(); + m_units.reset(); + m_active_hyps.reset(); + m_parent_hyps.reset(); + m_pinned_active_hyps.reset(); + m_pinned_parent_hyps.reset(); + m_pinned.reset(); +} + +void hypothesis_reducer::compute_hypsets(proof* pr) +{ + ptr_vector<proof> todo; + todo.push_back(pr); + + while (!todo.empty()) + { + proof* p = todo.back(); + + if
(m_active_hyps.contains(p)) { - proof* p = todo.back(); + SASSERT(m_parent_hyps.contains(p)); + todo.pop_back(); + } + // if we haven't already visited the current unit + else + { + bool existsUnvisitedParent = false; - if (m_active_hyps.contains(p)) - { - SASSERT(m_parent_hyps.contains(p)); - todo.pop_back(); - } - // if we haven't already visited the current unit - else - { - bool existsUnvisitedParent = false; + // add unprocessed premises to stack for DFS. If there is at least one unprocessed premise, don't compute the result + // for p now, but wait until those unprocessed premises are processed. + for (unsigned i = 0; i < m.get_num_parents(p); ++i) { + SASSERT(m.is_proof(p->get_arg(i))); + proof* premise = to_app(p->get_arg(i)); - // add unprocessed premises to stack for DFS. If there is at least one unprocessed premise, don't compute the result - // for p now, but wait until those unprocessed premises are processed. - for (unsigned i = 0; i < m.get_num_parents(p); ++i) { - SASSERT(m.is_proof(p->get_arg(i))); - proof* premise = to_app(p->get_arg(i)); - - // if we haven't visited the premise yet - if (!m_active_hyps.contains(premise)) - { - SASSERT(!m_parent_hyps.contains(premise)); - // add it to the stack - todo.push_back(premise); - existsUnvisitedParent = true; - } - } - - // if we already visited all premises, we can visit p too - if (!existsUnvisitedParent) + // if we haven't visited the premise yet + if (!m_active_hyps.contains(premise)) { - todo.pop_back(); + SASSERT(!m_parent_hyps.contains(premise)); + // add it to the stack + todo.push_back(premise); + existsUnvisitedParent = true; + } + } - // create active_hyps-set and parent_hyps-set for step p - proof_set* active_hyps = alloc(proof_set); - m_pinned_active_hyps.insert(active_hyps); - m_active_hyps.insert(p, active_hyps); + // if we already visited all premises, we can visit p too + if (!existsUnvisitedParent) + { + todo.pop_back(); - expr_set* parent_hyps = alloc(expr_set); - m_pinned_parent_hyps.insert(parent_hyps); - m_parent_hyps.insert(p, parent_hyps); + // create active_hyps-set and parent_hyps-set for step p + proof_set* active_hyps = alloc(proof_set); + m_pinned_active_hyps.insert(active_hyps); + m_active_hyps.insert(p, active_hyps); - // fill both sets - if (m.is_hypothesis(p)) + expr_set* parent_hyps = alloc(expr_set); + m_pinned_parent_hyps.insert(parent_hyps); + m_parent_hyps.insert(p, parent_hyps); + + // fill both sets + if (m.is_hypothesis(p)) + { + active_hyps->insert(p); + parent_hyps->insert(m.get_fact(p)); + } + else + { + for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { - active_hyps->insert(p); - parent_hyps->insert(m.get_fact(p)); - } - else - { - for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) + proof* pp = m.get_parent(p, i); + set_union(*parent_hyps, *m_parent_hyps.find(pp)); + + if (!m.is_lemma(p)) // lemmas clear all hypotheses { - proof* pp = m.get_parent(p, i); - set_union(*parent_hyps, *m_parent_hyps.find(pp)); - - if (!m.is_lemma(p)) // lemmas clear all hypotheses - { - set_union(*active_hyps, *m_active_hyps.find(pp)); - } + set_union(*active_hyps, *m_active_hyps.find(pp)); } } } } } } +} - // collect all units that are hyp-free and are used as hypotheses somewhere - // requires that m_active_hyps and m_parent_hyps have been computed - void hypothesis_reducer::collect_units(proof* pr) - { - expr_set* all_hyps = m_parent_hyps.find(pr); - SASSERT(all_hyps != nullptr); +// collect all units that are hyp-free and are used as hypotheses somewhere +// requires that 
m_active_hyps and m_parent_hyps have been computed +void hypothesis_reducer::collect_units(proof* pr) +{ + expr_set* all_hyps = m_parent_hyps.find(pr); + SASSERT(all_hyps != nullptr); - proof_post_order pit(pr, m); - while (pit.hasNext()) { - proof* p = pit.next(); - if (!m.is_hypothesis(p)) + proof_post_order pit(pr, m); + while (pit.hasNext()) { + proof* p = pit.next(); + if (!m.is_hypothesis(p)) + { + proof_set* active_hyps = m_active_hyps.find(p); + SASSERT(active_hyps != nullptr); + + // collect units that are hyp-free and are used as hypotheses in the proof pr + if (active_hyps->empty() && m.has_fact(p) && all_hyps->contains(m.get_fact(p))) { - proof_set* active_hyps = m_active_hyps.find(p); - SASSERT(active_hyps != nullptr); - - // collect units that are hyp-free and are used as hypotheses in the proof pr - if (active_hyps->empty() && m.has_fact(p) && all_hyps->contains(m.get_fact(p))) - { - m_units.insert(m.get_fact(p), p); - } + m_units.insert(m.get_fact(p), p); } } } +} - proof_ref hypothesis_reducer::reduce(proof* pr) - { - compute_hypsets(pr); - collect_units(pr); +proof_ref hypothesis_reducer::reduce(proof* pr) +{ + compute_hypsets(pr); + collect_units(pr); - proof* res = compute_transformed_proof(pr); - SASSERT(res != nullptr); + proof* res = compute_transformed_proof(pr); + SASSERT(res != nullptr); - proof_ref res_ref(res,m); + proof_ref res_ref(res,m); - reset(); - DEBUG_CODE(proof_checker pc(m); - expr_ref_vector side(m); - SASSERT(pc.check(res, side)); - ); - return res_ref; - } + reset(); + DEBUG_CODE(proof_checker pc(m); + expr_ref_vector side(m); + SASSERT(pc.check(res, side)); + ); + return res_ref; +} - proof* hypothesis_reducer::compute_transformed_proof(proof* pf) - { - proof *res = NULL; +proof* hypothesis_reducer::compute_transformed_proof(proof* pf) +{ + proof *res = NULL; - ptr_vector<proof> todo; - todo.push_back(pf); - ptr_buffer<proof> args; - bool dirty = false; + ptr_vector<proof> todo; + todo.push_back(pf); + ptr_buffer<proof> args; + bool dirty = false; - while (!todo.empty()) { - proof *p, *tmp, *pp; - unsigned todo_sz; + while (!todo.empty()) { + proof *p, *tmp, *pp; + unsigned todo_sz; - p = todo.back(); - if (m_cache.find(p, tmp)) { - todo.pop_back(); - continue; + p = todo.back(); + if (m_cache.find(p, tmp)) { + todo.pop_back(); + continue; + } + + dirty = false; + args.reset(); + todo_sz = todo.size(); + for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { + pp = m.get_parent(p, i); + if (m_cache.find(pp, tmp)) { + args.push_back(tmp); + dirty = dirty || pp != tmp; + } else { + todo.push_back(pp); } + } - dirty = false; - args.reset(); - todo_sz = todo.size(); - for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) { - pp = m.get_parent(p, i); - if (m_cache.find(pp, tmp)) { - args.push_back(tmp); - dirty = dirty || pp != tmp; - } else { - todo.push_back(pp); - } - } - - if (todo_sz < todo.size()) { continue; } - else { todo.pop_back(); } + if (todo_sz < todo.size()) { continue; } + else { todo.pop_back(); } - // here the proof transformation begins - // INV: whenever we visit p, active_hyps and parent_hyps have been computed for the args. + // here the proof transformation begins + // INV: whenever we visit p, active_hyps and parent_hyps have been computed for the args.
+ if (m.is_hypothesis(p)) + { + // hyp: replace by a corresponding unit + if (m_units.find(m.get_fact(p), tmp)) { - // hyp: replace by a corresponding unit - if (m_units.find(m.get_fact(p), tmp)) + // look up the proof of the unit: + // if there is a transformed proof use that one + // otherwise use the original proof + proof* proof_of_unit; + if (!m_cache.find(tmp,proof_of_unit)) { - // look up the proof of the unit: - // if there is a transformed proof use that one - // otherwise use the original proof - proof* proof_of_unit; - if (!m_cache.find(tmp,proof_of_unit)) - { - proof_of_unit = tmp; - } - - // compute hypsets (have not been computed in general, since the unit can be anywhere in the proof) - compute_hypsets(proof_of_unit); - - // if the transformation doesn't create a cycle, perform it - SASSERT(m_parent_hyps.contains(proof_of_unit)); - expr_set* parent_hyps = m_parent_hyps.find(proof_of_unit); - if (!parent_hyps->contains(p)) - { - res = proof_of_unit; - // hypsets have already been computed for proof_of_unit - } - // otherwise don't transform the proof and just use the hypothesis - else - { - res = p; - // hypsets have already been computed for p - } + proof_of_unit = tmp; } + + // compute hypsets (have not been computed in general, since the unit can be anywhere in the proof) + compute_hypsets(proof_of_unit); + + // if the transformation doesn't create a cycle, perform it + SASSERT(m_parent_hyps.contains(proof_of_unit)); + expr_set* parent_hyps = m_parent_hyps.find(proof_of_unit); + if (!parent_hyps->contains(p)) + { + res = proof_of_unit; + // hypsets have already been computed for proof_of_unit + } + // otherwise don't transform the proof and just use the hypothesis else { res = p; // hypsets have already been computed for p } } - else if (!dirty) { res = p; } - - else if (m.is_lemma(p)) - { - //lemma: reduce the premise; remove reduced consequences from conclusion - SASSERT(args.size() == 1); - res = mk_lemma_core(args[0], m.get_fact(p)); - compute_hypsets(res); - } - else if (m.is_unit_resolution(p)) - { - // unit: reduce untis; reduce the first premise; rebuild unit resolution - res = mk_unit_resolution_core(args); - compute_hypsets(res); - } else { - res = mk_step_core(p, args); - compute_hypsets(res); - } - - SASSERT(res); - m_cache.insert(p, res); - - SASSERT(m_active_hyps.contains(res)); - proof_set* active_hyps = m_active_hyps.find(res); - if (active_hyps->empty() && m.has_fact(res) && m.is_false(m.get_fact(res))) - { - return res; + res = p; + // hypsets have already been computed for p } } - UNREACHABLE(); - return nullptr; - } + else if (!dirty) { res = p; } - proof* hypothesis_reducer::mk_lemma_core(proof* premise, expr *fact) - { - SASSERT(m.is_false(m.get_fact(premise))); - - SASSERT(m_active_hyps.contains(premise)); - proof_set* active_hyps = m_active_hyps.find(premise); - - // if there is no active hypothesis return the premise - if (active_hyps->empty()) + else if (m.is_lemma(p)) { - return premise; + //lemma: reduce the premise; remove reduced consequences from conclusion + SASSERT(args.size() == 1); + res = mk_lemma_core(args[0], m.get_fact(p)); + compute_hypsets(res); } - // otherwise build disjunction of the negated active hypothesis' and add lemma step. - else + else if (m.is_unit_resolution(p)) { - expr_ref_buffer args(m); - for (auto hyp : *active_hyps) - { - expr* hyp_fact = m.get_fact(hyp); - expr_ref negated_hyp_fact(m); - negated_hyp_fact = m.is_not(hyp_fact) ? 
to_app(hyp_fact)->get_arg(0) : m.mk_not(hyp_fact); - args.push_back(negated_hyp_fact); - } - - expr_ref lemma(m); - if (args.size() == 1) - { - lemma = args[0]; - } - else - { - lemma = m.mk_or(args.size(), args.c_ptr()); - } - proof_ref res(m); - res = m.mk_lemma(premise, lemma); - m_pinned.push_back(res); - return res; - } - } - - proof* hypothesis_reducer::mk_unit_resolution_core(ptr_buffer<proof>& args) - { - ptr_buffer<proof> pf_args; // the arguments of the transformed unit resolution step - pf_args.push_back(args [0]); // the first element of args is the clause to resolve with - - // if any literal is false, we don't need a unit resolution step - // could be the case due to transformations which already have been done - for (unsigned i = 1; i < args.size(); ++i) - { - if (m.is_false(m.get_fact(args[i]))) - { - return args[i]; - } - } - - app *cls_fact = to_app(m.get_fact(args[0])); // BUG: I guess this shouldn't work with quantifiers (since they are no apps) - ptr_buffer<expr> cls; - if (m.is_or(cls_fact)) { - for (unsigned i = 0, sz = cls_fact->get_num_args(); i < sz; ++i) - { cls.push_back(cls_fact->get_arg(i)); } - } else { cls.push_back(cls_fact); } - - // construct new resolvent - ptr_buffer<expr> new_fact_cls; - bool found; - // XXX quadratic - for (unsigned i = 0, sz = cls.size(); i < sz; ++i) { - found = false; - for (unsigned j = 1; j < args.size(); ++j) { - if (m.is_complement(cls.get(i), m.get_fact(args [j]))) { - found = true; - pf_args.push_back(args [j]); - break; - } - } - if (!found) { - new_fact_cls.push_back(cls.get(i)); - } - } - - SASSERT(new_fact_cls.size() + pf_args.size() - 1 == cls.size()); - expr_ref new_fact(m); - new_fact = mk_or(m, new_fact_cls.size(), new_fact_cls.c_ptr()); - - // create new proof step - if (pf_args.size() == 1) // the only premise is the clause itself - { - return args[0]; + // unit: reduce units; reduce the first premise; rebuild unit resolution + res = mk_unit_resolution_core(args); + compute_hypsets(res); } else { - proof* res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr(), new_fact); - m_pinned.push_back(res); + res = mk_step_core(p, args); + compute_hypsets(res); + } + + SASSERT(res); + m_cache.insert(p, res); + + SASSERT(m_active_hyps.contains(res)); + proof_set* active_hyps = m_active_hyps.find(res); + if (active_hyps->empty() && m.has_fact(res) && m.is_false(m.get_fact(res))) + { return res; } } + UNREACHABLE(); + return nullptr; +} - proof* hypothesis_reducer::mk_step_core(proof* old_step, ptr_buffer<proof>& args) +proof* hypothesis_reducer::mk_lemma_core(proof* premise, expr *fact) +{ + SASSERT(m.is_false(m.get_fact(premise))); + + SASSERT(m_active_hyps.contains(premise)); + proof_set* active_hyps = m_active_hyps.find(premise); + + // if there is no active hypothesis return the premise + if (active_hyps->empty()) { - // if any of the literals is false, we don't need a step - for (unsigned i = 0; i < args.size(); ++i) + return premise; + } + // otherwise build the disjunction of the negated active hypotheses and add a lemma step. + else + { + expr_ref_buffer args(m); + for (auto hyp : *active_hyps) { - if (m.is_false(m.get_fact(args[i]))) - { - return args[i]; - } + expr* hyp_fact = m.get_fact(hyp); + expr_ref negated_hyp_fact(m); + negated_hyp_fact = m.is_not(hyp_fact) ?
to_app(hyp_fact)->get_arg(0) : m.mk_not(hyp_fact); + args.push_back(negated_hyp_fact); } + expr_ref lemma(m); + if (args.size() == 1) + { + lemma = args[0]; + } + else + { + lemma = m.mk_or(args.size(), args.c_ptr()); + } + proof_ref res(m); + res = m.mk_lemma(premise, lemma); + m_pinned.push_back(res); + return res; + } +} + +proof* hypothesis_reducer::mk_unit_resolution_core(ptr_buffer<proof>& args) +{ + ptr_buffer<proof> pf_args; // the arguments of the transformed unit resolution step + pf_args.push_back(args [0]); // the first element of args is the clause to resolve with + + // if any literal is false, we don't need a unit resolution step + // could be the case due to transformations which already have been done + for (unsigned i = 1; i < args.size(); ++i) + { + if (m.is_false(m.get_fact(args[i]))) + { + return args[i]; + } + } + + app *cls_fact = to_app(m.get_fact(args[0])); // BUG: I guess this shouldn't work with quantifiers (since they are not apps) + ptr_buffer<expr> cls; + if (m.is_or(cls_fact)) { + for (unsigned i = 0, sz = cls_fact->get_num_args(); i < sz; ++i) + { cls.push_back(cls_fact->get_arg(i)); } + } else { cls.push_back(cls_fact); } + + // construct new resolvent + ptr_buffer<expr> new_fact_cls; + bool found; + // XXX quadratic + for (unsigned i = 0, sz = cls.size(); i < sz; ++i) { + found = false; + for (unsigned j = 1; j < args.size(); ++j) { + if (m.is_complement(cls.get(i), m.get_fact(args [j]))) { + found = true; + pf_args.push_back(args [j]); + break; + } + } + if (!found) { + new_fact_cls.push_back(cls.get(i)); + } + } + + SASSERT(new_fact_cls.size() + pf_args.size() - 1 == cls.size()); + expr_ref new_fact(m); + new_fact = mk_or(m, new_fact_cls.size(), new_fact_cls.c_ptr()); + + // create new proof step + if (pf_args.size() == 1) // the only premise is the clause itself + { + return args[0]; + } + else + { + proof* res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr(), new_fact); + m_pinned.push_back(res); + return res; + } +} + +proof* hypothesis_reducer::mk_step_core(proof* old_step, ptr_buffer<proof>& args) +{ + // if any of the literals is false, we don't need a step + for (unsigned i = 0; i < args.size(); ++i) + { + if (m.is_false(m.get_fact(args[i]))) + { + return args[i]; + } + } + + // otherwise build step + args.push_back(to_app(m.get_fact(old_step))); // BUG: I guess this doesn't work with quantifiers (since they are not apps) + + SASSERT(old_step->get_decl()->get_arity() == args.size()); + proof* res = m.mk_app(old_step->get_decl(), args.size(), (expr * const*)args.c_ptr()); + m_pinned.push_back(res); + return res; +} }; diff --git a/src/muz/spacer/spacer_proof_utils.h b/src/muz/spacer/spacer_proof_utils.h index e47c9882a..671fda783 100644 --- a/src/muz/spacer/spacer_proof_utils.h +++ b/src/muz/spacer/spacer_proof_utils.h @@ -22,67 +22,74 @@ Revision History: namespace spacer { - bool is_arith_lemma(ast_manager& m, proof* pr); - bool is_farkas_lemma(ast_manager& m, proof* pr); +bool is_arith_lemma(ast_manager& m, proof* pr); +bool is_farkas_lemma(ast_manager& m, proof* pr); - /* - * prints the proof pr in dot representation to the file proof.dot - * if iuc_pr is not nullptr, then it is queried for coloring partitions - */ - class iuc_proof; - void pp_proof_dot(ast_manager& m, proof* pr,
iuc_proof* iuc_pr = nullptr); +class theory_axiom_reducer { +public: + theory_axiom_reducer(ast_manager& m) : m(m), m_pinned(m) {} - class theory_axiom_reducer - { - public: - theory_axiom_reducer(ast_manager& m) : m(m), m_pinned(m) {} + // reduce theory axioms and return transformed proof + proof_ref reduce(proof* pr); - // reduce theory axioms and return transformed proof - proof_ref reduce(proof* pr); +private: + ast_manager &m; - private: - ast_manager &m; + // tracking all created expressions + expr_ref_vector m_pinned; - // tracking all created expressions - expr_ref_vector m_pinned; + // maps each proof of a clause to the transformed subproof of + // that clause + obj_map<proof, proof*> m_cache; - // maps each proof of a clause to the transformed subproof of that clause - obj_map<proof, proof*> m_cache; + void reset(); +}; - void reset(); - }; +class hypothesis_reducer +{ +public: + hypothesis_reducer(ast_manager &m) : m(m), m_pinned(m) {} - class hypothesis_reducer - { - public: - hypothesis_reducer(ast_manager &m) : m(m), m_pinned(m) {} + // reduce hypotheses and return the transformed proof + proof_ref reduce(proof* pf); - // reduce hypothesis and return transformed proof - proof_ref reduce(proof* pf); +private: + typedef obj_hashtable<expr> expr_set; + typedef obj_hashtable<proof> proof_set; - private: - typedef obj_hashtable<expr> expr_set; - typedef obj_hashtable<proof> proof_set; + ast_manager &m; - ast_manager &m; + // created expressions + expr_ref_vector m_pinned; - expr_ref_vector m_pinned; // tracking all created expressions - ptr_vector<proof_set> m_pinned_active_hyps; // tracking all created sets of active hypothesis - ptr_vector<expr_set> m_pinned_parent_hyps; // tracking all created sets of parent hypothesis + // created sets of active hypotheses + ptr_vector<proof_set> m_pinned_active_hyps; + // created sets of parent hypotheses + ptr_vector<expr_set> m_pinned_parent_hyps; - obj_map<proof, proof*> m_cache; // maps each proof of a clause to the transformed subproof of that clause - obj_map<expr, proof*> m_units; // maps each unit literal to the subproof of that unit - obj_map<proof, proof_set*> m_active_hyps; // maps each proof of a clause to the set of proofs of active hypothesis' of the clause - obj_map<proof, expr_set*> m_parent_hyps; // maps each proof of a clause to the hypothesis-fact, which are transitive parents of that clause, needed to avoid creating cycles in the proof. + // maps a proof to the transformed proof + obj_map<proof, proof*> m_cache; - void reset(); - void compute_hypsets(proof* pr); // compute active_hyps and parent_hyps for pr - void collect_units(proof* pr); // compute m_units - proof* compute_transformed_proof(proof* pf); + // maps a unit literal to its derivation + obj_map<expr, proof*> m_units; - proof* mk_lemma_core(proof *pf, expr *fact); - proof* mk_unit_resolution_core(ptr_buffer<proof>& args); - proof* mk_step_core(proof* old_step, ptr_buffer<proof>& args); + // maps a proof to the set of proofs of active hypotheses + obj_map<proof, proof_set*> m_active_hyps; + // maps a proof to the hypothesis-facts that are transitive + // parents of that proof. Used for cycle detection and avoidance. + obj_map<proof, expr_set*> m_parent_hyps; + + void reset(); + + // compute active_hyps and parent_hyps for pr + void compute_hypsets(proof* pr); + // compute m_units + void collect_units(proof* pr); + proof* compute_transformed_proof(proof* pf); + + proof* mk_lemma_core(proof *pf, expr *fact); + proof* mk_unit_resolution_core(ptr_buffer<proof>& args); + proof* mk_step_core(proof* old_step, ptr_buffer<proof>& args); +}; } #endif
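
Note (not part of the patch): a minimal usage sketch of the new iuc_proof::display_dot API. Only display_dot(std::ostream&) is defined by this change; the wrapper function and the output file name below are illustrative.

    #include <fstream>
    #include "muz/spacer/spacer_iuc_proof.h"

    // Dump an iuc_proof as a GraphViz graph. Per display_dot above, nodes
    // marked only as A-part are colored red, only B-part blue, both purple.
    // Render afterwards with: dot -Tpng proof.dot -o proof.png
    void dump_iuc_proof_dot(spacer::iuc_proof &pr) {
        std::ofstream out("proof.dot");
        pr.display_dot(out);
    }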
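
Note (not part of the patch): the two reducers compose naturally — theory_axiom_reducer first turns arith theory axioms into hypothesis/th-lemma/lemma steps, and hypothesis_reducer then discharges the introduced hypotheses against unit subproofs. A sketch of that pipeline under the assumption that the reducers are meant to run in this order; the function name is illustrative.

    #include "muz/spacer/spacer_proof_utils.h"

    proof_ref reduce_proof(ast_manager &m, proof *pr) {
        // 1) replace arith theory axioms by hypotheses + theory lemma + lemma
        spacer::theory_axiom_reducer ta_red(m);
        proof_ref pr1 = ta_red.reduce(pr);

        // 2) resolve the hypotheses away using the collected unit proofs
        spacer::hypothesis_reducer hyp_red(m);
        return hyp_red.reduce(pr1.get());
    }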