diff --git a/src/muz/spacer/spacer_generalizers.cpp b/src/muz/spacer/spacer_generalizers.cpp
index 2f9627398..4b0c3d15c 100644
--- a/src/muz/spacer/spacer_generalizers.cpp
+++ b/src/muz/spacer/spacer_generalizers.cpp
@@ -158,6 +158,7 @@ void unsat_core_generalizer::operator()(lemma_ref &lemma)
 
     unsigned old_sz = lemma->get_cube().size();
     unsigned old_level = lemma->level();
+    (void)old_level;
 
     unsigned uses_level;
     expr_ref_vector core(m);
diff --git a/src/muz/spacer/spacer_itp_solver.cpp b/src/muz/spacer/spacer_itp_solver.cpp
index 35e06f9a2..71587b4c6 100644
--- a/src/muz/spacer/spacer_itp_solver.cpp
+++ b/src/muz/spacer/spacer_itp_solver.cpp
@@ -279,7 +279,7 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
     else
     {
         proof_ref res(get_proof(),m);
-
+
         // new code
         if (m_old_hyp_reducer)
         {
@@ -290,10 +290,10 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
                 verbose_stream() << "\nStats before transformation:";
                 iuc_before.print_farkas_stats();
             }
-
+
             proof_utils::reduce_hypotheses(res);
             proof_utils::permute_unit_resolution(res);
-
+
             if (m_print_farkas_stats)
             {
                 iuc_proof iuc_after(m, res.get(), B);
@@ -310,13 +310,13 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
                 verbose_stream() << "\nStats before transformation:";
                 iuc_before.print_farkas_stats();
             }
-
+
             theory_axiom_reducer ta_reducer(m);
             proof_ref pr_without_theory_lemmas = ta_reducer.reduce(res.get());
-
+
             hypothesis_reducer hyp_reducer(m);
             proof_ref pr_with_less_hypothesis = hyp_reducer.reduce(pr_without_theory_lemmas);
-
+
             if (m_print_farkas_stats)
             {
                 iuc_proof iuc_after(m, pr_with_less_hypothesis.get(), B);
@@ -328,9 +328,9 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
 
         // construct proof object with contains partition information
        iuc_proof iuc_pr(m, res, B);
-
+
         // configure learner
-        unsat_core_learner learner(m, iuc_pr, m_print_farkas_stats, m_iuc_debug_proof);
+        unsat_core_learner learner(m, iuc_pr);
 
         if (m_iuc_arith == 0 || m_iuc_arith > 3)
         {
@@ -352,7 +352,7 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
             unsat_core_plugin_farkas_lemma_bounded* plugin_farkas_lemma_bounded = alloc(unsat_core_plugin_farkas_lemma_bounded, learner,m);
             learner.register_plugin(plugin_farkas_lemma_bounded);
         }
-
+
         if (m_iuc == 2)
         {
             unsat_core_plugin_min_cut* plugin_min_cut = alloc(unsat_core_plugin_min_cut, learner, m);
diff --git a/src/muz/spacer/spacer_itp_solver.h b/src/muz/spacer/spacer_itp_solver.h
index 6f6a2bf8f..0b3527bb3 100644
--- a/src/muz/spacer/spacer_itp_solver.h
+++ b/src/muz/spacer/spacer_itp_solver.h
@@ -61,7 +61,6 @@ private:
     unsigned m_iuc;
     unsigned m_iuc_arith;
     bool m_print_farkas_stats;
-    bool m_iuc_debug_proof;
     bool m_old_hyp_reducer;
     bool is_proxy(expr *e, app_ref &def);
     void undo_proxies_in_core(ptr_vector<expr> &v);
@@ -69,7 +68,7 @@ private:
     app* fresh_proxy();
     void elim_proxies(expr_ref_vector &v);
 public:
-    itp_solver(solver &solver, unsigned iuc, unsigned iuc_arith, bool print_farkas_stats, bool iuc_debug_proof, bool old_hyp_reducer, bool split_literals = false) :
+    itp_solver(solver &solver, unsigned iuc, unsigned iuc_arith, bool print_farkas_stats, bool old_hyp_reducer, bool split_literals = false) :
         m(solver.get_manager()),
         m_solver(solver),
         m_proxies(m),
@@ -83,7 +82,6 @@ public:
         m_iuc(iuc),
         m_iuc_arith(iuc_arith),
         m_print_farkas_stats(print_farkas_stats),
-        m_iuc_debug_proof(iuc_debug_proof),
         m_old_hyp_reducer(old_hyp_reducer)
     {}
 
diff --git a/src/muz/spacer/spacer_proof_utils.cpp b/src/muz/spacer/spacer_proof_utils.cpp
index bbe53c362..735f94518 100644
--- a/src/muz/spacer/spacer_proof_utils.cpp
+++ b/src/muz/spacer/spacer_proof_utils.cpp
@@ -28,7 +28,7 @@ Revision History:
 #include "muz/base/dl_util.h"
 
 namespace spacer {
-
+
 // arith lemmas: second parameter specifies exact type of lemma, could be "farkas", "triangle-eq", "eq-propagate", "assign-bounds", maybe also something else
 bool is_arith_lemma(ast_manager& m, proof* pr)
 {
@@ -44,7 +44,7 @@ namespace spacer {
     }
     return false;
 }
-
+
 bool is_farkas_lemma(ast_manager& m, proof* pr)
 {
     if (pr->get_decl_kind() == PR_TH_LEMMA
@@ -126,28 +126,28 @@ proof* ProofIteratorPostOrder::next()
     // open temporary dot-file
     std::string dotfile_path = "proof.dot";
     std::ofstream dotstream(dotfile_path);
-
+
     // dump dot representation to stream
     pp_proof_dot_to_stream(m, dotstream, pr, iuc_pr);
-
+
     // post process dot-file, TODO: factor this out to a different place
     pp_proof_post_process_dot(dotfile_path,dotstream);
 }
-
+
 void pp_proof_dot_to_stream(ast_manager& m, std::ofstream& dotstream, proof* pr, iuc_proof* iuc_pr)
 {
     dotstream << "digraph proof { \n";
     std::unordered_map<unsigned, unsigned> id_to_small_id;
     unsigned counter = 0;
-
+
     ProofIteratorPostOrder it2(pr, m);
     while (it2.hasNext())
     {
         proof* currentNode = it2.next();
-
+
         SASSERT(id_to_small_id.find(currentNode->get_id()) == id_to_small_id.end());
         id_to_small_id.insert(std::make_pair(currentNode->get_id(), counter));
-
+
         std::string color = "white";
         if (iuc_pr != nullptr)
         {
@@ -169,11 +169,11 @@ proof* ProofIteratorPostOrder::next()
         params_ref p;
         p.set_uint("max_depth", 4294967295u);
         p.set_uint("min_alias_size", 4294967295u);
-
+
         std::ostringstream label_ostream;
         label_ostream << mk_pp(m.get_fact(currentNode),m,p) << "\n";
         std::string label = escape_dot(label_ostream.str());
-
+
         // compute edge-label
         std::string edge_label = "";
         if (m.get_num_parents(currentNode) == 0)
@@ -223,7 +223,7 @@ proof* ProofIteratorPostOrder::next()
                 }
             }
         }
-
+
         // generate entry for node in dot-file
         dotstream << "node_" << counter << " "
                   << "["
@@ -231,7 +231,7 @@ proof* ProofIteratorPostOrder::next()
                   << "label=\"" << edge_label << " " << label << "\", "
                   << "fillcolor=\"" << color << "\""
                   << "]\n";
-
+
         // add entry for each edge to that node
         for (unsigned i = m.get_num_parents(currentNode); i > 0 ; --i)
         {
@@ -242,12 +242,12 @@ proof* ProofIteratorPostOrder::next()
                       << "node_" << counter
                       << ";\n";
         }
-
+
         ++counter;
     }
     dotstream << "\n}" << std::endl;
 }
-
+
 std::string escape_dot(const std::string &s)
 {
     std::string res;
@@ -260,7 +260,7 @@ proof* ProofIteratorPostOrder::next()
     }
     return res;
 }
-
+
 void pp_proof_post_process_dot(std::string dot_filepath, std::ofstream &dotstream)
 {
     // replace variables in the dotfiles with nicer descriptions (hack: hard coded replacement for now)
@@ -269,7 +269,7 @@ proof* ProofIteratorPostOrder::next()
     predicates.push_back(l1);
     std::vector<std::string> l2 = {"L2","j","m","B"};
     predicates.push_back(l2);
-
+
     for(auto& predicate : predicates)
     {
         std::string predicate_name = predicate[0];
@@ -278,17 +278,17 @@ proof* ProofIteratorPostOrder::next()
             std::string new_name = predicate[i+1];
             std::string substring0 = predicate_name + "_" + std::to_string(i) + "_0";
             std::string substringN = predicate_name + "_" + std::to_string(i) + "_n";
-
+
             std::string command0 = "sed -i '.bak' 's/" + substring0 + "/" + new_name + "/g' " + dot_filepath;
             verbose_stream() << command0 << std::endl;
             system(command0.c_str());
-
+
             std::string commandN = "sed -i '.bak' s/" + substringN + "/" + new_name + "\\'" + "/g " + dot_filepath;
             verbose_stream() << commandN << std::endl;
             system(commandN.c_str());
         }
     }
-
+
verbose_stream() << "end of postprocessing"; } @@ -297,20 +297,20 @@ proof* ProofIteratorPostOrder::next() * methods for transforming proofs * ==================================== */ - + void theory_axiom_reducer::reset() { m_cache.reset(); m_pinned.reset(); } - + proof_ref theory_axiom_reducer::reduce(proof* pr) { ProofIteratorPostOrder pit(pr, m); while (pit.hasNext()) { proof* p = pit.next(); - + if (m.get_num_parents(p) == 0 && is_arith_lemma(m, p)) { // we have an arith-theory-axiom and want to get rid of it @@ -321,7 +321,7 @@ proof* ProofIteratorPostOrder::next() for (unsigned i = 0, sz = cls_fact->get_num_args(); i < sz; ++i) { cls.push_back(cls_fact->get_arg(i)); } } else { cls.push_back(cls_fact); } - + // 1a) create hypothesis' ptr_buffer hyps; for (unsigned i=0; i < cls.size(); ++i) @@ -331,7 +331,7 @@ proof* ProofIteratorPostOrder::next() m_pinned.push_back(hyp); hyps.push_back(hyp); } - + // 1b) create farkas lemma: need to rebuild parameters since mk_th_lemma adds tid as first parameter unsigned num_params = p->get_decl()->get_num_parameters(); parameter const* params = p->get_decl()->get_parameters(); @@ -339,14 +339,14 @@ proof* ProofIteratorPostOrder::next() for (unsigned i = 1; i < num_params; ++i) { parameters.push_back(params[i]); } - + SASSERT(params[0].is_symbol()); family_id tid = m.mk_family_id(params[0].get_symbol()); SASSERT(tid != null_family_id); - + proof* th_lemma = m.mk_th_lemma(tid, m.mk_false(),hyps.size(), hyps.c_ptr(), num_params-1, parameters.c_ptr()); SASSERT(is_arith_lemma(m, th_lemma)); - + // 1c) create lemma proof* res = m.mk_lemma(th_lemma, cls_fact); SASSERT(m.get_fact(res) == m.get_fact(p)); @@ -386,15 +386,14 @@ proof* ProofIteratorPostOrder::next() } } } - + proof* res; - bool found = m_cache.find(pr,res); - SASSERT(found); + VERIFY(m_cache.find(pr,res)); DEBUG_CODE(proof_checker pc(m); expr_ref_vector side(m); SASSERT(pc.check(res, side)); ); - + return proof_ref(res,m); } @@ -408,16 +407,16 @@ proof* ProofIteratorPostOrder::next() m_pinned_parent_hyps.reset(); m_pinned.reset(); } - + void hypothesis_reducer::compute_hypsets(proof* pr) { ptr_vector todo; todo.push_back(pr); - + while (!todo.empty()) { proof* p = todo.back(); - + if (m_active_hyps.contains(p)) { SASSERT(m_parent_hyps.contains(p)); @@ -427,13 +426,13 @@ proof* ProofIteratorPostOrder::next() else { bool existsUnvisitedParent = false; - + // add unprocessed premises to stack for DFS. If there is at least one unprocessed premise, don't compute the result // for p now, but wait until those unprocessed premises are processed. 
             for (unsigned i = 0; i < m.get_num_parents(p); ++i)
             {
                 SASSERT(m.is_proof(p->get_arg(i)));
                 proof* premise = to_app(p->get_arg(i));
-
+
                 // if we haven't visited the premise yet
                 if (!m_active_hyps.contains(premise))
                 {
@@ -443,7 +442,7 @@ proof* ProofIteratorPostOrder::next()
                     existsUnvisitedParent = true;
                 }
             }
-
+
             // if we already visited all premises, we can visit p too
             if (!existsUnvisitedParent)
             {
@@ -453,11 +452,11 @@ proof* ProofIteratorPostOrder::next()
                 proof_set* active_hyps = alloc(proof_set);
                 m_pinned_active_hyps.insert(active_hyps);
                 m_active_hyps.insert(p, active_hyps);
-
+
                 expr_set* parent_hyps = alloc(expr_set);
                 m_pinned_parent_hyps.insert(parent_hyps);
                 m_parent_hyps.insert(p, parent_hyps);
-
+
                 // fill both sets
                 if (m.is_hypothesis(p))
                 {
@@ -481,7 +480,7 @@ proof* ProofIteratorPostOrder::next()
             }
         }
     }
-
+
 // collect all units that are hyp-free and are used as hypotheses somewhere
 // requires that m_active_hyps and m_parent_hyps have been computed
 void hypothesis_reducer::collect_units(proof* pr)
@@ -510,12 +509,12 @@ proof* ProofIteratorPostOrder::next()
 {
     compute_hypsets(pr);
     collect_units(pr);
-
+
     proof* res = compute_transformed_proof(pr);
     SASSERT(res != nullptr);
-
+
     proof_ref res_ref(res,m);
-
+
     reset();
     DEBUG_CODE(proof_checker pc(m);
                expr_ref_vector side(m);
@@ -523,7 +522,7 @@ proof* ProofIteratorPostOrder::next()
                );
     return res_ref;
 }
-
+
 proof* hypothesis_reducer::compute_transformed_proof(proof* pf)
 {
     proof *res = NULL;
@@ -559,7 +558,7 @@ proof* ProofIteratorPostOrder::next()
         if (todo_sz < todo.size()) { continue; }
         else { todo.pop_back(); }
-
+
         // here the proof transformation begins
         // INV: whenever we visit p, active_hyps and parent_hyps have been computed for the args.
         if (m.is_hypothesis(p))
         {
@@ -578,7 +577,7 @@ proof* ProofIteratorPostOrder::next()
 
             // compute hypsets (have not been computed in general, since the unit can be anywhere in the proof)
             compute_hypsets(proof_of_unit);
-
+
             // if the transformation doesn't create a cycle, perform it
             SASSERT(m_parent_hyps.contains(proof_of_unit));
             expr_set* parent_hyps = m_parent_hyps.find(proof_of_unit);
@@ -622,8 +621,8 @@ proof* ProofIteratorPostOrder::next()
             }
             SASSERT(res);
-            m_cache.insert(p, res);
-
+            m_cache.insert(p, res);
+
             SASSERT(m_active_hyps.contains(res));
             proof_set* active_hyps = m_active_hyps.find(res);
             if (active_hyps->empty() && m.has_fact(res) && m.is_false(m.get_fact(res)))
@@ -634,11 +633,11 @@ proof* ProofIteratorPostOrder::next()
     UNREACHABLE();
     return nullptr;
 }
-
+
 proof* hypothesis_reducer::mk_lemma_core(proof* premise, expr *fact)
 {
     SASSERT(m.is_false(m.get_fact(premise)));
-
+
     SASSERT(m_active_hyps.contains(premise));
     proof_set* active_hyps = m_active_hyps.find(premise);
 
@@ -658,7 +657,7 @@ proof* ProofIteratorPostOrder::next()
         negated_hyp_fact = m.is_not(hyp_fact) ? to_app(hyp_fact)->get_arg(0) : m.mk_not(hyp_fact);
         args.push_back(negated_hyp_fact);
     }
-
+
     expr_ref lemma(m);
     if (args.size() == 1)
     {
@@ -731,7 +730,7 @@ proof* ProofIteratorPostOrder::next()
         return res;
     }
 }
-
+
 proof* hypothesis_reducer::mk_step_core(proof* old_step, ptr_buffer<expr>& args)
 {
     // if any of the literals is false, we don't need a step
@@ -742,10 +741,10 @@ proof* ProofIteratorPostOrder::next()
             return args[i];
         }
     }
-
+
     // otherwise build step
     args.push_back(to_app(m.get_fact(old_step))); // BUG: I guess this doesn't work with quantifiers (since they are no apps)
-
+
     SASSERT(old_step->get_decl()->get_arity() == args.size());
     proof* res = m.mk_app(old_step->get_decl(), args.size(), (expr * const*)args.c_ptr());
     m_pinned.push_back(res);
diff --git a/src/muz/spacer/spacer_prop_solver.cpp b/src/muz/spacer/spacer_prop_solver.cpp
index 03c9b858b..d14cd6e91 100644
--- a/src/muz/spacer/spacer_prop_solver.cpp
+++ b/src/muz/spacer/spacer_prop_solver.cpp
@@ -60,8 +60,16 @@ prop_solver::prop_solver(manager& pm, fixedpoint_params const& p, symbol const&
     m_solvers[1] = pm.mk_fresh2();
     m_fparams[1] = &pm.fparams2();
-    m_contexts[0] = alloc(spacer::itp_solver, *(m_solvers[0]), p.spacer_iuc(), p.spacer_iuc_arith(), p.spacer_print_farkas_stats(), p.spacer_iuc_debug_proof(), p.spacer_old_hyp_reducer(), p.spacer_split_farkas_literals());
-    m_contexts[1] = alloc(spacer::itp_solver, *(m_solvers[1]), p.spacer_iuc(), p.spacer_iuc_arith(), p.spacer_print_farkas_stats(), p.spacer_iuc_debug_proof(), p.spacer_old_hyp_reducer(), p.spacer_split_farkas_literals());
+    m_contexts[0] = alloc(spacer::itp_solver, *(m_solvers[0]), p.spacer_iuc(),
+                          p.spacer_iuc_arith(),
+                          p.spacer_print_farkas_stats(),
+                          p.spacer_old_hyp_reducer(),
+                          p.spacer_split_farkas_literals());
+    m_contexts[1] = alloc(spacer::itp_solver, *(m_solvers[1]), p.spacer_iuc(),
+                          p.spacer_iuc_arith(),
+                          p.spacer_print_farkas_stats(),
+                          p.spacer_old_hyp_reducer(),
+                          p.spacer_split_farkas_literals());
 
     for (unsigned i = 0; i < 2; ++i) {
         m_contexts[i]->assert_expr(m_pm.get_background());
     }
diff --git a/src/muz/spacer/spacer_unsat_core_learner.h b/src/muz/spacer/spacer_unsat_core_learner.h
index 16b27f4ba..a8b5965fc 100644
--- a/src/muz/spacer/spacer_unsat_core_learner.h
+++ b/src/muz/spacer/spacer_unsat_core_learner.h
@@ -32,7 +32,8 @@ namespace spacer {
         typedef obj_hashtable<expr> expr_set;
 
     public:
-        unsat_core_learner(ast_manager& m, iuc_proof& pr, bool print_farkas_stats = false, bool iuc_debug_proof = false) : m(m), m_pr(pr), m_unsat_core(m), m_print_farkas_stats(print_farkas_stats), m_iuc_debug_proof(iuc_debug_proof) {};
+        unsat_core_learner(ast_manager& m, iuc_proof& pr) :
+            m(m), m_pr(pr), m_unsat_core(m) {};
         virtual ~unsat_core_learner();
 
         ast_manager& m;
@@ -60,7 +61,7 @@ namespace spacer {
         void set_closed(proof* p, bool value);
 
         bool is_b_open (proof *p);
-
+
         /*
         * adds a lemma to the unsat core
         */
@@ -72,7 +73,9 @@ namespace spacer {
        ptr_vector<unsat_core_plugin> m_plugins;
        ast_mark m_closed;
 
-        expr_ref_vector m_unsat_core; // collects the lemmas of the unsat-core, will at the end be inserted into unsat_core.
+        // collects the lemmas of the unsat-core
+        // will at the end be inserted into unsat_core.
+        expr_ref_vector m_unsat_core;
 
        /*
        * computes partial core for step by delegating computation to plugins
@@ -83,9 +86,7 @@ namespace spacer {
        * finalize computation of unsat-core
        */
        void finalize();
-
-        bool m_print_farkas_stats;
-        bool m_iuc_debug_proof;
+
 
     };
 }
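
Note for reviewers, not part of the patch: the net effect of the two signature changes is that `spacer.iuc.debug_proof` is threaded out end-to-end. A minimal sketch of the call sites after this change, assuming an underlying `solver` s, `fixedpoint_params` p, an `ast_manager` m, and an `iuc_proof` iuc_pr built as in `get_itp_core` above:

    // sketch only: s, p, m, iuc_pr are assumed to exist as described above

    // itp_solver: the iuc_debug_proof argument is removed; the remaining
    // arguments keep their order (see spacer_itp_solver.h above)
    spacer::itp_solver itp(s, p.spacer_iuc(), p.spacer_iuc_arith(),
                           p.spacer_print_farkas_stats(),
                           p.spacer_old_hyp_reducer(),
                           p.spacer_split_farkas_literals());

    // unsat_core_learner: the print_farkas_stats and iuc_debug_proof
    // flags are removed from the constructor
    spacer::unsat_core_learner learner(m, iuc_pr);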