3
0
Fork 0
mirror of https://github.com/Z3Prover/z3 synced 2025-04-24 01:25:31 +00:00

Refactor iuc_proof as a separate class

This also adds DOT printing support to interpolating proofs
(color for different parts)

iuc_proof is a proof used for IUC computation
This commit is contained in:
Arie Gurfinkel 2018-05-15 09:43:31 -07:00
parent 10106e8e12
commit 56114a5f6d
9 changed files with 960 additions and 379 deletions

View file

@ -25,6 +25,7 @@ z3_add_component(spacer
spacer_quant_generalizer.cpp
spacer_callback.cpp
spacer_json.cpp
spacer_iuc_proof.cpp
COMPONENT_DEPENDENCIES
arith_tactics
core_tactics

View file

@ -23,6 +23,7 @@ Notes:
#include"ast/rewriter/expr_replacer.h"
#include"muz/spacer/spacer_unsat_core_learner.h"
#include"muz/spacer/spacer_unsat_core_plugin.h"
#include "muz/spacer/spacer_iuc_proof.h"
namespace spacer {
void itp_solver::push ()
@ -261,11 +262,11 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
B.insert (a);
}
proof_ref pr(m);
pr = get_proof ();
if (m_iuc == 0)
{
proof_ref pr(m);
pr = get_proof ();
// old code
farkas_learner learner_old;
learner_old.set_split_literals(m_split_literals);
@ -277,7 +278,19 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
else
{
// new code
unsat_core_learner learner(m, m_print_farkas_stats, m_iuc_debug_proof);
// preprocess proof in order to get a proof which is better suited for unsat-core-extraction
proof_ref pr(get_proof(), m);
spacer::reduce_hypotheses(pr);
STRACE("spacer.unsat_core_learner",
verbose_stream() << "Reduced proof:\n" << mk_ismt2_pp(pr, m) << "\n";
);
// construct proof object which contains partition information
iuc_proof iuc_pr(m, get_proof(), B);
// configure learner
unsat_core_learner learner(m, iuc_pr, m_print_farkas_stats, m_iuc_debug_proof);
if (m_iuc_arith == 0 || m_iuc_arith > 3)
{
@ -311,31 +324,12 @@ void itp_solver::get_itp_core (expr_ref_vector &core)
learner.register_plugin(plugin_lemma);
}
learner.compute_unsat_core(pr, B, core);
// compute interpolating unsat core
learner.compute_unsat_core(core);
// postprocessing, TODO: elim_proxies should be done inside iuc_proof
elim_proxies (core);
simplify_bounds (core); // XXX potentially redundant
// // debug
// expr_ref_vector core2(m);
// unsat_core_learner learner2(m);
//
// unsat_core_plugin_farkas_lemma* plugin_farkas_lemma2 = alloc(unsat_core_plugin_farkas_lemma, learner2, m_split_literals);
// learner2.register_plugin(plugin_farkas_lemma2);
// unsat_core_plugin_lemma* plugin_lemma2 = alloc(unsat_core_plugin_lemma, learner2);
// learner2.register_plugin(plugin_lemma2);
// learner2.compute_unsat_core(pr, B, core2);
//
// elim_proxies (core2);
// simplify_bounds (core2);
//
// IF_VERBOSE(2,
// verbose_stream () << "Itp Core:\n"
// << mk_pp (mk_and (core), m) << "\n";);
// IF_VERBOSE(2,
// verbose_stream () << "Itp Core2:\n"
// << mk_pp (mk_and (core2), m) << "\n";);
//SASSERT(mk_and (core) == mk_and (core2));
}
IF_VERBOSE(2,

View file

@ -0,0 +1,235 @@
#include "muz/spacer/spacer_iuc_proof.h"
#include "ast/for_each_expr.h"
#include "ast/array_decl_plugin.h"
#include "muz/spacer/spacer_proof_utils.h"
namespace spacer {
/*
* ====================================
* init
* ====================================
*/
/*
 * Construct an iuc_proof over pr.
 * b_conjuncts holds the assertions belonging to partition B; every other
 * asserted fact in the proof is treated as belonging to partition A.
 * m_pr pins the proof via a proof_ref for the lifetime of this object.
 */
iuc_proof::iuc_proof(ast_manager& m, proof* pr, expr_set& b_conjuncts) : m(m), m_pr(pr,m)
{
    // init A-marks and B-marks (collect_symbols_b must run first:
    // only_contains_symbols_b relies on m_symbols_b being populated)
    collect_symbols_b(b_conjuncts);
    compute_marks(b_conjuncts);
}

// returns the underlying proof; no ownership transfer (m_pr keeps its reference)
proof* iuc_proof::get()
{
    return m_pr.get();
}
/*
* ====================================
* methods for computing symbol colors
* ====================================
*/
/*
 * Functor for for_each_expr: collects the declarations of all
 * uninterpreted symbols (family id == null_family_id) occurring in
 * an expression into the given func_decl_set.
 */
class collect_pure_proc {
    func_decl_set& m_symbs;
public:
    collect_pure_proc(func_decl_set& s):m_symbs(s) {}
    void operator()(app* a) {
        // only uninterpreted applications contribute symbols
        if (a->get_family_id() == null_family_id) {
            m_symbs.insert(a->get_decl());
        }
    }
    // variables and quantifiers carry no function symbols of their own
    void operator()(var*) {}
    void operator()(quantifier*) {}
};
// Populate m_symbols_b with every uninterpreted symbol occurring in any
// B-conjunct. The expr_mark is shared across conjuncts so each subterm
// is visited at most once overall.
void iuc_proof::collect_symbols_b(expr_set& b_conjuncts)
{
    expr_mark seen;
    collect_pure_proc collector(m_symbols_b);
    expr_set::iterator cur = b_conjuncts.begin();
    expr_set::iterator end = b_conjuncts.end();
    for (; cur != end; ++cur) {
        for_each_expr(collector, seen, *cur);
    }
}
/*
 * Functor for for_each_expr: throws non_pure as soon as the traversed
 * expression contains an uninterpreted symbol outside the given set, or
 * an array extensionality (OP_ARRAY_EXT) term. Used to decide whether an
 * expression is "pure" w.r.t. the B-symbols.
 */
class is_pure_expr_proc {
    func_decl_set const& m_symbs;
    array_util m_au;
public:
    // sentinel exception used to abort the traversal early
    struct non_pure {};

    is_pure_expr_proc(func_decl_set const& s, ast_manager& m):
        m_symbs(s),
        m_au (m)
    {}

    void operator()(app* a) {
        if (a->get_family_id() == null_family_id) {
            // uninterpreted symbol: must be one of the allowed symbols
            if (!m_symbs.contains(a->get_decl())) {
                throw non_pure();
            }
        }
        // array extensionality introduces fresh symbols, so it is rejected as well
        else if (a->get_family_id () == m_au.get_family_id () &&
                 a->is_app_of (a->get_family_id (), OP_ARRAY_EXT)) {
            throw non_pure();
        }
    }
    void operator()(var*) {}
    void operator()(quantifier*) {}
};
// requires that m_symbols_b has already been computed, which is done during initialization.
bool iuc_proof::only_contains_symbols_b(expr* expr) const
{
is_pure_expr_proc proc(m_symbols_b, m);
try {
for_each_expr(proc, expr);
}
catch (is_pure_expr_proc::non_pure)
{
return false;
}
return true;
}
/*
* ====================================
* methods for computing which premises
* have been used to derive the conclusions
* ====================================
*/
/*
 * Compute, for every node of the proof DAG, three marks:
 *   a-mark: the node's derivation uses at least one axiom asserted in A
 *   b-mark: the node's derivation uses at least one axiom asserted in B
 *   h-mark: the node's derivation depends on an open hypothesis
 * The post-order traversal guarantees that all premises of a node are
 * marked before the node itself is processed.
 */
void iuc_proof::compute_marks(expr_set& b_conjuncts)
{
    ProofIteratorPostOrder it(m_pr, m);
    while (it.hasNext())
    {
        proof* currentNode = it.next();
        if (m.get_num_parents(currentNode) == 0)
        {
            // leaf: either an asserted axiom or a hypothesis
            switch(currentNode->get_decl_kind())
            {
            case PR_ASSERTED: // currentNode is an axiom
            {
                // partition membership decides the color of the axiom
                if (b_conjuncts.contains(m.get_fact(currentNode)))
                {
                    m_b_mark.mark(currentNode, true);
                }
                else
                {
                    m_a_mark.mark(currentNode, true);
                }
                break;
            }
            // currentNode is a hypothesis:
            case PR_HYPOTHESIS:
            {
                m_h_mark.mark(currentNode, true);
                break;
            }
            default:
            {
                // other leaves (e.g. theory axioms) stay unmarked
                break;
            }
            }
        }
        else
        {
            // inner node: collect from parents whether derivation of current
            // node contains A-axioms, B-axioms and hypothesis
            bool need_to_mark_a = false;
            bool need_to_mark_b = false;
            bool need_to_mark_h = false;

            for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i)
            {
                SASSERT(m.is_proof(currentNode->get_arg(i)));
                proof* premise = to_app(currentNode->get_arg(i));
                // marks propagate disjunctively from the premises
                need_to_mark_a = need_to_mark_a || m_a_mark.is_marked(premise);
                need_to_mark_b = need_to_mark_b || m_b_mark.is_marked(premise);
                need_to_mark_h = need_to_mark_h || m_h_mark.is_marked(premise);
            }

            // if current node is application of lemma, we know that all
            // hypotheses are discharged at this step
            if(currentNode->get_decl_kind() == PR_LEMMA)
            {
                need_to_mark_h = false;
            }

            // save results
            m_a_mark.mark(currentNode, need_to_mark_a);
            m_b_mark.mark(currentNode, need_to_mark_b);
            m_h_mark.mark(currentNode, need_to_mark_h);
        }
    }
}
// Accessors for the coloring computed by compute_marks.

// derivation of p uses an A-axiom
bool iuc_proof::is_a_marked(proof* p) { return m_a_mark.is_marked(p); }

// derivation of p uses a B-axiom
bool iuc_proof::is_b_marked(proof* p) { return m_b_mark.is_marked(p); }

// derivation of p depends on an open hypothesis
bool iuc_proof::is_h_marked(proof* p) { return m_h_mark.is_marked(p); }
/*
* ====================================
* methods for dot printing
* ====================================
*/
// dump this proof, colored by the A/B/H marks, in dot format to the
// hard-coded file "proof.dot" (see pp_proof_dot)
void iuc_proof::pp_dot()
{
    pp_proof_dot(m, m_pr, this);
}
/*
* ====================================
* statistics
* ====================================
*/
/*
 * Print to the verbose stream how many Farkas lemmas the proof contains
 * and how many of them lie on the "lowest cut", i.e. are a-marked nodes
 * with at least one premise that is b-marked but not a-marked.
 */
void iuc_proof::print_farkas_stats()
{
    unsigned farkas_counter = 0;
    unsigned farkas_counter2 = 0;

    ProofIteratorPostOrder it3(m_pr, m);
    while (it3.hasNext())
    {
        proof* currentNode = it3.next();

        // if node is theory lemma
        if (is_farkas_lemma(m, currentNode))
        {
            farkas_counter++;

            // check whether farkas lemma is to be interpolated (could potentially miss farkas lemmas, which are interpolated, because we potentially don't want to use the lowest cut)
            bool has_blue_nonred_parent = false;
            for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i)
            {
                proof* premise = to_app(currentNode->get_arg(i));
                // "blue non-red" = derived purely from B (b-marked, not a-marked)
                if (!is_a_marked(premise) && is_b_marked(premise))
                {
                    has_blue_nonred_parent = true;
                    break;
                }
            }
            if (has_blue_nonred_parent && is_a_marked(currentNode))
            {
                // mixing a B-pure premise into an a-marked node implies the
                // node itself is also b-marked
                SASSERT(is_b_marked(currentNode));
                farkas_counter2++;
            }
        }
    }

    verbose_stream() << "\nThis proof contains " << farkas_counter << " Farkas lemmas. " << farkas_counter2 << " Farkas lemmas participate in the lowest cut\n";
}
}

View file

@ -0,0 +1,65 @@
#ifndef IUC_PROOF_H_
#define IUC_PROOF_H_
#include "ast/ast.h"
namespace spacer {
typedef obj_hashtable<expr> expr_set;
typedef obj_hashtable<func_decl> func_decl_set;
/*
* an iuc_proof is a proof together with information of the coloring of the axioms.
*/
/*
 * an iuc_proof is a proof together with information about the coloring of
 * the axioms: each proof node is marked by whether its derivation uses
 * axioms from partition A, from partition B, and/or open hypotheses.
 */
class iuc_proof
{
public:
    // pr is the refutation; b_conjuncts are the assertions of partition B
    // (everything else asserted in pr is treated as partition A)
    iuc_proof(ast_manager& m, proof* pr, expr_set& b_conjuncts);

    // access to the underlying proof (no ownership transfer)
    proof* get();

    /*
     * returns whether the expression contains only symbols which occur in B.
     */
    bool only_contains_symbols_b(expr* expr) const;

    // per-node color queries (valid after construction)
    bool is_a_marked(proof* p);
    bool is_b_marked(proof* p);
    bool is_h_marked(proof* p);

    // a proof step is b-pure if it is hypothesis-free and its fact uses
    // only symbols occurring in B
    bool is_b_pure (proof *p)
    {return !is_h_marked (p) && only_contains_symbols_b (m.get_fact (p));}

    /*
     * print dot-representation to file proof.dot
     * use Graphviz's dot with option -Tpdf to convert dot-representation into pdf
     */
    void pp_dot();

    // print Farkas-lemma statistics to the verbose stream
    void print_farkas_stats();
private:
    ast_manager& m;
    proof_ref m_pr;

    ast_mark m_a_mark;  // node derivation uses an A-axiom
    ast_mark m_b_mark;  // node derivation uses a B-axiom
    ast_mark m_h_mark;  // node derivation depends on an open hypothesis

    func_decl_set m_symbols_b; // symbols which occur in any b-asserted formula

    // collect symbols occurring in B
    void collect_symbols_b(expr_set& b_conjuncts);
    // compute for each formula of the proof whether it derives from A and whether it derives from B
    void compute_marks(expr_set& b_conjuncts);

    // NOTE(review): the three members below appear to be unused leftovers —
    // dot printing is delegated to the free function pp_proof_dot; confirm
    // before relying on them.
    void pp_dot_to_stream(std::ofstream& dotstream);
    std::string escape_dot(const std::string &s);
    void post_process_dot(std::string dot_filepath, std::ofstream& dotstream);
};
}
#endif /* IUC_PROOF_H_ */

View file

@ -0,0 +1,535 @@
/*++
Copyright (c) 2017 Arie Gurfinkel
Module Name:
spacer_proof_utils.cpp
Abstract:
Utilities to traverse and manipulate proofs
Author:
Bernhard Gleiss
Arie Gurfinkel
Revision History:
--*/
#include "muz/spacer/spacer_proof_utils.h"
#include "ast/ast_util.h"
#include "ast/ast_pp.h"
#include "ast/proof_checker/proof_checker.h"
#include <unordered_map>
#include "params.h"
#include "muz/spacer/spacer_iuc_proof.h"
namespace spacer {
/*
* ====================================
* methods for proof traversal
* ====================================
*/
// seed the DFS worklist with the root of the proof
ProofIteratorPostOrder::ProofIteratorPostOrder(proof* root, ast_manager& manager) : m(manager)
{m_todo.push_back(root);}

// true as long as next() has not yet returned every reachable node
bool ProofIteratorPostOrder::hasNext()
{return !m_todo.empty();}
/*
* iterative post-order depth-first search (DFS) through the proof DAG
*/
/*
 * iterative post-order depth-first search (DFS) through the proof DAG.
 * Returns the next unvisited node whose premises have all been visited,
 * or nullptr once the whole DAG has been traversed. Each node is
 * returned exactly once (shared sub-proofs are not repeated).
 */
proof* ProofIteratorPostOrder::next()
{
    while (!m_todo.empty()) {
        proof* currentNode = m_todo.back();

        // if we haven't already visited the current unit
        if (!m_visited.is_marked(currentNode)) {
            bool existsUnvisitedParent = false;

            // add unprocessed premises to stack for DFS. If there is at least
            // one unprocessed premise, don't compute the result for
            // currentProof now, but wait until those premises are processed.
            for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i) {
                SASSERT(m.is_proof(currentNode->get_arg(i)));
                proof* premise = to_app(currentNode->get_arg(i));

                // if we haven't visited the current premise yet
                if (!m_visited.is_marked(premise)) {
                    // add it to the stack
                    m_todo.push_back(premise);
                    existsUnvisitedParent = true;
                }
            }

            // if we already visited all parent-inferences, we can visit the
            // inference too
            if (!existsUnvisitedParent) {
                m_visited.mark(currentNode, true);
                m_todo.pop_back();
                return currentNode;
            }
        } else {
            // already emitted (reachable via several paths) — just discard
            m_todo.pop_back();
        }
    }
    // we have already iterated through all inferences
    // (nullptr instead of NULL, consistent with the rest of this file)
    return nullptr;
}
/*
* ====================================
* methods for dot printing
* ====================================
*/
void pp_proof_dot_to_stream(ast_manager& m, std::ofstream& dotstream, proof* pr, iuc_proof* iuc_pr = nullptr);
std::string escape_dot(const std::string &s);
void pp_proof_post_process_dot(std::string dot_filepath, std::ofstream &dotstream);
/*
 * Dump pr in dot format to the hard-coded file "proof.dot".
 * If iuc_pr is non-null it supplies the A/B coloring of the nodes.
 * NOTE(review): the ofstream open is not checked for failure — confirm
 * whether a silent no-op on an unwritable cwd is acceptable here.
 */
void pp_proof_dot(ast_manager& m, proof* pr, iuc_proof* iuc_pr)
{
    // open temporary dot-file
    std::string dotfile_path = "proof.dot";
    std::ofstream dotstream(dotfile_path);

    // dump dot representation to stream
    pp_proof_dot_to_stream(m, dotstream, pr, iuc_pr);

    // post process dot-file, TODO: factor this out to a different place
    pp_proof_post_process_dot(dotfile_path,dotstream);
}
/*
 * Write pr as a Graphviz digraph to dotstream: one box per proof node,
 * labeled with the inference kind and the node's fact, and one edge per
 * premise. If iuc_pr is non-null, nodes are filled red (A-only), blue
 * (B-only) or purple (both) according to its marks.
 */
void pp_proof_dot_to_stream(ast_manager& m, std::ofstream& dotstream, proof* pr, iuc_proof* iuc_pr)
{
    dotstream << "digraph proof { \n";

    // maps z3's (sparse) ast ids to small consecutive ids used as node names
    std::unordered_map<unsigned, unsigned> id_to_small_id;
    unsigned counter = 0;

    // post-order guarantees premises get ids before the nodes that use them
    ProofIteratorPostOrder it2(pr, m);
    while (it2.hasNext())
    {
        proof* currentNode = it2.next();

        SASSERT(id_to_small_id.find(currentNode->get_id()) == id_to_small_id.end());
        id_to_small_id.insert(std::make_pair(currentNode->get_id(), counter));

        std::string color = "white";
        if (iuc_pr != nullptr)
        {
            // red = A-only, blue = B-only, purple = mixed derivation
            if (iuc_pr->is_a_marked(currentNode) && !iuc_pr->is_b_marked(currentNode))
            {
                color = "red";
            }
            else if(iuc_pr->is_b_marked(currentNode) && !iuc_pr->is_a_marked(currentNode))
            {
                color = "blue";
            }
            else if(iuc_pr->is_b_marked(currentNode) && iuc_pr->is_a_marked(currentNode))
            {
                color = "purple";
            }
        }

        // compute label: pretty-print the node's fact without depth limits
        // or term aliasing (UINT_MAX disables both)
        params_ref p;
        p.set_uint("max_depth", 4294967295u);
        p.set_uint("min_alias_size", 4294967295u);

        std::ostringstream label_ostream;
        label_ostream << mk_pp(m.get_fact(currentNode),m,p) << "\n";
        std::string label = escape_dot(label_ostream.str());

        // compute edge-label
        // NOTE(review): despite its name, this string is a prefix of the
        // NODE label (the inference kind), not an edge attribute
        std::string edge_label = "";
        if (m.get_num_parents(currentNode) == 0)
        {
            // leaf: asserted axiom, hypothesis, or theory axiom
            switch (currentNode->get_decl_kind())
            {
            case PR_ASSERTED:
                edge_label = "asserted:";
                break;
            case PR_HYPOTHESIS:
                edge_label = "hyp:";
                color = "grey";
                break;
            default:
                if (currentNode->get_decl_kind() == PR_TH_LEMMA)
                {
                    edge_label = "th_axiom:";

                    func_decl* d = currentNode->get_decl();
                    symbol sym;
                    if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
                        d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
                        d->get_parameter(1).is_symbol(sym) && sym == "farkas")
                    {
                        edge_label = "th_axiom(farkas):";
                    }
                }
                else
                {
                    edge_label = "unknown axiom-type:";
                    break;
                }
            }
        }
        else
        {
            // inner node: lemma, theory lemma or other inference
            if (currentNode->get_decl_kind() == PR_LEMMA)
            {
                edge_label = "lemma:";
            }
            else if (currentNode->get_decl_kind() == PR_TH_LEMMA)
            {
                func_decl* d = currentNode->get_decl();
                symbol sym;
                if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
                    d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
                    d->get_parameter(1).is_symbol(sym) && sym == "farkas")
                {
                    edge_label = "th_lemma(farkas):";
                }
                else
                {
                    edge_label = "th_lemma(other):";
                }
            }
        }

        // generate entry for node in dot-file
        dotstream << "node_" << counter << " "
                  << "["
                  << "shape=box,style=\"filled\","
                  << "label=\"" << edge_label << " " << label << "\", "
                  << "fillcolor=\"" << color << "\""
                  << "]\n";

        // add entry for each edge to that node (premises in reverse order)
        for (unsigned i = m.get_num_parents(currentNode); i > 0 ; --i)
        {
            proof* premise = to_app(currentNode->get_arg(i-1));
            unsigned premise_small_id = id_to_small_id[premise->get_id()];
            dotstream << "node_" << premise_small_id
                      << " -> "
                      << "node_" << counter
                      << ";\n";
        }

        ++counter;
    }
    dotstream << "\n}" << std::endl;
}
/*
 * Escape a string for use inside a double-quoted Graphviz dot label:
 *   - newlines become "\l" (left-justified linebreak in dot)
 *   - double quotes and backslashes are backslash-escaped, since the
 *     caller embeds the result inside label="...". Without this, any fact
 *     whose pretty-printed form contains a quote produced a malformed
 *     dot file.
 */
std::string escape_dot(const std::string &s)
{
    std::string res;
    res.reserve(s.size()); // preallocate (result is usually close to input size)
    for (auto c : s) {
        switch (c) {
        case '\n':
            res.append("\\l");
            break;
        case '"':
            res.append("\\\"");
            break;
        case '\\':
            res.append("\\\\");
            break;
        default:
            res.push_back(c);
        }
    }
    return res;
}
/*
 * Post-process the generated dot file: rename internal predicate-variable
 * names (e.g. "L1_0_0") to human-readable ones by shelling out to sed.
 * NOTE(review): this is an acknowledged hack — the predicate/variable
 * table is hard coded, the system() return values are ignored, the
 * command strings are built by concatenation (no escaping), and
 * "sed -i '.bak'" is BSD/macOS syntax that fails on GNU sed — confirm
 * the intended platforms before relying on it.
 */
void pp_proof_post_process_dot(std::string dot_filepath, std::ofstream &dotstream)
{
    // replace variables in the dotfiles with nicer descriptions (hack: hard coded replacement for now)
    std::vector<std::vector<std::string> > predicates;
    std::vector<std::string> l1 = {"L1","i","n","A"};
    predicates.push_back(l1);
    std::vector<std::string> l2 = {"L2","j","m","B"};
    predicates.push_back(l2);

    for(auto& predicate : predicates)
    {
        std::string predicate_name = predicate[0];
        // predicate[1..] are the display names for argument positions 0..
        for (unsigned i=0; i+1 < predicate.size(); ++i)
        {
            std::string new_name = predicate[i+1];
            // e.g. "L1_0_0" -> "i" and "L1_0_n" -> "i'"
            std::string substring0 = predicate_name + "_" + std::to_string(i) + "_0";
            std::string substringN = predicate_name + "_" + std::to_string(i) + "_n";

            std::string command0 = "sed -i '.bak' 's/" + substring0 + "/" + new_name + "/g' " + dot_filepath;
            verbose_stream() << command0 << std::endl;
            system(command0.c_str());

            std::string commandN = "sed -i '.bak' s/" + substringN + "/" + new_name + "\\'" + "/g " + dot_filepath;
            verbose_stream() << commandN << std::endl;
            system(commandN.c_str());
        }
    }
    verbose_stream() << "end of postprocessing";
}
/*
* ====================================
* methods for reducing hypothesis
* ====================================
*/
/*
 * Proof transformer that eliminates hypotheses: every (hypothesis f) leaf
 * for which a hypothesis-free derivation of f exists elsewhere in the
 * proof is replaced by that derivation, and lemma/unit-resolution steps
 * are rebuilt accordingly. Entry point is operator()(proof_ref&), which
 * rewrites the proof in place.
 */
class reduce_hypotheses {
    ast_manager &m;
    // tracking all created expressions
    expr_ref_vector m_pinned;
    // cache for the transformation (original node -> reduced node)
    obj_map<proof, proof*> m_cache;
    // map from unit literals to their hypotheses-free derivations
    obj_map<expr, proof*> m_units;
    // -- all hypotheses in the proof
    obj_hashtable<expr> m_hyps;
    // marks hypothetical proofs (nodes whose derivation depends on an open hypothesis)
    ast_mark m_hypmark;
    // stack for the iterative (non-recursive) rewriting in reduce()
    ptr_vector<proof> m_todo;

    // clear all per-run state
    void reset()
    {
        m_cache.reset();
        m_units.reset();
        m_hyps.reset();
        m_hypmark.reset();
        m_pinned.reset();
    }

    // mark pr as hypothetical iff one of its premises is (lemma steps
    // discharge all hypotheses); returns the computed mark
    bool compute_mark1(proof *pr)
    {
        bool hyp_mark = false;
        // lemmas clear all hypotheses
        if (!m.is_lemma(pr)) {
            for (unsigned i = 0, sz = m.get_num_parents(pr); i < sz; ++i) {
                if (m_hypmark.is_marked(m.get_parent(pr, i))) {
                    hyp_mark = true;
                    break;
                }
            }
        }
        m_hypmark.mark(pr, hyp_mark);
        return hyp_mark;
    }

    // one post-order pass: compute hypothesis marks for all nodes and
    // record hypothesis-free derivations of facts that occur as hypotheses
    void compute_marks(proof* pr)
    {
        proof *p;
        ProofIteratorPostOrder pit(pr, m);
        while (pit.hasNext()) {
            p = pit.next();
            if (m.is_hypothesis(p)) {
                m_hypmark.mark(p, true);
                m_hyps.insert(m.get_fact(p));
            } else {
                bool hyp_mark = compute_mark1(p);
                // collect units that are hyp-free and are used as hypotheses somewhere
                if (!hyp_mark && m.has_fact(p) && m_hyps.contains(m.get_fact(p)))
                { m_units.insert(m.get_fact(p), p); }
            }
        }
    }

    void find_units(proof *pr)
    {
        // optional. not implemented yet.
    }

    // rewrite the proof rooted at pf bottom-up, placing the reduced proof
    // in out; stops early once a proof of false is produced
    void reduce(proof* pf, proof_ref &out)
    {
        proof *res = NULL;

        m_todo.reset();
        m_todo.push_back(pf);
        ptr_buffer<proof> args;
        bool dirty = false;

        while (!m_todo.empty()) {
            proof *p, *tmp, *pp;
            unsigned todo_sz;

            p = m_todo.back();
            if (m_cache.find(p, tmp)) {
                // already rewritten
                res = tmp;
                m_todo.pop_back();
                continue;
            }

            dirty = false;
            args.reset();
            todo_sz = m_todo.size();
            // gather rewritten premises; queue any that are not done yet
            for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) {
                pp = m.get_parent(p, i);
                if (m_cache.find(pp, tmp)) {
                    args.push_back(tmp);
                    // dirty = some premise was actually changed
                    dirty = dirty || pp != tmp;
                } else {
                    m_todo.push_back(pp);
                }
            }
            // premises pending: revisit p after they are processed
            if (todo_sz < m_todo.size()) { continue; }
            else { m_todo.pop_back(); }

            if (m.is_hypothesis(p)) {
                // hyp: replace by a corresponding unit
                if (m_units.find(m.get_fact(p), tmp)) {
                    res = tmp;
                } else { res = p; }
            }
            else if (!dirty) { res = p; }
            else if (m.is_lemma(p)) {
                // lemma: reduce the premise; remove reduced consequences from conclusion
                SASSERT(args.size() == 1);
                res = mk_lemma_core(args.get(0), m.get_fact(p));
                compute_mark1(res);
            } else if (m.is_unit_resolution(p)) {
                // unit: reduce units; reduce the first premise; rebuild unit resolution
                res = mk_unit_resolution_core(args.size(), args.c_ptr());
                compute_mark1(res);
            } else {
                // other: reduce all premises; reapply
                if (m.has_fact(p)) { args.push_back(to_app(m.get_fact(p))); }
                SASSERT(p->get_decl()->get_arity() == args.size());
                res = m.mk_app(p->get_decl(), args.size(), (expr * const*)args.c_ptr());
                m_pinned.push_back(res);
                compute_mark1(res);
            }

            SASSERT(res);
            m_cache.insert(p, res);

            // a proof of false closes the refutation; no need to continue
            if (m.has_fact(res) && m.is_false(m.get_fact(res))) { break; }
        }

        out = res;
    }

    // returns true if (hypothesis (not a)) would be reduced
    bool is_reduced(expr *a)
    {
        expr_ref e(m);
        if (m.is_not(a)) { e = to_app(a)->get_arg(0); }
        else { e = m.mk_not(a); }

        return m_units.contains(e);
    }

    // rebuild a lemma step over the reduced premise pf, dropping from the
    // clause `fact` every literal whose negated hypothesis was reduced
    proof *mk_lemma_core(proof *pf, expr *fact)
    {
        ptr_buffer<expr> args;
        expr_ref lemma(m);

        if (m.is_or(fact)) {
            for (unsigned i = 0, sz = to_app(fact)->get_num_args(); i < sz; ++i) {
                expr *a = to_app(fact)->get_arg(i);
                if (!is_reduced(a))
                { args.push_back(a); }
            }
        } else if (!is_reduced(fact))
        { args.push_back(fact); }

        // all literals discharged: the premise itself is the proof
        if (args.size() == 0) { return pf; }
        else if (args.size() == 1) {
            lemma = args.get(0);
        } else {
            lemma = m.mk_or(args.size(), args.c_ptr());
        }
        proof* res = m.mk_lemma(pf, lemma);
        m_pinned.push_back(res);

        // XXX this is wrong because lemma is a proof and m_hyps only
        // XXX contains expressions.
        // XXX Not sure this is ever needed.
        if (m_hyps.contains(lemma)) {
            m_units.insert(lemma, res);
        }
        return res;
    }

    // rebuild a unit-resolution step from the (already reduced) args:
    // args[0] is the clause, args[1..] the candidate units; units that no
    // longer match a literal of the clause are dropped, surviving literals
    // form the new resolvent
    proof *mk_unit_resolution_core(unsigned num_args, proof* const *args)
    {
        ptr_buffer<proof> pf_args;
        pf_args.push_back(args [0]);

        // flatten the clause being resolved into its literals
        app *cls_fact = to_app(m.get_fact(args[0]));
        ptr_buffer<expr> cls;
        if (m.is_or(cls_fact)) {
            for (unsigned i = 0, sz = cls_fact->get_num_args(); i < sz; ++i)
            { cls.push_back(cls_fact->get_arg(i)); }
        } else { cls.push_back(cls_fact); }

        // construct new resolvent
        ptr_buffer<expr> new_fact_cls;
        bool found;
        // XXX quadratic
        for (unsigned i = 0, sz = cls.size(); i < sz; ++i) {
            found = false;
            for (unsigned j = 1; j < num_args; ++j) {
                if (m.is_complement(cls.get(i), m.get_fact(args [j]))) {
                    found = true;
                    pf_args.push_back(args [j]);
                    break;
                }
            }
            if (!found) {
                new_fact_cls.push_back(cls.get(i));
            }
        }

        // every clause literal is either resolved away or survives
        SASSERT(new_fact_cls.size() + pf_args.size() - 1 == cls.size());
        expr_ref new_fact(m);
        new_fact = mk_or(m, new_fact_cls.size(), new_fact_cls.c_ptr());

        // create new proof step
        proof *res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr(), new_fact);
        m_pinned.push_back(res);
        return res;
    }

    // reduce all units, if any unit reduces to false return true and put its proof into out
    bool reduce_units(proof_ref &out)
    {
        proof_ref res(m);
        for (auto entry : m_units) {
            reduce(entry.get_value(), res);
            if (m.is_false(m.get_fact(res))) {
                out = res;
                return true;
            }
            res.reset();
        }
        return false;
    }

public:
    reduce_hypotheses(ast_manager &m) : m(m), m_pinned(m) {}

    // rewrite pr in place into a proof with reduced hypotheses
    void operator()(proof_ref &pr)
    {
        compute_marks(pr);
        if (!reduce_units(pr)) {
            reduce(pr.get(), pr);
        }
        reset();
    }
};
// Convenience entry point: eliminate hypotheses from pr in place using the
// reduce_hypotheses transformer. In debug builds the resulting proof is
// re-validated with proof_checker.
void reduce_hypotheses(proof_ref &pr)
{
    ast_manager &manager = pr.get_manager();
    class reduce_hypotheses reducer(manager);
    reducer(pr);
    DEBUG_CODE(proof_checker pc(manager);
               expr_ref_vector side(manager);
               SASSERT(pc.check(pr, side));
              );
}
}

View file

@ -0,0 +1,53 @@
/*++
Copyright (c) 2017 Arie Gurfinkel
Module Name:
spacer_proof_utils.cpp
Abstract:
Utilities to traverse and manipulate proofs
Author:
Bernhard Gleiss
Arie Gurfinkel
Revision History:
--*/
#ifndef _SPACER_PROOF_UTILS_H_
#define _SPACER_PROOF_UTILS_H_
#include "ast/ast.h"
namespace spacer {
bool is_farkas_lemma(ast_manager& m, proof* pr);
/*
* iterator, which traverses the proof in depth-first post-order.
*/
class ProofIteratorPostOrder {
public:
    ProofIteratorPostOrder(proof* refutation, ast_manager& manager);
    // true iff there are proof nodes not yet returned by next()
    bool hasNext();
    // returns the next node in post-order (premises before conclusions);
    // each node of the DAG is returned exactly once
    proof* next();
private:
    ptr_vector<proof> m_todo; // explicit DFS worklist
    ast_mark m_visited; // the proof nodes we have already visited
    ast_manager& m;
};
/*
* prints the proof pr in dot representation to the file proof.dot
* if iuc_pr is not nullptr, then it is queried for coloring partitions
*/
class iuc_proof;
void pp_proof_dot(ast_manager& m, proof* pr, iuc_proof* iuc_pr = nullptr);
void reduce_hypotheses(proof_ref &pr);
}
#endif

View file

@ -19,16 +19,18 @@ Revision History:
#include "muz/spacer/spacer_unsat_core_learner.h"
#include "muz/spacer/spacer_unsat_core_plugin.h"
#include "muz/spacer/spacer_iuc_proof.h"
#include "ast/for_each_expr.h"
#include "muz/spacer/spacer_util.h"
namespace spacer
{
unsat_core_learner::~unsat_core_learner()
{
std::for_each(m_plugins.begin(), m_plugins.end(), delete_proc<unsat_core_plugin>());
}
void unsat_core_learner::register_plugin(unsat_core_plugin* plugin)
@ -36,60 +38,16 @@ void unsat_core_learner::register_plugin(unsat_core_plugin* plugin)
m_plugins.push_back(plugin);
}
void unsat_core_learner::compute_unsat_core(proof *root, expr_set& asserted_b, expr_ref_vector& unsat_core)
void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core)
{
// transform proof in order to get a proof which is better suited for unsat-core-extraction
proof_ref pr(root, m);
reduce_hypotheses(pr);
STRACE("spacer.unsat_core_learner",
verbose_stream() << "Reduced proof:\n" << mk_ismt2_pp(pr, m) << "\n";
);
// compute symbols occurring in B
collect_symbols_b(asserted_b);
// traverse proof
proof_post_order it(root, m);
proof_post_order it(m_pr.get(), m);
while (it.hasNext())
{
proof* currentNode = it.next();
if (m.get_num_parents(currentNode) == 0)
if (m.get_num_parents(currentNode) > 0)
{
switch(currentNode->get_decl_kind())
{
case PR_ASSERTED: // currentNode is an axiom
{
if (asserted_b.contains(m.get_fact(currentNode)))
{
m_b_mark.mark(currentNode, true);
}
else
{
m_a_mark.mark(currentNode, true);
}
break;
}
// currentNode is a hypothesis:
case PR_HYPOTHESIS:
{
m_h_mark.mark(currentNode, true);
break;
}
default:
{
break;
}
}
}
else
{
// collect from parents whether derivation of current node contains A-axioms, B-axioms and hypothesis
bool need_to_mark_a = false;
bool need_to_mark_b = false;
bool need_to_mark_h = false;
bool need_to_mark_closed = true;
for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i)
@ -97,31 +55,18 @@ void unsat_core_learner::compute_unsat_core(proof *root, expr_set& asserted_b, e
SASSERT(m.is_proof(currentNode->get_arg(i)));
proof* premise = to_app(currentNode->get_arg(i));
need_to_mark_a = need_to_mark_a || m_a_mark.is_marked(premise);
need_to_mark_b = need_to_mark_b || m_b_mark.is_marked(premise);
need_to_mark_h = need_to_mark_h || m_h_mark.is_marked(premise);
need_to_mark_closed = need_to_mark_closed && (!m_b_mark.is_marked(premise) || m_closed.is_marked(premise));
need_to_mark_closed = need_to_mark_closed && (!m_pr.is_b_marked(premise) || m_closed.is_marked(premise));
}
// if current node is application of lemma, we know that all hypothesis are removed
if(currentNode->get_decl_kind() == PR_LEMMA)
{
need_to_mark_h = false;
}
// save results
m_a_mark.mark(currentNode, need_to_mark_a);
m_b_mark.mark(currentNode, need_to_mark_b);
m_h_mark.mark(currentNode, need_to_mark_h);
// save result
m_closed.mark(currentNode, need_to_mark_closed);
}
// we have now collected all necessary information, so we can visit the node
// if the node mixes A-reasoning and B-reasoning and contains non-closed premises
if (m_a_mark.is_marked(currentNode) && m_b_mark.is_marked(currentNode) && !m_closed.is_marked(currentNode))
if (m_pr.is_a_marked(currentNode) && m_pr.is_b_marked(currentNode) && !m_closed.is_marked(currentNode))
{
compute_partial_core(currentNode); // then we need to compute a partial core
// SASSERT(!(m_a_mark.is_marked(currentNode) && m_b_mark.is_marked(currentNode)) || m_closed.is_marked(currentNode)); TODO: doesn't hold anymore if we do the mincut-thing!
}
}
@ -133,170 +78,14 @@ void unsat_core_learner::compute_unsat_core(proof *root, expr_set& asserted_b, e
// count both number of all Farkas lemmas and number of Farkas lemmas in the cut
if (m_print_farkas_stats)
{
unsigned farkas_counter = 0;
unsigned farkas_counter2 = 0;
ProofIteratorPostOrder it3(root, m);
while (it3.hasNext())
{
proof* currentNode = it3.next();
// if node is theory lemma
if (currentNode->get_decl_kind() == PR_TH_LEMMA)
{
func_decl* d = currentNode->get_decl();
symbol sym;
// and theory lemma is Farkas lemma
if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
d->get_parameter(1).is_symbol(sym) && sym == "farkas")
{
farkas_counter++;
// check whether farkas lemma is to be interpolated (could potentially miss farkas lemmas, which are interpolated, because we potentially don't want to use the lowest cut)
bool has_no_mixed_parents = true;
for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i)
{
proof* premise = to_app(currentNode->get_arg(i));
if (is_a_marked(premise) && is_b_marked(premise))
{
has_no_mixed_parents = false;
m_pr.print_farkas_stats();
}
}
if (has_no_mixed_parents && is_a_marked(currentNode) && is_b_marked(currentNode))
{
farkas_counter2++;
}
}
}
}
verbose_stream() << "\nThis proof contains " << farkas_counter << " Farkas lemmas. " << farkas_counter2 << " Farkas lemmas participate in the lowest cut\n";
}
//TODO remove this
if(m_iuc_debug_proof)
{
// print proof for debugging
verbose_stream() << "Proof:\n";
std::unordered_map<unsigned, unsigned> id_to_small_id;
unsigned counter = 0;
proof_post_order it2(root, m);
while (it2.hasNext())
{
proof* currentNode = it2.next();
SASSERT(id_to_small_id.find(currentNode->get_id()) == id_to_small_id.end());
id_to_small_id.insert(std::make_pair(currentNode->get_id(), counter));
verbose_stream() << counter << " ";
verbose_stream() << "[";
if (is_a_marked(currentNode))
{
verbose_stream() << "a";
}
if (is_b_marked(currentNode))
{
verbose_stream() << "b";
}
if (is_h_marked(currentNode))
{
verbose_stream() << "h";
}
if (is_closed(currentNode))
{
verbose_stream() << "c";
}
verbose_stream() << "] ";
if (m.get_num_parents(currentNode) == 0)
{
switch (currentNode->get_decl_kind())
{
case PR_ASSERTED:
verbose_stream() << "asserted";
break;
case PR_HYPOTHESIS:
verbose_stream() << "hypothesis";
break;
default:
if (currentNode->get_decl_kind() == PR_TH_LEMMA)
{
verbose_stream() << "th_axiom";
func_decl* d = currentNode->get_decl();
symbol sym;
if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
d->get_parameter(1).is_symbol(sym) && sym == "farkas")
{
verbose_stream() << "(farkas)";
}
}
else
{
verbose_stream() << "unknown axiom-type";
break;
}
}
}
else
{
if (currentNode->get_decl_kind() == PR_LEMMA)
{
verbose_stream() << "lemma";
}
else if (currentNode->get_decl_kind() == PR_TH_LEMMA)
{
verbose_stream() << "th_lemma";
func_decl* d = currentNode->get_decl();
symbol sym;
if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
d->get_parameter(1).is_symbol(sym) && sym == "farkas")
{
verbose_stream() << "(farkas)";
}
else
{
verbose_stream() << "(other)";
}
}
else
{
verbose_stream() << "step";
}
verbose_stream() << " from ";
for (unsigned i = m.get_num_parents(currentNode); i > 0 ; --i)
{
proof* premise = to_app(currentNode->get_arg(i));
unsigned premise_small_id = id_to_small_id[premise->get_id()];
if (i > 1)
{
verbose_stream() << premise_small_id << ", ";
}
else
{
verbose_stream() << premise_small_id;
}
}
}
// if (currentNode->get_decl_kind() == PR_TH_LEMMA || (is_a_marked(currentNode) && is_b_marked(currentNode)) || is_h_marked(currentNode) || (!is_a_marked(currentNode) && !is_b_marked(currentNode)))
if (false)
{
verbose_stream() << "\n";
}
else
{
verbose_stream() << ": " << mk_pp(m.get_fact(currentNode), m) << "\n";
}
++counter;
}
}
verbose_stream() << std::endl;
// move all lemmas into vector
for (expr* const* it = m_unsat_core.begin(); it != m_unsat_core.end(); ++it)
{
@ -322,19 +111,6 @@ void unsat_core_learner::finalize()
}
}
bool unsat_core_learner::is_a_marked(proof* p)
{
return m_a_mark.is_marked(p);
}
bool unsat_core_learner::is_b_marked(proof* p)
{
return m_b_mark.is_marked(p);
}
bool unsat_core_learner::is_h_marked(proof* p)
{
return m_h_mark.is_marked(p);
}
bool unsat_core_learner::is_closed(proof*p)
{
return m_closed.is_marked(p);
@ -344,75 +120,13 @@ void unsat_core_learner::set_closed(proof* p, bool value)
m_closed.mark(p, value);
}
void unsat_core_learner::add_lemma_to_core(expr* lemma)
bool unsat_core_learner::is_b_open(proof *p)
{
m_unsat_core.push_back(lemma);
return m_pr.is_b_marked(p) && !is_closed (p);
}
class collect_pure_proc {
func_decl_set& m_symbs;
public:
collect_pure_proc(func_decl_set& s):m_symbs(s) {}
void operator()(app* a) {
if (a->get_family_id() == null_family_id) {
m_symbs.insert(a->get_decl());
}
}
void operator()(var*) {}
void operator()(quantifier*) {}
};
void unsat_core_learner::collect_symbols_b(const expr_set& axioms_b)
void unsat_core_learner::add_lemma_to_core(expr* lemma)
{
expr_mark visited;
collect_pure_proc proc(m_symbols_b);
for (expr_set::iterator it = axioms_b.begin(); it != axioms_b.end(); ++it)
{
for_each_expr(proc, visited, *it);
m_unsat_core.push_back(lemma);
}
}
class is_pure_expr_proc {
func_decl_set const& m_symbs;
array_util m_au;
public:
struct non_pure {};
is_pure_expr_proc(func_decl_set const& s, ast_manager& m):
m_symbs(s),
m_au (m)
{}
void operator()(app* a) {
if (a->get_family_id() == null_family_id) {
if (!m_symbs.contains(a->get_decl())) {
throw non_pure();
}
}
else if (a->get_family_id () == m_au.get_family_id () &&
a->is_app_of (a->get_family_id (), OP_ARRAY_EXT)) {
throw non_pure();
}
}
void operator()(var*) {}
void operator()(quantifier*) {}
};
bool unsat_core_learner::only_contains_symbols_b(expr* expr) const
{
is_pure_expr_proc proc(m_symbols_b, m);
try {
for_each_expr(proc, expr);
}
catch (is_pure_expr_proc::non_pure)
{
return false;
}
return true;
}
}

View file

@ -20,21 +20,23 @@ Revision History:
#include "ast/ast.h"
#include "muz/spacer/spacer_util.h"
#include "ast/proofs/proof_utils.h"
#include "muz/spacer/spacer_proof_utils.h"
namespace spacer {
class unsat_core_plugin;
class iuc_proof;
class unsat_core_learner
{
typedef obj_hashtable<expr> expr_set;
public:
unsat_core_learner(ast_manager& m, bool print_farkas_stats = false, bool iuc_debug_proof = false) : m(m), m_unsat_core(m), m_print_farkas_stats(print_farkas_stats), m_iuc_debug_proof(iuc_debug_proof) {};
unsat_core_learner(ast_manager& m, iuc_proof& pr, bool print_farkas_stats = false, bool iuc_debug_proof = false) : m(m), m_pr(pr), m_unsat_core(m), m_print_farkas_stats(print_farkas_stats), m_iuc_debug_proof(iuc_debug_proof) {};
virtual ~unsat_core_learner();
ast_manager& m;
iuc_proof& m_pr;
/*
* register a plugin for computation of partial unsat cores
@ -45,48 +47,29 @@ namespace spacer {
/*
* compute unsat core using the registered unsat-core-plugins
*/
void compute_unsat_core(proof* root, expr_set& asserted_b, expr_ref_vector& unsat_core);
void compute_unsat_core(expr_ref_vector& unsat_core);
/*
* getter/setter methods for data structures exposed to plugins
* the following invariants can be assumed and need to be maintained by the plugins:
* - a node is a-marked iff it is derived using at least one asserted proof step from A.
* - a node is b-marked iff its derivation contains no asserted proof steps from A and
* no hypothesis (with the additional complication that lemmas conceptually remove hypothesis)
* - a node is h-marked, iff it is derived using at least one hypothesis
* the following invariant can be assumed and need to be maintained by the plugins:
* - a node is closed, iff it has already been interpolated, i.e. its contribution is
* already covered by the unsat-core.
*/
bool is_a_marked(proof* p);
bool is_b_marked(proof* p);
bool is_h_marked(proof* p);
bool is_closed(proof* p);
void set_closed(proof* p, bool value);
bool is_b_open (proof *p);
/*
* adds a lemma to the unsat core
*/
void add_lemma_to_core(expr* lemma);
/*
* helper method, which can be used by plugins
* returns true iff all symbols of expr occur in some b-asserted formula.
* must only be called after a call to collect_symbols_b.
*/
bool only_contains_symbols_b(expr* expr) const;
bool is_b_pure (proof *p)
{return !is_h_marked (p) && only_contains_symbols_b (m.get_fact (p));}
bool is_b_open (proof *p)
{ return is_b_marked (p) && !is_closed (p); }
private:
ptr_vector<unsat_core_plugin> m_plugins;
func_decl_set m_symbols_b; // symbols, which occur in any b-asserted formula
void collect_symbols_b(const expr_set& axioms_b);
ast_mark m_a_mark;
ast_mark m_b_mark;
ast_mark m_h_mark;
ast_mark m_closed;
expr_ref_vector m_unsat_core; // collects the lemmas of the unsat-core, will at the end be inserted into unsat_core.

View file

@ -30,6 +30,7 @@ Revision History:
#include "muz/spacer/spacer_matrix.h"
#include "muz/spacer/spacer_unsat_core_plugin.h"
#include "muz/spacer/spacer_unsat_core_learner.h"
#include "muz/spacer/spacer_iuc_proof.h"
namespace spacer
{
@ -37,8 +38,8 @@ namespace spacer
void unsat_core_plugin_lemma::compute_partial_core(proof* step)
{
SASSERT(m_learner.is_a_marked(step));
SASSERT(m_learner.is_b_marked(step));
SASSERT(m_learner.m_pr.is_a_marked(step));
SASSERT(m_learner.m_pr.is_b_marked(step));
for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
{
@ -48,7 +49,7 @@ void unsat_core_plugin_lemma::compute_partial_core(proof* step)
if (m_learner.is_b_open (premise))
{
// by IH, premises that are AB marked are already closed
SASSERT(!m_learner.is_a_marked(premise));
SASSERT(!m_learner.m_pr.is_a_marked(premise));
add_lowest_split_to_core(premise);
}
}
@ -75,13 +76,13 @@ void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const
// the step is b-marked and not closed.
// by I.H. the step must be already visited
// so if it is also a-marked, it must be closed
SASSERT(m_learner.is_b_marked(pf));
SASSERT(!m_learner.is_a_marked(pf));
SASSERT(m_learner.m_pr.is_b_marked(pf));
SASSERT(!m_learner.m_pr.is_a_marked(pf));
// the current step needs to be interpolated:
expr* fact = m_learner.m.get_fact(pf);
// if we trust the current step and we are able to use it
if (m_learner.is_b_pure (pf) &&
if (m_learner.m_pr.is_b_pure (pf) &&
(m.is_asserted(pf) || is_literal(m, fact)))
{
// just add it to the core
@ -109,8 +110,8 @@ void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const
void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step)
{
ast_manager &m = m_learner.m;
SASSERT(m_learner.is_a_marked(step));
SASSERT(m_learner.is_b_marked(step));
SASSERT(m_learner.m_pr.is_a_marked(step));
SASSERT(m_learner.m_pr.is_b_marked(step));
// XXX this assertion should be true so there is no need to check for it
SASSERT (!m_learner.is_closed (step));
func_decl* d = step->get_decl();
@ -162,7 +163,7 @@ void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step)
rational coef;
VERIFY(params[i].is_rational(coef));
bool b_pure = m_learner.is_b_pure (prem);
bool b_pure = m_learner.m_pr.is_b_pure (prem);
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n";
}
);
@ -176,9 +177,9 @@ void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step)
if (m_learner.is_b_open (premise))
{
SASSERT(!m_learner.is_a_marked(premise));
SASSERT(!m_learner.m_pr.is_a_marked(premise));
if (m_learner.is_b_pure (step))
if (m_learner.m_pr.is_b_pure (step))
{
if (!m_use_constant_from_a)
{
@ -287,8 +288,8 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
void unsat_core_plugin_farkas_lemma_optimized::compute_partial_core(proof* step)
{
SASSERT(m_learner.is_a_marked(step));
SASSERT(m_learner.is_b_marked(step));
SASSERT(m_learner.m_pr.is_a_marked(step));
SASSERT(m_learner.m_pr.is_b_marked(step));
func_decl* d = step->get_decl();
symbol sym;
@ -315,7 +316,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
rational coef;
VERIFY(params[i].is_rational(coef));
bool b_pure = m_learner.is_b_pure (prem);
bool b_pure = m_learner.m_pr.is_b_pure (prem);
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n";
}
);
@ -326,11 +327,11 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
proof * premise = m.get_parent (step, i);
if (m_learner.is_b_marked(premise) && !m_learner.is_closed(premise))
if (m_learner.m_pr.is_b_marked(premise) && !m_learner.is_closed(premise))
{
SASSERT(!m_learner.is_a_marked(premise));
SASSERT(!m_learner.m_pr.is_a_marked(premise));
if (m_learner.only_contains_symbols_b(m_learner.m.get_fact(premise)) && !m_learner.is_h_marked(premise))
if (m_learner.m_pr.only_contains_symbols_b(m_learner.m.get_fact(premise)) && !m_learner.m_pr.is_h_marked(premise))
{
rational coefficient;
VERIFY(params[i].is_rational(coefficient));
@ -603,8 +604,8 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
{
ptr_vector<proof> todo;
SASSERT(m_learner.is_a_marked(step));
SASSERT(m_learner.is_b_marked(step));
SASSERT(m_learner.m_pr.is_a_marked(step));
SASSERT(m_learner.m_pr.is_b_marked(step));
SASSERT(m.get_num_parents(step) > 0);
SASSERT(!m_learner.is_closed(step));
todo.push_back(step);
@ -641,7 +642,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
{
proof* premise = m.get_parent (step, i);
{
if (m_learner.is_b_marked(premise))
if (m_learner.m_pr.is_b_marked(premise))
{
todo_subproof.push_back(premise);
}
@ -655,19 +656,19 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
// if we need to deal with the node
if (!m_learner.is_closed(current))
{
SASSERT(!m_learner.is_a_marked(current)); // by I.H. the step must be already visited
SASSERT(!m_learner.m_pr.is_a_marked(current)); // by I.H. the step must be already visited
// and the current step needs to be interpolated:
if (m_learner.is_b_marked(current))
if (m_learner.m_pr.is_b_marked(current))
{
// if we trust the current step and we are able to use it
if (m_learner.is_b_pure (current) &&
if (m_learner.m_pr.is_b_pure (current) &&
(m.is_asserted(current) ||
is_literal(m, m.get_fact(current))))
{
// we found a leaf of the subproof, so
// 1) we add corresponding edges
if (m_learner.is_a_marked(step))
if (m_learner.m_pr.is_a_marked(step))
{
add_edge(nullptr, current); // current is sink
}