mirror of https://github.com/Z3Prover/z3
working on generalizing post-image
Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
This commit is contained in:
parent 4db8db7484
commit 2d6b3fa284
@@ -249,6 +249,7 @@ namespace pdr {
    farkas_learner::farkas_learner(smt_params& params, ast_manager& outer_mgr)
        : m_proof_params(get_proof_params(params)),
          m_pr(PROOF_MODE),
          m_combine_farkas_coefficients(true),
          p2o(m_pr, outer_mgr),
          o2p(outer_mgr, m_pr)
    {
@@ -412,11 +413,17 @@ namespace pdr {
    void farkas_learner::combine_constraints(unsigned n, app * const * lits, rational const * coeffs, expr_ref& res)
    {
        ast_manager& m = res.get_manager();
        constr res_c(m);
        for(unsigned i = 0; i < n; ++i) {
            res_c.add(coeffs[i], lits[i]);
        if (m_combine_farkas_coefficients) {
            constr res_c(m);
            for(unsigned i = 0; i < n; ++i) {
                res_c.add(coeffs[i], lits[i]);
            }
            res_c.get(res);
        }
        else {
            bool_rewriter rw(m);
            rw.mk_or(n, (expr*const*)(lits), res);
        }
        res_c.get(res);
    }

    class farkas_learner::constant_replacer_cfg : public default_rewriter_cfg
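As a minimal numeric sketch of the combination performed in the m_combine_farkas_coefficients branch above: each theory-lemma literal is scaled by its (non-negative) Farkas coefficient and the scaled inequalities are summed into a single implied inequality, which appears to be what the constr helper does symbolically on Z3 terms. The standalone C++ below shows the same arithmetic on plain data, assuming every literal is a linear inequality a·x <= b; Ineq and farkas_combine are illustrative names of mine, not Z3 API.

    // Sketch only: numeric analogue of a Farkas combination.
    // Each Ineq represents  a[0]*x0 + a[1]*x1 + ... <= b.
    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Ineq {
        std::vector<double> a;
        double b;
    };

    // Scale each inequality by a non-negative coefficient and sum;
    // the result is implied by the conjunction of the inputs.
    static Ineq farkas_combine(std::vector<Ineq> const& lits,
                               std::vector<double> const& coeffs) {
        assert(!lits.empty() && lits.size() == coeffs.size());
        Ineq res{std::vector<double>(lits[0].a.size(), 0.0), 0.0};
        for (size_t i = 0; i < lits.size(); ++i) {
            assert(coeffs[i] >= 0.0);   // Farkas coefficients must be non-negative
            for (size_t j = 0; j < res.a.size(); ++j) {
                res.a[j] += coeffs[i] * lits[i].a[j];
            }
            res.b += coeffs[i] * lits[i].b;
        }
        return res;
    }

    int main() {
        // x <= 1 and y <= 2, each with coefficient 1, imply x + y <= 3.
        std::vector<Ineq> lits = { { {1, 0}, 1 }, { {0, 1}, 2 } };
        Ineq r = farkas_combine(lits, {1.0, 1.0});
        std::printf("%gx + %gy <= %g\n", r.a[0], r.a[1], r.b);
        return 0;
    }

The false branch, by contrast, keeps the literals separate and returns their disjunction via bool_rewriter::mk_or, which appears to be what get_consequences (further down in this commit) relies on when it switches the flag off.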
@@ -694,7 +701,7 @@ namespace pdr {
                  tout << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n";
              }
              tout << mk_pp(m.get_fact(p), m) << "\n";
              );
        );

        // NB. Taking 'abs' of coefficients is a workaround.
        // The Farkas coefficient extraction in arith_core must be wrong.
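Taking absolute values can only be a workaround because Farkas combinations are sound only for non-negative multipliers, so a negative coefficient coming out of the proof signals an extraction problem rather than a usable value. For example,

\[
(-1)\cdot(x \le 1) \;=\; -x \le -1 \;\equiv\; x \ge 1,
\]

which is not a consequence of x <= 1; flipping the sign keeps each term sound but masks whatever produced the wrong coefficient.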
@@ -753,6 +760,13 @@ namespace pdr {
        simplify_lemmas(lemmas);
    }

    void farkas_learner::get_consequences(proof* root, expr_set const& bs, expr_ref_vector& consequences) {
        TRACE("farkas_learner", tout << "get consequences\n";);
        m_combine_farkas_coefficients = false;
        get_lemmas(root, bs, consequences);
        m_combine_farkas_coefficients = true;
    }

    void farkas_learner::get_asserted(proof* p, expr_set const& bs, ast_mark& b_closed, obj_hashtable<expr>& lemma_set, expr_ref_vector& lemmas) {
        ast_manager& m = lemmas.get_manager();
        ast_mark visited;

@@ -43,6 +43,12 @@ class farkas_learner {
    ast_manager m_pr;
    scoped_ptr<smt::kernel> m_ctx;

    //
    // true:  produce a combined constraint by applying Farkas coefficients.
    // false: produce a conjunction of the negated literals from the theory lemmas.
    //
    bool m_combine_farkas_coefficients;


    static smt_params get_proof_params(smt_params& orig_params);

@@ -92,6 +98,18 @@ public:
    */
    void get_lemmas(proof* root, expr_set const& bs, expr_ref_vector& lemmas);

    /**
       Traverse a proof and retrieve consequences of A that are used to establish ~B.
       The assumption is that:

           A => \/ ~consequences[i]      and      \/ ~consequences[i] => ~B

       i.e., the second implication can be rewritten as:

           B => /\ consequences[i]
    */
    void get_consequences(proof* root, expr_set const& bs, expr_ref_vector& consequences);

    /**
       \brief Simplify lemmas using subsumption.
    */

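The rewriting step in the comment above is contraposition followed by De Morgan; writing c_i for consequences[i]:

\[
\Big(\bigvee_i \lnot c_i\Big) \Rightarrow \lnot B
\;\;\Longleftrightarrow\;\;
B \Rightarrow \lnot\Big(\bigvee_i \lnot c_i\Big)
\;\;\Longleftrightarrow\;\;
B \Rightarrow \bigwedge_i c_i .
\]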
@@ -159,6 +159,7 @@ namespace pdr {
    }

    void core_convex_hull_generalizer::operator()(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
        // method3(n, core, uses_level, new_cores);
        method1(n, core, uses_level, new_cores);
    }

@@ -187,7 +188,7 @@ namespace pdr {
            new_cores.push_back(std::make_pair(core, uses_level));
            return;
        }
        add_variables(n, eqs);
        add_variables(n, 2, eqs);
        if (!mk_convex(core, 0, conv1)) {
            new_cores.push_back(std::make_pair(core, uses_level));
            IF_VERBOSE(0, verbose_stream() << "Non-convex: " << mk_pp(pm.mk_and(core), m) << "\n";);
@@ -257,7 +258,7 @@ namespace pdr {
            IF_VERBOSE(0, verbose_stream() << "unexpected result from satisfiability check\n";);
            return;
        }
        add_variables(n, conv1);
        add_variables(n, 2, conv1);
        model_ref mdl;
        ctx.get_model(mdl);

@@ -267,7 +268,7 @@ namespace pdr {
            expr* left, *right;
            func_decl* fn0 = n.pt().sig(i);
            func_decl* fn1 = pm.o2n(fn0, 0);
            if (m_left.find(fn1, left) && m_right.find(fn1, right)) {
            if (m_vars[0].find(fn1, left) && m_vars[1].find(fn1, right)) {
                expr_ref val(m);
                mdl->eval(fn1, val);
                if (val) {
@@ -306,34 +307,84 @@ namespace pdr {
            }
        }

    void core_convex_hull_generalizer::add_variables(model_node& n, expr_ref_vector& eqs) {
    /*
       Extract the lemmas from the transition relation that were used to establish unsatisfiability.
       Take convex closures of combinations of these lemmas.
    */
    void core_convex_hull_generalizer::method3(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
        TRACE("dl", tout << "method: generalize consequences of F(R)\n";
              for (unsigned i = 0; i < core.size(); ++i) {
                  tout << "B:" << mk_pp(core[i], m) << "\n";
              });
        manager& pm = n.pt().get_pdr_manager();
        if (!m_left.contains(n.pt().head())) {
            expr_ref left(m), right(m);
            m_left.insert(n.pt().head(), 0);
        bool uses_level1;
        expr_ref_vector core1(m);
        core1.append(core);
        obj_hashtable<expr> bs;
        for (unsigned i = 0; i < core.size(); ++i) {
            bs.insert(core[i]);
        }
        expr_ref_vector consequences(m);
        {
            n.pt().get_solver().set_consequences(&consequences);
            pred_transformer::scoped_farkas sf (n.pt(), true);
            VERIFY(l_false == n.pt().is_reachable(n, &core1, uses_level1));
            n.pt().get_solver().set_consequences(0);
        }
        IF_VERBOSE(0,
                   verbose_stream() << "Consequences: " << consequences.size() << "\n";
                   for (unsigned i = 0; i < consequences.size(); ++i) {
                       verbose_stream() << mk_pp(consequences[i].get(), m) << "\n";
                   }
                   verbose_stream() << "core: " << core1.size() << "\n";
                   for (unsigned i = 0; i < core1.size(); ++i) {
                       verbose_stream() << mk_pp(core1[i].get(), m) << "\n";
                   });

        // now create the convex closure of the consequences:
        expr_ref_vector conv(m);
        for (unsigned i = 0; i < consequences.size(); ++i) {
            if (m_sigma.size() == i) {
                m_sigma.push_back(m.mk_fresh_const("sigma", a.mk_real()));
            }
            conv.push_back(a.mk_ge(m_sigma[i].get(), a.mk_numeral(rational(0), a.mk_real())));
            ;; // mk_convex
        }
    }

    void core_convex_hull_generalizer::add_variables(model_node& n, unsigned num_vars, expr_ref_vector& eqs) {
        manager& pm = n.pt().get_pdr_manager();
        if (m_vars.size() < num_vars) {
            m_vars.resize(num_vars);
        }
        if (!m_vars[0].contains(n.pt().head())) {
            expr_ref var(m);
            m_vars[0].insert(n.pt().head(), 0);
            unsigned sz = n.pt().sig_size();
            for (unsigned i = 0; i < sz; ++i) {
                func_decl* fn0 = n.pt().sig(i);
                sort* srt = fn0->get_range();
                if (a.is_int_real(srt)) {
                    func_decl* fn1 = pm.o2n(fn0, 0);
                    left = m.mk_fresh_const(fn1->get_name().str().c_str(), srt);
                    right = m.mk_fresh_const(fn1->get_name().str().c_str(), srt);
                    m_left.insert(fn1, left);
                    m_right.insert(fn1, right);
                    m_trail.push_back(left);
                    m_trail.push_back(right);
                    for (unsigned j = 0; j < num_vars; ++j) {
                        var = m.mk_fresh_const(fn1->get_name().str().c_str(), srt);
                        m_vars[j].insert(fn1, var);
                        m_trail.push_back(var);
                    }
                }
            }
        }
        unsigned sz = n.pt().sig_size();
        for (unsigned i = 0; i < sz; ++i) {
            expr* left, *right;
            expr* var;
            ptr_vector<expr> vars;
            func_decl* fn0 = n.pt().sig(i);
            func_decl* fn1 = pm.o2n(fn0, 0);
            if (m_left.find(fn1, left) && m_right.find(fn1, right)) {
                eqs.push_back(m.mk_eq(m.mk_const(fn1), a.mk_add(left, right)));
                for (unsigned j = 0; j < num_vars; ++j) {
                    VERIFY (m_vars[j].find(fn1, var));
                    vars.push_back(var);
                }
                eqs.push_back(m.mk_eq(m.mk_const(fn1), a.mk_add(num_vars, vars.c_ptr())));
            }
        }
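The sigma variables introduced in method3 and the equations built by add_variables (each current-state constant set equal to a sum of fresh copies) are the usual ingredients of a disjunctive convex-hull encoding; the unfinished mk_convex call is presumably meant to produce something along these lines. Assuming each disjunct A_i is a conjunction of linear constraints, the standard encoding is:

\[
x \;=\; \sum_{i=1}^{k} x_i, \qquad
\sigma_i \ge 0, \qquad
\sum_{i=1}^{k} \sigma_i = 1, \qquad
c^{T} x_i \le \sigma_i\, d \;\; \text{for every constraint } c^{T} x \le d \text{ of } A_i,
\]

whose solutions, projected onto x, describe the convex hull of the bounded solution sets of A_1, ..., A_k.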
@@ -412,11 +463,7 @@ namespace pdr {

    bool core_convex_hull_generalizer::translate(func_decl* f, unsigned index, expr_ref& result) {
        expr* tmp;
        if (index == 0 && m_left.find(f, tmp)) {
            result = tmp;
            return true;
        }
        if (index == 1 && m_right.find(f, tmp)) {
        if (m_vars[index].find(f, tmp)) {
            result = tmp;
            return true;
        }

@@ -78,8 +78,7 @@ namespace pdr {
        arith_util a;
        expr_ref_vector m_sigma;
        expr_ref_vector m_trail;
        obj_map<func_decl, expr*> m_left;
        obj_map<func_decl, expr*> m_right;
        vector<obj_map<func_decl, expr*> > m_vars;
        obj_map<expr, expr*> m_models;
        bool m_is_closure;
        expr_ref mk_closure(expr* e);
@@ -90,7 +89,8 @@ namespace pdr {
        bool translate(func_decl* fn, unsigned index, expr_ref& result);
        void method1(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores);
        void method2(model_node& n, expr_ref_vector& core, bool& uses_level);
        void add_variables(model_node& n, expr_ref_vector& eqs);
        void method3(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores);
        void add_variables(model_node& n, unsigned num_vars, expr_ref_vector& eqs);
    public:
        core_convex_hull_generalizer(context& ctx, bool is_closure);
        virtual ~core_convex_hull_generalizer() {}

@@ -236,9 +236,12 @@ namespace pdr {
        m_neg_level_atoms(m),
        m_proxies(m),
        m_core(0),
        m_model(0),
        m_consequences(0),
        m_subset_based_core(false),
        m_use_farkas(false),
        m_in_level(false)
        m_in_level(false),
        m_current_level(0)
    {
        m_ctx->assert_expr(m_pm.get_background());
    }
@@ -413,6 +416,10 @@ namespace pdr {

            m_core->reset();
            m_core->append(lemmas);

            if (m_consequences) {
                fl.get_consequences(pr, bs, *m_consequences);
            }
        }
    }

@@ -48,6 +48,7 @@ namespace pdr {
        app_ref_vector m_proxies; // predicates for assumptions
        expr_ref_vector* m_core;
        model_ref* m_model;
        expr_ref_vector* m_consequences;
        bool m_subset_based_core;
        bool m_assumes_level;
        bool m_use_farkas;
@@ -84,6 +85,8 @@ namespace pdr {
        void set_core(expr_ref_vector* core) { m_core = core; }
        void set_model(model_ref* mdl) { m_model = mdl; }
        void set_subset_based_core(bool f) { m_subset_based_core = f; }
        void set_consequences(expr_ref_vector* consequences) { m_consequences = consequences; }

        bool assumes_level() const { return m_assumes_level; }

        void add_level();