
refactor closure code

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
Nikolaj Bjorner 2013-09-01 13:43:19 -07:00
parent 58b16c5585
commit 06a858ef3d
6 changed files with 283 additions and 325 deletions

src/muz/pdr/pdr_closure.cpp (new file)

@@ -0,0 +1,175 @@
/*++
Copyright (c) 2013 Microsoft Corporation
Module Name:
pdr_closure.cpp
Abstract:
Utility functions for computing closures.
Author:
Nikolaj Bjorner (nbjorner) 2013-9-1.
Revision History:
--*/
#include "pdr_closure.h"
#include "pdr_context.h"
#include "expr_safe_replace.h"
namespace pdr {
expr_ref scaler::operator()(expr* e, expr* k, obj_map<func_decl, expr*>* translate) {
m_cache[0].reset();
m_cache[1].reset();
m_translate = translate;
m_k = k;
return scale(e, false);
}
expr_ref scaler::scale(expr* e, bool is_mul) {
expr* r;
if (m_cache[is_mul].find(e, r)) {
return expr_ref(r, m);
}
if (!is_app(e)) {
return expr_ref(e, m);
}
app* ap = to_app(e);
if (m_translate && m_translate->find(ap->get_decl(), r)) {
return expr_ref(r, m);
}
if (!is_mul && a.is_numeral(e)) {
return expr_ref(a.mk_mul(m_k, e), m);
}
expr_ref_vector args(m);
bool is_mul_rec = is_mul || a.is_mul(e);
for (unsigned i = 0; i < ap->get_num_args(); ++i) {
args.push_back(scale(ap->get_arg(i), is_mul_rec));
}
expr_ref result(m);
result = m.mk_app(ap->get_decl(), args.size(), args.c_ptr());
m_cache[is_mul].insert(e, result);
return result;
}
expr_ref scaler::undo_k(expr* e, expr* k) {
expr_safe_replace sub(m);
th_rewriter rw(m);
expr_ref result(e, m);
sub.insert(k, a.mk_numeral(rational(1), false));
sub(result);
rw(result);
return result;
}
closure::closure(pred_transformer& p, bool is_closure):
m(p.get_manager()), m_pt(p), a(m),
m_is_closure(is_closure), m_sigma(m), m_trail(m) {}
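// Add the convex-combination scaffolding: one scale factor sigma_j per
// copy, and a split x = x_0 + ... + x_{n-1} of every numeric signature
// constant x, with sigma_0 + ... + sigma_{n-1} = 1.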
void closure::add_variables(unsigned num_vars, expr_ref_vector& fmls) {
manager& pm = m_pt.get_pdr_manager();
SASSERT(num_vars > 0);
while (m_vars.size() < num_vars) {
m_vars.resize(m_vars.size()+1);
m_sigma.push_back(m.mk_fresh_const("sigma", a.mk_real()));
}
unsigned sz = m_pt.sig_size();
for (unsigned i = 0; i < sz; ++i) {
expr* var;
ptr_vector<expr> vars;
func_decl* fn0 = m_pt.sig(i);
func_decl* fn1 = pm.o2n(fn0, 0);
sort* srt = fn0->get_range();
if (a.is_int_real(srt)) {
for (unsigned j = 0; j < num_vars; ++j) {
if (!m_vars[j].find(fn1, var)) {
var = m.mk_fresh_const(fn1->get_name().str().c_str(), srt);
m_trail.push_back(var);
m_vars[j].insert(fn1, var);
}
vars.push_back(var);
}
fmls.push_back(m.mk_eq(m.mk_const(fn1), a.mk_add(num_vars, vars.c_ptr())));
}
}
if (m_is_closure) {
for (unsigned i = 0; i < num_vars; ++i) {
fmls.push_back(a.mk_ge(m_sigma[i].get(), a.mk_numeral(rational(0), a.mk_real())));
}
}
else {
// is interior:
for (unsigned i = 0; i < num_vars; ++i) {
fmls.push_back(a.mk_gt(m_sigma[i].get(), a.mk_numeral(rational(0), a.mk_real())));
}
}
fmls.push_back(m.mk_eq(a.mk_numeral(rational(1), a.mk_real()), a.mk_add(num_vars, m_sigma.c_ptr())));
}
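// Topological closure of one literal: strict inequalities are weakened
// to non-strict ones; other arithmetic literals pass through unchanged.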
expr_ref closure::close_fml(expr* e) {
expr* e0, *e1, *e2;
expr_ref result(m);
if (a.is_lt(e, e1, e2)) {
result = a.mk_le(e1, e2);
}
else if (a.is_gt(e, e1, e2)) {
result = a.mk_ge(e1, e2);
}
else if (m.is_not(e, e0) && a.is_ge(e0, e1, e2)) {
result = a.mk_le(e1, e2);
}
else if (m.is_not(e, e0) && a.is_le(e0, e1, e2)) {
result = a.mk_ge(e1, e2);
}
else if (a.is_ge(e) || a.is_le(e) || m.is_eq(e) ||
(m.is_not(e, e0) && (a.is_gt(e0) || a.is_lt(e0)))) {
result = e;
}
else {
IF_VERBOSE(1, verbose_stream() << "Cannot close: " << mk_pp(e, m) << "\n";);
}
return result;
}
expr_ref closure::close_conjunction(expr* fml) {
expr_ref_vector fmls(m);
qe::flatten_and(fml, fmls);
for (unsigned i = 0; i < fmls.size(); ++i) {
fmls[i] = close_fml(fmls[i].get());
}
return expr_ref(m.mk_and(fmls.size(), fmls.c_ptr()), m);
}
expr_ref closure::relax(unsigned i, expr* fml) {
scaler sc(m);
expr_ref result = sc(fml, m_sigma[i].get(), &m_vars[i]);
return close_conjunction(result);
}
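// Convex closure (or relative interior, if !m_is_closure) of the
// disjunction of the formulas in As.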
expr_ref closure::operator()(expr_ref_vector const& As) {
if (As.empty()) {
return expr_ref(m.mk_false(), m);
}
if (As.size() == 1) {
return expr_ref(As[0], m);
}
expr_ref_vector fmls(m);
expr_ref B(m);
add_variables(As.size(), fmls);
for (unsigned i = 0; i < As.size(); ++i) {
fmls.push_back(relax(i, As[i]));
}
B = qe::mk_and(fmls);
return B;
}
}
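The encoding built by add_variables and relax can be checked with plain
arithmetic. The following is not Z3 code but a minimal standalone sketch;
the toy regions A0: x <= 1 and A1: x >= 3 are an invented example of the
same scheme: split x into x0 + x1, scale each region's constants by
sigma_i, and require sigma_0 + sigma_1 = 1 with sigma_i >= 0.

    // Toy check of the convex-hull encoding used by pdr::closure.
    // Scaled copies: x0 <= 1*s0 (from A0), x1 >= 3*s1 (from A1).
    #include <cstdio>

    static bool in_hull(double x0, double x1, double s0, double s1) {
        return s0 >= 0 && s1 >= 0 && s0 + s1 == 1.0
            && x0 <= 1.0 * s0    // scaled copy of A0: x <= 1
            && x1 >= 3.0 * s1;   // scaled copy of A1: x >= 3
    }

    int main() {
        // x = 2 lies strictly between the two regions, but the witness
        // x = 0.5 + 1.5 with s0 = s1 = 0.5 places it in their hull.
        std::printf("x = 2 in hull: %s\n",
                    in_hull(0.5, 1.5, 0.5, 0.5) ? "yes" : "no");
        return 0;
    }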

src/muz/pdr/pdr_closure.h (new file)

@@ -0,0 +1,67 @@
/*++
Copyright (c) 2013 Microsoft Corporation
Module Name:
pdr_closure.h
Abstract:
Utility functions for computing closures.
Author:
Nikolaj Bjorner (nbjorner) 2013-9-1.
Revision History:
--*/
#ifndef _PDR_CLOSURE_H_
#define _PDR_CLOSURE_H_
#include "arith_decl_plugin.h"
namespace pdr {
// Arithmetic scaling functor.
// Variables are replaced using
// m_translate. Constants are replaced by
// multiplication with a variable 'k' (scale factor).
class scaler {
ast_manager& m;
arith_util a;
obj_map<expr, expr*> m_cache[2];
expr* m_k;
obj_map<func_decl, expr*>* m_translate;
public:
scaler(ast_manager& m): m(m), a(m), m_translate(0) {}
expr_ref operator()(expr* e, expr* k, obj_map<func_decl, expr*>* translate = 0);
expr_ref undo_k(expr* e, expr* k);
private:
expr_ref scale(expr* e, bool is_mul);
};
class pred_transformer;
class closure {
ast_manager& m;
pred_transformer& m_pt;
arith_util a;
bool m_is_closure;
expr_ref_vector m_sigma;
expr_ref_vector m_trail;
vector<obj_map<func_decl, expr*> > m_vars;
expr_ref relax(unsigned i, expr* fml);
expr_ref close_conjunction(expr* fml);
expr_ref close_fml(expr* fml);
void add_variables(unsigned num_vars, expr_ref_vector& fmls);
public:
closure(pred_transformer& pt, bool is_closure);
expr_ref operator()(expr_ref_vector const& As);
};
}
#endif
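Taken together, scaler and closure implement the standard convex-hull
relaxation. In the notation below (reconstructed from the code above; the
commit itself states no formula), A_j^{sigma_j} denotes A_j with every
numeral c replaced by sigma_j * c and every signature constant x replaced
by its j-th copy x^j:

    \mathrm{cl}(A_1,\dots,A_n) \;=\; \exists \sigma, \bar{x}^1,\dots,\bar{x}^n.\;
    \bigwedge_{x}\Big(x = \sum_{j} x^j\Big) \;\wedge\;
    \sum_{j} \sigma_j = 1 \;\wedge\;
    \bigwedge_{j} \sigma_j \ge 0 \;\wedge\;
    \bigwedge_{j} A_j^{\sigma_j}

When is_closure is false, the constraints sigma_j >= 0 become sigma_j > 0
(relative interior), and close_fml weakens any strict inequality remaining
in the scaled copies to its non-strict counterpart.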

src/muz/pdr/pdr_generalizers.cpp

@@ -148,56 +148,10 @@ namespace pdr {
m_farkas_learner.collect_statistics(st);
}
expr_ref scaler::operator()(expr* e, expr* k, obj_map<func_decl, expr*>* translate) {
m_cache[0].reset();
m_cache[1].reset();
m_translate = translate;
m_k = k;
return scale(e, false);
}
expr_ref scaler::scale(expr* e, bool is_mul) {
expr* r;
if (m_cache[is_mul].find(e, r)) {
return expr_ref(r, m);
}
if (!is_app(e)) {
return expr_ref(e, m);
}
app* ap = to_app(e);
if (m_translate && m_translate->find(ap->get_decl(), r)) {
return expr_ref(r, m);
}
if (!is_mul && a.is_numeral(e)) {
return expr_ref(a.mk_mul(m_k, e), m);
}
expr_ref_vector args(m);
bool is_mul_rec = is_mul || a.is_mul(e);
for (unsigned i = 0; i < ap->get_num_args(); ++i) {
args.push_back(scale(ap->get_arg(i), is_mul_rec));
}
expr_ref result(m);
result = m.mk_app(ap->get_decl(), args.size(), args.c_ptr());
m_cache[is_mul].insert(e, result);
return result;
}
expr_ref scaler::undo_k(expr* e, expr* k) {
expr_safe_replace sub(m);
th_rewriter rw(m);
expr_ref result(e, m);
sub.insert(k, a.mk_numeral(rational(1), false));
sub(result);
rw(result);
return result;
}
core_convex_hull_generalizer::core_convex_hull_generalizer(context& ctx, bool is_closure):
core_generalizer(ctx),
m(ctx.get_manager()),
a(m),
m_sigma(m),
m_trail(m),
m_is_closure(is_closure) {
}
@@ -224,38 +178,35 @@ namespace pdr {
// update with new core.
//
void core_convex_hull_generalizer::method1(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
manager& pm = n.pt().get_pdr_manager();
expr_ref_vector conv1(m), conv2(m), core1(m), core2(m), fmls(m);
unsigned orig_size = new_cores.size();
expr_ref_vector conv2(m), fmls(m), fml1_2(m);
bool change = false;
if (core.empty()) {
new_cores.push_back(std::make_pair(core, uses_level));
return;
}
add_variables(n, 2, fmls);
mk_convex(core, 0, conv1);
conv1.append(fmls);
expr_ref fml = n.pt().get_formulas(n.level(), false);
fmls.reset();
qe::flatten_and(fml, fmls);
closure cl(n.pt(), m_is_closure);
expr_ref fml1 = qe::mk_and(core);
expr_ref fml2 = n.pt().get_formulas(n.level(), false);
fml1_2.push_back(fml1);
fml1_2.push_back(0);
qe::flatten_and(fml2, fmls);
for (unsigned i = 0; i < fmls.size(); ++i) {
fml = m.mk_not(fmls[i].get());
core2.reset();
conv2.reset();
qe::flatten_and(fml, core2);
mk_convex(core2, 1, conv2);
conv2.append(conv1);
expr_ref state = pm.mk_and(conv2);
fml2 = m.mk_not(fmls[i].get());
fml1_2[1] = fml2;
expr_ref state = cl(fml1_2);
TRACE("pdr",
tout << "Check states:\n" << mk_pp(state, m) << "\n";
tout << "Old states:\n" << mk_pp(fml, m) << "\n";
tout << "Old states:\n" << mk_pp(fml2, m) << "\n";
);
model_node nd(0, state, n.pt(), n.level());
pred_transformer::scoped_farkas sf(n.pt(), true);
bool uses_level1 = uses_level;
if (l_false == n.pt().is_reachable(nd, &conv2, uses_level1)) {
new_cores.push_back(std::make_pair(conv2, uses_level1));
expr_ref state1 = pm.mk_and(conv2);
change = true;
expr_ref state1 = qe::mk_and(conv2);
TRACE("pdr",
tout << mk_pp(state, m) << "\n";
tout << "Generalized to:\n" << mk_pp(state1, m) << "\n";);
@@ -264,70 +215,9 @@ namespace pdr {
verbose_stream() << "Generalized to:\n" << mk_pp(state1, m) << "\n";);
}
}
if (!m_is_closure || new_cores.size() == orig_size) {
if (!m_is_closure || !change) {
new_cores.push_back(std::make_pair(core, uses_level));
}
}
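Reading the refactored loop above: fml1_2 always holds the pair
(and(core), not F) for the current conjunct F of the previously learned
level formulas, and closure::operator() relaxes both members and conjoins
the mixing constraints. Each reachability check therefore asks whether the
convex closure of the new core together with the complement of an old
lemma is still blocked; when it is, the strengthened core conv2 is kept.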
// take as starting point two points from different regions.
void core_convex_hull_generalizer::method2(model_node& n, expr_ref_vector& core, bool& uses_level) {
expr_ref_vector conv1(m), conv2(m), core1(m), core2(m);
if (core.empty()) {
return;
}
manager& pm = n.pt().get_pdr_manager();
smt::kernel ctx(m, m_ctx.get_fparams(), m_ctx.get_params().p);
expr_ref goal(pm.mk_and(core));
ctx.assert_expr(goal);
lbool r = ctx.check();
if (r != l_true) {
IF_VERBOSE(0, verbose_stream() << "unexpected result from satisfiability check\n";);
return;
}
add_variables(n, 2, conv1);
model_ref mdl;
ctx.get_model(mdl);
unsigned sz = n.pt().sig_size();
for (unsigned i = 0; i < sz; ++i) {
expr_ref_vector constr(m);
expr* left, *right;
func_decl* fn0 = n.pt().sig(i);
func_decl* fn1 = pm.o2n(fn0, 0);
if (m_vars[0].find(fn1, left) && m_vars[1].find(fn1, right)) {
expr_ref val(m);
mdl->eval(fn1, val);
if (val) {
conv1.push_back(m.mk_eq(left, val));
constr.push_back(m.mk_eq(right, val));
}
}
expr_ref new_model = pm.mk_and(constr);
m_trail.push_back(new_model);
m_trail.push_back(goal);
m_models.insert(goal, new_model);
}
obj_map<expr, expr*>::iterator it = m_models.begin(), end = m_models.end();
for (; it != end; ++it) {
if (it->m_key == goal) {
continue;
}
conv1.push_back(it->m_value);
expr_ref state = pm.mk_and(conv1);
TRACE("pdr", tout << "Try:\n" << mk_pp(state, m) << "\n";);
model_node nd(0, state, n.pt(), n.level());
pred_transformer::scoped_farkas sf(n.pt(), true);
if (l_false == n.pt().is_reachable(nd, &conv2, uses_level)) {
IF_VERBOSE(0,
verbose_stream() << mk_pp(state, m) << "\n";
verbose_stream() << "Generalized to:\n" << mk_pp(pm.mk_and(conv2), m) << "\n";);
core.reset();
core.append(conv2);
return;
}
conv1.pop_back();
}
}
/*
@@ -339,7 +229,6 @@ namespace pdr {
for (unsigned i = 0; i < core.size(); ++i) {
tout << "B:" << mk_pp(core[i], m) << "\n";
});
manager& pm = n.pt().get_pdr_manager();
bool uses_level1;
expr_ref_vector core1(m);
core1.append(core);
@@ -395,13 +284,14 @@ namespace pdr {
bool core_convex_hull_generalizer::strengthen_consequences(model_node& n, expr_ref_vector& As, expr* B) {
expr_ref A(m), tmp(m), convA(m);
unsigned sz = As.size();
closure cl(n.pt(), m_is_closure);
for (unsigned i = 0; i < As.size(); ++i) {
expr_ref_vector Hs(m);
Hs.push_back(As[i].get());
for (unsigned j = i + 1; j < As.size(); ++j) {
Hs.push_back(As[j].get());
bool unsat = false;
mk_convex(n, Hs, A);
A = cl(Hs);
tmp = As[i].get();
As[i] = A;
unsat = is_unsat(As, B);
@@ -424,16 +314,6 @@ namespace pdr {
return sz > As.size();
}
void core_convex_hull_generalizer::mk_convex(model_node& n, expr_ref_vector const& Hs, expr_ref& A) {
expr_ref_vector fmls(m), es(m);
add_variables(n, Hs.size(), fmls);
for (unsigned i = 0; i < Hs.size(); ++i) {
es.reset();
qe::flatten_and(Hs[i], es);
mk_convex(es, i, fmls);
}
A = m.mk_and(fmls.size(), fmls.c_ptr());
}
bool core_convex_hull_generalizer::is_unsat(expr_ref_vector const& As, expr* B) {
smt::kernel ctx(m, m_ctx.get_fparams(), m_ctx.get_params().p);
@@ -445,91 +325,6 @@ namespace pdr {
return l_false == ctx.check();
}
void core_convex_hull_generalizer::add_variables(model_node& n, unsigned num_vars, expr_ref_vector& fmls) {
manager& pm = n.pt().get_pdr_manager();
SASSERT(num_vars > 0);
while (m_vars.size() < num_vars) {
m_vars.resize(m_vars.size()+1);
m_sigma.push_back(m.mk_fresh_const("sigma", a.mk_real()));
}
unsigned sz = n.pt().sig_size();
for (unsigned i = 0; i < sz; ++i) {
expr* var;
ptr_vector<expr> vars;
func_decl* fn0 = n.pt().sig(i);
func_decl* fn1 = pm.o2n(fn0, 0);
sort* srt = fn0->get_range();
if (a.is_int_real(srt)) {
for (unsigned j = 0; j < num_vars; ++j) {
if (!m_vars[j].find(fn1, var)) {
var = m.mk_fresh_const(fn1->get_name().str().c_str(), srt);
m_trail.push_back(var);
m_vars[j].insert(fn1, var);
}
vars.push_back(var);
}
fmls.push_back(m.mk_eq(m.mk_const(fn1), a.mk_add(num_vars, vars.c_ptr())));
}
}
if (m_is_closure) {
for (unsigned i = 0; i < num_vars; ++i) {
fmls.push_back(a.mk_ge(m_sigma[i].get(), a.mk_numeral(rational(0), a.mk_real())));
}
}
else {
// is interior:
for (unsigned i = 0; i < num_vars; ++i) {
fmls.push_back(a.mk_gt(m_sigma[i].get(), a.mk_numeral(rational(0), a.mk_real())));
}
}
fmls.push_back(m.mk_eq(a.mk_numeral(rational(1), a.mk_real()), a.mk_add(num_vars, m_sigma.c_ptr())));
}
expr_ref core_convex_hull_generalizer::mk_closure(expr* e) {
expr* e0, *e1, *e2;
expr_ref result(m);
if (a.is_lt(e, e1, e2)) {
result = a.mk_le(e1, e2);
}
else if (a.is_gt(e, e1, e2)) {
result = a.mk_ge(e1, e2);
}
else if (m.is_not(e, e0) && a.is_ge(e0, e1, e2)) {
result = a.mk_le(e1, e2);
}
else if (m.is_not(e, e0) && a.is_le(e0, e1, e2)) {
result = a.mk_ge(e1, e2);
}
else if (a.is_ge(e) || a.is_le(e) || m.is_eq(e) ||
(m.is_not(e, e0) && (a.is_gt(e0) || a.is_lt(e0)))) {
result = e;
}
else {
IF_VERBOSE(1, verbose_stream() << "Cannot close: " << mk_pp(e, m) << "\n";);
}
return result;
}
bool core_convex_hull_generalizer::mk_closure(expr_ref_vector& conj) {
for (unsigned i = 0; i < conj.size(); ++i) {
conj[i] = mk_closure(conj[i].get());
if (!conj[i].get()) {
return false;
}
}
return true;
}
void core_convex_hull_generalizer::mk_convex(expr_ref_vector const& core, unsigned index, expr_ref_vector& conv) {
scaler sc(m);
for (unsigned i = 0; i < core.size(); ++i) {
conv.push_back(sc(core[i], m_sigma[index].get(), &m_vars[index]));
}
mk_closure(conv);
}
// ---------------------------------
// core_arith_inductive_generalizer
@@ -800,7 +595,7 @@ namespace pdr {
for (unsigned i = ut_size; i < t_size; i++) {
conj.push_back(rule.get_tail(i));
}
result = pm.mk_and(conj);
result = qe::mk_and(conj);
if (!sub.empty()) {
expr_ref tmp = result;
var_subst(m, false)(tmp, sub.size(), sub.c_ptr(), result);
@@ -983,73 +778,3 @@ namespace pdr {
}
};
#if 0
// now create the convex closure of the consequences:
expr_ref tmp(m), zero(m);
expr_ref_vector conv(m), es(m), enabled(m);
zero = a.mk_numeral(rational(0), a.mk_real());
add_variables(n, consequences.size(), conv);
for (unsigned i = 0; i < consequences.size(); ++i) {
es.reset();
tmp = m.mk_not(consequences[i].get());
qe::flatten_and(tmp, es);
mk_convex(es, i, conv);
es.reset();
//
// enabled[i] = not (sigma_i = 0 and z_i1 = 0 and .. and z_im = 0)
//
es.push_back(m.mk_eq(m_sigma[i].get(), zero));
for (unsigned j = 0; j < n.pt().sig_size(); ++j) {
func_decl* fn0 = n.pt().sig(j);
func_decl* fn1 = pm.o2n(fn0, 0);
expr* var;
VERIFY (m_vars[i].find(fn1, var));
es.push_back(m.mk_eq(var, zero));
}
enabled.push_back(m.mk_not(m.mk_and(es.size(), es.c_ptr())));
}
// The convex closure of all consequences has been created;
// now determine a subset of enabled constraints.
smt::kernel ctx(m, m_ctx.get_fparams(), m_ctx.get_params().p);
for (unsigned i = 0; i < conv.size(); ++i) {
ctx.assert_expr(conv[i].get());
IF_VERBOSE(0, verbose_stream() << "CC: " << mk_pp(conv[i].get(), m) << "\n";);
}
for (unsigned i = 0; i < core.size(); ++i) {
ctx.assert_expr(core[i]);
IF_VERBOSE(0, verbose_stream() << "Co: " << mk_pp(core[i], m) << "\n";);
}
vector<unsigned_vector> transversal;
while (l_true == ctx.check()) {
model_ref md;
ctx.get_model(md);
IF_VERBOSE(0,
ctx.display(verbose_stream());
verbose_stream() << "\n";
model_smt2_pp(verbose_stream(), m, *md.get(), 0););
expr_ref_vector lits(m);
unsigned_vector pos;
for (unsigned i = 0; i < consequences.size(); ++i) {
if (md->eval(enabled[i].get(), tmp, false)) {
IF_VERBOSE(0,
verbose_stream() << mk_pp(enabled[i].get(), m) << " |-> " << mk_pp(tmp, m) << "\n";);
if (m.is_true(tmp)) {
lits.push_back(tmp);
pos.push_back(i);
}
}
}
transversal.push_back(pos);
SASSERT(!lits.empty());
tmp = m.mk_not(m.mk_and(lits.size(), lits.c_ptr()));
TRACE("pdr", tout << "add block: " << mk_pp(tmp, m) << "\n";);
ctx.assert_expr(tmp);
}
//
// we could no longer satisfy core using a partition.
//
IF_VERBOSE(0, verbose_stream() << "TBD: tranverse\n";);
#endif

src/muz/pdr/pdr_generalizers.h

@@ -21,6 +21,7 @@ Revision History:
#define _PDR_GENERALIZERS_H_
#include "pdr_context.h"
#include "pdr_closure.h"
#include "arith_decl_plugin.h"
namespace pdr {
@@ -73,45 +74,15 @@ namespace pdr {
virtual void collect_statistics(statistics& st) const;
};
// Arithmetic scaling functor.
// Variables are replaced using
// m_translate. Constants are replaced by
// multiplication with a variable 'k' (scale factor).
class scaler {
ast_manager& m;
arith_util a;
obj_map<expr, expr*> m_cache[2];
expr* m_k;
obj_map<func_decl, expr*>* m_translate;
public:
scaler(ast_manager& m): m(m), a(m), m_translate(0) {}
expr_ref operator()(expr* e, expr* k, obj_map<func_decl, expr*>* translate = 0);
expr_ref undo_k(expr* e, expr* k);
private:
expr_ref scale(expr* e, bool is_mul);
};
class core_convex_hull_generalizer : public core_generalizer {
ast_manager& m;
arith_util a;
expr_ref_vector m_sigma;
expr_ref_vector m_trail;
vector<obj_map<func_decl, expr*> > m_vars;
obj_map<expr, expr*> m_models;
bool m_is_closure;
expr_ref mk_closure(expr* e);
bool mk_closure(expr_ref_vector& conj);
void mk_convex(expr_ref_vector const& core, unsigned index, expr_ref_vector& conv);
void mk_convex(expr* fml, unsigned index, expr_ref_vector& conv);
void mk_convex(expr* term, unsigned index, bool is_mul, expr_ref& result);
bool translate(func_decl* fn, unsigned index, expr_ref& result);
void method1(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores);
void method2(model_node& n, expr_ref_vector& core, bool& uses_level);
void method3(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores);
bool strengthen_consequences(model_node& n, expr_ref_vector& As, expr* B);
bool is_unsat(expr_ref_vector const& As, expr* B);
void mk_convex(model_node& n, expr_ref_vector const& Hs, expr_ref& A);
void add_variables(model_node& n, unsigned num_vars, expr_ref_vector& fmls);
public:
core_convex_hull_generalizer(context& ctx, bool is_closure);
virtual ~core_convex_hull_generalizer() {}

src/qe/qe_util.cpp

@@ -1,4 +1,5 @@
#include "qe_util.h"
#include "bool_rewriter.h"
namespace qe {
void flatten_and(expr_ref_vector& result) {
@@ -113,4 +114,19 @@ namespace qe {
result.push_back(fml);
flatten_or(result);
}
expr_ref mk_and(expr_ref_vector const& fmls) {
ast_manager& m = fmls.get_manager();
expr_ref result(m);
bool_rewriter(m).mk_and(fmls.size(), fmls.c_ptr(), result);
return result;
}
expr_ref mk_or(expr_ref_vector const& fmls) {
ast_manager& m = fmls.get_manager();
expr_ref result(m);
bool_rewriter(m).mk_or(fmls.size(), fmls.c_ptr(), result);
return result;
}
}
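qe::mk_and and qe::mk_or route through bool_rewriter, so units and
duplicates are simplified away instead of producing a literal (and ...)
node. A rough analogue through Z3's public C++ API (z3++.h, rather than
the internal ast_manager interface used in this commit) is sketched below;
the printed result assumes the default simplifier behavior.

    #include <z3++.h>
    #include <iostream>

    int main() {
        z3::context c;
        z3::expr p = c.bool_const("p");
        // (p && true && p) simplifies to p, mirroring what
        // bool_rewriter::mk_and does inside qe::mk_and.
        z3::expr e = (p && c.bool_val(true) && p).simplify();
        std::cout << e << "\n";   // expected output: p
        return 0;
    }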

src/qe/qe_util.h

@@ -33,5 +33,9 @@ namespace qe {
void flatten_or(expr* fml, expr_ref_vector& result);
expr_ref mk_and(expr_ref_vector const& fmls);
expr_ref mk_or(expr_ref_vector const& fmls);
}
#endif