mirror of https://github.com/Z3Prover/z3
synced 2025-11-28 16:29:50 +00:00

Merge branch 'master' of https://github.com/z3prover/z3

commit 6fc08e9c9f

236 changed files with 14093 additions and 16593 deletions
@@ -18,5 +18,5 @@ z3_add_component(muz
smt
smt2parser
PYG_FILES
fixedpoint_params.pyg
fp_params.pyg
)

@@ -27,7 +27,7 @@ Revision History:
#include "ast/ast_smt2_pp.h"
#include "ast/datatype_decl_plugin.h"
#include "ast/scoped_proof.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
#include "ast/ast_pp_util.h"

@@ -152,15 +152,15 @@ namespace datalog {
class context::restore_rules : public trail<context> {
rule_set* m_old_rules;
void reset() {
dealloc(m_old_rules);
m_old_rules = nullptr;
}
public:
restore_rules(rule_set& r): m_old_rules(alloc(rule_set, r)) {}
~restore_rules() override {}
void undo(context& ctx) override {
ctx.replace_rules(*m_old_rules);
reset();

@@ -188,10 +188,8 @@ namespace datalog {
if (m_trail.get_num_scopes() == 0) {
throw default_exception("there are no backtracking points to pop to");
}
if (m_engine.get()) {
throw default_exception("pop operation is only supported by duality engine");
}
m_trail.pop_scope(1);
throw default_exception("pop operation is not supported");
m_trail.pop_scope(1);
}
// -----------------------------------

@@ -205,7 +203,7 @@ namespace datalog {
m_register_engine(re),
m_fparams(fp),
m_params_ref(pa),
m_params(alloc(fixedpoint_params, m_params_ref)),
m_params(alloc(fp_params, m_params_ref)),
m_decl_util(m),
m_rewriter(m),
m_var_subst(m),

@@ -237,7 +235,7 @@ namespace datalog {
context::~context() {
reset();
dealloc(m_params);
}
void context::reset() {

@@ -293,14 +291,14 @@ namespace datalog {
bool context::similarity_compressor() const { return m_params->datalog_similarity_compressor(); }
unsigned context::similarity_compressor_threshold() const { return m_params->datalog_similarity_compressor_threshold(); }
unsigned context::soft_timeout() const { return m_fparams.m_timeout; }
unsigned context::initial_restart_timeout() const { return m_params->datalog_initial_restart_timeout(); }
bool context::generate_explanations() const { return m_params->datalog_generate_explanations(); }
bool context::explanations_on_relation_level() const { return m_params->datalog_explanations_on_relation_level(); }
bool context::magic_sets_for_queries() const { return m_params->datalog_magic_sets_for_queries(); }
symbol context::tab_selection() const { return m_params->tab_selection(); }
bool context::xform_coi() const { return m_params->xform_coi(); }
bool context::xform_slice() const { return m_params->xform_slice(); }
bool context::xform_bit_blast() const { return m_params->xform_bit_blast(); }
bool context::karr() const { return m_params->xform_karr(); }
bool context::scale() const { return m_params->xform_scale(); }
bool context::magic() const { return m_params->xform_magic(); }

@@ -430,7 +428,7 @@ namespace datalog {
}
void context::set_predicate_representation(func_decl * pred, unsigned relation_name_cnt,
symbol const * relation_names) {
if (relation_name_cnt > 0) {
ensure_engine();

@@ -440,9 +438,9 @@ namespace datalog {
}
}
func_decl * context::mk_fresh_head_predicate(symbol const & prefix, symbol const & suffix,
unsigned arity, sort * const * domain, func_decl* orig_pred) {
func_decl* new_pred =
m.mk_fresh_func_decl(prefix, suffix, arity, domain, m.mk_bool_sort());
register_predicate(new_pred, true);

@@ -475,7 +473,7 @@ namespace datalog {
//
// Update a rule with a new.
// It requires basic subsumption.
//
void context::update_rule(expr* rl, symbol const& name) {
datalog::rule_manager& rm = get_rule_manager();
proof* p = nullptr;
@@ -496,13 +494,13 @@ namespace datalog {
rule* old_rule = nullptr;
for (unsigned i = 0; i < size_before; ++i) {
if (rls[i]->name() == name) {
if (old_rule) {
std::stringstream strm;
strm << "Rule " << name << " occurs twice. It cannot be modified";
m_rule_set.del_rule(r);
throw default_exception(strm.str());
}
old_rule = rls[i];
}
}
if (old_rule) {

@@ -558,7 +556,7 @@ namespace datalog {
ensure_engine();
m_engine->add_cover(level, pred, property);
}
void context::add_invariant(func_decl* pred, expr *property)
{
ensure_engine();

@@ -568,34 +566,28 @@ namespace datalog {
void context::check_rules(rule_set& r) {
m_rule_properties.set_generate_proof(generate_proof_trace());
switch(get_engine()) {
case DATALOG_ENGINE:
m_rule_properties.collect(r);
m_rule_properties.check_quantifier_free();
m_rule_properties.check_uninterpreted_free();
m_rule_properties.check_nested_free();
m_rule_properties.check_infinite_sorts();
break;
case SPACER_ENGINE:
case PDR_ENGINE:
m_rule_properties.collect(r);
m_rule_properties.check_existential_tail();
m_rule_properties.check_for_negated_predicates();
m_rule_properties.check_uninterpreted_free();
break;
case QPDR_ENGINE:
m_rule_properties.collect(r);
m_rule_properties.check_for_negated_predicates();
m_rule_properties.check_uninterpreted_free();
break;
case BMC_ENGINE:
m_rule_properties.collect(r);
m_rule_properties.check_for_negated_predicates();
break;
case QBMC_ENGINE:
m_rule_properties.collect(r);
m_rule_properties.check_existential_tail();
m_rule_properties.check_for_negated_predicates();
break;
case TAB_ENGINE:
m_rule_properties.collect(r);
m_rule_properties.check_existential_tail();

@@ -658,7 +650,7 @@ namespace datalog {
add_fact(pred, rfact);
}
}
void context::add_table_fact(func_decl * pred, unsigned num_args, unsigned args[]) {
if (pred->get_arity() != num_args) {
std::ostringstream out;

@@ -690,7 +682,7 @@ namespace datalog {
reopen();
}
}
void context::reopen() {
SASSERT(m_closed);
m_rule_set.reopen();

@@ -703,7 +695,7 @@ namespace datalog {
transformer.register_plugin(plugin);
transform_rules(transformer);
}
void context::transform_rules(rule_transformer& transf) {
SASSERT(m_closed); //we must finish adding rules before we start transforming them
TRACE("dl", display_rules(tout););

@@ -732,7 +724,7 @@ namespace datalog {
}
void context::collect_params(param_descrs& p) {
fixedpoint_params::collect_param_descrs(p);
fp_params::collect_param_descrs(p);
insert_timeout(p);
}

@@ -740,8 +732,8 @@ namespace datalog {
m_params_ref.copy(p);
if (m_engine.get()) m_engine->updt_params();
m_generate_proof_trace = m_params->generate_proof_trace();
m_unbound_compressor = m_params->datalog_unbound_compressor();
m_default_relation = m_params->datalog_default_relation();
}
expr_ref context::get_background_assertion() {

@@ -756,7 +748,7 @@ namespace datalog {
void context::assert_expr(expr* e) {
TRACE("dl", tout << mk_ismt2_pp(e, m) << "\n";);
m_background.push_back(e);
}
void context::cleanup() {

@@ -776,19 +768,14 @@ namespace datalog {
DL_ENGINE get_engine() const { return m_engine_type; }
void operator()(expr* e) {
if (is_quantifier(e)) {
m_engine_type = QPDR_ENGINE;
}
else if (m_engine_type != QPDR_ENGINE) {
if (a.is_int_real(e)) {
m_engine_type = PDR_ENGINE;
m_engine_type = SPACER_ENGINE;
}
else if (is_var(e) && m.is_bool(e)) {
m_engine_type = PDR_ENGINE;
m_engine_type = SPACER_ENGINE;
}
else if (dt.is_datatype(m.get_sort(e))) {
m_engine_type = PDR_ENGINE;
}
m_engine_type = SPACER_ENGINE;
}
}
};

@@ -798,19 +785,13 @@ namespace datalog {
return;
}
symbol e = m_params->engine();
if (e == symbol("datalog")) {
m_engine_type = DATALOG_ENGINE;
}
else if (e == symbol("spacer")) {
m_engine_type = SPACER_ENGINE;
}
else if (e == symbol("pdr")) {
m_engine_type = PDR_ENGINE;
}
else if (e == symbol("qpdr")) {
m_engine_type = QPDR_ENGINE;
}
else if (e == symbol("bmc")) {
m_engine_type = BMC_ENGINE;
}
@@ -830,7 +811,7 @@ namespace datalog {
if (m_engine_type == LAST_ENGINE) {
expr_fast_mark1 mark;
engine_type_proc proc(m);
m_engine_type = DATALOG_ENGINE;
for (unsigned i = 0; m_engine_type == DATALOG_ENGINE && i < m_rule_set.get_num_rules(); ++i) {
rule * r = m_rule_set.get_rule(i);
quick_for_each_expr(proc, mark, r->get_head());

@@ -858,8 +839,6 @@ namespace datalog {
switch (get_engine()) {
case DATALOG_ENGINE:
case SPACER_ENGINE:
case PDR_ENGINE:
case QPDR_ENGINE:
case BMC_ENGINE:
case QBMC_ENGINE:
case TAB_ENGINE:

@@ -882,8 +861,6 @@ namespace datalog {
switch (get_engine()) {
case DATALOG_ENGINE:
case SPACER_ENGINE:
case PDR_ENGINE:
case QPDR_ENGINE:
case BMC_ENGINE:
case QBMC_ENGINE:
case TAB_ENGINE:

@@ -916,15 +893,15 @@ namespace datalog {
m_rel = dynamic_cast<rel_context_base*>(m_engine.get());
}
}
}
}
lbool context::rel_query(unsigned num_rels, func_decl * const* rels) {
m_last_answer = nullptr;
ensure_engine();
return m_engine->query(num_rels, rels);
}
expr* context::get_answer_as_formula() {
if (m_last_answer) {
return m_last_answer.get();

@@ -977,7 +954,7 @@ namespace datalog {
void context::display(std::ostream & out) const {
display_rules(out);
if (m_rel) m_rel->display_facts(out);
}
void context::display_profile(std::ostream& out) const {

@@ -1013,10 +990,10 @@ namespace datalog {
bool context::result_contains_fact(relation_fact const& f) {
return m_rel && m_rel->result_contains_fact(f);
}
// NB: algebraic data-types declarations will not be printed.
static void collect_free_funcs(unsigned sz, expr* const* exprs,
ast_pp_util& v,
mk_fresh_name& fresh_names) {
v.collect(sz, exprs);

@@ -1028,7 +1005,7 @@ namespace datalog {
fresh_names.add(e);
}
}
void context::get_raw_rule_formulas(expr_ref_vector& rules, svector<symbol>& names, unsigned_vector &bounds) {
for (unsigned i = 0; i < m_rule_fmls.size(); ++i) {
expr_ref r = bind_vars(m_rule_fmls[i].get(), true);

@@ -1041,7 +1018,7 @@ namespace datalog {
void context::get_rules_as_formulas(expr_ref_vector& rules, expr_ref_vector& queries, svector<symbol>& names) {
expr_ref fml(m);
rule_manager& rm = get_rule_manager();
// ensure that rules are all using bound variables.
for (unsigned i = m_rule_fmls_head; i < m_rule_fmls.size(); ++i) {
m_free_vars(m_rule_fmls[i].get());

@@ -1090,7 +1067,7 @@ namespace datalog {
}
for (unsigned i = m_rule_fmls_head; i < m_rule_fmls.size(); ++i) {
rules.push_back(m_rule_fmls[i].get());
names.push_back(m_rule_names[i]);
}
}

@@ -1103,7 +1080,7 @@ namespace datalog {
}
return out;
}
void context::display_smt2(unsigned num_queries, expr* const* qs, std::ostream& out) {
ast_manager& m = get_manager();
ast_pp_util visitor(m);

@@ -1132,7 +1109,7 @@ namespace datalog {
for (unsigned i = 0; i < sz; ++i) {
func_decl* f = visitor.coll.get_func_decls()[i];
if (f->get_family_id() != null_family_id) {
//
}
else if (is_predicate(f) && use_fixedpoint_extensions) {
rels.insert(f);

@@ -1145,12 +1122,12 @@ namespace datalog {
if (!use_fixedpoint_extensions) {
out << "(set-logic HORN)\n";
}
for (func_decl * f : rels)
visitor.remove_decl(f);
visitor.display_decls(out);
for (func_decl * f : rels)
display_rel_decl(out, f);
if (use_fixedpoint_extensions && do_declare_vars) {

@@ -1166,7 +1143,7 @@ namespace datalog {
PP(axioms[i]);
out << ")\n";
}
for (unsigned i = 0; i < rules.size(); ++i) {
out << (use_fixedpoint_extensions?"(rule ":"(assert ");
expr* r = rules[i].get();
symbol nm = names[i];

@@ -1179,7 +1156,7 @@ namespace datalog {
while (fresh_names.contains(nm)) {
std::ostringstream s;
s << nm << "!";
nm = symbol(s.str().c_str());
}
fresh_names.add(nm);
display_symbol(out, nm) << ")";

@@ -1205,7 +1182,7 @@ namespace datalog {
args.push_back(m.mk_var(j, m_free_vars[j]));
}
qfn = m.mk_implies(q, m.mk_app(fn, args.size(), args.c_ptr()));
out << "(assert ";
PP(qfn);
out << ")\n";

@@ -1232,7 +1209,7 @@ namespace datalog {
smt2_pp_environment_dbg env(m);
out << "(declare-rel ";
display_symbol(out, f->get_name()) << " (";
for (unsigned i = 0; i < f->get_arity(); ++i) {
ast_smt2_pp(out, f->get_domain(i), env);
if (i + 1 < f->get_arity()) {
out << " ";

@@ -1262,12 +1239,12 @@ namespace datalog {
void context::declare_vars(expr_ref_vector& rules, mk_fresh_name& fresh_names, std::ostream& out) {
//
// replace bound variables in rules by 'var declarations'
// First remove quantifers, then replace bound variables
// by fresh constants.
//
smt2_pp_environment_dbg env(m);
var_subst vsubst(m, false);
expr_ref_vector fresh_vars(m), subst(m);
expr_ref res(m);
obj_map<sort, unsigned_vector> var_idxs;

@@ -1280,7 +1257,7 @@ namespace datalog {
quantifier* q = to_quantifier(r);
if (!q->is_forall()) {
continue;
}
}
if (has_quantifiers(q->get_expr())) {
continue;
}

@@ -1310,7 +1287,7 @@ namespace datalog {
fresh_vars.push_back(m.mk_const(name, s));
out << "(declare-var " << name << " ";
ast_smt2_pp(out, s, env);
out << ")\n";
}
subst.push_back(fresh_vars[vars[max_var]].get());
}

@@ -1322,4 +1299,3 @@ namespace datalog {
};

@@ -44,7 +44,7 @@ Revision History:
#include "muz/base/bind_variables.h"
#include "muz/base/rule_properties.h"
struct fixedpoint_params;
struct fp_params;
namespace datalog {

@@ -98,7 +98,7 @@ namespace datalog {
relation_fact(ast_manager & m) : app_ref_vector(m) {}
relation_fact(ast_manager & m, unsigned sz) : app_ref_vector(m) { resize(sz); }
relation_fact(context & ctx);
iterator begin() const { return c_ptr(); }
iterator end() const { return c_ptr()+size(); }

@@ -126,7 +126,7 @@ namespace datalog {
virtual bool has_facts(func_decl * pred) const = 0;
virtual void store_relation(func_decl * pred, relation_base * rel) = 0;
virtual void inherit_predicate_kind(func_decl* new_pred, func_decl* orig_pred) = 0;
virtual void set_predicate_representation(func_decl * pred, unsigned relation_name_cnt,
symbol const * relation_names) = 0;
virtual bool output_profile() const = 0;
virtual void collect_non_empty_predicates(func_decl_set& preds) = 0;

@@ -147,7 +147,7 @@ namespace datalog {
public:
contains_pred(context& ctx): ctx(ctx) {}
~contains_pred() override {}
bool operator()(expr* e) override {
return ctx.is_predicate(e);
}

@@ -170,7 +170,7 @@ namespace datalog {
register_engine_base& m_register_engine;
smt_params & m_fparams;
params_ref m_params_ref;
fixedpoint_params* m_params;
fp_params* m_params;
bool m_generate_proof_trace; // cached configuration parameter
bool m_unbound_compressor; // cached configuration parameter
symbol m_default_relation; // cached configuration parameter

@@ -227,7 +227,7 @@ namespace datalog {
void push();
void pop();
bool saturation_was_run() const { return m_saturation_was_run; }
void notify_saturation_was_run() { m_saturation_was_run = true; }

@@ -236,7 +236,7 @@ namespace datalog {
ast_manager & get_manager() const { return m; }
rule_manager & get_rule_manager() { return m_rule_manager; }
smt_params & get_fparams() const { return m_fparams; }
fixedpoint_params const& get_params() const { return *m_params; }
fp_params const& get_params() const { return *m_params; }
DL_ENGINE get_engine() { configure_engine(); return m_engine_type; }
register_engine_base& get_register_engine() { return m_register_engine; }
th_rewriter& get_rewriter() { return m_rewriter; }

@@ -251,7 +251,7 @@ namespace datalog {
symbol default_table() const;
symbol default_relation() const;
void set_default_relation(symbol const& s);
symbol default_table_checker() const;
symbol check_relation() const;
bool default_table_checked() const;
bool dbg_fpr_nonempty_relation_signature() const;

@@ -275,7 +275,7 @@ namespace datalog {
bool compress_unbound() const;
bool quantify_arrays() const;
bool instantiate_quantifiers() const;
bool xform_bit_blast() const;
bool xform_slice() const;
bool xform_coi() const;
bool array_blast() const;

@@ -291,9 +291,9 @@ namespace datalog {
void register_variable(func_decl* var);
/*
Replace constants that have been registered as
variables by de-Bruijn indices and corresponding
universal (if is_forall is true) or existential
quantifier.
*/
expr_ref bind_vars(expr* fml, bool is_forall);

@@ -303,7 +303,7 @@ namespace datalog {
/**
Register datalog relation.
If named is true, we associate the predicate with its name, so that it can be
retrieved by the try_get_predicate_decl() function. Auxiliary predicates introduced
e.g. by rule transformations do not need to be named.
*/

@@ -326,7 +326,7 @@ namespace datalog {
/**
\brief If a predicate name has a \c func_decl object assigned, return pointer to it;
otherwise return 0.
Not all \c func_decl object used as relation identifiers need to be assigned to their
names. Generally, the names coming from the parses are registered here.
*/

@@ -334,13 +334,13 @@ namespace datalog {
func_decl * res = nullptr;
m_preds_by_name.find(pred_name, res);
return res;
}
}
/**
\brief Create a fresh head predicate declaration.
*/
func_decl * mk_fresh_head_predicate(symbol const & prefix, symbol const & suffix,
unsigned arity, sort * const * domain, func_decl* orig_pred=nullptr);

@@ -365,13 +365,13 @@ namespace datalog {
/**
\brief Assign names of variables used in the declaration of a predicate.
These names are used when printing out the relations to make the output conform
to the one of bddbddb.
*/
void set_argument_names(const func_decl * pred, const svector<symbol> & var_names);
symbol get_argument_name(const func_decl * pred, unsigned arg_index);
void set_predicate_representation(func_decl * pred, unsigned relation_name_cnt,
symbol const * relation_names);
void set_output_predicate(func_decl * pred) { m_rule_set.set_output_predicate(pred); }

@@ -385,9 +385,9 @@ namespace datalog {
void add_fact(func_decl * pred, const relation_fact & fact);
bool has_facts(func_decl * pred) const;
void add_rule(rule_ref& r);
void assert_expr(expr* e);
expr_ref get_background_assertion();
unsigned get_num_assertions() { return m_background.size(); }

@@ -397,7 +397,7 @@ namespace datalog {
Method exposed from API for adding rules.
*/
void add_rule(expr* rl, symbol const& name, unsigned bound = UINT_MAX);
/**
Update a named rule.

@@ -421,9 +421,9 @@ namespace datalog {
at 'level+1', 'level+2' etc, and include level=-1.
*/
expr_ref get_cover_delta(int level, func_decl* pred);
/**
Add a property of predicate 'pred' at 'level'.
It gets pushed forward when possible.
*/
void add_cover(int level, func_decl* pred, expr* property);

@@ -432,7 +432,7 @@ namespace datalog {
Add an invariant of predicate 'pred'.
*/
void add_invariant (func_decl *pred, expr *property);
/**
\brief Check rule subsumption.
*/

@@ -471,15 +471,15 @@ namespace datalog {
proof_converter_ref& get_proof_converter() { return m_pc; }
void add_proof_converter(proof_converter* pc) { m_pc = concat(m_pc.get(), pc); }
void transform_rules(rule_transformer& transf);
void transform_rules(rule_transformer::plugin* plugin);
void replace_rules(rule_set const& rs);
void record_transformed_rules();
void apply_default_transformation();
void collect_params(param_descrs& r);
void updt_params(params_ref const& p);
void display_rules(std::ostream & out) const {

@@ -507,7 +507,7 @@ namespace datalog {
/**
\brief check if query 'q' is satisfied under asserted rules and background.
If successful, return OK and into \c result assign a relation with all
tuples matching the query. Otherwise return reason for failure and do not modify
\c result.

@@ -515,7 +515,7 @@ namespace datalog {
starting from zero.
The caller becomes an owner of the relation object returned in \c result. The
relation object, however, should not outlive the datalog context since it is
linked to a relation plugin in the context.
*/

@@ -524,7 +524,7 @@ namespace datalog {
lbool query_from_lvl (expr* q, unsigned lvl);
/**
\brief retrieve model from inductive invariant that shows query is unsat.
\pre engine == 'pdr' || engine == 'duality' - this option is only supported
for PDR mode and Duality mode.
*/

@@ -532,7 +532,7 @@ namespace datalog {
/**
\brief retrieve proof from derivation of the query.
\pre engine == 'pdr' || engine == 'duality'- this option is only supported
for PDR mode and Duality mode.
*/

@@ -588,12 +588,25 @@ namespace datalog {
rel_context_base* get_rel_context() { ensure_engine(); return m_rel; }
void add_callback(void *state,
const datalog::t_new_lemma_eh new_lemma_eh,
const datalog::t_predecessor_eh predecessor_eh,
const datalog::t_unfold_eh unfold_eh) {
ensure_engine();
m_engine->add_callback(state, new_lemma_eh, predecessor_eh, unfold_eh);
}
void add_constraint (expr *c, unsigned lvl){
ensure_engine();
m_engine->add_constraint(c, lvl);
}
private:
/**
Just reset all tables.
*/
void reset_tables();
void flush_add_rules();

@@ -614,4 +627,3 @@ namespace datalog {
};
#endif /* DL_CONTEXT_H_ */

@@ -25,9 +25,7 @@ Revision History:
namespace datalog {
enum DL_ENGINE {
DATALOG_ENGINE,
PDR_ENGINE,
SPACER_ENGINE,
QPDR_ENGINE,
BMC_ENGINE,
QBMC_ENGINE,
TAB_ENGINE,

@@ -36,6 +34,10 @@ namespace datalog {
LAST_ENGINE
};
typedef void (*t_new_lemma_eh)(void *state, expr *lemma, unsigned level);
typedef void (*t_predecessor_eh)(void *state);
typedef void (*t_unfold_eh)(void *state);
class engine_base {
ast_manager& m;
std::string m_name;

@@ -102,6 +104,15 @@ namespace datalog {
virtual proof_ref get_proof() {
return proof_ref(m.mk_asserted(m.mk_true()), m);
}
virtual void add_callback(void *state,
const t_new_lemma_eh new_lemma_eh,
const t_predecessor_eh predecessor_eh,
const t_unfold_eh unfold_eh) {
throw default_exception(std::string("add_lemma_exchange_callbacks is not supported for ") + m_name);
}
virtual void add_constraint (expr *c, unsigned lvl){
throw default_exception(std::string("add_constraint is not supported for ") + m_name);
}
virtual void updt_params() {}
virtual void cancel() {}
virtual void cleanup() {}
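
Editorial illustration, not part of the commit: the callback typedefs and the virtual add_callback hook above are reached through datalog::context::add_callback, shown earlier in dl_context.h. A minimal, hedged sketch of a client registering lemma-exchange callbacks inside the Z3 source tree; the handler names and the state object are hypothetical:

    // Hypothetical handlers matching t_new_lemma_eh / t_predecessor_eh / t_unfold_eh.
    static void on_new_lemma(void* state, expr* lemma, unsigned level) { /* record the lemma */ }
    static void on_predecessor(void* state) { /* a predecessor obligation was created */ }
    static void on_unfold(void* state) { /* a rule was unfolded */ }

    void install_callbacks(datalog::context& ctx, void* my_state) {
        // Forwards to the underlying engine; engines without lemma exchange
        // throw default_exception (see engine_base::add_callback above).
        ctx.add_callback(my_state, on_new_lemma, on_predecessor, on_unfold);
    }
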
@@ -1,37 +1,37 @@
def_module_params('fixedpoint',
def_module_params('fp',
description='fixedpoint parameters',
export=True,
params=(('timeout', UINT, UINT_MAX, 'set timeout'),
('engine', SYMBOL, 'auto-config',
'Select: auto-config, datalog, spacer, pdr, bmc'),
('datalog.default_table', SYMBOL, 'sparse',
('engine', SYMBOL, 'auto-config',
'Select: auto-config, datalog, bmc, spacer'),
('datalog.default_table', SYMBOL, 'sparse',
'default table implementation: sparse, hashtable, bitvector, interval'),
('datalog.default_relation', SYMBOL, 'pentagon',
'default relation implementation: external_relation, pentagon'),
('datalog.generate_explanations', BOOL, False,
'produce explanations for produced facts when using the datalog engine'),
('datalog.use_map_names', BOOL, True,
"use names from map files when displaying tuples"),
('datalog.magic_sets_for_queries', BOOL, False,
"magic set transformation will be used for queries"),
('datalog.explanations_on_relation_level', BOOL, False,
'if true, explanations are generated as history of each relation, ' +
'rather than per fact (generate_explanations must be set to true for ' +
'this option to have any effect)'),
('datalog.unbound_compressor', BOOL, True,
"auxiliary relations will be introduced to avoid unbound variables " +
"in rule heads"),
('datalog.similarity_compressor', BOOL, True,
"rules that differ only in values of constants will be merged into " +
"a single rule"),
('datalog.similarity_compressor_threshold', UINT, 11,
"if similarity_compressor is on, this value determines how many " +
"similar rules there must be in order for them to be merged"),
('datalog.all_or_nothing_deltas', BOOL, False,
"compile rules so that it is enough for the delta relation in " +
"union and widening operations to determine only whether the " +
"updated relation was modified or not"),
('datalog.compile_with_widening', BOOL, False,
"widening will be used to compile recursive rules"),
('datalog.default_table_checked', BOOL, False, "if true, the default " +
'table will be default_table inside a wrapper that checks that its results ' +

@@ -39,15 +39,15 @@ def_module_params('fixedpoint',
('datalog.default_table_checker', SYMBOL, 'null', "see default_table_checked"),
('datalog.check_relation',SYMBOL,'null', "name of default relation to check. " +
"operations on the default relation will be verified using SMT solving"),
('datalog.initial_restart_timeout', UINT, 0,
"length of saturation run before the first restart (in ms), " +
"zero means no restarts"),
('datalog.output_profile', BOOL, False,
"determines whether profile information should be " +
"output when outputting Datalog rules or instructions"),
('datalog.print.tuples', BOOL, True,
"determines whether tuples for output predicates should be output"),
('datalog.profile_timeout_milliseconds', UINT, 0,
"instructions and rules that took less than the threshold " +
"will not be printed when printed the instruction/rule list"),
('datalog.dbg_fpr_nonempty_relation_signature', BOOL, False,

@@ -56,139 +56,126 @@ def_module_params('fixedpoint',
"table columns, if it would have been empty otherwise"),
('datalog.subsumption', BOOL, True,
"if true, removes/filters predicates with total transitions"),
('pdr.bfs_model_search', BOOL, True,
"use BFS strategy for expanding model search"),
('pdr.farkas', BOOL, True,
"use lemma generator based on Farkas (for linear real arithmetic)"),
('generate_proof_trace', BOOL, False, "trace for 'sat' answer as proof object"),
('pdr.flexible_trace', BOOL, False,
"allow PDR generate long counter-examples " +
"by extending candidate trace within search area"),
('pdr.flexible_trace_depth', UINT, UINT_MAX,
'Controls the depth (below the current level) at which flexible trace can be applied'),
('pdr.use_model_generalizer', BOOL, False,
"use model for backwards propagation (instead of symbolic simulation)"),
('pdr.validate_result', BOOL, False,
('spacer.push_pob', BOOL, False, "push blocked pobs to higher level"),
('spacer.push_pob_max_depth', UINT, UINT_MAX,
'Maximum depth at which push_pob is enabled'),
('validate', BOOL, False,
"validate result (by proof checking or model checking)"),
('pdr.simplify_formulas_pre', BOOL, False,
"simplify derived formulas before inductive propagation"),
('pdr.simplify_formulas_post', BOOL, False,
"simplify derived formulas after inductive propagation"),
('pdr.use_multicore_generalizer', BOOL, False,
"extract multiple cores for blocking states"),
('pdr.use_inductive_generalizer', BOOL, True,
('spacer.simplify_lemmas_pre', BOOL, False,
"simplify derived lemmas before inductive propagation"),
('spacer.simplify_lemmas_post', BOOL, False,
"simplify derived lemmas after inductive propagation"),
('spacer.use_inductive_generalizer', BOOL, True,
"generalize lemmas using induction strengthening"),
('pdr.use_arith_inductive_generalizer', BOOL, False,
"generalize lemmas using arithmetic heuristics for induction strengthening"),
('pdr.use_convex_closure_generalizer', BOOL, False,
"generalize using convex closures of lemmas"),
('pdr.use_convex_interior_generalizer', BOOL, False,
"generalize using convex interiors of lemmas"),
('pdr.cache_mode', UINT, 0, "use no (0), symbolic (1) or explicit " +
"cache (2) for model search"),
('pdr.inductive_reachability_check', BOOL, False,
"assume negation of the cube on the previous level when " +
"checking for reachability (not only during cube weakening)"),
('pdr.max_num_contexts', UINT, 500, "maximal number of contexts to create"),
('pdr.try_minimize_core', BOOL, False,
"try to reduce core size (before inductive minimization)"),
('pdr.utvpi', BOOL, True, 'Enable UTVPI strategy'),
('print_fixedpoint_extensions', BOOL, True,
"use SMT-LIB2 fixedpoint extensions, instead of pure SMT2, " +
('spacer.max_num_contexts', UINT, 500, "maximal number of contexts to create"),
('print_fixedpoint_extensions', BOOL, True,
"use SMT-LIB2 fixedpoint extensions, instead of pure SMT2, " +
"when printing rules"),
('print_low_level_smt2', BOOL, False,
"use (faster) low-level SMT2 printer (the printer is scalable " +
"but the result may not be as readable)"),
('print_with_variable_declarations', BOOL, True,
"use variable declarations when displaying rules " +
"(instead of attempting to use original names)"),
('print_answer', BOOL, False, 'print answer instance(s) to query'),
('print_certificate', BOOL, False,
'print certificate for reachability or non-reachability'),
('print_boogie_certificate', BOOL, False,
'print certificate for reachability or non-reachability using a ' +
'format understood by Boogie'),
('print_statistics', BOOL, False, 'print statistics'),
('print_aig', SYMBOL, '',
'Dump clauses in AIG text format (AAG) to the given file name'),
('tab.selection', SYMBOL, 'weight',
'selection method for tabular strategy: weight (default), first, var-use'),
('xform.bit_blast', BOOL, False,
'bit-blast bit-vectors'),
('xform.magic', BOOL, False,
"perform symbolic magic set transformation"),
('xform.scale', BOOL, False,
"add scaling variable to linear real arithmetic clauses"),
('xform.inline_linear', BOOL, True, "try linear inlining method"),
('xform.inline_eager', BOOL, True, "try eager inlining of rules"),
('xform.inline_linear_branch', BOOL, False,
"try linear inlining method with potential expansion"),
('xform.compress_unbound', BOOL, True, "compress tails with unbound variables"),
('xform.fix_unbound_vars', BOOL, False, "fix unbound variables in tail"),
('xform.unfold_rules', UINT, 0,
"unfold rules statically using iterative squarring"),
('xform.unfold_rules', UINT, 0,
"unfold rules statically using iterative squaring"),
('xform.slice', BOOL, True, "simplify clause set using slicing"),
('xform.karr', BOOL, False,
"Add linear invariants to clauses using Karr's method"),
('spacer.use_eqclass', BOOL, False, "Generalizes equalities to equivalence classes"),
('xform.transform_arrays', BOOL, False,
('spacer.use_euf_gen', BOOL, False, 'Generalize lemmas and pobs using implied equalities'),
('xform.transform_arrays', BOOL, False,
"Rewrites arrays equalities and applies select over store"),
('xform.instantiate_arrays', BOOL, False,
"Transforms P(a) into P(i, a[i] a)"),
('xform.instantiate_arrays.enforce', BOOL, False,
"Transforms P(a) into P(i, a[i]), discards a from predicate"),
('xform.instantiate_arrays.nb_quantifier', UINT, 1,
"Gives the number of quantifiers per array"),
('xform.instantiate_arrays.slice_technique', SYMBOL, "no-slicing",
"<no-slicing>=> GetId(i) = i, <smash> => GetId(i) = true"),
('xform.quantify_arrays', BOOL, False,
"create quantified Horn clauses from clauses with arrays"),
('xform.instantiate_quantifiers', BOOL, False,
"instantiate quantified Horn clauses using E-matching heuristic"),
('xform.coalesce_rules', BOOL, False, "coalesce rules"),
('xform.tail_simplifier_pve', BOOL, True, "propagate_variable_equivalences"),
('xform.subsumption_checker', BOOL, True, "Enable subsumption checker (no support for model conversion)"),
('xform.coi', BOOL, True, "use cone of influence simplification"),
('spacer.order_children', UINT, 0, 'SPACER: order of enqueuing children in non-linear rules : 0 (original), 1 (reverse)'),
('spacer.eager_reach_check', BOOL, True, 'SPACER: eagerly check if a query is reachable using reachability facts of predecessors'),
('spacer.order_children', UINT, 0, 'SPACER: order of enqueuing children in non-linear rules : 0 (original), 1 (reverse), 2 (random)'),
('spacer.use_lemma_as_cti', BOOL, False, 'SPACER: use a lemma instead of a CTI in flexible_trace'),
('spacer.reset_obligation_queue', BOOL, True, 'SPACER: reset obligation queue when entering a new level'),
('spacer.init_reach_facts', BOOL, True, 'SPACER: initialize reachability facts with false'),
('spacer.reset_pob_queue', BOOL, True, 'SPACER: reset pob obligation queue when entering a new level'),
('spacer.use_array_eq_generalizer', BOOL, True, 'SPACER: attempt to generalize lemmas with array equalities'),
('spacer.use_derivations', BOOL, True, 'SPACER: using derivation mechanism to cache intermediate results for non-linear rules'),
('xform.array_blast', BOOL, False, "try to eliminate local array terms using Ackermannization -- some array terms may remain"),
('xform.array_blast_full', BOOL, False, "eliminate all local array variables by QE"),
('spacer.skip_propagate', BOOL, False, "Skip propagate/pushing phase. Turns PDR into a BMC that returns either reachable or unknown"),
('spacer.use_derivations', BOOL, True, 'SPACER: using derivation mechanism to cache intermediate results for non-linear rules'),
('xform.array_blast', BOOL, False, "try to eliminate local array terms using Ackermannization -- some array terms may remain"),
('xform.array_blast_full', BOOL, False, "eliminate all local array variables by QE"),
('spacer.propagate', BOOL, True, 'Enable propagate/pushing phase'),
('spacer.max_level', UINT, UINT_MAX, "Maximum level to explore"),
('spacer.elim_aux', BOOL, True, "Eliminate auxiliary variables in reachability facts"),
('spacer.reach_as_init', BOOL, True, "Extend initial rules with computed reachability facts"),
('spacer.blast_term_ite', BOOL, True, "Expand non-Boolean ite-terms"),
('spacer.nondet_tie_break', BOOL, False, "Break ties in obligation queue non-deterministically"),
('spacer.reach_dnf', BOOL, True, "Restrict reachability facts to DNF"),
('bmc.linear_unrolling_depth', UINT, UINT_MAX, "Maximal level to explore"),
('spacer.split_farkas_literals', BOOL, False, "Split Farkas literals"),
('spacer.native_mbp', BOOL, False, "Use native mbp of Z3"),
('spacer.iuc.split_farkas_literals', BOOL, False, "Split Farkas literals"),
('spacer.native_mbp', BOOL, True, "Use native mbp of Z3"),
('spacer.eq_prop', BOOL, True, "Enable equality and bound propagation in arithmetic"),
('spacer.weak_abs', BOOL, True, "Weak abstraction"),
('spacer.restarts', BOOL, False, "Enable reseting obligation queue"),
('spacer.restart_initial_threshold', UINT, 10, "Intial threshold for restarts"),
('spacer.random_seed', UINT, 0, "Random seed to be used by SMT solver"),
('spacer.ground_cti', BOOL, True, "Require CTI to be ground"),
('spacer.vs.dump_benchmarks', BOOL, False, 'dump benchmarks in virtual solver'),
('spacer.vs.dump_min_time', DOUBLE, 5.0, 'min time to dump benchmark'),
('spacer.vs.recheck', BOOL, False, 're-check locally during benchmark dumping'),
('spacer.mbqi', BOOL, True, 'use model-based quantifier instantiation'),
('spacer.mbqi', BOOL, True, 'Enable mbqi'),
('spacer.keep_proxy', BOOL, True, 'keep proxy variables (internal parameter)'),
('spacer.instantiate', BOOL, True, 'instantiate quantified lemmas'),
('spacer.qlemmas', BOOL, True, 'allow quantified lemmas in frames'),
('spacer.new_unsat_core', BOOL, True, 'use the new implementation of unsat-core-generation'),
('spacer.minimize_unsat_core', BOOL, False, 'compute unsat-core by min-cut'),
('spacer.farkas_optimized', BOOL, True, 'use the optimized farkas plugin, which performs gaussian elimination'),
('spacer.farkas_a_const', BOOL, True, 'if the unoptimized farkas plugin is used, use the constants from A while constructing unsat_cores'),
('spacer.lemma_sanity_check', BOOL, False, 'check during generalization whether lemma is actually correct'),
('spacer.reuse_pobs', BOOL, True, 'reuse POBs'),
('spacer.simplify_pob', BOOL, False, 'simplify POBs by removing redundant constraints')
('spacer.q3', BOOL, True, 'Allow quantified lemmas in frames'),
('spacer.q3.instantiate', BOOL, True, 'Instantiate quantified lemmas'),
('spacer.iuc', UINT, 1,
'0 = use old implementation of unsat-core-generation, ' +
'1 = use new implementation of IUC generation, ' +
'2 = use new implementation of IUC + min-cut optimization'),
('spacer.iuc.arith', UINT, 1,
'0 = use simple Farkas plugin, ' +
'1 = use simple Farkas plugin with constant from other partition (like old unsat-core-generation),' +
'2 = use Gaussian elimination optimization (broken), 3 = use additive IUC plugin'),
('spacer.iuc.old_hyp_reducer', BOOL, False, 'use old hyp reducer instead of new implementation, for debugging only'),
('spacer.validate_lemmas', BOOL, False, 'Validate each lemma after generalization'),
('spacer.reuse_pobs', BOOL, True, 'Reuse pobs'),
('spacer.ground_pobs', BOOL, True, 'Ground pobs by using values from a model'),
('spacer.iuc.print_farkas_stats', BOOL, False, 'prints for each proof how many Farkas lemmas it contains and how many of these participate in the cut (for debugging)'),
('spacer.iuc.debug_proof', BOOL, False, 'prints proof used by unsat-core-learner for debugging purposes (debugging)'),
('spacer.simplify_pob', BOOL, False, 'simplify pobs by removing redundant constraints'),
('spacer.q3.use_qgen', BOOL, False, 'use quantified lemma generalizer'),
('spacer.q3.qgen.normalize', BOOL, True, 'normalize cube before quantified generalization'),
('spacer.p3.share_lemmas', BOOL, False, 'Share frame lemmas'),
('spacer.p3.share_invariants', BOOL, False, "Share invariants lemmas"),
('spacer.min_level', UINT, 0, 'Minimal level to explore'),
('spacer.print_json', SYMBOL, '', 'Print pobs tree in JSON format to a given file'),
('spacer.ctp', BOOL, True, 'Enable counterexample-to-pushing'),
('spacer.use_inc_clause', BOOL, True, 'Use incremental clause to represent trans'),
('spacer.dump_benchmarks', BOOL, False, 'Dump SMT queries as benchmarks'),
('spacer.dump_threshold', DOUBLE, 5.0, 'Threshold in seconds on dumping benchmarks'),
('spacer.gpdr', BOOL, False, 'Use GPDR solving strategy for non-linear CHC'),
('spacer.gpdr.bfs', BOOL, True, 'Use BFS exploration strategy for expanding model search'),
))
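
Editorial illustration, not part of the commit: options declared here through def_module_params are compiled into the generated header fp_params.hpp, and the C++ code elsewhere in this diff reads them through accessors whose names replace '.' with '_' (for example m_params->engine() and m_params->xform_bit_blast() in dl_context.cpp). A minimal, hedged sketch; the helper function is hypothetical:

    // Hedged sketch; the accessor names follow the generated naming used in dl_context.cpp.
    #include "muz/base/fp_params.hpp"

    static void dump_fp_options(fp_params const& p, std::ostream& out) {
        out << "engine=" << p.engine()                       // ('engine', SYMBOL, ...)
            << " bit_blast=" << p.xform_bit_blast()           // ('xform.bit_blast', BOOL, ...)
            << " default_relation=" << p.datalog_default_relation() << "\n";
    }
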
@@ -146,12 +146,10 @@ void rule_properties::check_existential_tail() {
else if (is_quantifier(e)) {
tocheck.push_back(to_quantifier(e)->get_expr());
}
else if ((m.is_eq(e, e1, e2) || m.is_iff(e, e1, e2)) &&
m.is_true(e1)) {
else if (m.is_eq(e, e1, e2) && m.is_true(e1)) {
todo.push_back(e2);
}
else if ((m.is_eq(e, e1, e2) || m.is_iff(e, e1, e2)) &&
m.is_true(e2)) {
else if (m.is_eq(e, e1, e2) && m.is_true(e2)) {
todo.push_back(e1);
}
else {

@@ -33,7 +33,7 @@ Revision History:
#include "muz/transforms/dl_mk_rule_inliner.h"
#include "ast/scoped_proof.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
namespace datalog {

@@ -9,7 +9,6 @@ z3_add_component(fp
clp
ddnf
muz
pdr
rel
spacer
tab

@@ -30,23 +30,23 @@ Notes:
#include "util/scoped_ctrl_c.h"
#include "util/scoped_timer.h"
#include "util/trail.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
#include<iomanip>
struct dl_context {
smt_params m_fparams;
params_ref m_params_ref;
fixedpoint_params m_params;
fp_params m_params;
cmd_context & m_cmd;
datalog::register_engine m_register_engine;
dl_collected_cmds* m_collected_cmds;
unsigned m_ref_count;
datalog::dl_decl_plugin* m_decl_plugin;
scoped_ptr<datalog::context> m_context;
trail_stack<dl_context> m_trail;
fixedpoint_params const& get_params() {
fp_params const& get_params() {
init();
return m_context->get_params();
}

@@ -58,18 +58,18 @@ struct dl_context {
m_ref_count(0),
m_decl_plugin(nullptr),
m_trail(*this) {}
void inc_ref() {
++m_ref_count;
}
void dec_ref() {
--m_ref_count;
if (0 == m_ref_count) {
dealloc(this);
}
}
void init() {
ast_manager& m = m_cmd.m();
if (!m_context) {

@@ -83,10 +83,10 @@ struct dl_context {
else {
m_decl_plugin = alloc(datalog::dl_decl_plugin);
m.register_plugin(symbol("datalog_relation"), m_decl_plugin);
}
}
}
}
void reset() {
m_context = nullptr;
}

@@ -97,9 +97,9 @@ struct dl_context {
m_trail.push(push_back_vector<dl_context, func_decl_ref_vector>(m_collected_cmds->m_rels));
}
dlctx().register_predicate(pred, false);
dlctx().set_predicate_representation(pred, num_kinds, kinds);
}
void add_rule(expr * rule, symbol const& name, unsigned bound) {
init();
if (m_collected_cmds) {

@@ -112,7 +112,7 @@ struct dl_context {
else {
m_context->add_rule(rule, name, bound);
}
}
}
bool collect_query(func_decl* q) {
if (m_collected_cmds) {

@@ -127,7 +127,7 @@ struct dl_context {
m_collected_cmds->m_queries.push_back(qr);
m_trail.push(push_back_vector<dl_context, expr_ref_vector>(m_collected_cmds->m_queries));
return true;
}
}
else {
return false;
}

@@ -142,7 +142,7 @@ struct dl_context {
m_trail.pop_scope(1);
dlctx().pop();
}
datalog::context & dlctx() {
init();
return *m_context;

@@ -162,7 +162,7 @@ class dl_rule_cmd : public cmd {
public:
dl_rule_cmd(dl_context * dl_ctx):
cmd("rule"),
m_dl_ctx(dl_ctx),
m_arg_idx(0),
m_t(nullptr),
m_bound(UINT_MAX) {}

@@ -210,7 +210,7 @@ public:
}
char const * get_usage() const override { return "predicate"; }
char const * get_main_descr() const override {
return "pose a query to a predicate based on the Horn rules.";
}
cmd_arg_kind next_arg_kind(cmd_context & ctx) const override {

@@ -243,9 +243,9 @@ public:
return;
}
datalog::context& dlctx = m_dl_ctx->dlctx();
set_background(ctx);
dlctx.updt_params(m_params);
unsigned timeout = m_dl_ctx->get_params().timeout();
cancel_eh<reslimit> eh(ctx.m().limit());
bool query_exn = false;
lbool status = l_undef;

@@ -271,12 +271,12 @@ public:
ctx.regular_stream() << "unsat\n";
print_certificate(ctx);
break;
case l_true:
ctx.regular_stream() << "sat\n";
print_answer(ctx);
print_certificate(ctx);
break;
case l_undef:
if (dlctx.get_status() == datalog::BOUNDED){
ctx.regular_stream() << "bounded\n";
print_certificate(ctx);

@@ -287,7 +287,7 @@ public:
case datalog::INPUT_ERROR:
ctx.regular_stream() << "input error\n";
break;
case datalog::MEMOUT:
ctx.regular_stream() << "memory bounds exceeded\n";
break;

@@ -295,12 +295,12 @@ public:
case datalog::TIMEOUT:
ctx.regular_stream() << "timeout\n";
break;
case datalog::APPROX:
ctx.regular_stream() << "approximated relations\n";
break;
case datalog::OK:
(void)query_exn;
SASSERT(query_exn);
break;

@@ -324,7 +324,7 @@ public:
void init_pdescrs(cmd_context & ctx, param_descrs & p) override {
m_dl_ctx->dlctx().collect_params(p);
}
private:
void set_background(cmd_context& ctx) {

@@ -356,8 +356,8 @@ private:
statistics st;
datalog::context& dlctx = m_dl_ctx->dlctx();
dlctx.collect_statistics(st);
st.update("time", ctx.get_seconds());
st.display_smt2(ctx.regular_stream());
}
}

@@ -391,8 +391,8 @@ public:
void prepare(cmd_context & ctx) override {
ctx.m(); // ensure manager is initialized.
m_arg_idx = 0;
m_query_arg_idx = 0;
m_domain.reset();
m_kinds.reset();
}

@@ -443,21 +443,21 @@ public:
m_arg_idx(0),
m_dl_ctx(dl_ctx)
{}
char const * get_usage() const override { return "<symbol> <sort>"; }
char const * get_descr(cmd_context & ctx) const override { return "declare constant as variable"; }
unsigned get_arity() const override { return 2; }
void prepare(cmd_context & ctx) override {
ctx.m(); // ensure manager is initialized.
m_arg_idx = 0;
}
cmd_arg_kind next_arg_kind(cmd_context & ctx) const override {
SASSERT(m_arg_idx <= 1);
if (m_arg_idx == 0) {
return CPK_SYMBOL;
}
return CPK_SORT;
}
void set_next_arg(cmd_context & ctx, sort* s) override {

@@ -466,7 +466,7 @@ public:
}
void set_next_arg(cmd_context & ctx, symbol const & s) override {
m_var_name = s;
++m_arg_idx;
}

@@ -523,7 +523,7 @@ static void install_dl_cmds_aux(cmd_context& ctx, dl_collected_cmds* collected_c
ctx.insert(alloc(dl_query_cmd, dl_ctx));
ctx.insert(alloc(dl_declare_rel_cmd, dl_ctx));
ctx.insert(alloc(dl_declare_var_cmd, dl_ctx));
ctx.insert(alloc(dl_push_cmd, dl_ctx));
ctx.insert(alloc(dl_pop_cmd, dl_ctx));
}

@@ -21,7 +21,6 @@ Revision History:
#include "muz/clp/clp_context.h"
#include "muz/tab/tab_context.h"
#include "muz/rel/rel_context.h"
#include "muz/pdr/pdr_dl_interface.h"
#include "muz/ddnf/ddnf.h"
#include "muz/spacer/spacer_dl_interface.h"

@@ -30,9 +29,6 @@ namespace datalog {
engine_base* register_engine::mk_engine(DL_ENGINE engine_type) {
switch(engine_type) {
case PDR_ENGINE:
case QPDR_ENGINE:
return alloc(pdr::dl_interface, *m_ctx);
case SPACER_ENGINE:
return alloc(spacer::dl_interface, *m_ctx);
case DATALOG_ENGINE:

@@ -27,7 +27,7 @@ Revision History:
#include "muz/transforms/dl_mk_slice.h"
#include "tactic/generic_model_converter.h"
#include "muz/transforms/dl_transforms.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
#include "ast/ast_util.h"
#include "ast/rewriter/var_subst.h"

@@ -71,7 +71,7 @@ class horn_tactic : public tactic {
                f = to_quantifier(f)->get_expr();
            }
            else if (is_exists(f) && !is_positive) {
                f = to_quantifier(f)->get_expr();
            }
            else if (m.is_not(f, e)) {
                is_positive = !is_positive;
@@ -84,7 +84,7 @@ class horn_tactic : public tactic {
            if (!is_positive) {
                f = m.mk_not(f);
            }

        }

        bool is_predicate(expr* a) {
@@ -144,7 +144,7 @@ class horn_tactic : public tactic {
            expr* a = nullptr, *a1 = nullptr;
            flatten_or(tmp, args);
            for (unsigned i = 0; i < args.size(); ++i) {
                a = args[i].get();
                check_predicate(mark, a);
                if (m.is_not(a, a1)) {
                    body.push_back(a1);
@@ -176,13 +176,13 @@ class horn_tactic : public tactic {
            return expr_ref(m.mk_implies(body, head), m);
        }

        void operator()(goal_ref const & g,
                        goal_ref_buffer & result) {
            SASSERT(g->is_well_sorted());
            tactic_report report("horn", *g);
            bool produce_proofs = g->proofs_enabled();

            if (produce_proofs) {
                if (!m_ctx.generate_proof_trace()) {
                    params_ref params = m_ctx.get_params().p;
                    params.set_bool("generate_proof_trace", true);
@@ -208,7 +208,7 @@ class horn_tactic : public tactic {
                case IS_QUERY:
                    queries.push_back(f);
                    break;
                default:
                    msg << "formula is not in Horn fragment: " << mk_pp(g->form(i), m) << "\n";
                    TRACE("horn", tout << msg.str(););
                    throw tactic_exception(msg.str().c_str());
@@ -243,10 +243,10 @@ class horn_tactic : public tactic {
            g->set(mc.get());
        }

        void verify(expr* q,
                    goal_ref const& g,
                    goal_ref_buffer & result,
                    model_converter_ref & mc,
                    proof_converter_ref & pc) {

            lbool is_reachable = l_undef;
@@ -275,9 +275,9 @@ class horn_tactic : public tactic {
                else {
                    g->assert_expr(m.mk_false());
                }
                break;
            }
            case l_false: {
                // goal is sat
                g->reset();
                if (produce_models) {
@@ -290,11 +290,11 @@ class horn_tactic : public tactic {
                        mc = mc2;
                    }
                }
                break;
            }
            case l_undef:
                // subgoal is unchanged.
                break;
            }
            TRACE("horn", g->display(tout););
            SASSERT(g->is_well_sorted());
@@ -314,20 +314,20 @@ class horn_tactic : public tactic {
            }
        }

        void simplify(expr* q,
                      goal_ref const& g,
                      goal_ref_buffer & result,
                      model_converter_ref & mc,
                      proof_converter_ref & pc) {

            expr_ref fml(m);

            func_decl* query_pred = to_app(q)->get_decl();
            m_ctx.set_output_predicate(query_pred);
            m_ctx.get_rules(); // flush adding rules.
            apply_default_transformation(m_ctx);

            if (m_ctx.xform_slice()) {
                datalog::rule_transformer transformer(m_ctx);
                datalog::mk_slice* slice = alloc(datalog::mk_slice, m_ctx);
@@ -351,7 +351,7 @@ class horn_tactic : public tactic {
                g->assert_expr(fml);
            }
        }

    };

    bool m_is_simplify;
@@ -368,7 +368,7 @@ public:
    tactic * translate(ast_manager & m) override {
        return alloc(horn_tactic, m_is_simplify, m, m_params);
    }

    ~horn_tactic() override {
        dealloc(m_imp);
    }
@@ -378,16 +378,16 @@ public:
        m_imp->updt_params(p);
    }

    void collect_param_descrs(param_descrs & r) override {
        m_imp->collect_param_descrs(r);
    }

    void operator()(goal_ref const & in,
                    goal_ref_buffer & result) override {
        (*m_imp)(in, result);
    }

    void collect_statistics(statistics & st) const override {
        m_imp->collect_statistics(st);
        st.copy(m_stats);
@@ -397,15 +397,15 @@ public:
        m_stats.reset();
        m_imp->reset_statistics();
    }

    void cleanup() override {
        ast_manager & m = m_imp->m;
        m_imp->collect_statistics(m_stats);
        dealloc(m_imp);
        m_imp = alloc(imp, m_is_simplify, m, m_params);

    }

};

@@ -416,4 +416,3 @@ tactic * mk_horn_tactic(ast_manager & m, params_ref const & p) {
tactic * mk_horn_simplify_tactic(ast_manager & m, params_ref const & p) {
    return clean(alloc(horn_tactic, true, m, p));
}

@@ -1,20 +0,0 @@
z3_add_component(pdr
  SOURCES
    pdr_closure.cpp
    pdr_context.cpp
    pdr_dl_interface.cpp
    pdr_farkas_learner.cpp
    pdr_generalizers.cpp
    pdr_manager.cpp
    pdr_prop_solver.cpp
    pdr_reachable_cache.cpp
    pdr_smt_context_manager.cpp
    pdr_sym_mux.cpp
    pdr_util.cpp
  COMPONENT_DEPENDENCIES
    arith_tactics
    core_tactics
    muz
    smt_tactic
    transforms
)
@@ -1,177 +0,0 @@
/*++
Copyright (c) 2013 Microsoft Corporation

Module Name:

    pdr_closure.cpp

Abstract:

    Utility functions for computing closures.

Author:

    Nikolaj Bjorner (nbjorner) 2013-9-1.

Revision History:

--*/

#include "muz/pdr/pdr_closure.h"
#include "muz/pdr/pdr_context.h"
#include "ast/rewriter/expr_safe_replace.h"
#include "ast/ast_util.h"

namespace pdr {

    expr_ref scaler::operator()(expr* e, expr* k, obj_map<func_decl, expr*>* translate) {
        m_cache[0].reset();
        m_cache[1].reset();
        m_translate = translate;
        m_k = k;
        return scale(e, false);
    }

    expr_ref scaler::scale(expr* e, bool is_mul) {
        expr* r;
        if (m_cache[is_mul].find(e, r)) {
            return expr_ref(r, m);
        }
        if (!is_app(e)) {
            return expr_ref(e, m);
        }
        app* ap = to_app(e);
        if (m_translate && m_translate->find(ap->get_decl(), r)) {
            return expr_ref(r, m);
        }
        if (!is_mul && a.is_numeral(e)) {
            return expr_ref(a.mk_mul(m_k, e), m);
        }
        expr_ref_vector args(m);
        bool is_mul_rec = is_mul || a.is_mul(e);
        for (unsigned i = 0; i < ap->get_num_args(); ++i) {
            args.push_back(scale(ap->get_arg(i), is_mul_rec));
        }
        expr_ref result(m);
        result = m.mk_app(ap->get_decl(), args.size(), args.c_ptr());
        m_cache[is_mul].insert(e, result);
        return result;
    }

    expr_ref scaler::undo_k(expr* e, expr* k) {
        expr_safe_replace sub(m);
        th_rewriter rw(m);
        expr_ref result(e, m);
        sub.insert(k, a.mk_numeral(rational(1), false));
        sub(result);
        rw(result);
        return result;
    }


    closure::closure(pred_transformer& p, bool is_closure):
        m(p.get_manager()), m_pt(p), a(m),
        m_is_closure(is_closure), m_sigma(m), m_trail(m) {}

    void closure::add_variables(unsigned num_vars, expr_ref_vector& fmls) {
        manager& pm = m_pt.get_pdr_manager();
        SASSERT(num_vars > 0);
        while (m_vars.size() < num_vars) {
            m_vars.resize(m_vars.size()+1);
            m_sigma.push_back(m.mk_fresh_const("sigma", a.mk_real()));
        }

        unsigned sz = m_pt.sig_size();

        for (unsigned i = 0; i < sz; ++i) {
            expr* var;
            ptr_vector<expr> vars;
            func_decl* fn0 = m_pt.sig(i);
            func_decl* fn1 = pm.o2n(fn0, 0);
            sort* srt = fn0->get_range();
            if (a.is_int_real(srt)) {
                for (unsigned j = 0; j < num_vars; ++j) {
                    if (!m_vars[j].find(fn1, var)) {
                        var = m.mk_fresh_const(fn1->get_name().str().c_str(), srt);
                        m_trail.push_back(var);
                        m_vars[j].insert(fn1, var);
                    }
                    vars.push_back(var);
                }
                fmls.push_back(m.mk_eq(m.mk_const(fn1), a.mk_add(num_vars, vars.c_ptr())));
            }
        }
        if (m_is_closure) {
            for (unsigned i = 0; i < num_vars; ++i) {
                fmls.push_back(a.mk_ge(m_sigma[i].get(), a.mk_numeral(rational(0), a.mk_real())));
            }
        }
        else {
            // is interior:
            for (unsigned i = 0; i < num_vars; ++i) {
                fmls.push_back(a.mk_gt(m_sigma[i].get(), a.mk_numeral(rational(0), a.mk_real())));
            }
        }
        fmls.push_back(m.mk_eq(a.mk_numeral(rational(1), a.mk_real()), a.mk_add(num_vars, m_sigma.c_ptr())));
    }

    expr_ref closure::close_fml(expr* e) {
        expr* e0, *e1, *e2;
        expr_ref result(m);
        if (a.is_lt(e, e1, e2)) {
            result = a.mk_le(e1, e2);
        }
        else if (a.is_gt(e, e1, e2)) {
            result = a.mk_ge(e1, e2);
        }
        else if (m.is_not(e, e0) && a.is_ge(e0, e1, e2)) {
            result = a.mk_le(e1, e2);
        }
        else if (m.is_not(e, e0) && a.is_le(e0, e1, e2)) {
            result = a.mk_ge(e1, e2);
        }
        else if (a.is_ge(e) || a.is_le(e) || m.is_eq(e) ||
                 (m.is_not(e, e0) && (a.is_gt(e0) || a.is_lt(e0)))) {
            result = e;
        }
        else {
            IF_VERBOSE(1, verbose_stream() << "Cannot close: " << mk_pp(e, m) << "\n";);
            result = m.mk_true();
        }
        return result;
    }

    expr_ref closure::close_conjunction(expr* fml) {
        expr_ref_vector fmls(m);
        flatten_and(fml, fmls);
        for (unsigned i = 0; i < fmls.size(); ++i) {
            fmls[i] = close_fml(fmls[i].get());
        }
        return expr_ref(mk_and(fmls), m);
    }

    expr_ref closure::relax(unsigned i, expr* fml) {
        scaler sc(m);
        expr_ref result = sc(fml, m_sigma[i].get(), &m_vars[i]);
        return close_conjunction(result);
    }

    expr_ref closure::operator()(expr_ref_vector const& As) {
        if (As.empty()) {
            return expr_ref(m.mk_false(), m);
        }
        if (As.size() == 1) {
            return expr_ref(As[0], m);
        }
        expr_ref_vector fmls(m);
        expr_ref B(m);
        add_variables(As.size(), fmls);
        for (unsigned i = 0; i < As.size(); ++i) {
            fmls.push_back(relax(i, As[i]));
        }
        B = mk_and(fmls);
        return B;
    }

}
@@ -1,67 +0,0 @@
/*++
Copyright (c) 2013 Microsoft Corporation

Module Name:

    pdr_closure.h

Abstract:

    Utility functions for computing closures.

Author:

    Nikolaj Bjorner (nbjorner) 2013-9-1.

Revision History:

--*/

#ifndef PDR_CLOSURE_H_
#define PDR_CLOSURE_H_

#include "ast/arith_decl_plugin.h"

namespace pdr {

    // Arithmetic scaling functor.
    // Variables are replaced using
    // m_translate. Constants are replaced by
    // multiplication with a variable 'k' (scale factor).
    class scaler {
        ast_manager&               m;
        arith_util                 a;
        obj_map<expr, expr*>       m_cache[2];
        expr*                      m_k;
        obj_map<func_decl, expr*>* m_translate;
    public:
        scaler(ast_manager& m): m(m), a(m), m_translate(nullptr) {}
        expr_ref operator()(expr* e, expr* k, obj_map<func_decl, expr*>* translate = nullptr);
        expr_ref undo_k(expr* e, expr* k);
    private:
        expr_ref scale(expr* e, bool is_mul);
    };

    class pred_transformer;

    class closure {
        ast_manager&      m;
        pred_transformer& m_pt;
        arith_util        a;
        bool              m_is_closure;
        expr_ref_vector   m_sigma;
        expr_ref_vector   m_trail;
        vector<obj_map<func_decl, expr*> > m_vars;

        expr_ref relax(unsigned i, expr* fml);
        expr_ref close_conjunction(expr* fml);
        expr_ref close_fml(expr* fml);
        void add_variables(unsigned num_vars, expr_ref_vector& fmls);
    public:
        closure(pred_transformer& pt, bool is_closure);
        expr_ref operator()(expr_ref_vector const& As);

    };
}

#endif
File diff suppressed because it is too large
@ -1,448 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_context.h
|
||||
|
||||
Abstract:
|
||||
|
||||
PDR for datalog
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-20.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef PDR_CONTEXT_H_
|
||||
#define PDR_CONTEXT_H_
|
||||
|
||||
#ifdef _CYGWIN
|
||||
#undef min
|
||||
#undef max
|
||||
#endif
|
||||
#include <deque>
|
||||
#include "muz/pdr/pdr_manager.h"
|
||||
#include "muz/pdr/pdr_prop_solver.h"
|
||||
#include "muz/pdr/pdr_reachable_cache.h"
|
||||
#include "muz/base/fixedpoint_params.hpp"
|
||||
|
||||
|
||||
namespace datalog {
|
||||
class rule_set;
|
||||
class context;
|
||||
};
|
||||
|
||||
namespace pdr {
|
||||
|
||||
class pred_transformer;
|
||||
class model_node;
|
||||
class context;
|
||||
|
||||
typedef obj_map<datalog::rule const, app_ref_vector*> rule2inst;
|
||||
typedef obj_map<func_decl, pred_transformer*> decl2rel;
|
||||
|
||||
|
||||
//
|
||||
// Predicate transformer state.
|
||||
// A predicate transformer corresponds to the
|
||||
// set of rules that have the same head predicates.
|
||||
//
|
||||
|
||||
class pred_transformer {
|
||||
|
||||
struct stats {
|
||||
unsigned m_num_propagations;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
typedef obj_map<datalog::rule const, expr*> rule2expr;
|
||||
typedef obj_map<datalog::rule const, ptr_vector<app> > rule2apps;
|
||||
|
||||
manager& pm; // pdr-manager
|
||||
ast_manager& m; // manager
|
||||
context& ctx;
|
||||
|
||||
func_decl_ref m_head; // predicate
|
||||
func_decl_ref_vector m_sig; // signature
|
||||
ptr_vector<pred_transformer> m_use; // places where 'this' is referenced.
|
||||
ptr_vector<datalog::rule> m_rules; // rules used to derive transformer
|
||||
prop_solver m_solver; // solver context
|
||||
vector<expr_ref_vector> m_levels; // level formulas
|
||||
expr_ref_vector m_invariants; // properties that are invariant.
|
||||
obj_map<expr, unsigned> m_prop2level; // map property to level where it occurs.
|
||||
obj_map<expr, datalog::rule const*> m_tag2rule; // map tag predicate to rule.
|
||||
rule2expr m_rule2tag; // map rule to predicate tag.
|
||||
rule2inst m_rule2inst; // map rules to instantiations of indices
|
||||
rule2expr m_rule2transition; // map rules to transition
|
||||
rule2apps m_rule2vars; // map rule to auxiliary variables
|
||||
expr_ref m_transition; // transition relation.
|
||||
expr_ref m_initial_state; // initial state.
|
||||
reachable_cache m_reachable;
|
||||
ptr_vector<func_decl> m_predicates;
|
||||
stats m_stats;
|
||||
|
||||
void init_sig();
|
||||
void ensure_level(unsigned level);
|
||||
bool add_property1(expr * lemma, unsigned lvl); // add property 'p' to state at level lvl.
|
||||
void add_child_property(pred_transformer& child, expr* lemma, unsigned lvl);
|
||||
void mk_assumptions(func_decl* head, expr* fml, expr_ref_vector& result);
|
||||
|
||||
// Initialization
|
||||
void init_rules(decl2rel const& pts, expr_ref& init, expr_ref& transition);
|
||||
void init_rule(decl2rel const& pts, datalog::rule const& rule, expr_ref& init,
|
||||
ptr_vector<datalog::rule const>& rules, expr_ref_vector& transition);
|
||||
void init_atom(decl2rel const& pts, app * atom, app_ref_vector& var_reprs, expr_ref_vector& conj, unsigned tail_idx);
|
||||
|
||||
void simplify_formulas(tactic& tac, expr_ref_vector& fmls);
|
||||
|
||||
// Debugging
|
||||
bool check_filled(app_ref_vector const& v) const;
|
||||
|
||||
void add_premises(decl2rel const& pts, unsigned lvl, datalog::rule& rule, expr_ref_vector& r);
|
||||
|
||||
public:
|
||||
pred_transformer(context& ctx, manager& pm, func_decl* head);
|
||||
~pred_transformer();
|
||||
|
||||
void add_rule(datalog::rule* r) { m_rules.push_back(r); }
|
||||
void add_use(pred_transformer* pt) { if (!m_use.contains(pt)) m_use.insert(pt); }
|
||||
void initialize(decl2rel const& pts);
|
||||
|
||||
func_decl* head() const { return m_head; }
|
||||
ptr_vector<datalog::rule> const& rules() const { return m_rules; }
|
||||
func_decl* sig(unsigned i) { init_sig(); return m_sig[i].get(); } // signature
|
||||
func_decl* const* sig() { init_sig(); return m_sig.c_ptr(); }
|
||||
unsigned sig_size() { init_sig(); return m_sig.size(); }
|
||||
expr* transition() const { return m_transition; }
|
||||
expr* initial_state() const { return m_initial_state; }
|
||||
expr* rule2tag(datalog::rule const* r) { return m_rule2tag.find(r); }
|
||||
unsigned get_num_levels() { return m_levels.size(); }
|
||||
expr_ref get_cover_delta(func_decl* p_orig, int level);
|
||||
void add_cover(unsigned level, expr* property);
|
||||
context& get_context() { return ctx; }
|
||||
|
||||
std::ostream& display(std::ostream& strm) const;
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
|
||||
bool is_reachable(expr* state);
|
||||
void remove_predecessors(expr_ref_vector& literals);
|
||||
void find_predecessors(datalog::rule const& r, ptr_vector<func_decl>& predicates) const;
|
||||
datalog::rule const& find_rule(model_core const& model) const;
|
||||
expr* get_transition(datalog::rule const& r) { return m_rule2transition.find(&r); }
|
||||
ptr_vector<app>& get_aux_vars(datalog::rule const& r) { return m_rule2vars.find(&r); }
|
||||
|
||||
bool propagate_to_next_level(unsigned level);
|
||||
void propagate_to_infinity(unsigned level);
|
||||
void add_property(expr * lemma, unsigned lvl); // add property 'p' to state at level.
|
||||
|
||||
lbool is_reachable(model_node& n, expr_ref_vector* core, bool& uses_level);
|
||||
bool is_invariant(unsigned level, expr* co_state, bool inductive, bool& assumes_level, expr_ref_vector* core = nullptr);
|
||||
bool check_inductive(unsigned level, expr_ref_vector& state, bool& assumes_level);
|
||||
|
||||
expr_ref get_formulas(unsigned level, bool add_axioms);
|
||||
|
||||
void simplify_formulas();
|
||||
|
||||
expr_ref get_propagation_formula(decl2rel const& pts, unsigned level);
|
||||
|
||||
manager& get_pdr_manager() const { return pm; }
|
||||
ast_manager& get_manager() const { return m; }
|
||||
|
||||
void add_premises(decl2rel const& pts, unsigned lvl, expr_ref_vector& r);
|
||||
|
||||
void close(expr* e);
|
||||
|
||||
app_ref_vector& get_inst(datalog::rule const* r) { return *m_rule2inst.find(r);}
|
||||
|
||||
void inherit_properties(pred_transformer& other);
|
||||
|
||||
void ground_free_vars(expr* e, app_ref_vector& vars, ptr_vector<app>& aux_vars);
|
||||
|
||||
prop_solver& get_solver() { return m_solver; }
|
||||
prop_solver const& get_solver() const { return m_solver; }
|
||||
|
||||
void set_use_farkas(bool f) { get_solver().set_use_farkas(f); }
|
||||
bool get_use_farkas() const { return get_solver().get_use_farkas(); }
|
||||
class scoped_farkas {
|
||||
bool m_old;
|
||||
pred_transformer& m_p;
|
||||
public:
|
||||
scoped_farkas(pred_transformer& p, bool v): m_old(p.get_use_farkas()), m_p(p) {
|
||||
p.set_use_farkas(v);
|
||||
}
|
||||
~scoped_farkas() { m_p.set_use_farkas(m_old); }
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
|
||||
// structure for counter-example search.
|
||||
class model_node {
|
||||
model_node* m_parent;
|
||||
model_node* m_next;
|
||||
model_node* m_prev;
|
||||
pred_transformer& m_pt;
|
||||
expr_ref m_state;
|
||||
model_ref m_model;
|
||||
ptr_vector<model_node> m_children;
|
||||
unsigned m_level;
|
||||
unsigned m_orig_level;
|
||||
unsigned m_depth;
|
||||
bool m_closed;
|
||||
datalog::rule const* m_rule;
|
||||
public:
|
||||
model_node(model_node* parent, expr_ref& state, pred_transformer& pt, unsigned level):
|
||||
m_parent(parent), m_next(nullptr), m_prev(nullptr), m_pt(pt), m_state(state), m_model(nullptr),
|
||||
m_level(level), m_orig_level(level), m_depth(0), m_closed(false), m_rule(nullptr) {
|
||||
model_node* p = m_parent;
|
||||
if (p) {
|
||||
p->m_children.push_back(this);
|
||||
SASSERT(p->m_level == level+1);
|
||||
SASSERT(p->m_level > 0);
|
||||
m_depth = p->m_depth+1;
|
||||
if (p && p->is_closed()) {
|
||||
p->set_open();
|
||||
}
|
||||
}
|
||||
}
|
||||
void set_model(model_ref& m) { m_model = m; }
|
||||
unsigned level() const { return m_level; }
|
||||
unsigned orig_level() const { return m_orig_level; }
|
||||
unsigned depth() const { return m_depth; }
|
||||
void increase_level() { ++m_level; }
|
||||
expr_ref const& state() const { return m_state; }
|
||||
ptr_vector<model_node> const& children() { return m_children; }
|
||||
pred_transformer& pt() const { return m_pt; }
|
||||
model_node* parent() const { return m_parent; }
|
||||
model* get_model_ptr() const { return m_model.get(); }
|
||||
model const& get_model() const { return *m_model; }
|
||||
unsigned index() const;
|
||||
|
||||
bool is_closed() const { return m_closed; }
|
||||
bool is_open() const { return !is_closed(); }
|
||||
|
||||
bool is_1closed() {
|
||||
if (is_closed()) return true;
|
||||
if (m_children.empty()) return false;
|
||||
for (unsigned i = 0; i < m_children.size(); ++i) {
|
||||
if (m_children[i]->is_open()) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void check_pre_closed();
|
||||
void set_closed();
|
||||
void set_open();
|
||||
void set_pre_closed() { TRACE("pdr", tout << state() << "\n";); m_closed = true; }
|
||||
void reset() { m_children.reset(); }
|
||||
|
||||
void set_rule(datalog::rule const* r) { m_rule = r; }
|
||||
datalog::rule* get_rule();
|
||||
|
||||
void mk_instantiate(datalog::rule_ref& r0, datalog::rule_ref& r1, expr_ref_vector& binding);
|
||||
|
||||
std::ostream& display(std::ostream& out, unsigned indent);
|
||||
|
||||
void dequeue(model_node*& root);
|
||||
void enqueue(model_node* n);
|
||||
model_node* next() const { return m_next; }
|
||||
bool is_goal() const { return nullptr != next(); }
|
||||
};
|
||||
|
||||
class model_search {
|
||||
typedef ptr_vector<model_node> model_nodes;
|
||||
bool m_bfs;
|
||||
model_node* m_root;
|
||||
model_node* m_goal;
|
||||
vector<obj_map<expr, model_nodes > > m_cache;
|
||||
obj_map<expr, model_nodes>& cache(model_node const& n);
|
||||
void erase_children(model_node& n, bool backtrack);
|
||||
void remove_node(model_node& n, bool backtrack);
|
||||
void enqueue_leaf(model_node* n); // add leaf to priority queue.
|
||||
void update_models();
|
||||
void set_leaf(model_node& n); // Set node as leaf, remove children.
|
||||
unsigned num_goals() const;
|
||||
|
||||
public:
|
||||
model_search(bool bfs): m_bfs(bfs), m_root(nullptr), m_goal(nullptr) {}
|
||||
~model_search();
|
||||
|
||||
void reset();
|
||||
model_node* next();
|
||||
void add_leaf(model_node& n); // add fresh node.
|
||||
|
||||
void set_root(model_node* n);
|
||||
model_node& get_root() const { return *m_root; }
|
||||
std::ostream& display(std::ostream& out) const;
|
||||
expr_ref get_trace(context const& ctx);
|
||||
proof_ref get_proof_trace(context const& ctx);
|
||||
void backtrack_level(bool uses_level, model_node& n);
|
||||
void remove_goal(model_node& n);
|
||||
|
||||
void well_formed();
|
||||
};
|
||||
|
||||
struct model_exception { };
|
||||
struct inductive_exception {};
|
||||
|
||||
|
||||
// 'state' is unsatisfiable at 'level' with 'core'.
|
||||
// Minimize or weaken core.
|
||||
class core_generalizer {
|
||||
protected:
|
||||
context& m_ctx;
|
||||
public:
|
||||
typedef vector<std::pair<expr_ref_vector,bool> > cores;
|
||||
core_generalizer(context& ctx): m_ctx(ctx) {}
|
||||
virtual ~core_generalizer() {}
|
||||
virtual void operator()(model_node& n, expr_ref_vector& core, bool& uses_level) = 0;
|
||||
virtual void operator()(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
|
||||
new_cores.push_back(std::make_pair(core, uses_level));
|
||||
if (!core.empty()) {
|
||||
(*this)(n, new_cores.back().first, new_cores.back().second);
|
||||
}
|
||||
}
|
||||
virtual void collect_statistics(statistics& st) const {}
|
||||
virtual void reset_statistics() {}
|
||||
};
|
||||
|
||||
class context {
|
||||
|
||||
struct stats {
|
||||
unsigned m_num_nodes;
|
||||
unsigned m_max_depth;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
smt_params& m_fparams;
|
||||
fixedpoint_params const& m_params;
|
||||
ast_manager& m;
|
||||
datalog::context* m_context;
|
||||
manager m_pm;
|
||||
decl2rel m_rels; // Map from relation predicate to fp-operator.
|
||||
decl2rel m_rels_tmp;
|
||||
func_decl_ref m_query_pred;
|
||||
pred_transformer* m_query;
|
||||
mutable model_search m_search;
|
||||
lbool m_last_result;
|
||||
unsigned m_inductive_lvl;
|
||||
unsigned m_expanded_lvl;
|
||||
ptr_vector<core_generalizer> m_core_generalizers;
|
||||
stats m_stats;
|
||||
model_converter_ref m_mc;
|
||||
proof_converter_ref m_pc;
|
||||
|
||||
// Functions used by search.
|
||||
void solve_impl();
|
||||
bool check_reachability(unsigned level);
|
||||
void propagate(unsigned max_prop_lvl);
|
||||
void close_node(model_node& n);
|
||||
void check_pre_closed(model_node& n);
|
||||
void expand_node(model_node& n);
|
||||
lbool expand_state(model_node& n, expr_ref_vector& cube, bool& uses_level);
|
||||
void create_children(model_node& n);
|
||||
expr_ref mk_sat_answer() const;
|
||||
expr_ref mk_unsat_answer();
|
||||
|
||||
// Generate inductive property
|
||||
void get_level_property(unsigned lvl, expr_ref_vector& res, vector<relation_info> & rs);
|
||||
|
||||
|
||||
// Initialization
|
||||
class classifier_proc;
|
||||
void init_core_generalizers(datalog::rule_set& rules);
|
||||
|
||||
bool check_invariant(unsigned lvl);
|
||||
bool check_invariant(unsigned lvl, func_decl* fn);
|
||||
|
||||
void checkpoint();
|
||||
|
||||
void init_rules(datalog::rule_set& rules, decl2rel& transformers);
|
||||
|
||||
void simplify_formulas();
|
||||
|
||||
void reset_core_generalizers();
|
||||
|
||||
void reset(decl2rel& rels);
|
||||
|
||||
void validate();
|
||||
void validate_proof();
|
||||
void validate_search();
|
||||
void validate_model();
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
Initial values of predicates are stored in corresponding relations in dctx.
|
||||
|
||||
We check whether there is some reachable state of the relation checked_relation.
|
||||
*/
|
||||
context(
|
||||
smt_params& fparams,
|
||||
fixedpoint_params const& params,
|
||||
ast_manager& m);
|
||||
|
||||
~context();
|
||||
|
||||
smt_params& get_fparams() const { return m_fparams; }
|
||||
fixedpoint_params const& get_params() const { return m_params; }
|
||||
ast_manager& get_manager() const { return m; }
|
||||
manager& get_pdr_manager() { return m_pm; }
|
||||
decl2rel const& get_pred_transformers() const { return m_rels; }
|
||||
pred_transformer& get_pred_transformer(func_decl* p) const { return *m_rels.find(p); }
|
||||
datalog::context& get_context() const { SASSERT(m_context); return *m_context; }
|
||||
expr_ref get_answer();
|
||||
|
||||
bool is_dl() const { return m_fparams.m_arith_mode == AS_DIFF_LOGIC; }
|
||||
bool is_utvpi() const { return m_fparams.m_arith_mode == AS_UTVPI; }
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
|
||||
std::ostream& display(std::ostream& strm) const;
|
||||
|
||||
void display_certificate(std::ostream& strm);
|
||||
|
||||
lbool solve();
|
||||
|
||||
void reset(bool full = true);
|
||||
|
||||
void set_query(func_decl* q) { m_query_pred = q; }
|
||||
|
||||
void set_unsat() { m_last_result = l_false; }
|
||||
|
||||
void set_model_converter(model_converter_ref& mc) { m_mc = mc; }
|
||||
|
||||
model_converter_ref get_model_converter() { return m_mc; }
|
||||
|
||||
void set_proof_converter(proof_converter_ref& pc) { m_pc = pc; }
|
||||
|
||||
void update_rules(datalog::rule_set& rules);
|
||||
|
||||
void set_axioms(expr* axioms) { m_pm.set_background(axioms); }
|
||||
|
||||
unsigned get_num_levels(func_decl* p);
|
||||
|
||||
expr_ref get_cover_delta(int level, func_decl* p_orig, func_decl* p);
|
||||
|
||||
void add_cover(int level, func_decl* pred, expr* property);
|
||||
|
||||
model_ref get_model();
|
||||
|
||||
proof_ref get_proof() const;
|
||||
|
||||
model_node& get_root() const { return m_search.get_root(); }
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
@@ -1,225 +0,0 @@
/*++
Copyright (c) 2011 Microsoft Corporation

Module Name:

    pdr_dl.cpp

Abstract:

    SMT2 interface for the datalog PDR

Author:

    Krystof Hoder (t-khoder) 2011-9-22.

Revision History:

--*/

#include "muz/base/dl_context.h"
#include "muz/transforms/dl_mk_coi_filter.h"
#include "muz/base/dl_rule.h"
#include "muz/base/dl_rule_transformer.h"
#include "muz/pdr/pdr_context.h"
#include "muz/pdr/pdr_dl_interface.h"
#include "muz/base/dl_rule_set.h"
#include "muz/transforms/dl_mk_slice.h"
#include "muz/transforms/dl_mk_unfold.h"
#include "muz/transforms/dl_mk_coalesce.h"
#include "muz/transforms/dl_transforms.h"
#include "ast/scoped_proof.h"
#include "model/model_smt2_pp.h"

using namespace pdr;

dl_interface::dl_interface(datalog::context& ctx) :
    engine_base(ctx.get_manager(), "pdr"),
    m_ctx(ctx),
    m_pdr_rules(ctx),
    m_old_rules(ctx),
    m_context(nullptr),
    m_refs(ctx.get_manager()) {
    m_context = alloc(pdr::context, ctx.get_fparams(), ctx.get_params(), ctx.get_manager());
}


dl_interface::~dl_interface() {
    dealloc(m_context);
}


//
// Check if the new rules are weaker so that we can
// re-use existing context.
//
void dl_interface::check_reset() {
    datalog::rule_set const& new_rules = m_ctx.get_rules();
    datalog::rule_ref_vector const& old_rules = m_old_rules.get_rules();
    bool is_subsumed = !old_rules.empty();
    for (unsigned i = 0; is_subsumed && i < new_rules.get_num_rules(); ++i) {
        is_subsumed = false;
        for (unsigned j = 0; !is_subsumed && j < old_rules.size(); ++j) {
            if (m_ctx.check_subsumes(*old_rules[j], *new_rules.get_rule(i))) {
                is_subsumed = true;
            }
        }
        if (!is_subsumed) {
            TRACE("pdr", new_rules.get_rule(i)->display(m_ctx, tout << "Fresh rule "););
            m_context->reset();
        }
    }
    m_old_rules.replace_rules(new_rules);
}


lbool dl_interface::query(expr * query) {
    //we restore the initial state in the datalog context
    m_ctx.ensure_opened();
    m_refs.reset();
    m_pred2slice.reset();
    ast_manager& m = m_ctx.get_manager();
    datalog::rule_manager& rm = m_ctx.get_rule_manager();
    datalog::rule_set& rules0 = m_ctx.get_rules();

    datalog::rule_set old_rules(rules0);
    func_decl_ref query_pred(m);
    rm.mk_query(query, rules0);
    expr_ref bg_assertion = m_ctx.get_background_assertion();

    check_reset();

    TRACE("pdr",
          if (!m.is_true(bg_assertion)) {
              tout << "axioms:\n";
              tout << mk_pp(bg_assertion, m) << "\n";
          }
          tout << "query: " << mk_pp(query, m) << "\n";
          tout << "rules:\n";
          m_ctx.display_rules(tout);
          );


    apply_default_transformation(m_ctx);

    if (m_ctx.get_params().xform_slice()) {
        datalog::rule_transformer transformer(m_ctx);
        datalog::mk_slice* slice = alloc(datalog::mk_slice, m_ctx);
        transformer.register_plugin(slice);
        m_ctx.transform_rules(transformer);

        // track sliced predicates.
        obj_map<func_decl, func_decl*> const& preds = slice->get_predicates();
        obj_map<func_decl, func_decl*>::iterator it = preds.begin();
        obj_map<func_decl, func_decl*>::iterator end = preds.end();
        for (; it != end; ++it) {
            m_pred2slice.insert(it->m_key, it->m_value);
            m_refs.push_back(it->m_key);
            m_refs.push_back(it->m_value);
        }
    }

    if (m_ctx.get_params().xform_unfold_rules() > 0) {
        unsigned num_unfolds = m_ctx.get_params().xform_unfold_rules();
        datalog::rule_transformer transf1(m_ctx), transf2(m_ctx);
        transf1.register_plugin(alloc(datalog::mk_coalesce, m_ctx));
        transf2.register_plugin(alloc(datalog::mk_unfold, m_ctx));
        if (m_ctx.get_params().xform_coalesce_rules()) {
            m_ctx.transform_rules(transf1);
        }
        while (num_unfolds > 0) {
            m_ctx.transform_rules(transf2);
            --num_unfolds;
        }
    }

    const datalog::rule_set& rules = m_ctx.get_rules();
    if (rules.get_output_predicates().empty()) {
        m_context->set_unsat();
        return l_false;
    }

    query_pred = rules.get_output_predicate();

    TRACE("pdr",
          tout << "rules:\n";
          m_ctx.display_rules(tout);
          m_ctx.display_smt2(0, 0, tout);
          );

    IF_VERBOSE(2, m_ctx.display_rules(verbose_stream()););
    m_pdr_rules.replace_rules(rules);
    m_pdr_rules.close();
    m_ctx.record_transformed_rules();
    m_ctx.reopen();
    m_ctx.replace_rules(old_rules);


    scoped_restore_proof _sc(m); // update_rules may overwrite the proof mode.

    m_context->set_proof_converter(m_ctx.get_proof_converter());
    m_context->set_model_converter(m_ctx.get_model_converter());
    m_context->set_query(query_pred);
    m_context->set_axioms(bg_assertion);
    m_context->update_rules(m_pdr_rules);

    if (m_pdr_rules.get_rules().empty()) {
        m_context->set_unsat();
        IF_VERBOSE(1, model_smt2_pp(verbose_stream(), m, *m_context->get_model(),0););
        return l_false;
    }

    return m_context->solve();

}

expr_ref dl_interface::get_cover_delta(int level, func_decl* pred_orig) {
    func_decl* pred = pred_orig;
    m_pred2slice.find(pred_orig, pred);
    SASSERT(pred);
    return m_context->get_cover_delta(level, pred_orig, pred);
}

void dl_interface::add_cover(int level, func_decl* pred, expr* property) {
    if (m_ctx.get_params().xform_slice()) {
        throw default_exception("Covers are incompatible with slicing. Disable slicing before using covers");
    }
    m_context->add_cover(level, pred, property);
}

unsigned dl_interface::get_num_levels(func_decl* pred) {
    m_pred2slice.find(pred, pred);
    SASSERT(pred);
    return m_context->get_num_levels(pred);
}

void dl_interface::collect_statistics(statistics& st) const {
    m_context->collect_statistics(st);
}

void dl_interface::reset_statistics() {
    m_context->reset_statistics();
}

void dl_interface::display_certificate(std::ostream& out) const {
    m_context->display_certificate(out);
}

expr_ref dl_interface::get_answer() {
    return m_context->get_answer();
}



void dl_interface::updt_params() {
    dealloc(m_context);
    m_context = alloc(pdr::context, m_ctx.get_fparams(), m_ctx.get_params(), m_ctx.get_manager());
}

model_ref dl_interface::get_model() {
    return m_context->get_model();
}

proof_ref dl_interface::get_proof() {
    return m_context->get_proof();
}
@@ -1,78 +0,0 @@
/*++
Copyright (c) 2011 Microsoft Corporation

Module Name:

    pdr_dl_interface.h

Abstract:

    SMT2 interface for the datalog PDR

Author:

    Krystof Hoder (t-khoder) 2011-9-22.

Revision History:

--*/

#ifndef PDR_DL_INTERFACE_H_
#define PDR_DL_INTERFACE_H_

#include "util/lbool.h"
#include "muz/base/dl_rule.h"
#include "muz/base/dl_rule_set.h"
#include "muz/base/dl_util.h"
#include "muz/base/dl_engine_base.h"
#include "util/statistics.h"

namespace datalog {
    class context;
}

namespace pdr {

    class context;

    class dl_interface : public datalog::engine_base {
        datalog::context&              m_ctx;
        datalog::rule_set              m_pdr_rules;
        datalog::rule_set              m_old_rules;
        context*                       m_context;
        obj_map<func_decl, func_decl*> m_pred2slice;
        ast_ref_vector                 m_refs;

        void check_reset();

    public:
        dl_interface(datalog::context& ctx);
        ~dl_interface() override;

        lbool query(expr* query) override;

        void display_certificate(std::ostream& out) const override;

        void collect_statistics(statistics& st) const override;

        void reset_statistics() override;

        expr_ref get_answer() override;

        unsigned get_num_levels(func_decl* pred) override;

        expr_ref get_cover_delta(int level, func_decl* pred) override;

        void add_cover(int level, func_decl* pred, expr* property) override;

        void updt_params() override;

        model_ref get_model() override;

        proof_ref get_proof() override;

    };
}


#endif
File diff suppressed because it is too large
@@ -1,128 +0,0 @@
/*++
Copyright (c) 2011 Microsoft Corporation

Module Name:

    pdr_farkas_learner.h

Abstract:

    SMT2 interface for the datalog PDR

Author:

    Krystof Hoder (t-khoder) 2011-11-1.

Revision History:

--*/

#ifndef PDR_FARKAS_LEARNER_H_
#define PDR_FARKAS_LEARNER_H_

#include "ast/arith_decl_plugin.h"
#include "ast/ast_translation.h"
#include "ast/bv_decl_plugin.h"
#include "smt/smt_kernel.h"
#include "ast/rewriter/bool_rewriter.h"
#include "muz/pdr/pdr_util.h"
#include "smt/params/smt_params.h"
#include "tactic/tactic.h"

namespace pdr {

    class farkas_learner {
        class farkas_collector;
        class constant_replacer_cfg;
        class equality_expander_cfg;
        class constr;

        typedef obj_hashtable<expr> expr_set;

        smt_params              m_proof_params;
        ast_manager             m_pr;
        scoped_ptr<smt::kernel> m_ctx;
        constr*                 m_constr;

        //
        // true: produce a combined constraint by applying Farkas coefficients.
        // false: produce a conjunction of the negated literals from the theory lemmas.
        //
        bool m_combine_farkas_coefficients;


        static smt_params get_proof_params(smt_params& orig_params);

        //
        // all ast objects passed to private functions have m_proof_mgs as their ast_manager
        //

        ast_translation p2o; /** Translate expression from inner ast_manager to outer one */
        ast_translation o2p; /** Translate expression from outer ast_manager to inner one */


        /** All ast opbjects here are in the m_proof_mgs */
        void get_lemma_guesses_internal(proof * p, expr* A, expr * B, expr_ref_vector& lemmas);

        bool farkas2lemma(proof * fstep, expr* A, expr * B, expr_ref& res);

        void combine_constraints(unsigned cnt, app * const * constrs, rational const * coeffs, expr_ref& res);

        bool try_ensure_lemma_in_language(expr_ref& lemma, expr* A, const func_decl_set& lang);

        bool is_farkas_lemma(ast_manager& m, expr* e);

        void get_asserted(proof* p, expr_set const& bs, ast_mark& b_closed, obj_hashtable<expr>& lemma_set, expr_ref_vector& lemmas);

        bool is_pure_expr(func_decl_set const& symbs, expr* e) const;

        static void test();

    public:
        farkas_learner(smt_params& params, ast_manager& m);

        ~farkas_learner();

        /**
           All ast objects have the ast_manager which was passed as
           an argument to the constructor (i.e. m_outer_mgr)

           B is a conjunction of literals.
           A && B is unsat, equivalently A => ~B is valid
           Find a weakened B' such that
           A && B' is unsat and B' uses vocabulary (and constants) in common with A.
           return lemmas to weaken B.
        */

        bool get_lemma_guesses(expr * A, expr * B, expr_ref_vector& lemmas);

        /**
           Traverse a proof and retrieve lemmas using the vocabulary from bs.
        */
        void get_lemmas(proof* root, expr_set const& bs, expr_ref_vector& lemmas);

        /**
           Traverse a proof and retrieve consequences of A that are used to establish ~B.
           The assumption is that:

              A => \/ ~consequences[i] and \/ ~consequences[i] => ~B

           e.g., the second implication can be rewritten as:

              B => /\ consequences[i]
        */
        void get_consequences(proof* root, expr_set const& bs, expr_ref_vector& consequences);

        /**
           \brief Simplify lemmas using subsumption.
        */
        void simplify_lemmas(expr_ref_vector& lemmas);

        void collect_statistics(statistics& st) const;

    };


}

#endif
@ -1,777 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_generalizers.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Generalizers of satisfiable states and unsat cores.
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-20.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
|
||||
#include "muz/pdr/pdr_context.h"
|
||||
#include "muz/pdr/pdr_farkas_learner.h"
|
||||
#include "muz/pdr/pdr_generalizers.h"
|
||||
#include "ast/expr_abstract.h"
|
||||
#include "ast/rewriter/var_subst.h"
|
||||
#include "ast/rewriter/expr_safe_replace.h"
|
||||
#include "model/model_smt2_pp.h"
|
||||
|
||||
|
||||
namespace pdr {
|
||||
|
||||
|
||||
// ------------------------
|
||||
// core_bool_inductive_generalizer
|
||||
|
||||
// main propositional induction generalizer.
|
||||
// drop literals one by one from the core and check if the core is still inductive.
|
||||
//
|
||||
void core_bool_inductive_generalizer::operator()(model_node& n, expr_ref_vector& core, bool& uses_level) {
|
||||
if (core.size() <= 1) {
|
||||
return;
|
||||
}
|
||||
ast_manager& m = core.get_manager();
|
||||
TRACE("pdr", for (unsigned i = 0; i < core.size(); ++i) { tout << mk_pp(core[i].get(), m) << "\n"; });
|
||||
unsigned num_failures = 0, i = 0, old_core_size = core.size();
|
||||
ptr_vector<expr> processed;
|
||||
|
||||
while (i < core.size() && 1 < core.size() && (!m_failure_limit || num_failures <= m_failure_limit)) {
|
||||
expr_ref lit(m);
|
||||
lit = core[i].get();
|
||||
core[i] = m.mk_true();
|
||||
if (n.pt().check_inductive(n.level(), core, uses_level)) {
|
||||
num_failures = 0;
|
||||
for (i = 0; i < core.size() && processed.contains(core[i].get()); ++i);
|
||||
}
|
||||
else {
|
||||
core[i] = lit;
|
||||
processed.push_back(lit);
|
||||
++num_failures;
|
||||
++i;
|
||||
}
|
||||
}
|
||||
IF_VERBOSE(2, verbose_stream() << "old size: " << old_core_size << " new size: " << core.size() << "\n";);
|
||||
TRACE("pdr", tout << "old size: " << old_core_size << " new size: " << core.size() << "\n";);
|
||||
}
|
||||
|
||||
|
||||
void core_multi_generalizer::operator()(model_node& n, expr_ref_vector& core, bool& uses_level) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
/**
|
||||
\brief Find minimal cores.
|
||||
Apply a simple heuristic: find a minimal core, then find minimal cores that exclude at least one
|
||||
literal from each of the literals in the minimal cores.
|
||||
*/
|
||||
void core_multi_generalizer::operator()(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
|
||||
ast_manager& m = core.get_manager();
|
||||
expr_ref_vector old_core(m), core0(core);
|
||||
bool uses_level1 = uses_level;
|
||||
m_gen(n, core0, uses_level1);
|
||||
new_cores.push_back(std::make_pair(core0, uses_level1));
|
||||
obj_hashtable<expr> core_exprs, core1_exprs;
|
||||
set_union(core_exprs, core0);
|
||||
for (unsigned i = 0; i < old_core.size(); ++i) {
|
||||
expr* lit = old_core[i].get();
|
||||
if (core_exprs.contains(lit)) {
|
||||
expr_ref_vector core1(old_core);
|
||||
core1[i] = core1.back();
|
||||
core1.pop_back();
|
||||
uses_level1 = uses_level;
|
||||
m_gen(n, core1, uses_level1);
|
||||
SASSERT(core1.size() <= old_core.size());
|
||||
if (core1.size() < old_core.size()) {
|
||||
new_cores.push_back(std::make_pair(core1, uses_level1));
|
||||
core1_exprs.reset();
|
||||
set_union(core1_exprs, core1);
|
||||
set_intersection(core_exprs, core1_exprs);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------
|
||||
// core_farkas_generalizer
|
||||
|
||||
//
|
||||
// for each disjunct of core:
|
||||
// weaken predecessor.
|
||||
//
|
||||
|
||||
core_farkas_generalizer::core_farkas_generalizer(context& ctx, ast_manager& m, smt_params& p):
|
||||
core_generalizer(ctx),
|
||||
m_farkas_learner(p, m)
|
||||
{}
|
||||
|
||||
void core_farkas_generalizer::operator()(model_node& n, expr_ref_vector& core, bool& uses_level) {
|
||||
ast_manager& m = n.pt().get_manager();
|
||||
if (core.empty()) return;
|
||||
expr_ref A(m), B(mk_and(core)), C(m);
|
||||
expr_ref_vector Bs(m);
|
||||
flatten_or(B, Bs);
|
||||
A = n.pt().get_propagation_formula(m_ctx.get_pred_transformers(), n.level());
|
||||
|
||||
bool change = false;
|
||||
for (unsigned i = 0; i < Bs.size(); ++i) {
|
||||
expr_ref_vector lemmas(m);
|
||||
C = Bs[i].get();
|
||||
if (m_farkas_learner.get_lemma_guesses(A, B, lemmas)) {
|
||||
TRACE("pdr",
|
||||
tout << "Old core:\n" << mk_pp(B, m) << "\n";
|
||||
tout << "New core:\n" << mk_and(lemmas) << "\n";);
|
||||
Bs[i] = mk_and(lemmas);
|
||||
change = true;
|
||||
}
|
||||
}
|
||||
if (change) {
|
||||
C = mk_or(Bs);
|
||||
TRACE("pdr", tout << "prop:\n" << mk_pp(A,m) << "\ngen:" << mk_pp(B, m) << "\nto: " << mk_pp(C, m) << "\n";);
|
||||
core.reset();
|
||||
flatten_and(C, core);
|
||||
uses_level = true;
|
||||
}
|
||||
}
|
||||
|
||||
void core_farkas_generalizer::collect_statistics(statistics& st) const {
|
||||
m_farkas_learner.collect_statistics(st);
|
||||
}
|
||||
|
||||
|
||||
core_convex_hull_generalizer::core_convex_hull_generalizer(context& ctx, bool is_closure):
|
||||
core_generalizer(ctx),
|
||||
m(ctx.get_manager()),
|
||||
m_is_closure(is_closure) {
|
||||
}
|
||||
|
||||
void core_convex_hull_generalizer::operator()(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
|
||||
// method3(n, core, uses_level, new_cores);
|
||||
method1(n, core, uses_level, new_cores);
|
||||
}
|
||||
|
||||
void core_convex_hull_generalizer::operator()(model_node& n, expr_ref_vector& core, bool& uses_level) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
|
||||
// use the entire region as starting point for generalization.
|
||||
//
|
||||
// Constraints:
|
||||
// add_variables: y = y1 + y2
|
||||
// core: Ay <= b -> conv1: A*y1 <= b*sigma1
|
||||
// sigma1 > 0
|
||||
// sigma2 > 0
|
||||
// 1 = sigma1 + sigma2
|
||||
// A'y <= b' -> conv2: A'*y2 <= b'*sigma2
|
||||
//
|
||||
// If Constraints & Transition(y0, y) is unsat, then
|
||||
// update with new core.
|
||||
//
|
||||
void core_convex_hull_generalizer::method1(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
|
||||
expr_ref_vector conv2(m), fmls(m), fml1_2(m);
|
||||
bool change = false;
|
||||
|
||||
if (core.empty()) {
|
||||
new_cores.push_back(std::make_pair(core, uses_level));
|
||||
return;
|
||||
}
|
||||
closure cl(n.pt(), m_is_closure);
|
||||
|
||||
expr_ref fml1 = mk_and(core);
|
||||
expr_ref fml2 = n.pt().get_formulas(n.level(), false);
|
||||
fml1_2.push_back(fml1);
|
||||
fml1_2.push_back(nullptr);
|
||||
flatten_and(fml2, fmls);
|
||||
for (unsigned i = 0; i < fmls.size(); ++i) {
|
||||
fml2 = m.mk_not(fmls[i].get());
|
||||
fml1_2[1] = fml2;
|
||||
expr_ref state = cl(fml1_2);
|
||||
TRACE("pdr",
|
||||
tout << "Check states:\n" << mk_pp(state, m) << "\n";
|
||||
tout << "Old states:\n" << mk_pp(fml2, m) << "\n";
|
||||
);
|
||||
model_node nd(nullptr, state, n.pt(), n.level());
|
||||
pred_transformer::scoped_farkas sf(n.pt(), true);
|
||||
bool uses_level1 = uses_level;
|
||||
if (l_false == n.pt().is_reachable(nd, &conv2, uses_level1)) {
|
||||
new_cores.push_back(std::make_pair(conv2, uses_level1));
|
||||
change = true;
|
||||
expr_ref state1 = mk_and(conv2);
|
||||
TRACE("pdr",
|
||||
tout << mk_pp(state, m) << "\n";
|
||||
tout << "Generalized to:\n" << mk_pp(state1, m) << "\n";);
|
||||
IF_VERBOSE(0,
|
||||
verbose_stream() << mk_pp(state, m) << "\n";
|
||||
verbose_stream() << "Generalized to:\n" << mk_pp(state1, m) << "\n";);
|
||||
}
|
||||
}
|
||||
if (!m_is_closure || !change) {
|
||||
new_cores.push_back(std::make_pair(core, uses_level));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Extract the lemmas from the transition relation that were used to establish unsatisfiability.
|
||||
Take convex closures of conbinations of these lemmas.
|
||||
*/
|
||||
void core_convex_hull_generalizer::method3(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) {
|
||||
TRACE("dl", tout << "method: generalize consequences of F(R)\n";
|
||||
for (unsigned i = 0; i < core.size(); ++i) {
|
||||
tout << "B:" << mk_pp(core[i], m) << "\n";
|
||||
});
|
||||
bool uses_level1;
|
||||
expr_ref_vector core1(m);
|
||||
core1.append(core);
|
||||
expr_ref_vector consequences(m);
|
||||
{
|
||||
n.pt().get_solver().set_consequences(&consequences);
|
||||
pred_transformer::scoped_farkas sf (n.pt(), true);
|
||||
VERIFY(l_false == n.pt().is_reachable(n, &core1, uses_level1));
|
||||
n.pt().get_solver().set_consequences(nullptr);
|
||||
}
|
||||
IF_VERBOSE(0,
|
||||
verbose_stream() << "Consequences: " << consequences.size() << "\n";
|
||||
for (unsigned i = 0; i < consequences.size(); ++i) {
|
||||
verbose_stream() << mk_pp(consequences[i].get(), m) << "\n";
|
||||
}
|
||||
verbose_stream() << "core: " << core1.size() << "\n";
|
||||
for (unsigned i = 0; i < core1.size(); ++i) {
|
||||
verbose_stream() << mk_pp(core1[i].get(), m) << "\n";
|
||||
});
|
||||
|
||||
expr_ref tmp(m);
|
||||
|
||||
// Check that F(R) => \/ consequences
|
||||
{
|
||||
expr_ref_vector cstate(m);
|
||||
for (unsigned i = 0; i < consequences.size(); ++i) {
|
||||
cstate.push_back(m.mk_not(consequences[i].get()));
|
||||
}
|
||||
tmp = m.mk_and(cstate.size(), cstate.c_ptr());
|
||||
model_node nd(nullptr, tmp, n.pt(), n.level());
|
||||
pred_transformer::scoped_farkas sf (n.pt(), false);
|
||||
VERIFY(l_false == n.pt().is_reachable(nd, &core1, uses_level1));
|
||||
}
|
||||
|
||||
// Create disjunction.
|
||||
tmp = m.mk_and(core.size(), core.c_ptr());
|
||||
|
||||
// Check that \/ consequences => not (core)
|
||||
if (!is_unsat(consequences, tmp)) {
|
||||
IF_VERBOSE(0, verbose_stream() << "Consequences don't contradict the core\n";);
|
||||
return;
|
||||
}
|
||||
IF_VERBOSE(0, verbose_stream() << "Consequences contradict core\n";);
|
||||
|
||||
if (!strengthen_consequences(n, consequences, tmp)) {
|
||||
return;
|
||||
}
|
||||
|
||||
IF_VERBOSE(0, verbose_stream() << "consequences strengthened\n";);
|
||||
// Use the resulting formula to find Farkas lemmas from core.
|
||||
}
|
||||
|
||||
bool core_convex_hull_generalizer::strengthen_consequences(model_node& n, expr_ref_vector& As, expr* B) {
|
||||
expr_ref A(m), tmp(m), convA(m);
|
||||
unsigned sz = As.size();
|
||||
closure cl(n.pt(), m_is_closure);
|
||||
for (unsigned i = 0; i < As.size(); ++i) {
|
||||
expr_ref_vector Hs(m);
|
||||
Hs.push_back(As[i].get());
|
||||
for (unsigned j = i + 1; j < As.size(); ++j) {
|
||||
Hs.push_back(As[j].get());
|
||||
bool unsat = false;
|
||||
A = cl(Hs);
|
||||
tmp = As[i].get();
|
||||
As[i] = A;
|
||||
unsat = is_unsat(As, B);
|
||||
As[i] = tmp;
|
||||
if (unsat) {
|
||||
IF_VERBOSE(0, verbose_stream() << "New convex: " << mk_pp(convA, m) << "\n";);
|
||||
convA = A;
|
||||
As[j] = As.back();
|
||||
As.pop_back();
|
||||
--j;
|
||||
}
|
||||
else {
|
||||
Hs.pop_back();
|
||||
}
|
||||
}
|
||||
if (Hs.size() > 1) {
|
||||
As[i] = convA;
|
||||
}
|
||||
}
|
||||
return sz > As.size();
|
||||
}
|
||||
|
||||
|
||||
bool core_convex_hull_generalizer::is_unsat(expr_ref_vector const& As, expr* B) {
|
||||
smt::kernel ctx(m, m_ctx.get_fparams(), m_ctx.get_params().p);
|
||||
expr_ref disj(m);
|
||||
disj = m.mk_or(As.size(), As.c_ptr());
|
||||
ctx.assert_expr(disj);
|
||||
ctx.assert_expr(B);
|
||||
std::cout << "Checking\n" << mk_pp(disj, m) << "\n" << mk_pp(B, m) << "\n";
|
||||
return l_false == ctx.check();
|
||||
}
|
||||
|
||||
|
||||
// ---------------------------------
|
||||
// core_arith_inductive_generalizer
|
||||
// NB. this is trying out some ideas for generalization in
|
||||
// an ad hoc specialized way. arith_inductive_generalizer should
|
||||
// not be used by default. It is a place-holder for a general purpose
|
||||
// extrapolator of a lattice basis.
|
||||
|
||||
core_arith_inductive_generalizer::core_arith_inductive_generalizer(context& ctx):
|
||||
core_generalizer(ctx),
|
||||
m(ctx.get_manager()),
|
||||
a(m),
|
||||
m_refs(m) {}
|
||||
|
||||
void core_arith_inductive_generalizer::operator()(model_node& n, expr_ref_vector& core, bool& uses_level) {
|
||||
if (core.size() <= 1) {
|
||||
return;
|
||||
}
|
||||
reset();
|
||||
expr_ref e(m), t1(m), t2(m), t3(m);
|
||||
rational r;
|
||||
|
||||
TRACE("pdr", for (unsigned i = 0; i < core.size(); ++i) { tout << mk_pp(core[i].get(), m) << "\n"; });
|
||||
|
||||
svector<eq> eqs;
|
||||
get_eqs(core, eqs);
|
||||
|
||||
if (eqs.empty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
expr_ref_vector new_core(m);
|
||||
new_core.append(core);
|
||||
|
||||
for (unsigned eq = 0; eq < eqs.size(); ++eq) {
|
||||
rational r = eqs[eq].m_value;
|
||||
expr* x = eqs[eq].m_term;
|
||||
unsigned k = eqs[eq].m_i;
|
||||
unsigned l = eqs[eq].m_j;
|
||||
|
||||
new_core[l] = m.mk_true();
|
||||
new_core[k] = m.mk_true();
|
||||
|
||||
for (unsigned i = 0; i < new_core.size(); ++i) {
|
||||
if (substitute_alias(r, x, new_core[i].get(), e)) {
|
||||
new_core[i] = e;
|
||||
}
|
||||
}
|
||||
if (abs(r) >= rational(2) && a.is_int(x)) {
|
||||
new_core[k] = m.mk_eq(a.mk_mod(x, a.mk_numeral(rational(2), true)), a.mk_numeral(rational(0), true));
|
||||
new_core[l] = a.mk_le(x, a.mk_numeral(rational(0), true));
|
||||
}
|
||||
}
|
||||
|
||||
bool inductive = n.pt().check_inductive(n.level(), new_core, uses_level);
|
||||
|
||||
IF_VERBOSE(1,
|
||||
verbose_stream() << (inductive?"":"non") << "inductive\n";
|
||||
verbose_stream() << "old\n";
|
||||
for (unsigned j = 0; j < core.size(); ++j) {
|
||||
verbose_stream() << mk_pp(core[j].get(), m) << "\n";
|
||||
}
|
||||
verbose_stream() << "new\n";
|
||||
for (unsigned j = 0; j < new_core.size(); ++j) {
|
||||
verbose_stream() << mk_pp(new_core[j].get(), m) << "\n";
|
||||
});
|
||||
|
||||
if (inductive) {
|
||||
core.reset();
|
||||
core.append(new_core);
|
||||
}
|
||||
}
|
||||
|
||||
void core_arith_inductive_generalizer::insert_bound(bool is_lower, expr* x, rational const& r, unsigned i) {
|
||||
if (r.is_neg()) {
|
||||
expr_ref e(m);
|
||||
e = a.mk_uminus(x);
|
||||
m_refs.push_back(e);
|
||||
x = e;
|
||||
is_lower = !is_lower;
|
||||
}
|
||||
|
||||
vector<term_loc_t> bound;
|
||||
bound.push_back(std::make_pair(x, i));
|
||||
if (is_lower) {
|
||||
m_lb.insert(abs(r), bound);
|
||||
}
|
||||
else {
|
||||
m_ub.insert(abs(r), bound);
|
||||
}
|
||||
}
|
||||
|
||||
void core_arith_inductive_generalizer::reset() {
|
||||
m_refs.reset();
|
||||
m_lb.reset();
|
||||
m_ub.reset();
|
||||
}
|
||||
|
||||
void core_arith_inductive_generalizer::get_eqs(expr_ref_vector const& core, svector<eq>& eqs) {
|
||||
expr* e1, *x, *y;
|
||||
expr_ref e(m);
|
||||
rational r;
|
||||
|
||||
for (unsigned i = 0; i < core.size(); ++i) {
|
||||
e = core[i];
|
||||
if (m.is_not(e, e1) && a.is_le(e1, x, y) && a.is_numeral(y, r) && a.is_int(x)) {
|
||||
// not (<= x r) <=> x >= r + 1
|
||||
insert_bound(true, x, r + rational(1), i);
|
||||
}
|
||||
else if (m.is_not(e, e1) && a.is_ge(e1, x, y) && a.is_numeral(y, r) && a.is_int(x)) {
|
||||
// not (>= x r) <=> x <= r - 1
|
||||
insert_bound(false, x, r - rational(1), i);
|
||||
}
|
||||
else if (a.is_le(e, x, y) && a.is_numeral(y, r)) {
|
||||
insert_bound(false, x, r, i);
|
||||
}
|
||||
else if (a.is_ge(e, x, y) && a.is_numeral(y, r)) {
|
||||
insert_bound(true, x, r, i);
|
||||
}
|
||||
}
|
||||
bounds_t::iterator it = m_lb.begin(), end = m_lb.end();
|
||||
for (; it != end; ++it) {
|
||||
rational r = it->m_key;
|
||||
vector<term_loc_t> & terms1 = it->m_value;
|
||||
vector<term_loc_t> terms2;
|
||||
if (r >= rational(2) && m_ub.find(r, terms2)) {
|
||||
for (unsigned i = 0; i < terms1.size(); ++i) {
|
||||
bool done = false;
|
||||
for (unsigned j = 0; !done && j < terms2.size(); ++j) {
|
||||
expr* t1 = terms1[i].first;
|
||||
expr* t2 = terms2[j].first;
|
||||
if (t1 == t2) {
|
||||
eqs.push_back(eq(t1, r, terms1[i].second, terms2[j].second));
|
||||
done = true;
|
||||
}
|
||||
else {
|
||||
e = m.mk_eq(t1, t2);
|
||||
th_rewriter rw(m);
|
||||
rw(e);
|
||||
if (m.is_true(e)) {
|
||||
eqs.push_back(eq(t1, r, terms1[i].second, terms2[j].second));
|
||||
done = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool core_arith_inductive_generalizer::substitute_alias(rational const& r, expr* x, expr* e, expr_ref& result) {
|
||||
rational r2;
|
||||
expr* y, *z, *e1;
|
||||
if (m.is_not(e, e1) && substitute_alias(r, x, e1, result)) {
|
||||
result = m.mk_not(result);
|
||||
return true;
|
||||
}
|
||||
if (a.is_le(e, y, z) && a.is_numeral(z, r2)) {
|
||||
if (r == r2) {
|
||||
result = a.mk_le(y, x);
|
||||
return true;
|
||||
}
|
||||
if (r == r2 + rational(1)) {
|
||||
result = a.mk_lt(y, x);
|
||||
return true;
|
||||
}
|
||||
if (r == r2 - rational(1)) {
|
||||
result = a.mk_le(y, a.mk_sub(x, a.mk_numeral(rational(1), a.is_int(x))));
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
||||
if (a.is_ge(e, y, z) && a.is_numeral(z, r2)) {
|
||||
if (r == r2) {
|
||||
result = a.mk_ge(y, x);
|
||||
return true;
|
||||
}
|
||||
if (r2 == r + rational(1)) {
|
||||
result = a.mk_gt(y, x);
|
||||
return true;
|
||||
}
|
||||
if (r2 == r - rational(1)) {
|
||||
result = a.mk_ge(y, a.mk_sub(x, a.mk_numeral(rational(1), a.is_int(x))));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// < F, phi, i + 1>
|
||||
// |
|
||||
// < G, psi, i >
|
||||
//
|
||||
// where:
|
||||
//
|
||||
// p(x) <- F(x,y,p,q)
|
||||
// q(x) <- G(x,y)
|
||||
//
|
||||
// Hyp:
|
||||
// Q_k(x) => phi(x) j <= k <= i
|
||||
// Q_k(x) => R_k(x) j <= k <= i + 1
|
||||
// Q_k(x) <=> Trans(Q_{k-1}) j < k <= i + 1
|
||||
// Conclusion:
|
||||
// Q_{i+1}(x) => phi(x)
|
||||
//
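    // A concrete instance of the schema above (illustrative only, names made up):
    // for a single rule p(x) <- x > 0 && p(x - 1) and phi(x) := x >= 0, the
    // hypotheses state that the approximations Q_j, ..., Q_i all imply phi and
    // that each Q_k is obtained from Q_{k-1} by one unfolding of the rule; the
    // conclusion lifts phi to Q_{i+1}. mk_induction_goal below asserts the
    // negated conclusion together with the hypotheses and the transition
    // axioms, so an unsatisfiable answer establishes the conclusion.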
|
||||
class core_induction_generalizer::imp {
|
||||
context& m_ctx;
|
||||
manager& pm;
|
||||
ast_manager& m;
|
||||
|
||||
//
|
||||
// Create predicate Q_level
|
||||
//
|
||||
func_decl_ref mk_pred(unsigned level, func_decl* f) {
|
||||
func_decl_ref result(m);
|
||||
std::ostringstream name;
|
||||
name << f->get_name() << "_" << level;
|
||||
symbol sname(name.str().c_str());
|
||||
result = m.mk_func_decl(sname, f->get_arity(), f->get_domain(), f->get_range());
|
||||
return result;
|
||||
}
|
||||
|
||||
//
|
||||
// Create formula exists y . z . F[Q_{level-1}, x, y, z]
|
||||
//
|
||||
expr_ref mk_transition_rule(
|
||||
expr_ref_vector const& reps,
|
||||
unsigned level,
|
||||
datalog::rule const& rule)
|
||||
{
|
||||
expr_ref_vector conj(m), sub(m);
|
||||
expr_ref result(m);
|
||||
svector<symbol> names;
|
||||
unsigned ut_size = rule.get_uninterpreted_tail_size();
|
||||
unsigned t_size = rule.get_tail_size();
|
||||
if (0 == level && 0 < ut_size) {
|
||||
result = m.mk_false();
|
||||
return result;
|
||||
}
|
||||
app* atom = rule.get_head();
|
||||
SASSERT(atom->get_num_args() == reps.size());
|
||||
|
||||
for (unsigned i = 0; i < reps.size(); ++i) {
|
||||
expr* arg = atom->get_arg(i);
|
||||
if (is_var(arg)) {
|
||||
unsigned idx = to_var(arg)->get_idx();
|
||||
if (idx >= sub.size()) sub.resize(idx+1);
|
||||
if (sub[idx].get()) {
|
||||
conj.push_back(m.mk_eq(sub[idx].get(), reps[i]));
|
||||
}
|
||||
else {
|
||||
sub[idx] = reps[i];
|
||||
}
|
||||
}
|
||||
else {
|
||||
conj.push_back(m.mk_eq(arg, reps[i]));
|
||||
}
|
||||
}
|
||||
for (unsigned i = 0; 0 < level && i < ut_size; i++) {
|
||||
app* atom = rule.get_tail(i);
|
||||
func_decl* head = atom->get_decl();
|
||||
func_decl_ref fn = mk_pred(level-1, head);
|
||||
conj.push_back(m.mk_app(fn, atom->get_num_args(), atom->get_args()));
|
||||
}
|
||||
for (unsigned i = ut_size; i < t_size; i++) {
|
||||
conj.push_back(rule.get_tail(i));
|
||||
}
|
||||
result = mk_and(conj);
|
||||
if (!sub.empty()) {
|
||||
expr_ref tmp = result;
|
||||
var_subst(m, false)(tmp, sub.size(), sub.c_ptr(), result);
|
||||
}
|
||||
expr_free_vars fv;
|
||||
fv(result);
|
||||
fv.set_default_sort(m.mk_bool_sort());
|
||||
for (unsigned i = 0; i < fv.size(); ++i) {
|
||||
names.push_back(symbol(fv.size() - i - 1));
|
||||
}
|
||||
if (!fv.empty()) {
|
||||
fv.reverse();
|
||||
result = m.mk_exists(fv.size(), fv.c_ptr(), names.c_ptr(), result);
|
||||
}
|
||||
return result;
|
||||
}
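        // Reading of the construction above (descriptive comment, not from the
        // original sources): each head argument is bound to the corresponding
        // representative constant (either through the substitution `sub` or an
        // explicit equality), every uninterpreted tail atom q(t) is replaced by
        // its level-(level-1) copy, the interpreted tail is kept as is, and the
        // remaining free variables are existentially quantified. At level 0 a
        // rule with an uninterpreted tail contributes false, since there is no
        // previous level to refer to.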
|
||||
|
||||
expr_ref bind_head(expr_ref_vector const& reps, expr* fml) {
|
||||
expr_ref result(m);
|
||||
expr_abstract(m, 0, reps.size(), reps.c_ptr(), fml, result);
|
||||
ptr_vector<sort> sorts;
|
||||
svector<symbol> names;
|
||||
unsigned sz = reps.size();
|
||||
for (unsigned i = 0; i < sz; ++i) {
|
||||
sorts.push_back(m.get_sort(reps[sz-i-1]));
|
||||
names.push_back(symbol(sz-i-1));
|
||||
}
|
||||
if (sz > 0) {
|
||||
result = m.mk_forall(sorts.size(), sorts.c_ptr(), names.c_ptr(), result);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref_vector mk_reps(pred_transformer& pt) {
|
||||
expr_ref_vector reps(m);
|
||||
expr_ref rep(m);
|
||||
for (unsigned i = 0; i < pt.head()->get_arity(); ++i) {
|
||||
rep = m.mk_const(pm.o2n(pt.sig(i), 0));
|
||||
reps.push_back(rep);
|
||||
}
|
||||
return reps;
|
||||
}
|
||||
|
||||
//
|
||||
// extract transition axiom:
|
||||
//
|
||||
// forall x . p_lvl(x) <=> exists y z . F[p_{lvl-1}(y), q_{lvl-1}(z), x]
|
||||
//
|
||||
expr_ref mk_transition_axiom(pred_transformer& pt, unsigned level) {
|
||||
expr_ref fml(m.mk_false(), m), tr(m);
|
||||
expr_ref_vector reps = mk_reps(pt);
|
||||
ptr_vector<datalog::rule> const& rules = pt.rules();
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
tr = mk_transition_rule(reps, level, *rules[i]);
|
||||
fml = (i == 0)?tr.get():m.mk_or(fml, tr);
|
||||
}
|
||||
func_decl_ref fn = mk_pred(level, pt.head());
|
||||
fml = m.mk_iff(m.mk_app(fn, reps.size(), reps.c_ptr()), fml);
|
||||
fml = bind_head(reps, fml);
|
||||
return fml;
|
||||
}
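        // For example (illustrative, not from the original sources): with two
        // rules p(x) <- G1(x, y) and p(x) <- G2(x), the axiom produced at
        // level k has the shape
        //     forall x . p_k(x) <=> ((exists y . G1'(x, y)) or G2'(x))
        // where G1' and G2' are the rule bodies with every uninterpreted tail
        // predicate replaced by its level-(k-1) copy.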
|
||||
|
||||
//
|
||||
// Create implication:
|
||||
// Q_level(x) => phi(x)
|
||||
//
|
||||
expr_ref mk_predicate_property(unsigned level, pred_transformer& pt, expr* phi) {
|
||||
expr_ref_vector reps = mk_reps(pt);
|
||||
func_decl_ref fn = mk_pred(level, pt.head());
|
||||
expr_ref fml(m);
|
||||
fml = m.mk_implies(m.mk_app(fn, reps.size(), reps.c_ptr()), phi);
|
||||
fml = bind_head(reps, fml);
|
||||
return fml;
|
||||
}
|
||||
|
||||
|
||||
|
||||
public:
|
||||
imp(context& ctx): m_ctx(ctx), pm(ctx.get_pdr_manager()), m(ctx.get_manager()) {}
|
||||
|
||||
//
|
||||
// not exists y . F(x,y)
|
||||
//
|
||||
expr_ref mk_blocked_transition(pred_transformer& pt, unsigned level) {
|
||||
SASSERT(level > 0);
|
||||
expr_ref fml(m.mk_true(), m);
|
||||
expr_ref_vector reps = mk_reps(pt), fmls(m);
|
||||
ptr_vector<datalog::rule> const& rules = pt.rules();
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
fmls.push_back(m.mk_not(mk_transition_rule(reps, level, *rules[i])));
|
||||
}
|
||||
fml = mk_and(fmls);
|
||||
TRACE("pdr", tout << mk_pp(fml, m) << "\n";);
|
||||
return fml;
|
||||
}
|
||||
|
||||
expr_ref mk_induction_goal(pred_transformer& pt, unsigned level, unsigned depth) {
|
||||
SASSERT(level >= depth);
|
||||
expr_ref_vector conjs(m);
|
||||
ptr_vector<pred_transformer> pts;
|
||||
unsigned_vector levels;
|
||||
// negated goal
|
||||
expr_ref phi = mk_blocked_transition(pt, level);
|
||||
conjs.push_back(m.mk_not(mk_predicate_property(level, pt, phi)));
|
||||
pts.push_back(&pt);
|
||||
levels.push_back(level);
|
||||
// Add I.H.
|
||||
for (unsigned lvl = level-depth; lvl < level; ++lvl) {
|
||||
if (lvl > 0) {
|
||||
expr_ref psi = mk_blocked_transition(pt, lvl);
|
||||
conjs.push_back(mk_predicate_property(lvl, pt, psi));
|
||||
pts.push_back(&pt);
|
||||
levels.push_back(lvl);
|
||||
}
|
||||
}
|
||||
// Transitions:
|
||||
for (unsigned qhead = 0; qhead < pts.size(); ++qhead) {
|
||||
pred_transformer& qt = *pts[qhead];
|
||||
unsigned lvl = levels[qhead];
|
||||
|
||||
// Add transition definition and properties at level.
|
||||
conjs.push_back(mk_transition_axiom(qt, lvl));
|
||||
conjs.push_back(mk_predicate_property(lvl, qt, qt.get_formulas(lvl, true)));
|
||||
|
||||
// Enqueue additional hypotheses
|
||||
ptr_vector<datalog::rule> const& rules = qt.rules();
|
||||
if (lvl + depth < level || lvl == 0) {
|
||||
continue;
|
||||
}
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
datalog::rule& r = *rules[i];
|
||||
unsigned ut_size = r.get_uninterpreted_tail_size();
|
||||
for (unsigned j = 0; j < ut_size; ++j) {
|
||||
func_decl* f = r.get_tail(j)->get_decl();
|
||||
pred_transformer* rt = m_ctx.get_pred_transformers().find(f);
|
||||
bool found = false;
|
||||
for (unsigned k = 0; !found && k < levels.size(); ++k) {
|
||||
found = (rt == pts[k] && levels[k] + 1 == lvl);
|
||||
}
|
||||
if (!found) {
|
||||
levels.push_back(lvl-1);
|
||||
pts.push_back(rt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
expr_ref result = mk_and(conjs);
|
||||
TRACE("pdr", tout << mk_pp(result, m) << "\n";);
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
//
|
||||
// Instantiate Peano induction schema.
|
||||
//
|
||||
void core_induction_generalizer::operator()(model_node& n, expr_ref_vector& core, bool& uses_level) {
|
||||
model_node* p = n.parent();
|
||||
if (p == nullptr) {
|
||||
return;
|
||||
}
|
||||
unsigned depth = 2;
|
||||
imp imp(m_ctx);
|
||||
ast_manager& m = core.get_manager();
|
||||
expr_ref goal = imp.mk_induction_goal(p->pt(), p->level(), depth);
|
||||
smt::kernel ctx(m, m_ctx.get_fparams(), m_ctx.get_params().p);
|
||||
ctx.assert_expr(goal);
|
||||
lbool r = ctx.check();
|
||||
TRACE("pdr", tout << r << "\n";
|
||||
for (unsigned i = 0; i < core.size(); ++i) {
|
||||
tout << mk_pp(core[i].get(), m) << "\n";
|
||||
});
|
||||
if (r == l_false) {
|
||||
core.reset();
|
||||
expr_ref phi = imp.mk_blocked_transition(p->pt(), p->level());
|
||||
core.push_back(m.mk_not(phi));
|
||||
uses_level = true;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -1,110 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_generalizers.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Generalizer plugins.
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-22.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef PDR_GENERALIZERS_H_
|
||||
#define PDR_GENERALIZERS_H_
|
||||
|
||||
#include "muz/pdr/pdr_context.h"
|
||||
#include "muz/pdr/pdr_closure.h"
|
||||
#include "ast/arith_decl_plugin.h"
|
||||
|
||||
namespace pdr {
|
||||
|
||||
class core_bool_inductive_generalizer : public core_generalizer {
|
||||
unsigned m_failure_limit;
|
||||
public:
|
||||
core_bool_inductive_generalizer(context& ctx, unsigned failure_limit) : core_generalizer(ctx), m_failure_limit(failure_limit) {}
|
||||
~core_bool_inductive_generalizer() override {}
|
||||
void operator()(model_node& n, expr_ref_vector& core, bool& uses_level) override;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class r_map : public map<rational, T, rational::hash_proc, rational::eq_proc> {
|
||||
};
|
||||
|
||||
class core_arith_inductive_generalizer : public core_generalizer {
|
||||
typedef std::pair<expr*, unsigned> term_loc_t;
|
||||
typedef r_map<vector<term_loc_t> > bounds_t;
|
||||
|
||||
ast_manager& m;
|
||||
arith_util a;
|
||||
expr_ref_vector m_refs;
|
||||
bounds_t m_lb;
|
||||
bounds_t m_ub;
|
||||
|
||||
struct eq {
|
||||
expr* m_term;
|
||||
rational m_value;
|
||||
unsigned m_i;
|
||||
unsigned m_j;
|
||||
eq(expr* t, rational const& r, unsigned i, unsigned j): m_term(t), m_value(r), m_i(i), m_j(j) {}
|
||||
};
|
||||
void reset();
|
||||
void insert_bound(bool is_lower, expr* x, rational const& r, unsigned i);
|
||||
void get_eqs(expr_ref_vector const& core, svector<eq>& eqs);
|
||||
bool substitute_alias(rational const&r, expr* x, expr* e, expr_ref& result);
|
||||
public:
|
||||
core_arith_inductive_generalizer(context& ctx);
|
||||
~core_arith_inductive_generalizer() override {}
|
||||
void operator()(model_node& n, expr_ref_vector& core, bool& uses_level) override;
|
||||
};
|
||||
|
||||
class core_farkas_generalizer : public core_generalizer {
|
||||
farkas_learner m_farkas_learner;
|
||||
public:
|
||||
core_farkas_generalizer(context& ctx, ast_manager& m, smt_params& p);
|
||||
~core_farkas_generalizer() override {}
|
||||
void operator()(model_node& n, expr_ref_vector& core, bool& uses_level) override;
|
||||
void collect_statistics(statistics& st) const override;
|
||||
};
|
||||
|
||||
|
||||
class core_convex_hull_generalizer : public core_generalizer {
|
||||
ast_manager& m;
|
||||
obj_map<expr, expr*> m_models;
|
||||
bool m_is_closure;
|
||||
void method1(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores);
|
||||
void method3(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores);
|
||||
bool strengthen_consequences(model_node& n, expr_ref_vector& As, expr* B);
|
||||
bool is_unsat(expr_ref_vector const& As, expr* B);
|
||||
public:
|
||||
core_convex_hull_generalizer(context& ctx, bool is_closure);
|
||||
~core_convex_hull_generalizer() override {}
|
||||
void operator()(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) override;
|
||||
void operator()(model_node& n, expr_ref_vector& core, bool& uses_level) override;
|
||||
};
|
||||
|
||||
class core_multi_generalizer : public core_generalizer {
|
||||
core_bool_inductive_generalizer m_gen;
|
||||
public:
|
||||
core_multi_generalizer(context& ctx, unsigned max_failures): core_generalizer(ctx), m_gen(ctx, max_failures) {}
|
||||
~core_multi_generalizer() override {}
|
||||
void operator()(model_node& n, expr_ref_vector& core, bool& uses_level) override;
|
||||
void operator()(model_node& n, expr_ref_vector const& core, bool uses_level, cores& new_cores) override;
|
||||
};
|
||||
|
||||
class core_induction_generalizer : public core_generalizer {
|
||||
class imp;
|
||||
public:
|
||||
core_induction_generalizer(context& ctx): core_generalizer(ctx) {}
|
||||
~core_induction_generalizer() override {}
|
||||
void operator()(model_node& n, expr_ref_vector& core, bool& uses_level) override;
|
||||
};
|
||||
};
|
||||
#endif
|
||||
|
|
@ -1,321 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_manager.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
A manager class for PDR, taking care of creating of AST
|
||||
objects and conversions between them.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-25.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include <sstream>
|
||||
#include "muz/pdr/pdr_manager.h"
|
||||
#include "ast/ast_smt2_pp.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "ast/has_free_vars.h"
|
||||
#include "ast/rewriter/expr_replacer.h"
|
||||
#include "ast/expr_abstract.h"
|
||||
#include "model/model2expr.h"
|
||||
#include "model/model_smt2_pp.h"
|
||||
#include "tactic/model_converter.h"
|
||||
|
||||
namespace pdr {
|
||||
|
||||
class collect_decls_proc {
|
||||
func_decl_set& m_bound_decls;
|
||||
func_decl_set& m_aux_decls;
|
||||
public:
|
||||
collect_decls_proc(func_decl_set& bound_decls, func_decl_set& aux_decls):
|
||||
m_bound_decls(bound_decls),
|
||||
m_aux_decls(aux_decls) {
|
||||
}
|
||||
|
||||
void operator()(app* a) {
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
func_decl* f = a->get_decl();
|
||||
if (!m_bound_decls.contains(f)) {
|
||||
m_aux_decls.insert(f);
|
||||
}
|
||||
}
|
||||
}
|
||||
void operator()(var* v) {}
|
||||
void operator()(quantifier* q) {}
|
||||
};
|
||||
|
||||
typedef hashtable<symbol, symbol_hash_proc, symbol_eq_proc> symbol_set;
|
||||
|
||||
expr_ref inductive_property::fixup_clause(expr* fml) const {
|
||||
expr_ref_vector disjs(m);
|
||||
flatten_or(fml, disjs);
|
||||
expr_ref result(m);
|
||||
bool_rewriter(m).mk_or(disjs.size(), disjs.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref inductive_property::fixup_clauses(expr* fml) const {
|
||||
expr_ref_vector conjs(m);
|
||||
expr_ref result(m);
|
||||
flatten_and(fml, conjs);
|
||||
for (unsigned i = 0; i < conjs.size(); ++i) {
|
||||
conjs[i] = fixup_clause(conjs[i].get());
|
||||
}
|
||||
bool_rewriter(m).mk_and(conjs.size(), conjs.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string inductive_property::to_string() const {
|
||||
std::stringstream stm;
|
||||
model_ref md;
|
||||
expr_ref result(m);
|
||||
to_model(md);
|
||||
model_smt2_pp(stm, m, *md.get(), 0);
|
||||
return stm.str();
|
||||
}
|
||||
|
||||
void inductive_property::to_model(model_ref& md) const {
|
||||
md = alloc(model, m);
|
||||
vector<relation_info> const& rs = m_relation_info;
|
||||
expr_ref_vector conjs(m);
|
||||
for (unsigned i = 0; i < rs.size(); ++i) {
|
||||
relation_info ri(rs[i]);
|
||||
func_decl * pred = ri.m_pred;
|
||||
expr_ref prop = fixup_clauses(ri.m_body);
|
||||
func_decl_ref_vector const& sig = ri.m_vars;
|
||||
expr_ref q(m);
|
||||
expr_ref_vector sig_vars(m);
|
||||
for (unsigned j = 0; j < sig.size(); ++j) {
|
||||
sig_vars.push_back(m.mk_const(sig[sig.size()-j-1]));
|
||||
}
|
||||
expr_abstract(m, 0, sig_vars.size(), sig_vars.c_ptr(), prop, q);
|
||||
if (sig.empty()) {
|
||||
md->register_decl(pred, q);
|
||||
}
|
||||
else {
|
||||
func_interp* fi = alloc(func_interp, m, sig.size());
|
||||
fi->set_else(q);
|
||||
md->register_decl(pred, fi);
|
||||
}
|
||||
}
|
||||
TRACE("pdr", model_smt2_pp(tout, m, *md, 0););
|
||||
apply(const_cast<model_converter_ref&>(m_mc), md);
|
||||
}
|
||||
|
||||
expr_ref inductive_property::to_expr() const {
|
||||
model_ref md;
|
||||
expr_ref result(m);
|
||||
to_model(md);
|
||||
model2expr(md, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
void inductive_property::display(datalog::rule_manager& rm, ptr_vector<datalog::rule> const& rules, std::ostream& out) const {
|
||||
func_decl_set bound_decls, aux_decls;
|
||||
collect_decls_proc collect_decls(bound_decls, aux_decls);
|
||||
|
||||
for (unsigned i = 0; i < m_relation_info.size(); ++i) {
|
||||
bound_decls.insert(m_relation_info[i].m_pred);
|
||||
func_decl_ref_vector const& sig = m_relation_info[i].m_vars;
|
||||
for (unsigned j = 0; j < sig.size(); ++j) {
|
||||
bound_decls.insert(sig[j]);
|
||||
}
|
||||
for_each_expr(collect_decls, m_relation_info[i].m_body);
|
||||
}
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
bound_decls.insert(rules[i]->get_decl());
|
||||
}
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
unsigned u_sz = rules[i]->get_uninterpreted_tail_size();
|
||||
unsigned t_sz = rules[i]->get_tail_size();
|
||||
for (unsigned j = u_sz; j < t_sz; ++j) {
|
||||
for_each_expr(collect_decls, rules[i]->get_tail(j));
|
||||
}
|
||||
}
|
||||
smt2_pp_environment_dbg env(m);
|
||||
func_decl_set::iterator it = aux_decls.begin(), end = aux_decls.end();
|
||||
for (; it != end; ++it) {
|
||||
func_decl* f = *it;
|
||||
ast_smt2_pp(out, f, env);
|
||||
out << "\n";
|
||||
}
|
||||
|
||||
out << to_string() << "\n";
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
out << "(push)\n";
|
||||
out << "(assert (not\n";
|
||||
rm.display_smt2(*rules[i], out);
|
||||
out << "))\n";
|
||||
out << "(check-sat)\n";
|
||||
out << "(pop)\n";
|
||||
}
|
||||
}
|
||||
|
||||
manager::manager(smt_params& fparams, unsigned max_num_contexts, ast_manager& manager) :
|
||||
m(manager),
|
||||
m_fparams(fparams),
|
||||
m_brwr(m),
|
||||
m_mux(m),
|
||||
m_background(m.mk_true(), m),
|
||||
m_contexts(fparams, max_num_contexts, m),
|
||||
m_next_unique_num(0)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void manager::add_new_state(func_decl * s) {
|
||||
SASSERT(s->get_arity()==0); //we currently don't support non-constant states
|
||||
decl_vector vect;
|
||||
SASSERT(o_index(0)==1); //we assume this in the number of retrieved symbols
|
||||
m_mux.create_tuple(s, s->get_arity(), s->get_domain(), s->get_range(), 2, vect);
|
||||
m_o0_preds.push_back(vect[o_index(0)]);
|
||||
}
|
||||
|
||||
func_decl * manager::get_o_pred(func_decl* s, unsigned idx)
|
||||
{
|
||||
func_decl * res = m_mux.try_get_by_prefix(s, o_index(idx));
|
||||
if(res) { return res; }
|
||||
add_new_state(s);
|
||||
res = m_mux.try_get_by_prefix(s, o_index(idx));
|
||||
SASSERT(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
func_decl * manager::get_n_pred(func_decl* s)
|
||||
{
|
||||
func_decl * res = m_mux.try_get_by_prefix(s, n_index());
|
||||
if(res) { return res; }
|
||||
add_new_state(s);
|
||||
res = m_mux.try_get_by_prefix(s, n_index());
|
||||
SASSERT(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
void manager::mk_model_into_cube(const expr_ref_vector & mdl, expr_ref & res) {
|
||||
m_brwr.mk_and(mdl.size(), mdl.c_ptr(), res);
|
||||
}
|
||||
|
||||
void manager::mk_core_into_cube(const expr_ref_vector & core, expr_ref & res) {
|
||||
m_brwr.mk_and(core.size(), core.c_ptr(), res);
|
||||
}
|
||||
|
||||
void manager::mk_cube_into_lemma(expr * cube, expr_ref & res) {
|
||||
m_brwr.mk_not(cube, res);
|
||||
}
|
||||
|
||||
void manager::mk_lemma_into_cube(expr * lemma, expr_ref & res) {
|
||||
m_brwr.mk_not(lemma, res);
|
||||
}
|
||||
|
||||
expr_ref manager::mk_and(unsigned sz, expr* const* exprs) {
|
||||
expr_ref result(m);
|
||||
m_brwr.mk_and(sz, exprs, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref manager::mk_or(unsigned sz, expr* const* exprs) {
|
||||
expr_ref result(m);
|
||||
m_brwr.mk_or(sz, exprs, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref manager::mk_not_and(expr_ref_vector const& conjs) {
|
||||
expr_ref result(m), e(m);
|
||||
expr_ref_vector es(conjs);
|
||||
flatten_and(es);
|
||||
for (unsigned i = 0; i < es.size(); ++i) {
|
||||
m_brwr.mk_not(es[i].get(), e);
|
||||
es[i] = e;
|
||||
}
|
||||
m_brwr.mk_or(es.size(), es.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
void manager::get_or(expr* e, expr_ref_vector& result) {
|
||||
result.push_back(e);
|
||||
for (unsigned i = 0; i < result.size(); ) {
|
||||
e = result[i].get();
|
||||
if (m.is_or(e)) {
|
||||
result.append(to_app(e)->get_num_args(), to_app(e)->get_args());
|
||||
result[i] = result.back();
|
||||
result.pop_back();
|
||||
}
|
||||
else {
|
||||
++i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool manager::try_get_state_and_value_from_atom(expr * atom0, app *& state, app_ref& value)
|
||||
{
|
||||
if(!is_app(atom0)) {
|
||||
return false;
|
||||
}
|
||||
app * atom = to_app(atom0);
|
||||
expr * arg1;
|
||||
expr * arg2;
|
||||
app * candidate_state;
|
||||
app_ref candidate_value(m);
|
||||
if(m.is_not(atom, arg1)) {
|
||||
if(!is_app(arg1)) {
|
||||
return false;
|
||||
}
|
||||
candidate_state = to_app(arg1);
|
||||
candidate_value = m.mk_false();
|
||||
}
|
||||
else if(m.is_eq(atom, arg1, arg2)) {
|
||||
if(!is_app(arg1) || !is_app(arg2)) {
|
||||
return false;
|
||||
}
|
||||
if(!m_mux.is_muxed(to_app(arg1)->get_decl())) {
|
||||
std::swap(arg1, arg2);
|
||||
}
|
||||
candidate_state = to_app(arg1);
|
||||
candidate_value = to_app(arg2);
|
||||
}
|
||||
else {
|
||||
candidate_state = atom;
|
||||
candidate_value = m.mk_true();
|
||||
}
|
||||
if(!m_mux.is_muxed(candidate_state->get_decl())) {
|
||||
return false;
|
||||
}
|
||||
state = candidate_state;
|
||||
value = candidate_value;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool manager::try_get_state_decl_from_atom(expr * atom, func_decl *& state) {
|
||||
app_ref dummy_value_holder(m);
|
||||
app * s;
|
||||
if(try_get_state_and_value_from_atom(atom, s, dummy_value_holder)) {
|
||||
state = s->get_decl();
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
bool manager::implication_surely_holds(expr * lhs, expr * rhs, expr * bg) {
|
||||
smt::kernel sctx(m, get_fparams());
|
||||
if(bg) {
|
||||
sctx.assert_expr(bg);
|
||||
}
|
||||
sctx.assert_expr(lhs);
|
||||
expr_ref neg_rhs(m.mk_not(rhs),m);
|
||||
sctx.assert_expr(neg_rhs);
|
||||
lbool smt_res = sctx.check();
|
||||
return smt_res==l_false;
|
||||
}
|
||||
|
||||
};
|
||||
|
|
@ -1,304 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_manager.h
|
||||
|
||||
Abstract:
|
||||
|
||||
A manager class for PDR, taking care of creating of AST
|
||||
objects and conversions between them.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-25.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef PDR_MANAGER_H_
|
||||
#define PDR_MANAGER_H_
|
||||
|
||||
#include <utility>
|
||||
#include <map>
|
||||
#include "ast/rewriter/bool_rewriter.h"
|
||||
#include "ast/rewriter/expr_replacer.h"
|
||||
#include "ast/expr_substitution.h"
|
||||
#include "util/map.h"
|
||||
#include "util/ref_vector.h"
|
||||
#include "smt/smt_kernel.h"
|
||||
#include "muz/pdr/pdr_util.h"
|
||||
#include "muz/pdr/pdr_sym_mux.h"
|
||||
#include "muz/pdr/pdr_farkas_learner.h"
|
||||
#include "muz/pdr/pdr_smt_context_manager.h"
|
||||
#include "muz/base/dl_rule.h"
|
||||
|
||||
|
||||
namespace smt {
|
||||
class context;
|
||||
}
|
||||
|
||||
namespace pdr {
|
||||
|
||||
struct relation_info {
|
||||
func_decl_ref m_pred;
|
||||
func_decl_ref_vector m_vars;
|
||||
expr_ref m_body;
|
||||
relation_info(ast_manager& m, func_decl* pred, ptr_vector<func_decl> const& vars, expr* b):
|
||||
m_pred(pred, m), m_vars(m, vars.size(), vars.c_ptr()), m_body(b, m) {}
|
||||
relation_info(relation_info const& other): m_pred(other.m_pred), m_vars(other.m_vars), m_body(other.m_body) {}
|
||||
};
|
||||
|
||||
class unknown_exception {};
|
||||
|
||||
class inductive_property {
|
||||
ast_manager& m;
|
||||
model_converter_ref m_mc;
|
||||
vector<relation_info> m_relation_info;
|
||||
expr_ref fixup_clauses(expr* property) const;
|
||||
expr_ref fixup_clause(expr* clause) const;
|
||||
public:
|
||||
inductive_property(ast_manager& m, model_converter_ref& mc, vector<relation_info> const& relations):
|
||||
m(m),
|
||||
m_mc(mc),
|
||||
m_relation_info(relations) {}
|
||||
|
||||
std::string to_string() const;
|
||||
|
||||
expr_ref to_expr() const;
|
||||
|
||||
void to_model(model_ref& md) const;
|
||||
|
||||
void display(datalog::rule_manager& rm, ptr_vector<datalog::rule> const& rules, std::ostream& out) const;
|
||||
};
|
||||
|
||||
class manager
|
||||
{
|
||||
ast_manager& m;
|
||||
smt_params& m_fparams;
|
||||
|
||||
mutable bool_rewriter m_brwr;
|
||||
|
||||
sym_mux m_mux;
|
||||
expr_ref m_background;
|
||||
decl_vector m_o0_preds;
|
||||
pdr::smt_context_manager m_contexts;
|
||||
|
||||
        /** whenever we need a unique number, we take this one and increment it */
        unsigned m_next_unique_num;
|
||||
|
||||
unsigned n_index() const { return 0; }
|
||||
unsigned o_index(unsigned i) const { return i+1; }
|
||||
|
||||
void add_new_state(func_decl * s);
|
||||
|
||||
public:
|
||||
manager(smt_params& fparams, unsigned max_num_contexts, ast_manager & manager);
|
||||
|
||||
ast_manager& get_manager() const { return m; }
|
||||
smt_params& get_fparams() const { return m_fparams; }
|
||||
bool_rewriter& get_brwr() const { return m_brwr; }
|
||||
|
||||
expr_ref mk_and(unsigned sz, expr* const* exprs);
|
||||
expr_ref mk_and(expr_ref_vector const& exprs) {
|
||||
return mk_and(exprs.size(), exprs.c_ptr());
|
||||
}
|
||||
expr_ref mk_and(expr* a, expr* b) {
|
||||
expr* args[2] = { a, b };
|
||||
return mk_and(2, args);
|
||||
}
|
||||
expr_ref mk_or(unsigned sz, expr* const* exprs);
|
||||
expr_ref mk_or(expr_ref_vector const& exprs) {
|
||||
return mk_or(exprs.size(), exprs.c_ptr());
|
||||
}
|
||||
|
||||
expr_ref mk_not_and(expr_ref_vector const& exprs);
|
||||
|
||||
void get_or(expr* e, expr_ref_vector& result);
|
||||
|
||||
//"o" predicates stand for the old states and "n" for the new states
|
||||
func_decl * get_o_pred(func_decl * s, unsigned idx);
|
||||
func_decl * get_n_pred(func_decl * s);
|
||||
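        // Roughly (illustrative comment, not from the original sources): for a
        // state predicate p, sym_mux keeps one copy per index -- an "n" copy
        // for the new (next-state) version and "o" copies o0, o1, ... for the
        // old (previous-state) versions. Rule bodies are phrased over the "o"
        // copies while the head uses the "n" copy; the o2n/n2o/o2o helpers
        // below translate formulas between these copies.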
|
||||
/**
|
||||
Marks symbol as non-model which means it will not appear in models collected by
|
||||
get_state_cube_from_model function.
|
||||
This is to take care of auxiliary symbols introduced by the disjunction relations
|
||||
to relativize lemmas coming from disjuncts.
|
||||
*/
|
||||
void mark_as_non_model(func_decl * p) {
|
||||
m_mux.mark_as_non_model(p);
|
||||
}
|
||||
|
||||
|
||||
func_decl * const * begin_o0_preds() const { return m_o0_preds.begin(); }
|
||||
func_decl * const * end_o0_preds() const { return m_o0_preds.end(); }
|
||||
|
||||
bool is_state_pred(func_decl * p) const { return m_mux.is_muxed(p); }
|
||||
func_decl * to_o0(func_decl * p) { return m_mux.conv(m_mux.get_primary(p), 0, o_index(0)); }
|
||||
|
||||
bool is_o(func_decl * p, unsigned idx) const {
|
||||
return m_mux.has_index(p, o_index(idx));
|
||||
}
|
||||
bool is_o(expr* e, unsigned idx) const {
|
||||
return is_app(e) && is_o(to_app(e)->get_decl(), idx);
|
||||
}
|
||||
bool is_o(func_decl * p) const {
|
||||
unsigned idx;
|
||||
return m_mux.try_get_index(p, idx) && idx!=n_index();
|
||||
}
|
||||
bool is_o(expr* e) const {
|
||||
return is_app(e) && is_o(to_app(e)->get_decl());
|
||||
}
|
||||
bool is_n(func_decl * p) const {
|
||||
return m_mux.has_index(p, n_index());
|
||||
}
|
||||
bool is_n(expr* e) const {
|
||||
return is_app(e) && is_n(to_app(e)->get_decl());
|
||||
}
|
||||
|
||||
        /** true if p should not appear in models propagated into child relations */
        bool is_non_model_sym(func_decl * p) const
        { return m_mux.is_non_model_sym(p); }
|
||||
|
||||
/** true if f doesn't contain any n predicates */
|
||||
bool is_o_formula(expr * f) const {
|
||||
return !m_mux.contains(f, n_index());
|
||||
}
|
||||
|
||||
/** true if f contains only o state preds of index o_idx */
|
||||
bool is_o_formula(expr * f, unsigned o_idx) const {
|
||||
return m_mux.is_homogenous_formula(f, o_index(o_idx));
|
||||
}
|
||||
/** true if f doesn't contain any o predicates */
|
||||
bool is_n_formula(expr * f) const {
|
||||
return m_mux.is_homogenous_formula(f, n_index());
|
||||
}
|
||||
|
||||
func_decl * o2n(func_decl * p, unsigned o_idx) {
|
||||
return m_mux.conv(p, o_index(o_idx), n_index());
|
||||
}
|
||||
func_decl * o2o(func_decl * p, unsigned src_idx, unsigned tgt_idx) {
|
||||
return m_mux.conv(p, o_index(src_idx), o_index(tgt_idx));
|
||||
}
|
||||
func_decl * n2o(func_decl * p, unsigned o_idx) {
|
||||
return m_mux.conv(p, n_index(), o_index(o_idx));
|
||||
}
|
||||
|
||||
void formula_o2n(expr * f, expr_ref & result, unsigned o_idx, bool homogenous=true)
|
||||
{ m_mux.conv_formula(f, o_index(o_idx), n_index(), result, homogenous); }
|
||||
|
||||
void formula_n2o(expr * f, expr_ref & result, unsigned o_idx, bool homogenous=true)
|
||||
{ m_mux.conv_formula(f, n_index(), o_index(o_idx), result, homogenous); }
|
||||
|
||||
void formula_n2o(unsigned o_idx, bool homogenous, expr_ref & result)
|
||||
{ m_mux.conv_formula(result.get(), n_index(), o_index(o_idx), result, homogenous); }
|
||||
|
||||
void formula_o2o(expr * src, expr_ref & tgt, unsigned src_idx, unsigned tgt_idx, bool homogenous=true)
|
||||
{ m_mux.conv_formula(src, o_index(src_idx), o_index(tgt_idx), tgt, homogenous); }
|
||||
|
||||
/**
|
||||
Return true if all state symbols which e contains are of one kind (either "n" or one of "o").
|
||||
*/
|
||||
bool is_homogenous_formula(expr * e) const {
|
||||
return m_mux.is_homogenous_formula(e);
|
||||
}
|
||||
|
||||
/**
|
||||
Collect indices used in expression.
|
||||
*/
|
||||
void collect_indices(expr* e, unsigned_vector& indices) const {
|
||||
m_mux.collect_indices(e, indices);
|
||||
}
|
||||
|
||||
/**
|
||||
Collect used variables of each index.
|
||||
*/
|
||||
void collect_variables(expr* e, vector<ptr_vector<app> >& vars) const {
|
||||
m_mux.collect_variables(e, vars);
|
||||
}
|
||||
|
||||
        /**
        Return true iff s1 and s2 are both state symbols but of different kinds,
        i.e. one is "n" and the other is "o", or they are "o" symbols of different indices.
        If one (or both) of them is not a state symbol, return false.
        */
        bool have_different_state_kinds(func_decl * s1, func_decl * s2) const {
            unsigned i1, i2;
            return m_mux.try_get_index(s1, i1) && m_mux.try_get_index(s2, i2) && i1!=i2;
        }
|
||||
/**
|
||||
Increase indexes of state symbols in formula by dist.
|
||||
The 'N' index becomes 'O' index with number dist-1.
|
||||
*/
|
||||
void formula_shift(expr * src, expr_ref & tgt, unsigned dist) {
|
||||
SASSERT(n_index()==0);
|
||||
SASSERT(o_index(0)==1);
|
||||
m_mux.shift_formula(src, dist, tgt);
|
||||
}
|
||||
|
||||
void mk_model_into_cube(const expr_ref_vector & mdl, expr_ref & res);
|
||||
void mk_core_into_cube(const expr_ref_vector & core, expr_ref & res);
|
||||
void mk_cube_into_lemma(expr * cube, expr_ref & res);
|
||||
void mk_lemma_into_cube(expr * lemma, expr_ref & res);
|
||||
|
||||
/**
|
||||
Remove from vec all atoms that do not have an "o" state.
|
||||
The order of elements in vec may change.
|
||||
An assumption is that atoms having "o" state of given index
|
||||
do not have "o" states of other indexes or "n" states.
|
||||
*/
|
||||
void filter_o_atoms(expr_ref_vector& vec, unsigned o_idx) const
|
||||
{ m_mux.filter_idx(vec, o_index(o_idx)); }
|
||||
void filter_n_atoms(expr_ref_vector& vec) const
|
||||
{ m_mux.filter_idx(vec, n_index()); }
|
||||
|
||||
/**
|
||||
Partition literals into o_lits and others.
|
||||
*/
|
||||
void partition_o_atoms(expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other,
|
||||
unsigned o_idx) const {
|
||||
m_mux.partition_o_idx(lits, o_lits, other, o_index(o_idx));
|
||||
}
|
||||
|
||||
void filter_out_non_model_atoms(expr_ref_vector& vec) const
|
||||
{ m_mux.filter_non_model_lits(vec); }
|
||||
|
||||
bool try_get_state_and_value_from_atom(expr * atom, app *& state, app_ref& value);
|
||||
bool try_get_state_decl_from_atom(expr * atom, func_decl *& state);
|
||||
|
||||
|
||||
std::string pp_model(const model_core & mdl) const
|
||||
{ return m_mux.pp_model(mdl); }
|
||||
|
||||
|
||||
void set_background(expr* b) { m_background = b; }
|
||||
|
||||
expr* get_background() const { return m_background; }
|
||||
|
||||
|
||||
/**
|
||||
Return true if we can show that lhs => rhs. The function can have false negatives
|
||||
(i.e. when smt::context returns unknown), but no false positives.
|
||||
|
||||
bg is background knowledge and can be null
|
||||
*/
|
||||
bool implication_surely_holds(expr * lhs, expr * rhs, expr * bg=nullptr);
|
||||
|
||||
unsigned get_unique_num() { return m_next_unique_num++; }
|
||||
|
||||
pdr::smt_context* mk_fresh() { return m_contexts.mk_fresh(); }
|
||||
|
||||
void collect_statistics(statistics& st) const { m_contexts.collect_statistics(st); }
|
||||
|
||||
void reset_statistics() { m_contexts.reset_statistics(); }
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
@ -1,459 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
prop_solver.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
SMT solver abstraction for PDR.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-17.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include <sstream>
|
||||
#include "model/model.h"
|
||||
#include "muz/pdr/pdr_util.h"
|
||||
#include "muz/pdr/pdr_prop_solver.h"
|
||||
#include "ast/ast_smt2_pp.h"
|
||||
#include "muz/base/dl_util.h"
|
||||
#include "model/model_pp.h"
|
||||
#include "smt/params/smt_params.h"
|
||||
#include "ast/datatype_decl_plugin.h"
|
||||
#include "ast/bv_decl_plugin.h"
|
||||
#include "muz/pdr/pdr_farkas_learner.h"
|
||||
#include "ast/ast_smt2_pp.h"
|
||||
#include "ast/rewriter/expr_replacer.h"
|
||||
|
||||
//
// Auxiliary structure that introduces propositional names (proxies) for assumptions
// that are not propositional. It works around the restriction of smt::context that
// assumptions must be propositional literals.
//

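// In outline (illustrative, not from the original sources): for a non-atomic
// assumption literal `lit`, mk_proxy below allocates a fresh Boolean constant
// p_i, asserts the clause (or (not p_i) lit) -- i.e. p_i => lit -- and passes
// p_i to the solver as the assumption. Unsat cores are then reported over the
// p_i and mapped back to the original literals by undo_proxies (or eliminated
// altogether by elim_proxies).
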
namespace pdr {
|
||||
|
||||
class prop_solver::safe_assumptions {
|
||||
prop_solver& s;
|
||||
ast_manager& m;
|
||||
expr_ref_vector m_atoms;
|
||||
expr_ref_vector m_assumptions;
|
||||
obj_map<app,expr *> m_proxies2expr;
|
||||
obj_map<expr, app*> m_expr2proxies;
|
||||
unsigned m_num_proxies;
|
||||
|
||||
app * mk_proxy(expr* literal) {
|
||||
app* res;
|
||||
SASSERT(!is_var(literal)); //it doesn't make sense to introduce names to variables
|
||||
if (m_expr2proxies.find(literal, res)) {
|
||||
return res;
|
||||
}
|
||||
SASSERT(s.m_proxies.size() >= m_num_proxies);
|
||||
if (m_num_proxies == s.m_proxies.size()) {
|
||||
std::stringstream name;
|
||||
name << "pdr_proxy_" << s.m_proxies.size();
|
||||
res = m.mk_const(symbol(name.str().c_str()), m.mk_bool_sort());
|
||||
s.m_proxies.push_back(res);
|
||||
s.m_aux_symbols.insert(res->get_decl());
|
||||
}
|
||||
else {
|
||||
res = s.m_proxies[m_num_proxies].get();
|
||||
}
|
||||
++m_num_proxies;
|
||||
m_expr2proxies.insert(literal, res);
|
||||
m_proxies2expr.insert(res, literal);
|
||||
expr_ref implies(m.mk_or(m.mk_not(res), literal), m);
|
||||
s.m_ctx->assert_expr(implies);
|
||||
m_assumptions.push_back(implies);
|
||||
TRACE("pdr_verbose", tout << "name asserted " << mk_pp(implies, m) << "\n";);
|
||||
return res;
|
||||
}
|
||||
|
||||
void mk_safe(expr_ref_vector& conjs) {
|
||||
flatten_and(conjs);
|
||||
expand_literals(conjs);
|
||||
for (unsigned i = 0; i < conjs.size(); ++i) {
|
||||
expr * lit = conjs[i].get();
|
||||
expr * lit_core = lit;
|
||||
m.is_not(lit, lit_core);
|
||||
SASSERT(!m.is_true(lit));
|
||||
if (!is_uninterp(lit_core) || to_app(lit_core)->get_num_args() != 0) {
|
||||
conjs[i] = mk_proxy(lit);
|
||||
}
|
||||
}
|
||||
m_assumptions.append(conjs);
|
||||
}
|
||||
|
||||
expr* apply_accessor(
|
||||
ptr_vector<func_decl> const& acc,
|
||||
unsigned j,
|
||||
func_decl* f,
|
||||
expr* c) {
|
||||
if (is_app(c) && to_app(c)->get_decl() == f) {
|
||||
return to_app(c)->get_arg(j);
|
||||
}
|
||||
else {
|
||||
return m.mk_app(acc[j], c);
|
||||
}
|
||||
}
|
||||
|
||||
void expand_literals(expr_ref_vector& conjs) {
|
||||
arith_util arith(m);
|
||||
datatype_util dt(m);
|
||||
bv_util bv(m);
|
||||
expr* e1, *e2, *c, *val;
|
||||
rational r;
|
||||
unsigned bv_size;
|
||||
|
||||
TRACE("pdr",
|
||||
tout << "begin expand\n";
|
||||
for (unsigned i = 0; i < conjs.size(); ++i) {
|
||||
tout << mk_pp(conjs[i].get(), m) << "\n";
|
||||
});
|
||||
|
||||
for (unsigned i = 0; i < conjs.size(); ++i) {
|
||||
expr* e = conjs[i].get();
|
||||
if (m.is_eq(e, e1, e2) && arith.is_int_real(e1)) {
|
||||
conjs[i] = arith.mk_le(e1,e2);
|
||||
if (i+1 == conjs.size()) {
|
||||
conjs.push_back(arith.mk_ge(e1, e2));
|
||||
}
|
||||
else {
|
||||
conjs.push_back(conjs[i+1].get());
|
||||
conjs[i+1] = arith.mk_ge(e1, e2);
|
||||
}
|
||||
++i;
|
||||
}
|
||||
else if ((m.is_eq(e, c, val) && is_app(val) && dt.is_constructor(to_app(val))) ||
|
||||
(m.is_eq(e, val, c) && is_app(val) && dt.is_constructor(to_app(val)))){
|
||||
func_decl* f = to_app(val)->get_decl();
|
||||
func_decl* r = dt.get_constructor_is(f);
|
||||
conjs[i] = m.mk_app(r, c);
|
||||
ptr_vector<func_decl> const& acc = *dt.get_constructor_accessors(f);
|
||||
for (unsigned j = 0; j < acc.size(); ++j) {
|
||||
conjs.push_back(m.mk_eq(apply_accessor(acc, j, f, c), to_app(val)->get_arg(j)));
|
||||
}
|
||||
}
|
||||
else if ((m.is_eq(e, c, val) && bv.is_numeral(val, r, bv_size)) ||
|
||||
(m.is_eq(e, val, c) && bv.is_numeral(val, r, bv_size))) {
|
||||
rational two(2);
|
||||
for (unsigned j = 0; j < bv_size; ++j) {
|
||||
parameter p(j);
|
||||
//expr* e = m.mk_app(bv.get_family_id(), OP_BIT2BOOL, 1, &p, 1, &c);
|
||||
expr* e = m.mk_eq(m.mk_app(bv.get_family_id(), OP_BIT1), bv.mk_extract(j, j, c));
|
||||
if ((r % two).is_zero()) {
|
||||
e = m.mk_not(e);
|
||||
}
|
||||
r = div(r, two);
|
||||
if (j == 0) {
|
||||
conjs[i] = e;
|
||||
}
|
||||
else {
|
||||
conjs.push_back(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
TRACE("pdr",
|
||||
tout << "end expand\n";
|
||||
for (unsigned i = 0; i < conjs.size(); ++i) {
|
||||
tout << mk_pp(conjs[i].get(), m) << "\n";
|
||||
});
|
||||
}
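        // Worked example (illustrative): assuming a 3-bit constant c, the loop
        // above expands the equality (= c #b101) into the three bit literals
        //     (= #b1 ((_ extract 0 0) c))
        //     (not (= #b1 ((_ extract 1 1) c)))
        //     (= #b1 ((_ extract 2 2) c))
        // and an integer or real equality (= e1 e2) is split into the pair
        // (<= e1 e2) and (>= e1 e2), presumably so that Farkas-style core
        // extraction can work over inequalities.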
|
||||
|
||||
public:
|
||||
safe_assumptions(prop_solver& s, expr_ref_vector const& assumptions):
|
||||
s(s), m(s.m), m_atoms(assumptions), m_assumptions(m), m_num_proxies(0) {
|
||||
mk_safe(m_atoms);
|
||||
}
|
||||
|
||||
~safe_assumptions() {
|
||||
}
|
||||
|
||||
expr_ref_vector const& atoms() const { return m_atoms; }
|
||||
|
||||
unsigned assumptions_size() const { return m_assumptions.size(); }
|
||||
|
||||
expr* assumptions(unsigned i) const { return m_assumptions[i]; }
|
||||
|
||||
void undo_proxies(expr_ref_vector& es) {
|
||||
expr_ref e(m);
|
||||
expr* r;
|
||||
for (unsigned i = 0; i < es.size(); ++i) {
|
||||
e = es[i].get();
|
||||
if (is_app(e) && m_proxies2expr.find(to_app(e), r)) {
|
||||
es[i] = r;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void elim_proxies(expr_ref_vector& es) {
|
||||
expr_substitution sub(m, false, m.proofs_enabled());
|
||||
proof_ref pr(m);
|
||||
if (m.proofs_enabled()) {
|
||||
pr = m.mk_asserted(m.mk_true());
|
||||
}
|
||||
obj_map<app,expr*>::iterator it = m_proxies2expr.begin(), end = m_proxies2expr.end();
|
||||
for (; it != end; ++it) {
|
||||
sub.insert(it->m_key, m.mk_true(), pr);
|
||||
}
|
||||
scoped_ptr<expr_replacer> rep = mk_default_expr_replacer(m);
|
||||
rep->set_substitution(&sub);
|
||||
replace_proxies(*rep, es);
|
||||
}
|
||||
private:
|
||||
|
||||
void replace_proxies(expr_replacer& rep, expr_ref_vector& es) {
|
||||
expr_ref e(m);
|
||||
for (unsigned i = 0; i < es.size(); ++i) {
|
||||
e = es[i].get();
|
||||
rep(e);
|
||||
es[i] = e;
|
||||
if (m.is_true(e)) {
|
||||
es[i] = es.back();
|
||||
es.pop_back();
|
||||
--i;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
prop_solver::prop_solver(manager& pm, symbol const& name) :
|
||||
m_fparams(pm.get_fparams()),
|
||||
m(pm.get_manager()),
|
||||
m_pm(pm),
|
||||
m_name(name),
|
||||
m_ctx(pm.mk_fresh()),
|
||||
m_pos_level_atoms(m),
|
||||
m_neg_level_atoms(m),
|
||||
m_proxies(m),
|
||||
m_core(nullptr),
|
||||
m_model(nullptr),
|
||||
m_consequences(nullptr),
|
||||
m_subset_based_core(false),
|
||||
m_use_farkas(false),
|
||||
m_in_level(false),
|
||||
m_current_level(0)
|
||||
{
|
||||
m_ctx->assert_expr(m_pm.get_background());
|
||||
}
|
||||
|
||||
void prop_solver::add_level() {
|
||||
unsigned idx = level_cnt();
|
||||
std::stringstream name;
|
||||
name << m_name << "#level_" << idx;
|
||||
func_decl * lev_pred = m.mk_fresh_func_decl(name.str().c_str(), 0, nullptr,m.mk_bool_sort());
|
||||
m_aux_symbols.insert(lev_pred);
|
||||
m_level_preds.push_back(lev_pred);
|
||||
|
||||
app_ref pos_la(m.mk_const(lev_pred), m);
|
||||
app_ref neg_la(m.mk_not(pos_la.get()), m);
|
||||
|
||||
m_pos_level_atoms.push_back(pos_la);
|
||||
m_neg_level_atoms.push_back(neg_la);
|
||||
|
||||
m_level_atoms_set.insert(pos_la.get());
|
||||
m_level_atoms_set.insert(neg_la.get());
|
||||
}
|
||||
|
||||
void prop_solver::ensure_level(unsigned lvl) {
|
||||
while (lvl>=level_cnt()) {
|
||||
add_level();
|
||||
}
|
||||
}
|
||||
|
||||
unsigned prop_solver::level_cnt() const {
|
||||
return m_level_preds.size();
|
||||
}
|
||||
|
||||
void prop_solver::push_level_atoms(unsigned level, expr_ref_vector& tgt) const {
|
||||
unsigned lev_cnt = level_cnt();
|
||||
for (unsigned i=0; i<lev_cnt; i++) {
|
||||
bool active = i>=level;
|
||||
app * lev_atom = active ? m_neg_level_atoms[i] : m_pos_level_atoms[i];
|
||||
tgt.push_back(lev_atom);
|
||||
}
|
||||
}
|
||||
|
||||
void prop_solver::add_formula(expr * form) {
|
||||
SASSERT(!m_in_level);
|
||||
m_ctx->assert_expr(form);
|
||||
IF_VERBOSE(21, verbose_stream() << "$ asserted " << mk_pp(form, m) << "\n";);
|
||||
TRACE("pdr", tout << "add_formula: " << mk_pp(form, m) << "\n";);
|
||||
}
|
||||
|
||||
void prop_solver::add_level_formula(expr * form, unsigned level) {
|
||||
ensure_level(level);
|
||||
app * lev_atom = m_pos_level_atoms[level].get();
|
||||
app_ref lform(m.mk_or(form, lev_atom), m);
|
||||
add_formula(lform.get());
|
||||
}
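    // How the level encoding above plays out (illustrative summary, not from
    // the original sources): a formula added for level k is asserted as the
    // guarded clause (or form L_k), where L_k is the positive level atom of
    // level k. A check performed at level lvl assumes (not L_i) for every
    // i >= lvl and L_i for every i < lvl, so the guard forces `form` exactly
    // when k >= lvl and renders the clause vacuous otherwise.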
|
||||
|
||||
|
||||
lbool prop_solver::check_safe_assumptions(
|
||||
safe_assumptions& safe,
|
||||
const expr_ref_vector& atoms)
|
||||
{
|
||||
flet<bool> _model(m_fparams.m_model, m_model != nullptr);
|
||||
expr_ref_vector expr_atoms(m);
|
||||
expr_atoms.append(atoms.size(), atoms.c_ptr());
|
||||
|
||||
if (m_in_level) {
|
||||
push_level_atoms(m_current_level, expr_atoms);
|
||||
}
|
||||
|
||||
lbool result = m_ctx->check(expr_atoms);
|
||||
|
||||
TRACE("pdr",
|
||||
tout << mk_pp(m_pm.mk_and(expr_atoms), m) << "\n";
|
||||
tout << result << "\n";);
|
||||
|
||||
if (result == l_true && m_model) {
|
||||
m_ctx->get_model(*m_model);
|
||||
TRACE("pdr_verbose", model_pp(tout, **m_model); );
|
||||
}
|
||||
|
||||
if (result == l_false) {
|
||||
unsigned core_size = m_ctx->get_unsat_core_size();
|
||||
m_assumes_level = false;
|
||||
for (unsigned i = 0; i < core_size; ++i) {
|
||||
if (m_level_atoms_set.contains(m_ctx->get_unsat_core_expr(i))) {
|
||||
m_assumes_level = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (result == l_false &&
|
||||
m_core &&
|
||||
m.proofs_enabled() &&
|
||||
m_use_farkas &&
|
||||
!m_subset_based_core) {
|
||||
extract_theory_core(safe);
|
||||
}
|
||||
else if (result == l_false && m_core) {
|
||||
extract_subset_core(safe);
|
||||
SASSERT(expr_atoms.size() >= m_core->size());
|
||||
}
|
||||
m_core = nullptr;
|
||||
m_model = nullptr;
|
||||
m_subset_based_core = false;
|
||||
return result;
|
||||
}
|
||||
|
||||
void prop_solver::extract_subset_core(safe_assumptions& safe) {
|
||||
unsigned core_size = m_ctx->get_unsat_core_size();
|
||||
m_core->reset();
|
||||
for (unsigned i = 0; i < core_size; ++i) {
|
||||
expr * core_expr = m_ctx->get_unsat_core_expr(i);
|
||||
SASSERT(is_app(core_expr));
|
||||
|
||||
if (m_level_atoms_set.contains(core_expr)) {
|
||||
continue;
|
||||
}
|
||||
if (m_ctx->is_aux_predicate(core_expr)) {
|
||||
continue;
|
||||
}
|
||||
m_core->push_back(to_app(core_expr));
|
||||
}
|
||||
|
||||
safe.undo_proxies(*m_core);
|
||||
|
||||
TRACE("pdr",
|
||||
tout << "core_exprs: ";
|
||||
for (unsigned i = 0; i < core_size; ++i) {
|
||||
tout << mk_pp(m_ctx->get_unsat_core_expr(i), m) << " ";
|
||||
}
|
||||
tout << "\n";
|
||||
tout << "core: " << mk_pp(m_pm.mk_and(*m_core), m) << "\n";
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
void prop_solver::extract_theory_core(safe_assumptions& safe) {
|
||||
proof_ref pr(m);
|
||||
pr = m_ctx->get_proof();
|
||||
IF_VERBOSE(21, verbose_stream() << mk_ismt2_pp(pr, m) << "\n";);
|
||||
farkas_learner fl(m_fparams, m);
|
||||
expr_ref_vector lemmas(m);
|
||||
obj_hashtable<expr> bs;
|
||||
for (unsigned i = 0; i < safe.assumptions_size(); ++i) {
|
||||
bs.insert(safe.assumptions(i));
|
||||
}
|
||||
fl.get_lemmas(pr, bs, lemmas);
|
||||
safe.elim_proxies(lemmas);
|
||||
fl.simplify_lemmas(lemmas); // redundant?
|
||||
|
||||
bool outside_of_logic =
|
||||
(m_fparams.m_arith_mode == AS_DIFF_LOGIC &&
|
||||
!is_difference_logic(m, lemmas.size(), lemmas.c_ptr())) ||
|
||||
(m_fparams.m_arith_mode == AS_UTVPI &&
|
||||
!is_utvpi_logic(m, lemmas.size(), lemmas.c_ptr()));
|
||||
|
||||
if (outside_of_logic) {
|
||||
IF_VERBOSE(2,
|
||||
verbose_stream() << "not diff\n";
|
||||
for (unsigned i = 0; i < lemmas.size(); ++i) {
|
||||
verbose_stream() << mk_pp(lemmas[i].get(), m) << "\n";
|
||||
});
|
||||
extract_subset_core(safe);
|
||||
}
|
||||
else {
|
||||
|
||||
IF_VERBOSE(2,
|
||||
verbose_stream() << "Lemmas\n";
|
||||
for (unsigned i = 0; i < lemmas.size(); ++i) {
|
||||
verbose_stream() << mk_pp(lemmas[i].get(), m) << "\n";
|
||||
});
|
||||
|
||||
m_core->reset();
|
||||
m_core->append(lemmas);
|
||||
|
||||
if (m_consequences) {
|
||||
fl.get_consequences(pr, bs, *m_consequences);
|
||||
}
|
||||
}
|
||||
}
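    // Summary of the flow above (descriptive comment, not from the original
    // sources): when proofs are enabled and Farkas extraction is requested, the
    // refutation proof is mined for consequences of the named assumptions via
    // farkas_learner::get_lemmas; the proxies introduced by safe_assumptions
    // are eliminated from the lemmas, and if the result falls outside the
    // configured arithmetic fragment (difference logic or UTVPI) the solver
    // falls back to the plain subset-based unsat core.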
|
||||
|
||||
lbool prop_solver::check_assumptions(const expr_ref_vector & atoms) {
|
||||
return check_assumptions_and_formula(atoms, m.mk_true());
|
||||
}
|
||||
|
||||
lbool prop_solver::check_conjunction_as_assumptions(expr * conj) {
|
||||
expr_ref_vector asmp(m);
|
||||
asmp.push_back(conj);
|
||||
return check_assumptions(asmp);
|
||||
}
|
||||
|
||||
lbool prop_solver::check_assumptions_and_formula(const expr_ref_vector & atoms, expr * form)
|
||||
{
|
||||
pdr::smt_context::scoped _scoped(*m_ctx);
|
||||
safe_assumptions safe(*this, atoms);
|
||||
m_ctx->assert_expr(form);
|
||||
CTRACE("pdr", !m.is_true(form), tout << "check with formula: " << mk_pp(form, m) << "\n";);
|
||||
lbool res = check_safe_assumptions(safe, safe.atoms());
|
||||
|
||||
//
|
||||
// we don't have to undo model naming, as from the model
|
||||
// we extract the values for state variables directly
|
||||
//
|
||||
return res;
|
||||
}
|
||||
|
||||
void prop_solver::collect_statistics(statistics& st) const {
|
||||
}
|
||||
|
||||
void prop_solver::reset_statistics() {
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -1,139 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
prop_solver.h
|
||||
|
||||
Abstract:
|
||||
|
||||
SAT solver abstraction for PDR.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-17.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef PROP_SOLVER_H_
|
||||
#define PROP_SOLVER_H_
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include "ast/ast.h"
|
||||
#include "util/obj_hashtable.h"
|
||||
#include "smt/smt_kernel.h"
|
||||
#include "util/util.h"
|
||||
#include "util/vector.h"
|
||||
#include "muz/pdr/pdr_manager.h"
|
||||
#include "muz/pdr/pdr_smt_context_manager.h"
|
||||
|
||||
|
||||
namespace pdr {
|
||||
class prop_solver {
|
||||
|
||||
private:
|
||||
smt_params& m_fparams;
|
||||
ast_manager& m;
|
||||
manager& m_pm;
|
||||
symbol m_name;
|
||||
scoped_ptr<pdr::smt_context> m_ctx;
|
||||
decl_vector m_level_preds;
|
||||
app_ref_vector m_pos_level_atoms; // atoms used to identify level
|
||||
app_ref_vector m_neg_level_atoms; //
|
||||
obj_hashtable<expr> m_level_atoms_set;
|
||||
app_ref_vector m_proxies; // predicates for assumptions
|
||||
expr_ref_vector* m_core;
|
||||
model_ref* m_model;
|
||||
expr_ref_vector* m_consequences;
|
||||
bool m_subset_based_core;
|
||||
bool m_assumes_level;
|
||||
bool m_use_farkas;
|
||||
func_decl_set m_aux_symbols;
|
||||
bool m_in_level;
|
||||
unsigned m_current_level; // set when m_in_level
|
||||
|
||||
        /** Add the level atoms that activate the given level into a vector */
        void push_level_atoms(unsigned level, expr_ref_vector & tgt) const;
|
||||
void ensure_level(unsigned lvl);
|
||||
|
||||
class safe_assumptions;
|
||||
|
||||
void extract_theory_core(safe_assumptions& assumptions);
|
||||
|
||||
void extract_subset_core(safe_assumptions& assumptions);
|
||||
|
||||
lbool check_safe_assumptions(
|
||||
safe_assumptions& assumptions,
|
||||
expr_ref_vector const& atoms);
|
||||
|
||||
|
||||
public:
|
||||
prop_solver(pdr::manager& pm, symbol const& name);
|
||||
|
||||
        /** return true if s is a symbol introduced by prop_solver */
        bool is_aux_symbol(func_decl * s) const {
            return
                m_aux_symbols.contains(s) ||
                m_ctx->is_aux_predicate(s);
        }
|
||||
void set_core(expr_ref_vector* core) { m_core = core; }
|
||||
void set_model(model_ref* mdl) { m_model = mdl; }
|
||||
void set_subset_based_core(bool f) { m_subset_based_core = f; }
|
||||
void set_consequences(expr_ref_vector* consequences) { m_consequences = consequences; }
|
||||
|
||||
bool assumes_level() const { return m_assumes_level; }
|
||||
|
||||
void add_level();
|
||||
unsigned level_cnt() const;
|
||||
|
||||
class scoped_level {
|
||||
bool& m_lev;
|
||||
public:
|
||||
scoped_level(prop_solver& ps, unsigned lvl):m_lev(ps.m_in_level) {
|
||||
SASSERT(!m_lev); m_lev = true; ps.m_current_level = lvl;
|
||||
}
|
||||
~scoped_level() { m_lev = false; }
|
||||
};
|
||||
|
||||
void set_use_farkas(bool f) { m_use_farkas = f; }
|
||||
bool get_use_farkas() const { return m_use_farkas; }
|
||||
|
||||
void add_formula(expr * form);
|
||||
void add_level_formula(expr * form, unsigned level);
|
||||
|
||||
        /**
         * Return l_true if the conjunction of atoms is consistent with the current state of
         * the solver, l_false if it is inconsistent, and l_undef otherwise.
         *
         * If the conjunction of atoms is inconsistent with the solver state and a core was
         * registered via set_core, that core is filled with an unsatisfiable subset of the atoms.
         *
         * If the conjunction of atoms is consistent with the solver state and a model was
         * registered via set_model, that model receives the satisfying assignment.
         */
lbool check_assumptions(const expr_ref_vector & atoms);
|
||||
|
||||
lbool check_conjunction_as_assumptions(expr * conj);
|
||||
|
||||
/**
|
||||
* Like check_assumptions, except it also asserts an extra formula
|
||||
*/
|
||||
lbool check_assumptions_and_formula(
|
||||
const expr_ref_vector & atoms,
|
||||
expr * form);
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
|
||||
void reset_statistics();
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
|
@ -1,132 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
reachable_cache.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Object for caching of reachable states.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-9-14.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include "muz/pdr/pdr_reachable_cache.h"
|
||||
|
||||
namespace pdr {
|
||||
|
||||
reachable_cache::reachable_cache(pdr::manager & pm, datalog::PDR_CACHE_MODE cm)
|
||||
: m(pm.get_manager()),
|
||||
m_pm(pm),
|
||||
m_ctx(nullptr),
|
||||
m_ref_holder(m),
|
||||
m_disj_connector(m),
|
||||
m_cache_mode(cm) {
|
||||
if (m_cache_mode == datalog::CONSTRAINT_CACHE) {
|
||||
m_ctx = pm.mk_fresh();
|
||||
m_ctx->assert_expr(m_pm.get_background());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void reachable_cache::add_disjuncted_formula(expr * f) {
|
||||
app_ref new_connector(m.mk_fresh_const("disj_conn", m.mk_bool_sort()), m);
|
||||
app_ref neg_new_connector(m.mk_not(new_connector), m);
|
||||
app_ref extended_form(m);
|
||||
|
||||
if(m_disj_connector) {
|
||||
extended_form = m.mk_or(m_disj_connector, neg_new_connector, f);
|
||||
}
|
||||
else {
|
||||
extended_form = m.mk_or(neg_new_connector, f);
|
||||
}
|
||||
if (m_ctx) {
|
||||
m_ctx->assert_expr(extended_form);
|
||||
}
|
||||
|
||||
m_disj_connector = new_connector;
|
||||
}
|
||||
|
||||
void reachable_cache::add_reachable(expr * cube) {
|
||||
|
||||
switch (m_cache_mode) {
|
||||
case datalog::NO_CACHE:
|
||||
break;
|
||||
|
||||
case datalog::HASH_CACHE:
|
||||
m_stats.m_inserts++;
|
||||
m_cache.insert(cube);
|
||||
m_ref_holder.push_back(cube);
|
||||
break;
|
||||
|
||||
case datalog::CONSTRAINT_CACHE:
|
||||
m_stats.m_inserts++;
|
||||
TRACE("pdr", tout << mk_pp(cube, m) << "\n";);
|
||||
add_disjuncted_formula(cube);
|
||||
break;
|
||||
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
bool reachable_cache::is_reachable(expr * cube) {
|
||||
bool found = false;
|
||||
switch (m_cache_mode) {
|
||||
case datalog::NO_CACHE:
|
||||
return false;
|
||||
|
||||
case datalog::HASH_CACHE:
|
||||
found = m_cache.contains(cube);
|
||||
break;
|
||||
|
||||
case datalog::CONSTRAINT_CACHE: {
|
||||
if(!m_disj_connector) {
|
||||
found = false;
|
||||
break;
|
||||
}
|
||||
expr * connector = m_disj_connector.get();
|
||||
expr_ref_vector assms(m);
|
||||
assms.push_back(connector);
|
||||
m_ctx->push();
|
||||
m_ctx->assert_expr(cube);
|
||||
lbool res = m_ctx->check(assms);
|
||||
m_ctx->pop();
|
||||
|
||||
TRACE("pdr", tout << "is_reachable: " << res << " " << mk_pp(cube, m) << "\n";);
|
||||
|
||||
found = res == l_true;
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
if (found) {
|
||||
m_stats.m_hits++;
|
||||
}
|
||||
else {
|
||||
m_stats.m_miss++;
|
||||
}
|
||||
return found;
|
||||
}
|
||||
|
||||
void reachable_cache::collect_statistics(statistics& st) const {
|
||||
st.update("cache inserts", m_stats.m_inserts);
|
||||
st.update("cache miss", m_stats.m_miss);
|
||||
st.update("cache hits", m_stats.m_hits);
|
||||
}
|
||||
|
||||
void reachable_cache::reset_statistics() {
|
||||
m_stats.reset();
|
||||
}
|
||||
|
||||
|
||||
}
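
The CONSTRAINT_CACHE branch above grows one large disjunction of reachable cubes without ever retracting assertions: each add_disjuncted_formula call asserts (prev_connector or not new_connector or f), and later queries assume only the newest connector, which transitively forces at least one added disjunct to hold. A self-contained sketch of the same encoding using the public C++ API (z3::solver); the names here are illustrative and not part of the deleted class:

#include <iostream>
#include <string>
#include "z3++.h"

// assert (prev or !fresh or f); the returned fresh literal is the new query assumption
static z3::expr add_disjunct(z3::solver& s, z3::expr prev, z3::expr f, unsigned idx) {
    z3::context& c = s.ctx();
    z3::expr fresh = c.bool_const(("conn!" + std::to_string(idx)).c_str());
    s.add(prev || !fresh || f);
    return fresh;
}

int main() {
    z3::context c;
    z3::solver s(c);
    z3::expr x = c.int_const("x");
    z3::expr conn = c.bool_val(false);              // empty disjunction so far
    conn = add_disjunct(s, conn, x == 1, 1);        // cache "x = 1 is reachable"
    conn = add_disjunct(s, conn, x == 2, 2);        // cache "x = 2 is reachable"

    s.push();
    s.add(x == 3);                                  // is some model of x = 3 cached?
    z3::expr_vector assms(c);
    assms.push_back(conn);
    std::cout << s.check(assms) << "\n";            // unsat: x = 3 is not covered
    s.pop();
    return 0;
}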
|
||||
|
|
@ -1,66 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
reachable_cache.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Object for caching of reachable states.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-9-14.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
|
||||
#ifndef REACHABLE_CACHE_H_
|
||||
#define REACHABLE_CACHE_H_
|
||||
#include "ast/ast.h"
|
||||
#include "util/ref_vector.h"
|
||||
#include "muz/pdr/pdr_manager.h"
|
||||
#include "muz/pdr/pdr_smt_context_manager.h"
|
||||
|
||||
namespace pdr {
|
||||
class reachable_cache {
|
||||
struct stats {
|
||||
unsigned m_hits;
|
||||
unsigned m_miss;
|
||||
unsigned m_inserts;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
ast_manager & m;
|
||||
manager & m_pm;
|
||||
scoped_ptr<smt_context> m_ctx;
|
||||
ast_ref_vector m_ref_holder;
|
||||
app_ref m_disj_connector;
|
||||
obj_hashtable<expr> m_cache;
|
||||
stats m_stats;
|
||||
datalog::PDR_CACHE_MODE m_cache_mode;
|
||||
|
||||
void add_disjuncted_formula(expr * f);
|
||||
|
||||
public:
|
||||
reachable_cache(pdr::manager & pm, datalog::PDR_CACHE_MODE cm);
|
||||
|
||||
void add_init(app * f) { add_disjuncted_formula(f); }
|
||||
|
||||
/** add cube whose all models are reachable */
|
||||
void add_reachable(expr * cube);
|
||||
|
||||
/** return true if there is a model of cube which is reachable */
|
||||
bool is_reachable(expr * cube);
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
|
||||
void reset_statistics();
|
||||
};
|
||||
}
|
||||
|
||||
#endif
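
Use of the cache is a two-call protocol: add_reachable records a cube all of whose models are known reachable, and is_reachable asks whether a new cube is covered by what was recorded. A brief sketch against the declarations above; pm and the cube expressions are assumed to exist and are illustrative only:

// minimal sketch, assuming a pdr::manager pm and expr* values cube1, cube2 exist
pdr::reachable_cache cache(pm, datalog::HASH_CACHE);

cache.add_reachable(cube1);          // every model of cube1 is reachable
if (cache.is_reachable(cube2)) {     // covered by the cache (exact match in HASH_CACHE mode,
                                     // a satisfiability check in CONSTRAINT_CACHE mode)
    // skip re-deriving reachability for cube2
}

statistics st;
cache.collect_statistics(st);        // reports "cache inserts" / "cache hits" / "cache miss"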
|
||||
|
|
@ -1,167 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_smt_context_manager.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Manager of smt contexts
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-26.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include "muz/pdr/pdr_smt_context_manager.h"
|
||||
#include "ast/has_free_vars.h"
|
||||
#include "ast/ast_pp.h"
|
||||
#include "ast/ast_smt_pp.h"
|
||||
#include <sstream>
|
||||
#include "smt/params/smt_params.h"
|
||||
|
||||
namespace pdr {
|
||||
|
||||
smt_context::smt_context(smt_context_manager& p, ast_manager& m, app* pred):
|
||||
m_pred(pred, m),
|
||||
m_parent(p),
|
||||
m_in_delay_scope(false),
|
||||
m_pushed(false)
|
||||
{}
|
||||
|
||||
bool smt_context::is_aux_predicate(func_decl* p) {
|
||||
return m_parent.is_aux_predicate(p);
|
||||
}
|
||||
|
||||
smt_context::scoped::scoped(smt_context& ctx): m_ctx(ctx) {
|
||||
SASSERT(!m_ctx.m_in_delay_scope);
|
||||
SASSERT(!m_ctx.m_pushed);
|
||||
m_ctx.m_in_delay_scope = true;
|
||||
}
|
||||
|
||||
smt_context::scoped::~scoped() {
|
||||
SASSERT(m_ctx.m_in_delay_scope);
|
||||
if (m_ctx.m_pushed) {
|
||||
m_ctx.pop();
|
||||
m_ctx.m_pushed = false;
|
||||
}
|
||||
m_ctx.m_in_delay_scope = false;
|
||||
}
|
||||
|
||||
|
||||
_smt_context::_smt_context(smt::kernel & ctx, smt_context_manager& p, app* pred):
|
||||
smt_context(p, ctx.m(), pred),
|
||||
m_context(ctx)
|
||||
{}
|
||||
|
||||
void _smt_context::assert_expr(expr* e) {
|
||||
ast_manager& m = m_context.m();
|
||||
if (m.is_true(e)) {
|
||||
return;
|
||||
}
|
||||
CTRACE("pdr", has_free_vars(e), tout << mk_pp(e, m) << "\n";);
|
||||
SASSERT(!has_free_vars(e));
|
||||
if (m_in_delay_scope && !m_pushed) {
|
||||
m_context.push();
|
||||
m_pushed = true;
|
||||
}
|
||||
expr_ref fml(m);
|
||||
fml = m_pushed?e:m.mk_implies(m_pred, e);
|
||||
m_context.assert_expr(fml);
|
||||
}
|
||||
|
||||
lbool _smt_context::check(expr_ref_vector& assumptions) {
|
||||
ast_manager& m = m_pred.get_manager();
|
||||
if (!m.is_true(m_pred)) {
|
||||
assumptions.push_back(m_pred);
|
||||
}
|
||||
TRACE("pdr_check",
|
||||
{
|
||||
ast_smt_pp pp(m);
|
||||
for (unsigned i = 0; i < m_context.size(); ++i) {
|
||||
pp.add_assumption(m_context.get_formula(i));
|
||||
}
|
||||
for (unsigned i = 0; i < assumptions.size(); ++i) {
|
||||
pp.add_assumption(assumptions[i].get());
|
||||
}
|
||||
|
||||
static unsigned lemma_id = 0;
|
||||
std::ostringstream strm;
|
||||
strm << "pdr-lemma-" << lemma_id << ".smt2";
|
||||
std::ofstream out(strm.str().c_str());
|
||||
pp.display_smt2(out, m.mk_true());
|
||||
out.close();
|
||||
lemma_id++;
|
||||
tout << "pdr_check: " << strm.str() << "\n";
|
||||
});
|
||||
lbool result = m_context.check(assumptions.size(), assumptions.c_ptr());
|
||||
if (!m.is_true(m_pred)) {
|
||||
assumptions.pop_back();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void _smt_context::get_model(model_ref& model) {
|
||||
m_context.get_model(model);
|
||||
}
|
||||
|
||||
proof* _smt_context::get_proof() {
|
||||
return m_context.get_proof();
|
||||
}
|
||||
|
||||
smt_context_manager::smt_context_manager(smt_params& fp, unsigned max_num_contexts, ast_manager& m):
|
||||
m_fparams(fp),
|
||||
m(m),
|
||||
m_max_num_contexts(max_num_contexts),
|
||||
m_num_contexts(0),
|
||||
m_predicate_list(m) {
|
||||
}
|
||||
|
||||
|
||||
smt_context_manager::~smt_context_manager() {
|
||||
TRACE("pdr",tout << "\n";);
|
||||
std::for_each(m_contexts.begin(), m_contexts.end(), delete_proc<smt::kernel>());
|
||||
}
|
||||
|
||||
smt_context* smt_context_manager::mk_fresh() {
|
||||
++m_num_contexts;
|
||||
app_ref pred(m);
|
||||
smt::kernel * ctx = nullptr;
|
||||
if (m_max_num_contexts == 0) {
|
||||
m_contexts.push_back(alloc(smt::kernel, m, m_fparams));
|
||||
pred = m.mk_true();
|
||||
ctx = m_contexts[m_num_contexts-1];
|
||||
}
|
||||
else {
|
||||
if (m_contexts.size() < m_max_num_contexts) {
|
||||
m_contexts.push_back(alloc(smt::kernel, m, m_fparams));
|
||||
}
|
||||
std::stringstream name;
|
||||
name << "#context" << m_num_contexts;
|
||||
pred = m.mk_const(symbol(name.str().c_str()), m.mk_bool_sort());
|
||||
m_predicate_list.push_back(pred);
|
||||
m_predicate_set.insert(pred->get_decl());
|
||||
ctx = m_contexts[(m_num_contexts-1)%m_max_num_contexts];
|
||||
}
|
||||
return alloc(_smt_context, *ctx, *this, pred);
|
||||
}
|
||||
|
||||
void smt_context_manager::collect_statistics(statistics& st) const {
|
||||
for (unsigned i = 0; i < m_contexts.size(); ++i) {
|
||||
m_contexts[i]->collect_statistics(st);
|
||||
}
|
||||
}
|
||||
|
||||
void smt_context_manager::reset_statistics() {
|
||||
for (unsigned i = 0; i < m_contexts.size(); ++i) {
|
||||
m_contexts[i]->reset_statistics();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_smt_context_manager.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Manager of smt contexts
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-26.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef PDR_SMT_CONTEXT_MANAGER_H_
|
||||
#define PDR_SMT_CONTEXT_MANAGER_H_
|
||||
|
||||
#include "smt/smt_kernel.h"
|
||||
#include "ast/func_decl_dependencies.h"
|
||||
#include "muz/base/dl_util.h"
|
||||
|
||||
namespace pdr {
|
||||
|
||||
class smt_context_manager;
|
||||
|
||||
class smt_context {
|
||||
protected:
|
||||
app_ref m_pred;
|
||||
smt_context_manager& m_parent;
|
||||
bool m_in_delay_scope;
|
||||
bool m_pushed;
|
||||
public:
|
||||
smt_context(smt_context_manager& p, ast_manager& m, app* pred);
|
||||
virtual ~smt_context() {}
|
||||
virtual void assert_expr(expr* e) = 0;
|
||||
virtual lbool check(expr_ref_vector& assumptions) = 0;
|
||||
virtual void get_model(model_ref& model) = 0;
|
||||
virtual proof* get_proof() = 0;
|
||||
virtual unsigned get_unsat_core_size() = 0;
|
||||
virtual expr* get_unsat_core_expr(unsigned i) = 0;
|
||||
virtual void push() = 0;
|
||||
virtual void pop() = 0;
|
||||
bool is_aux_predicate(func_decl* p);
|
||||
bool is_aux_predicate(expr* p) { return is_app(p) && is_aux_predicate(to_app(p)->get_decl()); }
|
||||
class scoped {
|
||||
smt_context& m_ctx;
|
||||
public:
|
||||
scoped(smt_context& ctx);
|
||||
~scoped();
|
||||
};
|
||||
};
|
||||
|
||||
class _smt_context : public smt_context {
|
||||
smt::kernel & m_context;
|
||||
public:
|
||||
_smt_context(smt::kernel & ctx, smt_context_manager& p, app* pred);
|
||||
~_smt_context() override {}
|
||||
void assert_expr(expr* e) override;
|
||||
lbool check(expr_ref_vector& assumptions) override;
|
||||
void get_model(model_ref& model) override;
|
||||
proof* get_proof() override;
|
||||
void push() override { m_context.push(); }
|
||||
void pop() override { m_context.pop(1); }
|
||||
unsigned get_unsat_core_size() override { return m_context.get_unsat_core_size(); }
|
||||
expr* get_unsat_core_expr(unsigned i) override { return m_context.get_unsat_core_expr(i); }
|
||||
};
|
||||
|
||||
class smt_context_manager {
|
||||
smt_params& m_fparams;
|
||||
ast_manager& m;
|
||||
unsigned m_max_num_contexts;
|
||||
ptr_vector<smt::kernel> m_contexts;
|
||||
unsigned m_num_contexts;
|
||||
app_ref_vector m_predicate_list;
|
||||
func_decl_set m_predicate_set;
|
||||
public:
|
||||
smt_context_manager(smt_params& fp, unsigned max_num_contexts, ast_manager& m);
|
||||
~smt_context_manager();
|
||||
smt_context* mk_fresh();
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
bool is_aux_predicate(func_decl* p) const { return m_predicate_set.contains(p); }
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
#endif
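
smt_context_manager hands out logical contexts that share a bounded pool of smt::kernel objects; isolation comes from the per-context activation predicate ("#context&lt;i&gt;"): assertions are guarded by an implication from the predicate (unless a local push is active), and checks pass the predicate as an assumption. A standalone sketch of that guarding idea with the public C++ API; the names used are illustrative only:

#include <iostream>
#include "z3++.h"

int main() {
    z3::context c;
    z3::solver shared(c);                        // one solver shared by two logical contexts
    z3::expr p1 = c.bool_const("#context1");     // activation predicate of context 1
    z3::expr p2 = c.bool_const("#context2");     // activation predicate of context 2
    z3::expr x  = c.int_const("x");

    shared.add(z3::implies(p1, x > 10));         // fact owned by context 1
    shared.add(z3::implies(p2, x < 0));          // fact owned by context 2

    z3::expr_vector assms(c);
    assms.push_back(p1);
    std::cout << shared.check(assms) << "\n";    // sat: only context 1 is active

    assms.push_back(p2);
    std::cout << shared.check(assms) << "\n";    // unsat: both contexts active together
    return 0;
}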
|
||||
|
|
@ -1,601 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
sym_mux.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
A symbol multiplexer that helps with having multiple versions of each of a set of symbols.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-9-8.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include <sstream>
|
||||
#include "ast/ast_pp.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "model/model.h"
|
||||
#include "ast/rewriter/rewriter.h"
|
||||
#include "ast/rewriter/rewriter_def.h"
|
||||
#include "muz/pdr/pdr_util.h"
|
||||
#include "muz/pdr/pdr_sym_mux.h"
|
||||
|
||||
using namespace pdr;
|
||||
|
||||
sym_mux::sym_mux(ast_manager & m)
|
||||
: m(m), m_ref_holder(m),
|
||||
m_next_sym_suffix_idx(0) {
|
||||
m_suffixes.push_back("_n");
|
||||
size_t suf_sz = m_suffixes.size();
|
||||
for(unsigned i = 0; i < suf_sz; ++i) {
|
||||
symbol suff_sym = symbol(m_suffixes[i].c_str());
|
||||
m_used_suffixes.insert(suff_sym);
|
||||
}
|
||||
}
|
||||
|
||||
std::string sym_mux::get_suffix(unsigned i) {
|
||||
while(m_suffixes.size() <= i) {
|
||||
std::string new_suffix;
|
||||
symbol new_suffix_sym;
|
||||
do {
|
||||
std::stringstream stm;
|
||||
stm<<'_'<<m_next_sym_suffix_idx;
|
||||
m_next_sym_suffix_idx++;
|
||||
new_suffix = stm.str();
|
||||
new_suffix_sym = symbol(new_suffix.c_str());
|
||||
}
|
||||
while (m_used_suffixes.contains(new_suffix_sym));
|
||||
m_used_suffixes.insert(new_suffix_sym);
|
||||
m_suffixes.push_back(new_suffix);
|
||||
}
|
||||
std::string result = m_suffixes[i];
|
||||
return result;
|
||||
}
|
||||
|
||||
void sym_mux::create_tuple(func_decl* prefix, unsigned arity, sort * const * domain, sort * range,
|
||||
unsigned tuple_length, decl_vector & tuple)
|
||||
{
|
||||
SASSERT(tuple_length>0);
|
||||
while(tuple.size()<tuple_length) {
|
||||
tuple.push_back(0);
|
||||
}
|
||||
SASSERT(tuple.size()==tuple_length);
|
||||
std::string pre = prefix->get_name().str();
|
||||
for(unsigned i=0; i<tuple_length; i++) {
|
||||
|
||||
if (tuple[i] != 0) {
|
||||
SASSERT(tuple[i]->get_arity()==arity);
|
||||
SASSERT(tuple[i]->get_range()==range);
|
||||
//domain should match as well, but we won't bother checking an array equality
|
||||
}
|
||||
else {
|
||||
std::string name = pre+get_suffix(i);
|
||||
tuple[i] = m.mk_func_decl(symbol(name.c_str()), arity, domain, range);
|
||||
}
|
||||
m_ref_holder.push_back(tuple[i]);
|
||||
m_sym2idx.insert(tuple[i], i);
|
||||
m_sym2prim.insert(tuple[i], tuple[0]);
|
||||
}
|
||||
|
||||
m_prim2all.insert(tuple[0], tuple);
|
||||
m_prefix2prim.insert(prefix, tuple[0]);
|
||||
m_prim2prefix.insert(tuple[0], prefix);
|
||||
m_prim_preds.push_back(tuple[0]);
|
||||
m_ref_holder.push_back(prefix);
|
||||
}
|
||||
|
||||
void sym_mux::ensure_tuple_size(func_decl * prim, unsigned sz) {
|
||||
SASSERT(m_prim2all.contains(prim));
|
||||
decl_vector& tuple = m_prim2all.find_core(prim)->get_data().m_value;
|
||||
SASSERT(tuple[0]==prim);
|
||||
|
||||
if(sz <= tuple.size()) { return; }
|
||||
|
||||
func_decl * prefix;
|
||||
TRUSTME(m_prim2prefix.find(prim, prefix));
|
||||
std::string prefix_name = prefix->get_name().bare_str();
|
||||
for(unsigned i = tuple.size(); i < sz; ++i) {
|
||||
std::string name = prefix_name + get_suffix(i);
|
||||
func_decl * new_sym = m.mk_func_decl(symbol(name.c_str()), prefix->get_arity(),
|
||||
prefix->get_domain(), prefix->get_range());
|
||||
|
||||
tuple.push_back(new_sym);
|
||||
m_ref_holder.push_back(new_sym);
|
||||
m_sym2idx.insert(new_sym, i);
|
||||
m_sym2prim.insert(new_sym, prim);
|
||||
}
|
||||
}
|
||||
|
||||
func_decl * sym_mux::conv(func_decl * sym, unsigned src_idx, unsigned tgt_idx)
|
||||
{
|
||||
if(src_idx==tgt_idx) { return sym; }
|
||||
func_decl * prim = (src_idx==0) ? sym : get_primary(sym);
|
||||
if(tgt_idx>src_idx) {
|
||||
ensure_tuple_size(prim, tgt_idx+1);
|
||||
}
|
||||
decl_vector & sym_vect = m_prim2all.find_core(prim)->get_data().m_value;
|
||||
SASSERT(sym_vect[src_idx]==sym);
|
||||
return sym_vect[tgt_idx];
|
||||
}
|
||||
|
||||
|
||||
func_decl * sym_mux::get_or_create_symbol_by_prefix(func_decl* prefix, unsigned idx,
|
||||
unsigned arity, sort * const * domain, sort * range)
|
||||
{
|
||||
func_decl * prim = try_get_primary_by_prefix(prefix);
|
||||
if(prim) {
|
||||
SASSERT(prim->get_arity()==arity);
|
||||
SASSERT(prim->get_range()==range);
|
||||
//domain should match as well, but we won't bother checking an array equality
|
||||
|
||||
return conv(prim, 0, idx);
|
||||
}
|
||||
|
||||
decl_vector syms;
|
||||
create_tuple(prefix, arity, domain, range, idx+1, syms);
|
||||
return syms[idx];
|
||||
}
|
||||
|
||||
bool sym_mux::is_muxed_lit(expr * e, unsigned idx) const
|
||||
{
|
||||
if(!is_app(e)) { return false; }
|
||||
app * a = to_app(e);
|
||||
if(m.is_not(a) && is_app(a->get_arg(0))) {
|
||||
a = to_app(a->get_arg(0));
|
||||
}
|
||||
return is_muxed(a->get_decl());
|
||||
}
|
||||
|
||||
|
||||
struct sym_mux::formula_checker
|
||||
{
|
||||
formula_checker(const sym_mux & parent, bool all, unsigned idx) :
|
||||
m_parent(parent), m_all(all), m_idx(idx),
|
||||
m_found_what_needed(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if(m_found_what_needed || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned sym_idx;
|
||||
if(!m_parent.try_get_index(sym, sym_idx)) { return; }
|
||||
|
||||
bool have_idx = sym_idx==m_idx;
|
||||
|
||||
if( m_all ? (!have_idx) : have_idx ) {
|
||||
m_found_what_needed = true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
bool all_have_idx() const
|
||||
{
|
||||
SASSERT(m_all); //we were looking for the queried property
|
||||
return !m_found_what_needed;
|
||||
}
|
||||
|
||||
bool some_with_idx() const
|
||||
{
|
||||
SASSERT(!m_all); //we were looking for the queried property
|
||||
return m_found_what_needed;
|
||||
}
|
||||
|
||||
private:
|
||||
const sym_mux & m_parent;
|
||||
bool m_all;
|
||||
unsigned m_idx;
|
||||
|
||||
/**
|
||||
If we check whether all muxed symbols are of a given index, we look for
|
||||
counter-examples; if we check whether the formula contains a muxed symbol of an index,
|
||||
we look for a symbol of index m_idx.
|
||||
*/
|
||||
bool m_found_what_needed;
|
||||
};
|
||||
|
||||
bool sym_mux::contains(expr * e, unsigned idx) const
|
||||
{
|
||||
formula_checker chck(*this, false, idx);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return chck.some_with_idx();
|
||||
}
|
||||
|
||||
bool sym_mux::is_homogenous_formula(expr * e, unsigned idx) const
|
||||
{
|
||||
formula_checker chck(*this, true, idx);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return chck.all_have_idx();
|
||||
}
|
||||
|
||||
bool sym_mux::is_homogenous(const expr_ref_vector & vect, unsigned idx) const
|
||||
{
|
||||
expr * const * begin = vect.c_ptr();
|
||||
expr * const * end = begin + vect.size();
|
||||
for(expr * const * it = begin; it!=end; it++) {
|
||||
if(!is_homogenous_formula(*it, idx)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
class sym_mux::index_collector {
|
||||
sym_mux const& m_parent;
|
||||
svector<bool> m_indices;
|
||||
public:
|
||||
index_collector(sym_mux const& s):
|
||||
m_parent(s) {}
|
||||
|
||||
void operator()(expr * e) {
|
||||
if (is_app(e)) {
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned idx;
|
||||
if (m_parent.try_get_index(sym, idx)) {
|
||||
SASSERT(idx > 0);
|
||||
--idx;
|
||||
if (m_indices.size() <= idx) {
|
||||
m_indices.resize(idx+1, false);
|
||||
}
|
||||
m_indices[idx] = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void extract(unsigned_vector& indices) {
|
||||
for (unsigned i = 0; i < m_indices.size(); ++i) {
|
||||
if (m_indices[i]) {
|
||||
indices.push_back(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
void sym_mux::collect_indices(expr* e, unsigned_vector& indices) const {
|
||||
indices.reset();
|
||||
index_collector collector(*this);
|
||||
for_each_expr(collector, m_visited, e);
|
||||
m_visited.reset();
|
||||
collector.extract(indices);
|
||||
}
|
||||
|
||||
class sym_mux::variable_collector {
|
||||
sym_mux const& m_parent;
|
||||
vector<ptr_vector<app> >& m_vars;
|
||||
public:
|
||||
variable_collector(sym_mux const& s, vector<ptr_vector<app> >& vars):
|
||||
m_parent(s), m_vars(vars) {}
|
||||
|
||||
void operator()(expr * e) {
|
||||
if (is_app(e)) {
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned idx;
|
||||
if (m_parent.try_get_index(sym, idx)) {
|
||||
SASSERT(idx > 0);
|
||||
--idx;
|
||||
if (m_vars.size() <= idx) {
|
||||
m_vars.resize(idx+1, ptr_vector<app>());
|
||||
}
|
||||
m_vars[idx].push_back(to_app(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::collect_variables(expr* e, vector<ptr_vector<app> >& vars) const {
|
||||
vars.reset();
|
||||
variable_collector collector(*this, vars);
|
||||
for_each_expr(collector, m_visited, e);
|
||||
m_visited.reset();
|
||||
}
|
||||
|
||||
class sym_mux::hmg_checker {
|
||||
const sym_mux & m_parent;
|
||||
|
||||
bool m_found_idx;
|
||||
unsigned m_idx;
|
||||
bool m_multiple_indexes;
|
||||
|
||||
public:
|
||||
hmg_checker(const sym_mux & parent) :
|
||||
m_parent(parent), m_found_idx(false), m_multiple_indexes(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if(m_multiple_indexes || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned sym_idx;
|
||||
if(!m_parent.try_get_index(sym, sym_idx)) { return; }
|
||||
|
||||
if(!m_found_idx) {
|
||||
m_found_idx = true;
|
||||
m_idx = sym_idx;
|
||||
return;
|
||||
}
|
||||
if(m_idx==sym_idx) { return; }
|
||||
m_multiple_indexes = true;
|
||||
}
|
||||
|
||||
bool has_multiple_indexes() const
|
||||
{
|
||||
return m_multiple_indexes;
|
||||
}
|
||||
};
|
||||
|
||||
bool sym_mux::is_homogenous_formula(expr * e) const {
|
||||
hmg_checker chck(*this);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return !chck.has_multiple_indexes();
|
||||
}
|
||||
|
||||
|
||||
struct sym_mux::conv_rewriter_cfg : public default_rewriter_cfg
|
||||
{
|
||||
private:
|
||||
ast_manager & m;
|
||||
sym_mux & m_parent;
|
||||
unsigned m_from_idx;
|
||||
unsigned m_to_idx;
|
||||
bool m_homogenous;
|
||||
public:
|
||||
conv_rewriter_cfg(sym_mux & parent, unsigned from_idx, unsigned to_idx, bool homogenous)
|
||||
: m(parent.get_manager()),
|
||||
m_parent(parent),
|
||||
m_from_idx(from_idx),
|
||||
m_to_idx(to_idx),
|
||||
m_homogenous(homogenous) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr) {
|
||||
if(!is_app(s)) { return false; }
|
||||
app * a = to_app(s);
|
||||
func_decl * sym = a->get_decl();
|
||||
if(!m_parent.has_index(sym, m_from_idx)) {
|
||||
(void) m_homogenous;
|
||||
SASSERT(!m_homogenous || !m_parent.is_muxed(sym));
|
||||
return false;
|
||||
}
|
||||
func_decl * tgt = m_parent.conv(sym, m_from_idx, m_to_idx);
|
||||
|
||||
t = m.mk_app(tgt, a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::conv_formula(expr * f, unsigned src_idx, unsigned tgt_idx, expr_ref & res, bool homogenous)
|
||||
{
|
||||
if(src_idx==tgt_idx) {
|
||||
res = f;
|
||||
return;
|
||||
}
|
||||
conv_rewriter_cfg r_cfg(*this, src_idx, tgt_idx, homogenous);
|
||||
rewriter_tpl<conv_rewriter_cfg> rwr(m, false, r_cfg);
|
||||
rwr(f, res);
|
||||
}
|
||||
|
||||
struct sym_mux::shifting_rewriter_cfg : public default_rewriter_cfg
|
||||
{
|
||||
private:
|
||||
ast_manager & m;
|
||||
sym_mux & m_parent;
|
||||
int m_shift;
|
||||
public:
|
||||
shifting_rewriter_cfg(sym_mux & parent, int shift)
|
||||
: m(parent.get_manager()),
|
||||
m_parent(parent),
|
||||
m_shift(shift) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr) {
|
||||
if(!is_app(s)) { return false; }
|
||||
app * a = to_app(s);
|
||||
func_decl * sym = a->get_decl();
|
||||
|
||||
unsigned idx;
|
||||
if(!m_parent.try_get_index(sym, idx)) {
|
||||
return false;
|
||||
}
|
||||
SASSERT(static_cast<int>(idx)+m_shift>=0);
|
||||
func_decl * tgt = m_parent.conv(sym, idx, idx+m_shift);
|
||||
t = m.mk_app(tgt, a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::shift_formula(expr * f, int dist, expr_ref & res)
|
||||
{
|
||||
if(dist==0) {
|
||||
res = f;
|
||||
return;
|
||||
}
|
||||
shifting_rewriter_cfg r_cfg(*this, dist);
|
||||
rewriter_tpl<shifting_rewriter_cfg> rwr(m, false, r_cfg);
|
||||
rwr(f, res);
|
||||
}
|
||||
|
||||
void sym_mux::conv_formula_vector(const expr_ref_vector & vect, unsigned src_idx, unsigned tgt_idx,
|
||||
expr_ref_vector & res)
|
||||
{
|
||||
res.reset();
|
||||
expr * const * begin = vect.c_ptr();
|
||||
expr * const * end = begin + vect.size();
|
||||
for(expr * const * it = begin; it!=end; it++) {
|
||||
expr_ref converted(m);
|
||||
conv_formula(*it, src_idx, tgt_idx, converted);
|
||||
res.push_back(converted);
|
||||
}
|
||||
}
|
||||
|
||||
void sym_mux::filter_idx(expr_ref_vector & vect, unsigned idx) const {
|
||||
unsigned i = 0;
|
||||
while (i < vect.size()) {
|
||||
expr* e = vect[i].get();
|
||||
if (contains(e, idx) && is_homogenous_formula(e, idx)) {
|
||||
i++;
|
||||
}
|
||||
else {
|
||||
// we don't allow mixing states inside vector elements
|
||||
SASSERT(!contains(e, idx));
|
||||
vect[i] = vect.back();
|
||||
vect.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void sym_mux::partition_o_idx(
|
||||
expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other, unsigned idx) const {
|
||||
|
||||
for (unsigned i = 0; i < lits.size(); ++i) {
|
||||
if (contains(lits[i], idx) && is_homogenous_formula(lits[i], idx)) {
|
||||
o_lits.push_back(lits[i]);
|
||||
}
|
||||
else {
|
||||
other.push_back(lits[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
class sym_mux::nonmodel_sym_checker {
|
||||
const sym_mux & m_parent;
|
||||
|
||||
bool m_found;
|
||||
public:
|
||||
nonmodel_sym_checker(const sym_mux & parent) :
|
||||
m_parent(parent), m_found(false) {
|
||||
}
|
||||
|
||||
void operator()(expr * e) {
|
||||
if(m_found || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
|
||||
if(m_parent.is_non_model_sym(sym)) {
|
||||
m_found = true;
|
||||
}
|
||||
}
|
||||
|
||||
bool found() const {
|
||||
return m_found;
|
||||
}
|
||||
};
|
||||
|
||||
bool sym_mux::has_nonmodel_symbol(expr * e) const {
|
||||
nonmodel_sym_checker chck(*this);
|
||||
for_each_expr(chck, e);
|
||||
return chck.found();
|
||||
}
|
||||
|
||||
void sym_mux::filter_non_model_lits(expr_ref_vector & vect) const {
|
||||
unsigned i = 0;
|
||||
while (i < vect.size()) {
|
||||
if (!has_nonmodel_symbol(vect[i].get())) {
|
||||
i++;
|
||||
}
|
||||
else {
|
||||
vect[i] = vect.back();
|
||||
vect.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class sym_mux::decl_idx_comparator
|
||||
{
|
||||
const sym_mux & m_parent;
|
||||
public:
|
||||
decl_idx_comparator(const sym_mux & parent)
|
||||
: m_parent(parent)
|
||||
{ }
|
||||
|
||||
bool operator()(func_decl * sym1, func_decl * sym2)
|
||||
{
|
||||
unsigned idx1, idx2;
|
||||
if (!m_parent.try_get_index(sym1, idx1)) { idx1 = UINT_MAX; }
|
||||
if (!m_parent.try_get_index(sym2, idx2)) { idx2 = UINT_MAX; }
|
||||
|
||||
if (idx1 != idx2) { return idx1<idx2; }
|
||||
return lt(sym1->get_name(), sym2->get_name());
|
||||
}
|
||||
};
|
||||
|
||||
std::string sym_mux::pp_model(const model_core & mdl) const {
|
||||
decl_vector consts;
|
||||
unsigned sz = mdl.get_num_constants();
|
||||
for (unsigned i = 0; i < sz; i++) {
|
||||
func_decl * d = mdl.get_constant(i);
|
||||
consts.push_back(d);
|
||||
}
|
||||
|
||||
std::sort(consts.begin(), consts.end(), decl_idx_comparator(*this));
|
||||
|
||||
std::stringstream res;
|
||||
|
||||
decl_vector::iterator end = consts.end();
|
||||
for (decl_vector::iterator it = consts.begin(); it!=end; it++) {
|
||||
func_decl * d = *it;
|
||||
std::string name = d->get_name().str();
|
||||
const char * arrow = " -> ";
|
||||
res << name << arrow;
|
||||
unsigned indent = static_cast<unsigned>(name.length() + strlen(arrow));
|
||||
res << mk_pp(mdl.get_const_interp(d), m, indent) << "\n";
|
||||
|
||||
if (it+1 != end) {
|
||||
unsigned idx1, idx2;
|
||||
if (!try_get_index(*it, idx1)) { idx1 = UINT_MAX; }
|
||||
if (!try_get_index(*(it+1), idx2)) { idx2 = UINT_MAX; }
|
||||
if (idx1 != idx2) { res << "\n"; }
|
||||
}
|
||||
}
|
||||
return res.str();
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
|
||||
class sym_mux::index_renamer_cfg : public default_rewriter_cfg{
|
||||
const sym_mux & m_parent;
|
||||
unsigned m_idx;
|
||||
|
||||
public:
|
||||
index_renamer_cfg(const sym_mux & p, unsigned idx) : m_parent(p), m_idx(idx) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr) {
|
||||
if (!is_app(s)) return false;
|
||||
app * a = to_app(s);
|
||||
if (a->get_family_id() != null_family_id) {
|
||||
return false;
|
||||
}
|
||||
func_decl * sym = a->get_decl();
|
||||
unsigned idx;
|
||||
if(!m_parent.try_get_index(sym, idx)) {
|
||||
return false;
|
||||
}
|
||||
if (m_idx == idx) {
|
||||
return false;
|
||||
}
|
||||
ast_manager& m = m_parent.get_manager();
|
||||
symbol name = symbol((sym->get_name().str() + "!").c_str());
|
||||
func_decl * tgt = m.mk_func_decl(name, sym->get_arity(), sym->get_domain(), sym->get_range());
|
||||
t = m.mk_app(tgt, a->get_num_args(), a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
@ -1,247 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
sym_mux.h
|
||||
|
||||
Abstract:
|
||||
|
||||
A symbol multiplexer that helps with having multiple versions of each of a set of symbols.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-9-8.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef SYM_MUX_H_
|
||||
#define SYM_MUX_H_
|
||||
|
||||
#include "ast/ast.h"
|
||||
#include "util/map.h"
|
||||
#include "util/vector.h"
|
||||
#include <vector>
|
||||
|
||||
class model_core;
|
||||
|
||||
namespace pdr {
|
||||
class sym_mux
|
||||
{
|
||||
public:
|
||||
typedef ptr_vector<app> app_vector;
|
||||
typedef ptr_vector<func_decl> decl_vector;
|
||||
private:
|
||||
typedef obj_map<func_decl,unsigned> sym2u;
|
||||
typedef obj_map<func_decl, decl_vector> sym2dv;
|
||||
typedef obj_map<func_decl,func_decl *> sym2sym;
|
||||
typedef obj_map<func_decl, func_decl *> sym2pred;
|
||||
typedef hashtable<symbol, symbol_hash_proc, symbol_eq_proc> symbols;
|
||||
|
||||
ast_manager & m;
|
||||
mutable ast_ref_vector m_ref_holder;
|
||||
mutable expr_mark m_visited;
|
||||
|
||||
mutable unsigned m_next_sym_suffix_idx;
|
||||
mutable symbols m_used_suffixes;
|
||||
/** Here we have default suffixes for each of the variants */
|
||||
std::vector<std::string> m_suffixes;
|
||||
|
||||
|
||||
/**
|
||||
Primary symbol is the 0-th variant. This member maps from primary symbol
|
||||
to vector of all its variants (including the primary variant).
|
||||
*/
|
||||
sym2dv m_prim2all;
|
||||
|
||||
/**
|
||||
For each symbol contains its variant index
|
||||
*/
|
||||
mutable sym2u m_sym2idx;
|
||||
/**
|
||||
For each symbol contains its primary variant
|
||||
*/
|
||||
mutable sym2sym m_sym2prim;
|
||||
|
||||
/**
|
||||
Maps prefixes passed to the create_tuple to
|
||||
the primary symbol created from it.
|
||||
*/
|
||||
sym2pred m_prefix2prim;
|
||||
|
||||
/**
|
||||
Maps primary symbols to prefixes that were used to create them.
|
||||
*/
|
||||
sym2sym m_prim2prefix;
|
||||
|
||||
decl_vector m_prim_preds;
|
||||
|
||||
obj_hashtable<func_decl> m_non_model_syms;
|
||||
|
||||
struct formula_checker;
|
||||
struct conv_rewriter_cfg;
|
||||
struct shifting_rewriter_cfg;
|
||||
class decl_idx_comparator;
|
||||
class hmg_checker;
|
||||
class nonmodel_sym_checker;
|
||||
class index_renamer_cfg;
|
||||
class index_collector;
|
||||
class variable_collector;
|
||||
|
||||
std::string get_suffix(unsigned i);
|
||||
void ensure_tuple_size(func_decl * prim, unsigned sz);
|
||||
|
||||
public:
|
||||
sym_mux(ast_manager & m);
|
||||
|
||||
ast_manager & get_manager() const { return m; }
|
||||
|
||||
bool is_muxed(func_decl * sym) const { return m_sym2idx.contains(sym); }
|
||||
|
||||
bool try_get_index(func_decl * sym, unsigned & idx) const {
|
||||
return m_sym2idx.find(sym,idx);
|
||||
}
|
||||
|
||||
bool has_index(func_decl * sym, unsigned idx) const {
|
||||
unsigned actual_idx;
|
||||
return try_get_index(sym, actual_idx) && idx==actual_idx;
|
||||
}
|
||||
|
||||
/** Return primary symbol. sym must be muxed. */
|
||||
func_decl * get_primary(func_decl * sym) const {
|
||||
func_decl * prim;
|
||||
TRUSTME(m_sym2prim.find(sym, prim));
|
||||
return prim;
|
||||
}
|
||||
|
||||
/**
|
||||
Return primary symbol created from prefix, or 0 if the prefix was never used.
|
||||
*/
|
||||
func_decl * try_get_primary_by_prefix(func_decl* prefix) const {
|
||||
func_decl * res;
|
||||
if(!m_prefix2prim.find(prefix, res)) {
|
||||
return nullptr;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
Return symbol created from prefix, or 0 if the prefix was never used.
|
||||
*/
|
||||
func_decl * try_get_by_prefix(func_decl* prefix, unsigned idx) {
|
||||
func_decl * prim = try_get_primary_by_prefix(prefix);
|
||||
if(!prim) {
|
||||
return nullptr;
|
||||
}
|
||||
return conv(prim, 0, idx);
|
||||
}
|
||||
|
||||
/**
|
||||
Marks symbol as non-model which means it will not appear in models collected by
|
||||
get_muxed_cube_from_model function.
|
||||
This is to take care of auxiliary symbols introduced by the disjunction relations
|
||||
to relativize lemmas coming from disjuncts.
|
||||
*/
|
||||
void mark_as_non_model(func_decl * sym) {
|
||||
SASSERT(is_muxed(sym));
|
||||
m_non_model_syms.insert(get_primary(sym));
|
||||
}
|
||||
|
||||
func_decl * get_or_create_symbol_by_prefix(func_decl* prefix, unsigned idx,
|
||||
unsigned arity, sort * const * domain, sort * range);
|
||||
|
||||
|
||||
|
||||
bool is_muxed_lit(expr * e, unsigned idx) const;
|
||||
|
||||
bool is_non_model_sym(func_decl * s) const {
|
||||
return is_muxed(s) && m_non_model_syms.contains(get_primary(s));
|
||||
}
|
||||
|
||||
/**
|
||||
Create a multiplexed tuple of propositional constants.
|
||||
Symbols may be supplied in the tuple vector;
|
||||
those beyond the size of the vector and those with corresponding positions
|
||||
assigned to zero will be created using the prefix.
|
||||
Tuple length must be at least one.
|
||||
*/
|
||||
void create_tuple(func_decl* prefix, unsigned arity, sort * const * domain, sort * range,
|
||||
unsigned tuple_length, decl_vector & tuple);
|
||||
|
||||
/**
|
||||
Return true if the only multiplexed symbols which e contains are of index idx.
|
||||
*/
|
||||
bool is_homogenous_formula(expr * e, unsigned idx) const;
|
||||
bool is_homogenous(const expr_ref_vector & vect, unsigned idx) const;
|
||||
|
||||
/**
|
||||
Return true if all multiplexed symbols which e contains are of one index.
|
||||
*/
|
||||
bool is_homogenous_formula(expr * e) const;
|
||||
|
||||
/**
|
||||
Return true if expression e contains a muxed symbol of index idx.
|
||||
*/
|
||||
bool contains(expr * e, unsigned idx) const;
|
||||
|
||||
/**
|
||||
Collect indices used in expression.
|
||||
*/
|
||||
void collect_indices(expr* e, unsigned_vector& indices) const;
|
||||
|
||||
/**
|
||||
Collect used variables of each index.
|
||||
*/
|
||||
void collect_variables(expr* e, vector<ptr_vector<app> >& vars) const;
|
||||
|
||||
/**
|
||||
Convert symbol sym which has to be of src_idx variant into variant tgt_idx.
|
||||
*/
|
||||
func_decl * conv(func_decl * sym, unsigned src_idx, unsigned tgt_idx);
|
||||
|
||||
|
||||
/**
|
||||
Convert src_idx symbols in formula f variant into tgt_idx.
|
||||
If homogenous is true, formula cannot contain symbols of other variants.
|
||||
*/
|
||||
void conv_formula(expr * f, unsigned src_idx, unsigned tgt_idx, expr_ref & res, bool homogenous=true);
|
||||
void conv_formula_vector(const expr_ref_vector & vect, unsigned src_idx, unsigned tgt_idx,
|
||||
expr_ref_vector & res);
|
||||
|
||||
/**
|
||||
Shifts the muxed symbols in f by dist. Dist can be negative, but it should never shift
|
||||
symbol index to a negative value.
|
||||
*/
|
||||
void shift_formula(expr * f, int dist, expr_ref & res);
|
||||
|
||||
/**
|
||||
Remove from vect literals (atoms or negations of atoms)
|
||||
that contain multiplexed symbols with indexes other than idx.
|
||||
|
||||
Each of the literals can contain only symbols multiplexed with one index
|
||||
(this trivially holds if the literals are propositional).
|
||||
|
||||
Order of elements in vect may be modified by this function
|
||||
*/
|
||||
void filter_idx(expr_ref_vector & vect, unsigned idx) const;
|
||||
|
||||
/**
|
||||
Partition literals into o_literals and others.
|
||||
*/
|
||||
void partition_o_idx(expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other, unsigned idx) const;
|
||||
|
||||
bool has_nonmodel_symbol(expr * e) const;
|
||||
void filter_non_model_lits(expr_ref_vector & vect) const;
|
||||
|
||||
func_decl * const * begin_prim_preds() const { return m_prim_preds.begin(); }
|
||||
func_decl * const * end_prim_preds() const { return m_prim_preds.end(); }
|
||||
|
||||
std::string pp_model(const model_core & mdl) const;
|
||||
};
|
||||
}
|
||||
|
||||
#endif
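
The typical flow of sym_mux is: create_tuple registers a predicate and its indexed variants (index 0 is the primary, current-state copy; index 1 gets the "_n" suffix), and conv_formula rewrites a formula from one variant to another. A minimal sketch against the declarations above; the ast_manager m, sort int_s, declaration inv_decl and constant x are assumed to exist and are illustrative only:

// minimal sketch, assuming ast_manager m, sort* int_s, a Bool-valued unary
// func_decl* inv_decl, and an expr* constant x of sort int_s already exist
pdr::sym_mux mux(m);

sort * domain[1] = { int_s };
pdr::sym_mux::decl_vector tuple;
mux.create_tuple(inv_decl, 1, domain, m.mk_bool_sort(), 2, tuple);
// tuple[0] is the primary (index 0) variant, tuple[1] the "_n" (index 1) variant

expr_ref cur(m.mk_app(tuple[0], x), m);     // Inv(x) over current-state symbols
expr_ref nxt(m);
mux.conv_formula(cur, 0, 1, nxt);           // rename to the index-1 variant: Inv_n(x)
SASSERT(mux.is_homogenous_formula(nxt, 1));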
|
||||
|
|
@ -1,508 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_util.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Utility functions for PDR.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-19.
|
||||
|
||||
Revision History:
|
||||
|
||||
|
||||
Notes:
|
||||
|
||||
|
||||
--*/
|
||||
|
||||
#include <sstream>
|
||||
#include "util/util.h"
|
||||
#include "util/ref_vector.h"
|
||||
#include "ast/array_decl_plugin.h"
|
||||
#include "ast/ast_pp.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "ast/scoped_proof.h"
|
||||
#include "ast/arith_decl_plugin.h"
|
||||
#include "ast/rewriter/expr_replacer.h"
|
||||
#include "ast/rewriter/bool_rewriter.h"
|
||||
#include "ast/rewriter/poly_rewriter.h"
|
||||
#include "ast/rewriter/poly_rewriter_def.h"
|
||||
#include "ast/rewriter/arith_rewriter.h"
|
||||
#include "ast/rewriter/rewriter.h"
|
||||
#include "ast/rewriter/rewriter_def.h"
|
||||
#include "smt/params/smt_params.h"
|
||||
#include "model/model.h"
|
||||
#include "muz/base/dl_util.h"
|
||||
#include "muz/pdr/pdr_manager.h"
|
||||
#include "muz/pdr/pdr_util.h"
|
||||
#include "model/model_smt2_pp.h"
|
||||
|
||||
|
||||
|
||||
namespace pdr {
|
||||
|
||||
unsigned ceil_log2(unsigned u) {
|
||||
if (u == 0) { return 0; }
|
||||
unsigned pow2 = next_power_of_two(u);
|
||||
return get_num_1bits(pow2-1);
|
||||
}
|
||||
|
||||
std::string pp_cube(const ptr_vector<expr>& model, ast_manager& m) {
|
||||
return pp_cube(model.size(), model.c_ptr(), m);
|
||||
}
|
||||
|
||||
std::string pp_cube(const expr_ref_vector& model, ast_manager& m) {
|
||||
return pp_cube(model.size(), model.c_ptr(), m);
|
||||
}
|
||||
|
||||
std::string pp_cube(const app_ref_vector& model, ast_manager& m) {
|
||||
return pp_cube(model.size(), model.c_ptr(), m);
|
||||
}
|
||||
|
||||
std::string pp_cube(const app_vector& model, ast_manager& m) {
|
||||
return pp_cube(model.size(), model.c_ptr(), m);
|
||||
}
|
||||
|
||||
std::string pp_cube(unsigned sz, app * const * lits, ast_manager& m) {
|
||||
return pp_cube(sz, (expr * const *)(lits), m);
|
||||
}
|
||||
|
||||
std::string pp_cube(unsigned sz, expr * const * lits, ast_manager& m) {
|
||||
std::stringstream res;
|
||||
res << "(";
|
||||
expr * const * end = lits+sz;
|
||||
for (expr * const * it = lits; it!=end; it++) {
|
||||
res << mk_pp(*it, m);
|
||||
if (it+1!=end) {
|
||||
res << ", ";
|
||||
}
|
||||
}
|
||||
res << ")";
|
||||
return res.str();
|
||||
}
|
||||
|
||||
void reduce_disequalities(model& model, unsigned threshold, expr_ref& fml) {
|
||||
ast_manager& m = fml.get_manager();
|
||||
expr_ref_vector conjs(m);
|
||||
flatten_and(fml, conjs);
|
||||
obj_map<expr, unsigned> diseqs;
|
||||
expr* n, *lhs, *rhs;
|
||||
for (unsigned i = 0; i < conjs.size(); ++i) {
|
||||
if (m.is_not(conjs[i].get(), n) &&
|
||||
m.is_eq(n, lhs, rhs)) {
|
||||
if (!m.is_value(rhs)) {
|
||||
std::swap(lhs, rhs);
|
||||
}
|
||||
if (!m.is_value(rhs)) {
|
||||
continue;
|
||||
}
|
||||
diseqs.insert_if_not_there2(lhs, 0)->get_data().m_value++;
|
||||
}
|
||||
}
|
||||
expr_substitution sub(m);
|
||||
|
||||
unsigned orig_size = conjs.size();
|
||||
unsigned num_deleted = 0;
|
||||
expr_ref val(m), tmp(m);
|
||||
proof_ref pr(m);
|
||||
pr = m.mk_asserted(m.mk_true());
|
||||
obj_map<expr, unsigned>::iterator it = diseqs.begin();
|
||||
obj_map<expr, unsigned>::iterator end = diseqs.end();
|
||||
for (; it != end; ++it) {
|
||||
if (it->m_value >= threshold) {
|
||||
model.eval(it->m_key, val);
|
||||
sub.insert(it->m_key, val, pr);
|
||||
conjs.push_back(m.mk_eq(it->m_key, val));
|
||||
num_deleted += it->m_value;
|
||||
}
|
||||
}
|
||||
if (orig_size < conjs.size()) {
|
||||
scoped_ptr<expr_replacer> rep = mk_expr_simp_replacer(m);
|
||||
rep->set_substitution(&sub);
|
||||
for (unsigned i = 0; i < orig_size; ++i) {
|
||||
tmp = conjs[i].get();
|
||||
(*rep)(tmp);
|
||||
if (m.is_true(tmp)) {
|
||||
conjs[i] = conjs.back();
|
||||
SASSERT(orig_size <= conjs.size());
|
||||
conjs.pop_back();
|
||||
SASSERT(orig_size <= 1 + conjs.size());
|
||||
if (i + 1 == orig_size) {
|
||||
// no-op.
|
||||
}
|
||||
else if (orig_size <= conjs.size()) {
|
||||
// no-op
|
||||
}
|
||||
else {
|
||||
SASSERT(orig_size == 1 + conjs.size());
|
||||
--orig_size;
|
||||
--i;
|
||||
}
|
||||
}
|
||||
else {
|
||||
conjs[i] = tmp;
|
||||
}
|
||||
}
|
||||
IF_VERBOSE(2, verbose_stream() << "Deleted " << num_deleted << " disequalities " << conjs.size() << " conjuncts\n";);
|
||||
}
|
||||
fml = m.mk_and(conjs.size(), conjs.c_ptr());
|
||||
}
|
||||
|
||||
class test_diff_logic {
|
||||
ast_manager& m;
|
||||
arith_util a;
|
||||
bv_util bv;
|
||||
bool m_is_dl;
|
||||
bool m_test_for_utvpi;
|
||||
|
||||
bool is_numeric(expr* e) const {
|
||||
if (a.is_numeral(e)) {
|
||||
return true;
|
||||
}
|
||||
expr* cond, *th, *el;
|
||||
if (m.is_ite(e, cond, th, el)) {
|
||||
return is_numeric(th) && is_numeric(el);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool is_arith_expr(expr *e) const {
|
||||
return is_app(e) && a.get_family_id() == to_app(e)->get_family_id();
|
||||
}
|
||||
|
||||
bool is_offset(expr* e) const {
|
||||
if (a.is_numeral(e)) {
|
||||
return true;
|
||||
}
|
||||
expr* cond, *th, *el, *e1, *e2;
|
||||
if (m.is_ite(e, cond, th, el)) {
|
||||
return is_offset(th) && is_offset(el);
|
||||
}
|
||||
// recognize offsets.
|
||||
if (a.is_add(e, e1, e2)) {
|
||||
if (is_numeric(e1)) {
|
||||
return is_offset(e2);
|
||||
}
|
||||
if (is_numeric(e2)) {
|
||||
return is_offset(e1);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
if (m_test_for_utvpi) {
|
||||
if (a.is_mul(e, e1, e2)) {
|
||||
if (is_minus_one(e1)) {
|
||||
return is_offset(e2);
|
||||
}
|
||||
if (is_minus_one(e2)) {
|
||||
return is_offset(e1);
|
||||
}
|
||||
}
|
||||
}
|
||||
return !is_arith_expr(e);
|
||||
}
|
||||
|
||||
bool is_minus_one(expr const * e) const {
|
||||
rational r; return a.is_numeral(e, r) && r.is_minus_one();
|
||||
}
|
||||
|
||||
bool test_ineq(expr* e) const {
|
||||
SASSERT(a.is_le(e) || a.is_ge(e) || m.is_eq(e));
|
||||
SASSERT(to_app(e)->get_num_args() == 2);
|
||||
expr * lhs = to_app(e)->get_arg(0);
|
||||
expr * rhs = to_app(e)->get_arg(1);
|
||||
if (is_offset(lhs) && is_offset(rhs))
|
||||
return true;
|
||||
if (!is_numeric(rhs))
|
||||
std::swap(lhs, rhs);
|
||||
if (!is_numeric(rhs))
|
||||
return false;
|
||||
// lhs can be 'x' or '(+ x (* -1 y))'
|
||||
if (is_offset(lhs))
|
||||
return true;
|
||||
expr* arg1, *arg2;
|
||||
if (!a.is_add(lhs, arg1, arg2))
|
||||
return false;
|
||||
// x
|
||||
if (m_test_for_utvpi) {
|
||||
return is_offset(arg1) && is_offset(arg2);
|
||||
}
|
||||
if (is_arith_expr(arg1))
|
||||
std::swap(arg1, arg2);
|
||||
if (is_arith_expr(arg1))
|
||||
return false;
|
||||
// arg2: (* -1 y)
|
||||
expr* m1, *m2;
|
||||
if (!a.is_mul(arg2, m1, m2))
|
||||
return false;
|
||||
return is_minus_one(m1) && is_offset(m2);
|
||||
}
|
||||
|
||||
bool test_eq(expr* e) const {
|
||||
expr* lhs = nullptr, *rhs = nullptr;
|
||||
VERIFY(m.is_eq(e, lhs, rhs));
|
||||
if (!a.is_int_real(lhs)) {
|
||||
return true;
|
||||
}
|
||||
if (a.is_numeral(lhs) || a.is_numeral(rhs)) {
|
||||
return test_ineq(e);
|
||||
}
|
||||
return
|
||||
test_term(lhs) &&
|
||||
test_term(rhs) &&
|
||||
!a.is_mul(lhs) &&
|
||||
!a.is_mul(rhs);
|
||||
}
|
||||
|
||||
bool test_term(expr* e) const {
|
||||
if (m.is_bool(e)) {
|
||||
return true;
|
||||
}
|
||||
if (a.is_numeral(e)) {
|
||||
return true;
|
||||
}
|
||||
if (is_offset(e)) {
|
||||
return true;
|
||||
}
|
||||
expr* lhs, *rhs;
|
||||
if (a.is_add(e, lhs, rhs)) {
|
||||
if (!a.is_numeral(lhs)) {
|
||||
std::swap(lhs, rhs);
|
||||
}
|
||||
return a.is_numeral(lhs) && is_offset(rhs);
|
||||
}
|
||||
if (a.is_mul(e, lhs, rhs)) {
|
||||
return is_minus_one(lhs) || is_minus_one(rhs);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool is_non_arith_or_basic(expr* e) {
|
||||
if (!is_app(e)) {
|
||||
return false;
|
||||
}
|
||||
family_id fid = to_app(e)->get_family_id();
|
||||
|
||||
if (fid == null_family_id &&
|
||||
!m.is_bool(e) &&
|
||||
to_app(e)->get_num_args() > 0) {
|
||||
return true;
|
||||
}
|
||||
return
|
||||
fid != m.get_basic_family_id() &&
|
||||
fid != null_family_id &&
|
||||
fid != a.get_family_id() &&
|
||||
fid != bv.get_family_id();
|
||||
}
|
||||
|
||||
public:
|
||||
test_diff_logic(ast_manager& m): m(m), a(m), bv(m), m_is_dl(true), m_test_for_utvpi(false) {}
|
||||
|
||||
void test_for_utvpi() { m_test_for_utvpi = true; }
|
||||
|
||||
void operator()(expr* e) {
|
||||
if (!m_is_dl) {
|
||||
return;
|
||||
}
|
||||
if (a.is_le(e) || a.is_ge(e)) {
|
||||
m_is_dl = test_ineq(e);
|
||||
}
|
||||
else if (m.is_eq(e)) {
|
||||
m_is_dl = test_eq(e);
|
||||
}
|
||||
else if (is_non_arith_or_basic(e)) {
|
||||
m_is_dl = false;
|
||||
}
|
||||
else if (is_app(e)) {
|
||||
app* a = to_app(e);
|
||||
for (unsigned i = 0; m_is_dl && i < a->get_num_args(); ++i) {
|
||||
m_is_dl = test_term(a->get_arg(i));
|
||||
}
|
||||
}
|
||||
|
||||
if (!m_is_dl) {
|
||||
char const* msg = "non-diff: ";
|
||||
if (m_test_for_utvpi) {
|
||||
msg = "non-utvpi: ";
|
||||
}
|
||||
IF_VERBOSE(1, verbose_stream() << msg << mk_pp(e, m) << "\n";);
|
||||
}
|
||||
}
|
||||
|
||||
bool is_dl() const { return m_is_dl; }
|
||||
};
|
||||
|
||||
bool is_difference_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls) {
|
||||
test_diff_logic test(m);
|
||||
expr_fast_mark1 mark;
|
||||
for (unsigned i = 0; i < num_fmls; ++i) {
|
||||
quick_for_each_expr(test, mark, fmls[i]);
|
||||
}
|
||||
return test.is_dl();
|
||||
}
|
||||
|
||||
bool is_utvpi_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls) {
|
||||
test_diff_logic test(m);
|
||||
test.test_for_utvpi();
|
||||
expr_fast_mark1 mark;
|
||||
for (unsigned i = 0; i < num_fmls; ++i) {
|
||||
quick_for_each_expr(test, mark, fmls[i]);
|
||||
}
|
||||
return test.is_dl();
|
||||
}
|
||||
|
||||
class arith_normalizer : public poly_rewriter<arith_rewriter_core> {
|
||||
ast_manager& m;
|
||||
arith_util m_util;
|
||||
enum op_kind { LE, GE, EQ };
|
||||
public:
|
||||
arith_normalizer(ast_manager& m, params_ref const& p = params_ref()): poly_rewriter<arith_rewriter_core>(m, p), m(m), m_util(m) {}
|
||||
|
||||
br_status mk_app_core(func_decl* f, unsigned num_args, expr* const* args, expr_ref& result) {
|
||||
br_status st = BR_FAILED;
|
||||
if (m.is_eq(f)) {
|
||||
SASSERT(num_args == 2); return mk_eq_core(args[0], args[1], result);
|
||||
}
|
||||
|
||||
if (f->get_family_id() != get_fid()) {
|
||||
return st;
|
||||
}
|
||||
switch (f->get_decl_kind()) {
|
||||
case OP_NUM: st = BR_FAILED; break;
|
||||
case OP_IRRATIONAL_ALGEBRAIC_NUM: st = BR_FAILED; break;
|
||||
case OP_LE: SASSERT(num_args == 2); st = mk_le_core(args[0], args[1], result); break;
|
||||
case OP_GE: SASSERT(num_args == 2); st = mk_ge_core(args[0], args[1], result); break;
|
||||
case OP_LT: SASSERT(num_args == 2); st = mk_lt_core(args[0], args[1], result); break;
|
||||
case OP_GT: SASSERT(num_args == 2); st = mk_gt_core(args[0], args[1], result); break;
|
||||
default: st = BR_FAILED; break;
|
||||
}
|
||||
return st;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
br_status mk_eq_core(expr* arg1, expr* arg2, expr_ref& result) {
|
||||
return mk_le_ge_eq_core(arg1, arg2, EQ, result);
|
||||
}
|
||||
br_status mk_le_core(expr* arg1, expr* arg2, expr_ref& result) {
|
||||
return mk_le_ge_eq_core(arg1, arg2, LE, result);
|
||||
}
|
||||
br_status mk_ge_core(expr* arg1, expr* arg2, expr_ref& result) {
|
||||
return mk_le_ge_eq_core(arg1, arg2, GE, result);
|
||||
}
|
||||
br_status mk_lt_core(expr* arg1, expr* arg2, expr_ref& result) {
|
||||
result = m.mk_not(m_util.mk_ge(arg1, arg2));
|
||||
return BR_REWRITE2;
|
||||
}
|
||||
br_status mk_gt_core(expr* arg1, expr* arg2, expr_ref& result) {
|
||||
result = m.mk_not(m_util.mk_le(arg1, arg2));
|
||||
return BR_REWRITE2;
|
||||
}
|
||||
|
||||
br_status mk_le_ge_eq_core(expr* arg1, expr* arg2, op_kind kind, expr_ref& result) {
|
||||
if (m_util.is_real(arg1)) {
|
||||
numeral g(0);
|
||||
get_coeffs(arg1, g);
|
||||
get_coeffs(arg2, g);
|
||||
if (!g.is_one() && !g.is_zero()) {
|
||||
SASSERT(g.is_pos());
|
||||
expr_ref new_arg1 = rdiv_polynomial(arg1, g);
|
||||
expr_ref new_arg2 = rdiv_polynomial(arg2, g);
|
||||
switch(kind) {
|
||||
case LE: result = m_util.mk_le(new_arg1, new_arg2); return BR_DONE;
|
||||
case GE: result = m_util.mk_ge(new_arg1, new_arg2); return BR_DONE;
|
||||
case EQ: result = m_util.mk_eq(new_arg1, new_arg2); return BR_DONE;
|
||||
}
|
||||
}
|
||||
}
|
||||
return BR_FAILED;
|
||||
}
|
||||
|
||||
void update_coeff(numeral const& r, numeral& g) {
|
||||
if (g.is_zero() || abs(r) < g) {
|
||||
g = abs(r);
|
||||
}
|
||||
}
|
||||
|
||||
void get_coeffs(expr* e, numeral& g) {
|
||||
rational r;
|
||||
unsigned sz;
|
||||
expr* const* args = get_monomials(e, sz);
|
||||
for (unsigned i = 0; i < sz; ++i) {
|
||||
expr* arg = args[i];
|
||||
if (!m_util.is_numeral(arg, r)) {
|
||||
get_power_product(arg, r);
|
||||
}
|
||||
update_coeff(r, g);
|
||||
}
|
||||
}
|
||||
|
||||
expr_ref rdiv_polynomial(expr* e, numeral const& g) {
|
||||
rational r;
|
||||
SASSERT(g.is_pos());
|
||||
SASSERT(!g.is_one());
|
||||
expr_ref_vector monomes(m);
|
||||
unsigned sz;
|
||||
expr* const* args = get_monomials(e, sz);
|
||||
for (unsigned i = 0; i < sz; ++i) {
|
||||
expr* arg = args[i];
|
||||
if (m_util.is_numeral(arg, r)) {
|
||||
monomes.push_back(m_util.mk_numeral(r/g, false));
|
||||
}
|
||||
else {
|
||||
expr* p = get_power_product(arg, r);
|
||||
r /= g;
|
||||
if (r.is_one()) {
|
||||
monomes.push_back(p);
|
||||
}
|
||||
else {
|
||||
monomes.push_back(m_util.mk_mul(m_util.mk_numeral(r, false), p));
|
||||
}
|
||||
}
|
||||
}
|
||||
expr_ref result(m);
|
||||
mk_add(monomes.size(), monomes.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
struct arith_normalizer_cfg: public default_rewriter_cfg {
|
||||
arith_normalizer m_r;
|
||||
bool rewrite_patterns() const { return false; }
|
||||
br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) {
|
||||
return m_r.mk_app_core(f, num, args, result);
|
||||
}
|
||||
arith_normalizer_cfg(ast_manager & m, params_ref const & p):m_r(m,p) {}
|
||||
};
|
||||
|
||||
class arith_normalizer_star : public rewriter_tpl<arith_normalizer_cfg> {
|
||||
arith_normalizer_cfg m_cfg;
|
||||
public:
|
||||
arith_normalizer_star(ast_manager & m, params_ref const & p):
|
||||
rewriter_tpl<arith_normalizer_cfg>(m, false, m_cfg),
|
||||
m_cfg(m, p) {}
|
||||
};
|
||||
|
||||
|
||||
void normalize_arithmetic(expr_ref& t) {
|
||||
ast_manager& m = t.get_manager();
|
||||
scoped_no_proof _sp(m);
|
||||
params_ref p;
|
||||
arith_normalizer_star rw(m, p);
|
||||
expr_ref tmp(m);
|
||||
rw(t, tmp);
|
||||
t = tmp;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
template class rewriter_tpl<pdr::arith_normalizer_cfg>;
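
normalize_arithmetic above rescales a real (in)equality by g, the smallest nonzero coefficient magnitude collected by update_coeff, so that the least coefficient becomes 1. A standalone numeric analogue of that step, independent of the deleted classes:

#include <cassert>
#include <cmath>
#include <vector>

// divide all coefficients (both sides of the atom) by the smallest nonzero magnitude
static std::vector<double> normalize(std::vector<double> coeffs) {
    double g = 0.0;
    for (double c : coeffs)
        if (c != 0.0 && (g == 0.0 || std::fabs(c) < g))
            g = std::fabs(c);
    if (g == 0.0 || g == 1.0) return coeffs;    // mirrors the !g.is_one() && !g.is_zero() guard
    for (double & c : coeffs) c /= g;
    return coeffs;
}

int main() {
    // 4x + 6y <= 8 over the reals becomes x + 1.5y <= 2
    std::vector<double> r = normalize({4.0, 6.0, 8.0});
    assert(r[0] == 1.0 && r[1] == 1.5 && r[2] == 2.0);
    return 0;
}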
|
||||
|
||||
|
||||
|
|
@ -1,81 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
pdr_util.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Utility functions for PDR.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-19.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef PDR_UTIL_H_
|
||||
#define PDR_UTIL_H_
|
||||
|
||||
#include "ast/ast.h"
|
||||
#include "ast/ast_pp.h"
|
||||
#include "ast/ast_util.h"
|
||||
#include "util/obj_hashtable.h"
|
||||
#include "util/ref_vector.h"
|
||||
#include "util/trace.h"
|
||||
#include "util/vector.h"
|
||||
#include "ast/arith_decl_plugin.h"
|
||||
#include "ast/array_decl_plugin.h"
|
||||
#include "ast/bv_decl_plugin.h"
|
||||
|
||||
|
||||
class model;
|
||||
class model_core;
|
||||
|
||||
namespace pdr {
|
||||
|
||||
/**
|
||||
* Return the ceiling of base 2 logarithm of a number,
|
||||
* or zero if the number is zero.
|
||||
*/
|
||||
unsigned ceil_log2(unsigned u);
|
||||
|
||||
typedef ptr_vector<app> app_vector;
|
||||
typedef ptr_vector<func_decl> decl_vector;
|
||||
typedef obj_hashtable<func_decl> func_decl_set;
|
||||
|
||||
std::string pp_cube(const ptr_vector<expr>& model, ast_manager& manager);
|
||||
std::string pp_cube(const expr_ref_vector& model, ast_manager& manager);
|
||||
std::string pp_cube(const ptr_vector<app>& model, ast_manager& manager);
|
||||
std::string pp_cube(const app_ref_vector& model, ast_manager& manager);
|
||||
std::string pp_cube(unsigned sz, app * const * lits, ast_manager& manager);
|
||||
std::string pp_cube(unsigned sz, expr * const * lits, ast_manager& manager);
|
||||
|
||||
|
||||
/**
|
||||
\brief replace variables that are used in many disequalities by
|
||||
an equality using the model.
|
||||
|
||||
Assumption: the model satisfies the conjunctions.
|
||||
*/
|
||||
void reduce_disequalities(model& model, unsigned threshold, expr_ref& fml);
|
||||
|
||||
/**
|
||||
\brief normalize coefficients in polynomials so that least coefficient is 1.
|
||||
*/
|
||||
void normalize_arithmetic(expr_ref& t);
|
||||
|
||||
|
||||
/**
|
||||
\brief determine if formulas belong to difference logic or UTVPI fragment.
|
||||
*/
|
||||
bool is_difference_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls);
|
||||
|
||||
bool is_utvpi_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls);
|
||||
|
||||
}
|
||||
|
||||
#endif
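
As documented above, ceil_log2 returns the ceiling of the base-2 logarithm, with 0 mapped to 0; the deleted implementation gets there via next_power_of_two and a popcount. A naive reference with a few spot checks, kept separate from the deleted code:

#include <cassert>

static unsigned ceil_log2_ref(unsigned u) {
    unsigned r = 0;
    while ((1ull << r) < u) ++r;     // smallest r with 2^r >= u; 0 maps to 0
    return r;
}

int main() {
    assert(ceil_log2_ref(0) == 0);
    assert(ceil_log2_ref(1) == 0);
    assert(ceil_log2_ref(2) == 1);
    assert(ceil_log2_ref(5) == 3);
    assert(ceil_log2_ref(8) == 3);
    assert(ceil_log2_ref(9) == 4);
    return 0;
}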
|
||||
|
|
@ -869,7 +869,7 @@ namespace datalog {
|
|||
dm.set(*d, idx, BIT_1);
|
||||
result.intersect(dm, *d);
|
||||
}
|
||||
else if ((m.is_eq(g, e1, e2) || m.is_iff(g, e1, e2)) && m.is_bool(e1)) {
|
||||
else if (m.is_iff(g, e1, e2)) {
|
||||
udoc diff1, diff2;
|
||||
diff1.push_back(dm.allocateX());
|
||||
diff2.push_back(dm.allocateX());
|
||||
|
|
|
|||
|
|
@ -8,18 +8,25 @@ z3_add_component(spacer
|
|||
spacer_generalizers.cpp
|
||||
spacer_manager.cpp
|
||||
spacer_prop_solver.cpp
|
||||
spacer_smt_context_manager.cpp
|
||||
spacer_sym_mux.cpp
|
||||
spacer_util.cpp
|
||||
spacer_itp_solver.cpp
|
||||
spacer_virtual_solver.cpp
|
||||
spacer_iuc_solver.cpp
|
||||
spacer_legacy_mbp.cpp
|
||||
spacer_proof_utils.cpp
|
||||
spacer_unsat_core_learner.cpp
|
||||
spacer_unsat_core_plugin.cpp
|
||||
spacer_matrix.cpp
|
||||
spacer_antiunify.cpp
|
||||
spacer_mev_array.cpp
|
||||
spacer_qe_project.cpp
|
||||
spacer_sem_matcher.cpp
|
||||
spacer_quant_generalizer.cpp
|
||||
spacer_callback.cpp
|
||||
spacer_json.cpp
|
||||
spacer_iuc_proof.cpp
|
||||
spacer_mbc.cpp
|
||||
spacer_pdr.cpp
|
||||
spacer_sat_answer.cpp
|
||||
COMPONENT_DEPENDENCIES
|
||||
arith_tactics
|
||||
core_tactics
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ Revision History:
|
|||
|
||||
namespace spacer {
|
||||
|
||||
|
||||
// Abstracts numeric values by variables
|
||||
struct var_abs_rewriter : public default_rewriter_cfg {
|
||||
ast_manager &m;
|
||||
|
|
@ -56,8 +57,8 @@ struct var_abs_rewriter : public default_rewriter_cfg {
|
|||
{
|
||||
bool contains_const_child = false;
|
||||
app* a = to_app(t);
|
||||
for (unsigned i=0, sz = a->get_num_args(); i < sz; ++i) {
|
||||
if (m_util.is_numeral(a->get_arg(i))) {
|
||||
for (expr * arg : *a) {
|
||||
if (m_util.is_numeral(arg)) {
|
||||
contains_const_child = true;
|
||||
}
|
||||
}
|
||||
|
|
@ -102,190 +103,73 @@ struct var_abs_rewriter : public default_rewriter_cfg {
|
|||
|
||||
};
|
||||
|
||||
/*
|
||||
* construct m_g, which is a generalization of t where every constant
|
||||
* is replaced by a variable; for any variable in m_g, remember the
|
||||
* substitution needed to get back t, and save it in m_substitutions
|
||||
*/
|
||||
anti_unifier::anti_unifier(expr* t, ast_manager& man) : m(man), m_pinned(m), m_g(m)
|
||||
{
|
||||
m_pinned.push_back(t);
|
||||
|
||||
obj_map<expr, expr*> substitution;
|
||||
anti_unifier::anti_unifier(ast_manager &manager) : m(manager), m_pinned(m) {}
|
||||
|
||||
var_abs_rewriter var_abs_cfg(m, substitution);
|
||||
rewriter_tpl<var_abs_rewriter> var_abs_rw (m, false, var_abs_cfg);
|
||||
var_abs_rw (t, m_g);
|
||||
|
||||
m_substitutions.push_back(substitution); //TODO: refactor into vector, remove k
|
||||
void anti_unifier::reset() {
|
||||
m_subs.reset();
|
||||
m_cache.reset();
|
||||
m_todo.reset();
|
||||
m_pinned.reset();
|
||||
}
|
||||
|
||||
/* traverses m_g and t in parallel. if they only differ in constants
|
||||
* (i.e. m_g contains a variable, where t contains a constant), then
|
||||
* add the substitutions, which need to be applied to m_g to get t, to
|
||||
* m_substitutions.
|
||||
*/
|
||||
bool anti_unifier::add_term(expr* t) {
|
||||
m_pinned.push_back(t);
|
||||
void anti_unifier::operator()(expr *e1, expr *e2, expr_ref &res,
|
||||
substitution &s1, substitution &s2) {
|
||||
|
||||
ptr_vector<expr> todo;
|
||||
ptr_vector<expr> todo2;
|
||||
todo.push_back(m_g);
|
||||
todo2.push_back(t);
|
||||
reset();
|
||||
if (e1 == e2) {res = e1; s1.reset(); s2.reset(); return;}
|
||||
|
||||
ast_mark visited;
|
||||
m_todo.push_back(expr_pair(e1, e2));
|
||||
while (!m_todo.empty()) {
|
||||
const expr_pair &p = m_todo.back();
|
||||
SASSERT(is_app(p.first));
|
||||
SASSERT(is_app(p.second));
|
||||
|
||||
arith_util util(m);
|
||||
app * n1 = to_app(p.first);
|
||||
app * n2 = to_app(p.second);
|
||||
|
||||
obj_map<expr, expr*> substitution;
|
||||
|
||||
while (!todo.empty()) {
|
||||
expr* current = todo.back();
|
||||
todo.pop_back();
|
||||
expr* current2 = todo2.back();
|
||||
todo2.pop_back();
|
||||
|
||||
if (!visited.is_marked(current)) {
|
||||
visited.mark(current, true);
|
||||
|
||||
if (is_var(current)) {
|
||||
// TODO: for now we don't allow variables in the terms we want to antiunify
|
||||
SASSERT(m_substitutions[0].contains(current));
|
||||
if (util.is_numeral(current2)) {
|
||||
substitution.insert(current, current2);
|
||||
}
|
||||
else {return false;}
|
||||
}
|
||||
else {
|
||||
SASSERT(is_app(current));
|
||||
|
||||
if (is_app(current2) &&
|
||||
to_app(current)->get_decl() == to_app(current2)->get_decl() &&
|
||||
to_app(current)->get_num_args() == to_app(current2)->get_num_args()) {
|
||||
// TODO: what to do for numerals here? E.g. if we
|
||||
// have 1 and 2, do they have the same decl or are
|
||||
// the decls already different?
|
||||
SASSERT (!util.is_numeral(current) || current == current2);
|
||||
for (unsigned i = 0, num_args = to_app(current)->get_num_args();
|
||||
i < num_args; ++i) {
|
||||
todo.push_back(to_app(current)->get_arg(i));
|
||||
todo2.push_back(to_app(current2)->get_arg(i));
|
||||
}
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we now know that the terms can be anti-unified, so add the cached substitution
|
||||
m_substitutions.push_back(substitution);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* returns m_g, where additionally any variable, which has only equal
|
||||
* substitutions, is substituted with that substitution
|
||||
*/
|
||||
void anti_unifier::finalize() {
|
||||
ptr_vector<expr> todo;
|
||||
todo.push_back(m_g);
|
||||
|
||||
ast_mark visited;
|
||||
|
||||
obj_map<expr, expr*> generalization;
|
||||
|
||||
arith_util util(m);
|
||||
|
||||
// post-order traversel which ignores constants and handles them
|
||||
// directly when the enclosing term of the constant is handled
|
||||
while (!todo.empty()) {
|
||||
expr* current = todo.back();
|
||||
SASSERT(is_app(current));
|
||||
|
||||
// if we haven't already visited current
|
||||
if (!visited.is_marked(current)) {
|
||||
bool existsUnvisitedParent = false;
|
||||
|
||||
for (unsigned i = 0, sz = to_app(current)->get_num_args(); i < sz; ++i) {
|
||||
expr* argument = to_app(current)->get_arg(i);
|
||||
|
||||
if (!is_var(argument)) {
|
||||
SASSERT(is_app(argument));
|
||||
// if we haven't visited the current parent yet
|
||||
if(!visited.is_marked(argument)) {
|
||||
// add it to the stack
|
||||
todo.push_back(argument);
|
||||
existsUnvisitedParent = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we already visited all parents, we can visit current too
|
||||
if (!existsUnvisitedParent) {
|
||||
visited.mark(current, true);
|
||||
todo.pop_back();
|
||||
|
||||
ptr_buffer<expr> arg_list;
|
||||
for (unsigned i = 0, num_args = to_app(current)->get_num_args();
|
||||
i < num_args; ++i) {
|
||||
expr* argument = to_app(current)->get_arg(i);
|
||||
|
||||
if (is_var(argument)) {
|
||||
// compute whether there are different
|
||||
// substitutions for argument
|
||||
bool containsDifferentSubstitutions = false;
|
||||
|
||||
for (unsigned i=0, sz = m_substitutions.size(); i+1 < sz; ++i) {
|
||||
SASSERT(m_substitutions[i].contains(argument));
|
||||
SASSERT(m_substitutions[i+1].contains(argument));
|
||||
|
||||
// TODO: how to check equality?
|
||||
if (m_substitutions[i][argument] !=
|
||||
m_substitutions[i+1][argument])
|
||||
{
|
||||
containsDifferentSubstitutions = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// if yes, use the variable
|
||||
if (containsDifferentSubstitutions) {
|
||||
arg_list.push_back(argument);
|
||||
}
|
||||
// otherwise use the concrete value instead
|
||||
// and remove the substitutions
|
||||
else
|
||||
{
|
||||
arg_list.push_back(m_substitutions[0][argument]);
|
||||
|
||||
for (unsigned i=0, sz = m_substitutions.size(); i < sz; ++i) {
|
||||
SASSERT(m_substitutions[i].contains(argument));
|
||||
m_substitutions[i].remove(argument);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
SASSERT(generalization.contains(argument));
|
||||
arg_list.push_back(generalization[argument]);
|
||||
}
|
||||
}
|
||||
|
||||
SASSERT(to_app(current)->get_num_args() == arg_list.size());
|
||||
expr_ref application(m.mk_app(to_app(current)->get_decl(),
|
||||
to_app(current)->get_num_args(),
|
||||
arg_list.c_ptr()), m);
|
||||
m_pinned.push_back(application);
|
||||
generalization.insert(current, application);
|
||||
}
|
||||
unsigned num_arg1 = n1->get_num_args();
|
||||
unsigned num_arg2 = n2->get_num_args();
|
||||
if (n1->get_decl() != n2->get_decl() || num_arg1 != num_arg2) {
|
||||
expr_ref v(m);
|
||||
v = m.mk_var(m_subs.size(), get_sort(n1));
|
||||
m_pinned.push_back(v);
|
||||
m_subs.push_back(expr_pair(n1, n2));
|
||||
m_cache.insert(n1, n2, v);
|
||||
}
|
||||
else {
|
||||
todo.pop_back();
|
||||
expr *tmp;
|
||||
unsigned todo_sz = m_todo.size();
|
||||
ptr_buffer<expr> kids;
|
||||
for (unsigned i = 0; i < num_arg1; ++i) {
|
||||
expr *arg1 = n1->get_arg(i);
|
||||
expr *arg2 = n2->get_arg(i);
|
||||
if (arg1 == arg2) {kids.push_back(arg1);}
|
||||
else if (m_cache.find(arg1, arg2, tmp)) {kids.push_back(tmp);}
|
||||
else {m_todo.push_back(expr_pair(arg1, arg2));}
|
||||
}
|
||||
if (m_todo.size() > todo_sz) {continue;}
|
||||
|
||||
expr_ref u(m);
|
||||
u = m.mk_app(n1->get_decl(), kids.size(), kids.c_ptr());
|
||||
m_pinned.push_back(u);
|
||||
m_cache.insert(n1, n2, u);
|
||||
}
|
||||
}
|
||||
|
||||
m_g = generalization[m_g];
|
||||
expr *r;
|
||||
VERIFY(m_cache.find(e1, e2, r));
|
||||
res = r;
|
||||
|
||||
// create substitutions
|
||||
s1.reserve(2, m_subs.size());
|
||||
s2.reserve(2, m_subs.size());
|
||||
|
||||
for (unsigned i = 0, sz = m_subs.size(); i < sz; ++i) {
|
||||
expr_pair p = m_subs.get(i);
|
||||
s1.insert(i, 0, expr_offset(p.first, 1));
|
||||
s2.insert(i, 0, expr_offset(p.second, 1));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
@@ -318,6 +202,8 @@ public:
*/
bool naive_convex_closure::compute_closure(anti_unifier& au, ast_manager& m,
expr_ref& result) {
NOT_IMPLEMENTED_YET();
#if 0
arith_util util(m);

SASSERT(au.get_num_substitutions() > 0);
@@ -411,6 +297,7 @@ bool naive_convex_closure::compute_closure(anti_unifier& au, ast_manager& m,
result = expr_ref(m.mk_exists(vars.size(), sorts.c_ptr(), names.c_ptr(), body),m);

return true;
#endif
}

bool naive_convex_closure::get_range(vector<unsigned int>& v,
@ -453,7 +340,83 @@ void naive_convex_closure::substitute_vars_by_const(ast_manager& m, expr* t,
|
|||
subs_rw (t, res);
|
||||
}
|
||||
|
||||
|
||||
/// Construct a pattern by abstracting all numbers by variables
|
||||
struct mk_num_pat_rewriter : public default_rewriter_cfg {
|
||||
ast_manager &m;
|
||||
arith_util m_arith;
|
||||
|
||||
// -- mark already seen expressions
|
||||
ast_mark m_seen;
|
||||
// -- true if the expression is known to have a number as a sub-expression
|
||||
ast_mark m_has_num;
|
||||
// -- expressions created during the transformation
|
||||
expr_ref_vector m_pinned;
|
||||
// -- map from introduced variables to expressions they replace
|
||||
app_ref_vector &m_subs;
|
||||
|
||||
|
||||
// -- stack of expressions being processed to have access to expressions
|
||||
// -- before rewriting
|
||||
ptr_buffer<expr> m_stack;
|
||||
|
||||
mk_num_pat_rewriter (ast_manager &manager, app_ref_vector& subs) :
|
||||
m(manager), m_arith(m), m_pinned(m), m_subs(subs) {}
|
||||
|
||||
bool pre_visit(expr * t) {
|
||||
// -- don't touch multiplication
|
||||
if (m_arith.is_mul(t)) return false;
|
||||
|
||||
bool r = (!m_seen.is_marked(t) || m_has_num.is_marked(t));
|
||||
if (r) {m_stack.push_back (t);}
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
br_status reduce_app (func_decl * f, unsigned num, expr * const * args,
|
||||
expr_ref & result, proof_ref & result_pr) {
|
||||
expr *s;
|
||||
s = m_stack.back();
|
||||
m_stack.pop_back();
|
||||
if (is_app(s)) {
|
||||
app *a = to_app(s);
|
||||
for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) {
|
||||
if (m_has_num.is_marked(a->get_arg(i))) {
|
||||
m_has_num.mark(a, true);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return BR_FAILED;
|
||||
}
|
||||
|
||||
bool cache_all_results() const { return false; }
|
||||
bool cache_results() const { return false; }
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr) {
|
||||
if (m_arith.is_numeral(s)) {
|
||||
t = m.mk_var(m_subs.size(), m.get_sort(s));
|
||||
m_pinned.push_back(t);
|
||||
m_subs.push_back(to_app(s));
|
||||
|
||||
m_has_num.mark(t, true);
|
||||
m_seen.mark(t, true);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
void mk_num_pat(expr *e, expr_ref &result, app_ref_vector &subs) {
|
||||
SASSERT(subs.empty());
|
||||
mk_num_pat_rewriter rw_cfg(result.m(), subs);
|
||||
rewriter_tpl<mk_num_pat_rewriter> rw(result.m(), false, rw_cfg);
|
||||
rw(e, result);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
template class rewriter_tpl<spacer::var_abs_rewriter>;
|
||||
template class rewriter_tpl<spacer::subs_rewriter_cfg>;
|
||||
template class rewriter_tpl<spacer::mk_num_pat_rewriter>;
|
||||
|
|
|
|||
|
|
@@ -22,32 +22,36 @@ Revision History:
#define _SPACER_ANTIUNIFY_H_

#include "ast/ast.h"

#include "ast/substitution/substitution.h"
#include "util/obj_pair_hashtable.h"
namespace spacer {
/**
\brief Anti-unifier for ground expressions
*/
class anti_unifier
{
public:
anti_unifier(expr* t, ast_manager& m);
~anti_unifier() {}
typedef std::pair<expr *, expr *> expr_pair;
typedef pair_hash<obj_ptr_hash<expr>, obj_ptr_hash<expr> > expr_pair_hash;
typedef obj_pair_map<expr, expr, expr*> cache_ty;

bool add_term(expr* t);
void finalize();

expr* get_generalization() {return m_g;}
unsigned get_num_substitutions() {return m_substitutions.size();}
obj_map<expr, expr*> get_substitution(unsigned index){
SASSERT(index < m_substitutions.size());
return m_substitutions[index];
}

private:
ast_manager& m;
// tracking all created expressions
ast_manager &m;
expr_ref_vector m_pinned;

expr_ref m_g;
svector<expr_pair> m_todo;
cache_ty m_cache;
svector<expr_pair> m_subs;

vector<obj_map<expr, expr*>> m_substitutions;
public:
anti_unifier(ast_manager& m);

void reset();

/**
\brief Computes anti-unifier of two ground expressions. Returns
the anti-unifier and the corresponding substitutions
*/
void operator() (expr *e1, expr *e2, expr_ref &res,
substitution &s1, substitution &s2);
};

class naive_convex_closure
@@ -63,5 +67,8 @@ private:
expr_ref& res);
};

/// Abstracts numbers in the given ground expression by variables
/// Returns the created pattern and the corresponding substitution.
void mk_num_pat(expr *e, expr_ref &result, app_ref_vector &subs);
}
#endif
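The new anti_unifier interface above replaces the old one-term-at-a-time API (add_term/finalize) with a single call that anti-unifies two ground expressions. The following is an illustrative sketch, not part of the commit, of how a caller might drive it; the wrapper function name and the assumption that e1 and e2 are ground application terms are mine, and substitution is the class from ast/substitution/substitution.h included above.

    // Hypothetical usage sketch of the anti_unifier declared above.
    void anti_unify_example(ast_manager &m, expr *e1, expr *e2) {
        spacer::anti_unifier au(m);
        expr_ref pattern(m);              // receives the generalization of e1 and e2
        substitution s1(m), s2(m);        // receive the per-side variable bindings
        au(e1, e2, pattern, s1, s2);      // applying s1/s2 to pattern recovers e1/e2
    }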
src/muz/spacer/spacer_callback.cpp (new file, 38 lines)
@@ -0,0 +1,38 @@
/**++
Copyright (c) 2017 Microsoft Corporation and Matteo Marescotti

Module Name:

spacer_callback.cpp

Abstract:

SPACER plugin for handling events

Author:

Matteo Marescotti

Notes:

--*/

#include "spacer_callback.h"
#include "muz/spacer/spacer_context.h"


namespace spacer {

void user_callback::new_lemma_eh(expr *lemma, unsigned level) {
m_new_lemma_eh(m_state, lemma, level);
}

void user_callback::predecessor_eh() {
m_predecessor_eh(m_state);
}

void user_callback::unfold_eh() {
m_unfold_eh(m_state);
}

}
src/muz/spacer/spacer_callback.h (new file, 65 lines)
@@ -0,0 +1,65 @@
/**++
Copyright (c) 2017 Microsoft Corporation and Matteo Marescotti

Module Name:

spacer_callback.h

Abstract:

SPACER plugin for handling events

Author:

Matteo Marescotti

Notes:

--*/

#ifndef _SPACER_CALLBACK_H_
#define _SPACER_CALLBACK_H_

#include "muz/spacer/spacer_context.h"
#include "muz/base/dl_engine_base.h"


namespace spacer {

class user_callback : public spacer_callback {
private:
void *m_state;
const datalog::t_new_lemma_eh m_new_lemma_eh;
const datalog::t_predecessor_eh m_predecessor_eh;
const datalog::t_unfold_eh m_unfold_eh;

public:
user_callback(context &context,
void *state,
const datalog::t_new_lemma_eh new_lemma_eh,
const datalog::t_predecessor_eh predecessor_eh,
const datalog::t_unfold_eh unfold_eh) :
spacer_callback(context),
m_state(state),
m_new_lemma_eh(new_lemma_eh),
m_predecessor_eh(predecessor_eh),
m_unfold_eh(unfold_eh) {}

inline bool new_lemma() override { return m_new_lemma_eh != nullptr; }

void new_lemma_eh(expr *lemma, unsigned level) override;

inline bool predecessor() override { return m_predecessor_eh != nullptr; }

void predecessor_eh() override;

inline bool unfold() override { return m_unfold_eh != nullptr; }

void unfold_eh() override;

};

}


#endif //_SPACER_CALLBACK_H_
File diff suppressed because it is too large
@@ -34,6 +34,7 @@ Revision History:
#include "model/model_smt2_pp.h"
#include "ast/scoped_proof.h"
#include "muz/transforms/dl_transforms.h"
#include "muz/spacer/spacer_callback.h"

using namespace spacer;

@@ -92,19 +93,14 @@ lbool dl_interface::query(expr * query)
datalog::rule_set old_rules(rules0);
func_decl_ref query_pred(m);
rm.mk_query(query, m_ctx.get_rules());
expr_ref bg_assertion = m_ctx.get_background_assertion();

check_reset();

TRACE("spacer",
if (!m.is_true(bg_assertion)) {
tout << "axioms:\n";
tout << mk_pp(bg_assertion, m) << "\n";
}
tout << "query: " << mk_pp(query, m) << "\n";
tout << "rules:\n";
m_ctx.display_rules(tout);
);
tout << "query: " << mk_pp(query, m) << "\n";
tout << "rules:\n";
m_ctx.display_rules(tout);
);


apply_default_transformation(m_ctx);
@@ -160,7 +156,6 @@ lbool dl_interface::query(expr * query)
m_context->set_proof_converter(m_ctx.get_proof_converter());
m_context->set_model_converter(m_ctx.get_model_converter());
m_context->set_query(query_pred);
m_context->set_axioms(bg_assertion);
m_context->update_rules(m_spacer_rules);

if (m_spacer_rules.get_rules().empty()) {
@@ -169,7 +164,7 @@ lbool dl_interface::query(expr * query)
return l_false;
}

return m_context->solve();
return m_context->solve(m_ctx.get_params().spacer_min_level());

}

@@ -254,7 +249,6 @@ lbool dl_interface::query_from_lvl(expr * query, unsigned lvl)
m_context->set_proof_converter(m_ctx.get_proof_converter());
m_context->set_model_converter(m_ctx.get_model_converter());
m_context->set_query(query_pred);
m_context->set_axioms(bg_assertion);
m_context->update_rules(m_spacer_rules);

if (m_spacer_rules.get_rules().empty()) {
@@ -352,3 +346,14 @@ proof_ref dl_interface::get_proof()
{
return m_context->get_proof();
}

void dl_interface::add_callback(void *state,
const datalog::t_new_lemma_eh new_lemma_eh,
const datalog::t_predecessor_eh predecessor_eh,
const datalog::t_unfold_eh unfold_eh){
m_context->callbacks().push_back(alloc(user_callback, *m_context, state, new_lemma_eh, predecessor_eh, unfold_eh));
}

void dl_interface::add_constraint (expr *c, unsigned lvl){
m_context->add_constraint(c, lvl);
}

@@ -79,6 +79,13 @@ public:

proof_ref get_proof() override;

void add_callback(void *state,
const datalog::t_new_lemma_eh new_lemma_eh,
const datalog::t_predecessor_eh predecessor_eh,
const datalog::t_unfold_eh unfold_eh) override;

void add_constraint (expr *c, unsigned lvl) override;

};
}
@@ -24,12 +24,6 @@ Revision History:

namespace spacer {







class farkas_learner {
typedef obj_hashtable<expr> expr_set;
@ -21,11 +21,16 @@ Revision History:
|
|||
|
||||
#include "muz/spacer/spacer_context.h"
|
||||
#include "muz/spacer/spacer_generalizers.h"
|
||||
#include "ast/ast_util.h"
|
||||
#include "ast/expr_abstract.h"
|
||||
#include "ast/rewriter/var_subst.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "ast/factor_equivs.h"
|
||||
|
||||
#include "ast/rewriter/expr_safe_replace.h"
|
||||
#include "ast/substitution/matcher.h"
|
||||
#include "ast/expr_functors.h"
|
||||
#include "smt/smt_solver.h"
|
||||
#include "qe/qe_term_graph.h"
|
||||
|
||||
namespace spacer {
|
||||
void lemma_sanity_checker::operator()(lemma_ref &lemma) {
|
||||
|
|
@ -33,9 +38,23 @@ void lemma_sanity_checker::operator()(lemma_ref &lemma) {
|
|||
expr_ref_vector cube(lemma->get_ast_manager());
|
||||
cube.append(lemma->get_cube());
|
||||
ENSURE(lemma->get_pob()->pt().check_inductive(lemma->level(),
|
||||
cube, uses_level));
|
||||
cube, uses_level,
|
||||
lemma->weakness()));
|
||||
}
|
||||
|
||||
namespace{
|
||||
class contains_array_op_proc : public i_expr_pred {
|
||||
ast_manager &m;
|
||||
family_id m_array_fid;
|
||||
public:
|
||||
contains_array_op_proc(ast_manager &manager) :
|
||||
m(manager), m_array_fid(m.mk_family_id("array"))
|
||||
{}
|
||||
virtual bool operator()(expr *e) {
|
||||
return is_app(e) && to_app(e)->get_family_id() == m_array_fid;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// ------------------------
|
||||
// lemma_bool_inductive_generalizer
|
||||
|
|
@ -50,6 +69,9 @@ void lemma_bool_inductive_generalizer::operator()(lemma_ref &lemma) {
|
|||
pred_transformer &pt = lemma->get_pob()->pt();
|
||||
ast_manager &m = pt.get_ast_manager();
|
||||
|
||||
contains_array_op_proc has_array_op(m);
|
||||
check_pred has_arrays(has_array_op, m);
|
||||
|
||||
expr_ref_vector cube(m);
|
||||
cube.append(lemma->get_cube());
|
||||
|
||||
|
|
@ -58,14 +80,21 @@ void lemma_bool_inductive_generalizer::operator()(lemma_ref &lemma) {
|
|||
ptr_vector<expr> processed;
|
||||
expr_ref_vector extra_lits(m);
|
||||
|
||||
unsigned weakness = lemma->weakness();
|
||||
|
||||
unsigned i = 0, num_failures = 0;
|
||||
while (i < cube.size() &&
|
||||
(!m_failure_limit || num_failures < m_failure_limit)) {
|
||||
expr_ref lit(m);
|
||||
lit = cube.get(i);
|
||||
if (m_array_only && !has_arrays(lit)) {
|
||||
processed.push_back(lit);
|
||||
++i;
|
||||
continue;
|
||||
}
|
||||
cube[i] = true_expr;
|
||||
if (cube.size() > 1 &&
|
||||
pt.check_inductive(lemma->level(), cube, uses_level)) {
|
||||
pt.check_inductive(lemma->level(), cube, uses_level, weakness)) {
|
||||
num_failures = 0;
|
||||
dirty = true;
|
||||
for (i = 0; i < cube.size() &&
|
||||
|
|
@ -82,7 +111,7 @@ void lemma_bool_inductive_generalizer::operator()(lemma_ref &lemma) {
|
|||
SASSERT(extra_lits.size() > 1);
|
||||
for (unsigned j = 0, sz = extra_lits.size(); !found && j < sz; ++j) {
|
||||
cube[i] = extra_lits.get(j);
|
||||
if (pt.check_inductive(lemma->level(), cube, uses_level)) {
|
||||
if (pt.check_inductive(lemma->level(), cube, uses_level, weakness)) {
|
||||
num_failures = 0;
|
||||
dirty = true;
|
||||
found = true;
|
||||
|
|
@ -130,10 +159,11 @@ void unsat_core_generalizer::operator()(lemma_ref &lemma)
|
|||
|
||||
unsigned old_sz = lemma->get_cube().size();
|
||||
unsigned old_level = lemma->level();
|
||||
(void)old_level;
|
||||
|
||||
unsigned uses_level;
|
||||
expr_ref_vector core(m);
|
||||
VERIFY(pt.is_invariant(old_level, lemma->get_expr(), uses_level, &core));
|
||||
VERIFY(pt.is_invariant(lemma->level(), lemma.get(), uses_level, &core));
|
||||
|
||||
CTRACE("spacer", old_sz > core.size(),
|
||||
tout << "unsat core reduced lemma from: "
|
||||
|
|
@ -176,118 +206,131 @@ public:
|
|||
};
|
||||
}
|
||||
|
||||
bool lemma_array_eq_generalizer::is_array_eq (ast_manager &m, expr* e) {
|
||||
|
||||
expr *e1 = nullptr, *e2 = nullptr;
|
||||
if (m.is_eq(e, e1, e2) && is_app(e1) && is_app(e2)) {
|
||||
app *a1 = to_app(e1);
|
||||
app *a2 = to_app(e2);
|
||||
array_util au(m);
|
||||
if (a1->get_family_id() == null_family_id &&
|
||||
a2->get_family_id() == null_family_id &&
|
||||
au.is_array(a1) && au.is_array(a2))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void lemma_array_eq_generalizer::operator() (lemma_ref &lemma)
|
||||
{
|
||||
TRACE("core_array_eq", tout << "Looking for equalities\n";);
|
||||
|
||||
// -- find array constants
|
||||
ast_manager &m = lemma->get_ast_manager();
|
||||
manager &pm = m_ctx.get_manager();
|
||||
(void)pm;
|
||||
|
||||
|
||||
expr_ref_vector core(m);
|
||||
expr_ref v(m);
|
||||
func_decl_set symb;
|
||||
collect_array_proc cap(m, symb);
|
||||
|
||||
|
||||
// -- find array constants
|
||||
core.append (lemma->get_cube());
|
||||
v = mk_and(core);
|
||||
for_each_expr(cap, v);
|
||||
|
||||
TRACE("core_array_eq",
|
||||
CTRACE("core_array_eq", symb.size() > 1 && symb.size() <= 8,
|
||||
tout << "found " << symb.size() << " array variables in: \n"
|
||||
<< mk_pp(v, m) << "\n";);
|
||||
<< v << "\n";);
|
||||
|
||||
// too few constants
|
||||
if (symb.size() <= 1) { return; }
|
||||
// too many constants, skip this
|
||||
if (symb.size() >= 8) { return; }
|
||||
// too few constants or too many constants
|
||||
if (symb.size() <= 1 || symb.size() > 8) { return; }
|
||||
|
||||
|
||||
// -- for every pair of variables, try an equality
|
||||
typedef func_decl_set::iterator iterator;
|
||||
// -- for every pair of constants (A, B), check whether the
|
||||
// -- equality (A=B) generalizes a literal in the lemma
|
||||
|
||||
ptr_vector<func_decl> vsymbs;
|
||||
for (iterator it = symb.begin(), end = symb.end();
|
||||
it != end; ++it)
|
||||
{ vsymbs.push_back(*it); }
|
||||
for (auto * fdecl : symb) {vsymbs.push_back(fdecl);}
|
||||
|
||||
// create all equalities
|
||||
expr_ref_vector eqs(m);
|
||||
for (unsigned i = 0, sz = vsymbs.size(); i < sz; ++i) {
|
||||
for (unsigned j = i + 1; j < sz; ++j) {
|
||||
eqs.push_back(m.mk_eq(m.mk_const(vsymbs.get(i)),
|
||||
m.mk_const(vsymbs.get(j))));
|
||||
}
|
||||
}
|
||||
|
||||
for (unsigned i = 0, sz = vsymbs.size(); i < sz; ++i)
|
||||
for (unsigned j = i + 1; j < sz; ++j)
|
||||
{ eqs.push_back(m.mk_eq(m.mk_const(vsymbs.get(i)),
|
||||
m.mk_const(vsymbs.get(j)))); }
|
||||
|
||||
smt::kernel solver(m, m_ctx.get_manager().fparams2());
|
||||
// smt-solver to check whether a literal is generalized. using
|
||||
// default params. There has to be a simpler way to approximate
|
||||
// this check
|
||||
ref<solver> sol = mk_smt_solver(m, params_ref::get_empty(), symbol::null);
|
||||
// literals of the new lemma
|
||||
expr_ref_vector lits(m);
|
||||
for (unsigned i = 0, core_sz = core.size(); i < core_sz; ++i) {
|
||||
SASSERT(lits.size() == i);
|
||||
solver.push();
|
||||
solver.assert_expr(core.get(i));
|
||||
for (unsigned j = 0, eqs_sz = eqs.size(); j < eqs_sz; ++j) {
|
||||
solver.push();
|
||||
solver.assert_expr(eqs.get(j));
|
||||
lbool res = solver.check();
|
||||
solver.pop(1);
|
||||
lits.append(core);
|
||||
expr *t = nullptr;
|
||||
bool dirty = false;
|
||||
for (unsigned i = 0, sz = core.size(); i < sz; ++i) {
|
||||
// skip a literal is it is already an array equality
|
||||
if (m.is_not(lits.get(i), t) && is_array_eq(m, t)) continue;
|
||||
solver::scoped_push _pp_(*sol);
|
||||
sol->assert_expr(lits.get(i));
|
||||
for (auto *e : eqs) {
|
||||
solver::scoped_push _p_(*sol);
|
||||
sol->assert_expr(e);
|
||||
lbool res = sol->check_sat(0, nullptr);
|
||||
|
||||
if (res == l_false) {
|
||||
TRACE("core_array_eq",
|
||||
tout << "strengthened " << mk_pp(core.get(i), m)
|
||||
<< " with " << mk_pp(m.mk_not(eqs.get(j)), m) << "\n";);
|
||||
lits.push_back(m.mk_not(eqs.get(j)));
|
||||
tout << "strengthened " << mk_pp(lits.get(i), m)
|
||||
<< " with " << mk_pp(mk_not(m, e), m) << "\n";);
|
||||
lits[i] = mk_not(m, e);
|
||||
dirty = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
solver.pop(1);
|
||||
if (lits.size() == i) { lits.push_back(core.get(i)); }
|
||||
}
|
||||
|
||||
/**
|
||||
HACK: if the first 3 arguments of pt are boolean, assume
|
||||
they correspond to SeaHorn encoding and condition the equality on them.
|
||||
*/
|
||||
// pred_transformer &pt = n.pt ();
|
||||
// if (pt.sig_size () >= 3 &&
|
||||
// m.is_bool (pt.sig (0)->get_range ()) &&
|
||||
// m.is_bool (pt.sig (1)->get_range ()) &&
|
||||
// m.is_bool (pt.sig (2)->get_range ()))
|
||||
// {
|
||||
// lits.push_back (m.mk_const (pm.o2n(pt.sig (0), 0)));
|
||||
// lits.push_back (m.mk_not (m.mk_const (pm.o2n(pt.sig (1), 0))));
|
||||
// lits.push_back (m.mk_not (m.mk_const (pm.o2n(pt.sig (2), 0))));
|
||||
// }
|
||||
// nothing changed
|
||||
if (!dirty) return;
|
||||
|
||||
TRACE("core_array_eq", tout << "new possible core "
|
||||
<< mk_pp(pm.mk_and(lits), m) << "\n";);
|
||||
TRACE("core_array_eq",
|
||||
tout << "new possible core " << mk_and(lits) << "\n";);
|
||||
|
||||
|
||||
pred_transformer &pt = lemma->get_pob()->pt();
|
||||
// -- check if it is consistent with the transition relation
|
||||
// -- check if the generalized result is consistent with trans
|
||||
unsigned uses_level1;
|
||||
if (pt.check_inductive(lemma->level(), lits, uses_level1)) {
|
||||
if (pt.check_inductive(lemma->level(), lits, uses_level1, lemma->weakness())) {
|
||||
TRACE("core_array_eq", tout << "Inductive!\n";);
|
||||
lemma->update_cube(lemma->get_pob(),lits);
|
||||
lemma->update_cube(lemma->get_pob(), lits);
|
||||
lemma->set_level(uses_level1);
|
||||
return;
|
||||
} else
|
||||
{ TRACE("core_array_eq", tout << "Not-Inductive!\n";);}
|
||||
}
|
||||
else
|
||||
{TRACE("core_array_eq", tout << "Not-Inductive!\n";);}
|
||||
}
|
||||
|
||||
void lemma_eq_generalizer::operator() (lemma_ref &lemma)
|
||||
{
|
||||
TRACE("core_eq", tout << "Transforming equivalence classes\n";);
|
||||
|
||||
ast_manager &m = m_ctx.get_ast_manager();
|
||||
expr_ref_vector core(m);
|
||||
core.append (lemma->get_cube());
|
||||
if (lemma->get_cube().empty()) return;
|
||||
|
||||
bool dirty;
|
||||
expr_equiv_class eq_classes(m);
|
||||
factor_eqs(core, eq_classes);
|
||||
// create all possible equalities to allow for simple inductive generalization
|
||||
dirty = equiv_to_expr_full(eq_classes, core);
|
||||
if (dirty) {
|
||||
ast_manager &m = m_ctx.get_ast_manager();
|
||||
qe::term_graph egraph(m);
|
||||
egraph.add_lits(lemma->get_cube());
|
||||
|
||||
// -- expand the cube with all derived equalities
|
||||
expr_ref_vector core(m);
|
||||
egraph.to_lits(core, true);
|
||||
|
||||
// -- if the core looks different from the original cube
|
||||
if (core.size() != lemma->get_cube().size() ||
|
||||
core.get(0) != lemma->get_cube().get(0)) {
|
||||
// -- update the lemma
|
||||
lemma->update_cube(lemma->get_pob(), core);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
|
|
|||
|
|
@ -48,11 +48,14 @@ class lemma_bool_inductive_generalizer : public lemma_generalizer {
|
|||
};
|
||||
|
||||
unsigned m_failure_limit;
|
||||
bool m_array_only;
|
||||
stats m_st;
|
||||
|
||||
public:
|
||||
lemma_bool_inductive_generalizer(context& ctx, unsigned failure_limit) :
|
||||
lemma_generalizer(ctx), m_failure_limit(failure_limit) {}
|
||||
lemma_bool_inductive_generalizer(context& ctx, unsigned failure_limit,
|
||||
bool array_only = false) :
|
||||
lemma_generalizer(ctx), m_failure_limit(failure_limit),
|
||||
m_array_only(array_only) {}
|
||||
~lemma_bool_inductive_generalizer() override {}
|
||||
void operator()(lemma_ref &lemma) override;
|
||||
|
||||
|
|
@ -80,6 +83,8 @@ public:
|
|||
};
|
||||
|
||||
class lemma_array_eq_generalizer : public lemma_generalizer {
|
||||
private:
|
||||
bool is_array_eq(ast_manager &m, expr *e);
|
||||
public:
|
||||
lemma_array_eq_generalizer(context &ctx) : lemma_generalizer(ctx) {}
|
||||
~lemma_array_eq_generalizer() override {}
|
||||
|
|
@ -94,6 +99,45 @@ public:
|
|||
void operator()(lemma_ref &lemma) override;
|
||||
};
|
||||
|
||||
class lemma_quantifier_generalizer : public lemma_generalizer {
|
||||
struct stats {
|
||||
unsigned count;
|
||||
unsigned num_failures;
|
||||
stopwatch watch;
|
||||
stats() {reset();}
|
||||
void reset() {count = 0; num_failures = 0; watch.reset();}
|
||||
};
|
||||
|
||||
ast_manager &m;
|
||||
arith_util m_arith;
|
||||
stats m_st;
|
||||
expr_ref_vector m_cube;
|
||||
|
||||
bool m_normalize_cube;
|
||||
int m_offset;
|
||||
public:
|
||||
lemma_quantifier_generalizer(context &ctx, bool normalize_cube = true);
|
||||
virtual ~lemma_quantifier_generalizer() {}
|
||||
virtual void operator()(lemma_ref &lemma);
|
||||
|
||||
virtual void collect_statistics(statistics& st) const;
|
||||
virtual void reset_statistics() {m_st.reset();}
|
||||
private:
|
||||
bool generalize(lemma_ref &lemma, app *term);
|
||||
|
||||
void find_candidates(expr *e, app_ref_vector &candidate);
|
||||
bool is_ub(var *var, expr *e);
|
||||
bool is_lb(var *var, expr *e);
|
||||
void mk_abs_cube (lemma_ref &lemma, app *term, var *var,
|
||||
expr_ref_vector &gnd_cube,
|
||||
expr_ref_vector &abs_cube,
|
||||
expr *&lb, expr *&ub, unsigned &stride);
|
||||
|
||||
bool match_sk_idx(expr *e, app_ref_vector const &zks, expr *&idx, app *&sk);
|
||||
void cleanup(expr_ref_vector& cube, app_ref_vector const &zks, expr_ref &bind);
|
||||
|
||||
bool find_stride(expr_ref_vector &c, expr_ref &pattern, unsigned &stride);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -1,355 +0,0 @@
|
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_itp_solver.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
A solver that produces interpolated unsat cores
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#include"muz/spacer/spacer_itp_solver.h"
|
||||
#include"ast/ast.h"
|
||||
#include"muz/spacer/spacer_util.h"
|
||||
#include"muz/spacer/spacer_farkas_learner.h"
|
||||
#include"ast/rewriter/expr_replacer.h"
|
||||
#include"muz/spacer/spacer_unsat_core_learner.h"
|
||||
#include"muz/spacer/spacer_unsat_core_plugin.h"
|
||||
|
||||
namespace spacer {
|
||||
void itp_solver::push ()
|
||||
{
|
||||
m_defs.push_back (def_manager (*this));
|
||||
m_solver.push ();
|
||||
}
|
||||
|
||||
void itp_solver::pop (unsigned n)
|
||||
{
|
||||
m_solver.pop (n);
|
||||
unsigned lvl = m_defs.size ();
|
||||
SASSERT (n <= lvl);
|
||||
unsigned new_lvl = lvl-n;
|
||||
while (m_defs.size() > new_lvl) {
|
||||
m_num_proxies -= m_defs.back ().m_defs.size ();
|
||||
m_defs.pop_back ();
|
||||
}
|
||||
}
|
||||
|
||||
app* itp_solver::fresh_proxy ()
|
||||
{
|
||||
if (m_num_proxies == m_proxies.size()) {
|
||||
std::stringstream name;
|
||||
name << "spacer_proxy!" << m_proxies.size ();
|
||||
app_ref res(m);
|
||||
res = m.mk_const (symbol (name.str ().c_str ()),
|
||||
m.mk_bool_sort ());
|
||||
m_proxies.push_back (res);
|
||||
|
||||
// -- add the new proxy to proxy eliminator
|
||||
proof_ref pr(m);
|
||||
pr = m.mk_asserted (m.mk_true ());
|
||||
m_elim_proxies_sub.insert (res, m.mk_true (), pr);
|
||||
|
||||
}
|
||||
return m_proxies.get (m_num_proxies++);
|
||||
}
|
||||
|
||||
app* itp_solver::mk_proxy (expr *v)
|
||||
{
|
||||
{
|
||||
expr *e = v;
|
||||
m.is_not (v, e);
|
||||
if (is_uninterp_const(e)) { return to_app(v); }
|
||||
}
|
||||
|
||||
def_manager &def = m_defs.size () > 0 ? m_defs.back () : m_base_defs;
|
||||
return def.mk_proxy (v);
|
||||
}
|
||||
|
||||
bool itp_solver::mk_proxies (expr_ref_vector &v, unsigned from)
|
||||
{
|
||||
bool dirty = false;
|
||||
for (unsigned i = from, sz = v.size(); i < sz; ++i) {
|
||||
app *p = mk_proxy (v.get (i));
|
||||
dirty |= (v.get (i) != p);
|
||||
v[i] = p;
|
||||
}
|
||||
return dirty;
|
||||
}
|
||||
|
||||
void itp_solver::push_bg (expr *e)
|
||||
{
|
||||
if (m_assumptions.size () > m_first_assumption)
|
||||
{ m_assumptions.shrink(m_first_assumption); }
|
||||
m_assumptions.push_back (e);
|
||||
m_first_assumption = m_assumptions.size ();
|
||||
}
|
||||
|
||||
void itp_solver::pop_bg (unsigned n)
|
||||
{
|
||||
if (n == 0) { return; }
|
||||
|
||||
if (m_assumptions.size () > m_first_assumption)
|
||||
{ m_assumptions.shrink(m_first_assumption); }
|
||||
m_first_assumption = m_first_assumption > n ? m_first_assumption - n : 0;
|
||||
m_assumptions.shrink (m_first_assumption);
|
||||
}
|
||||
|
||||
unsigned itp_solver::get_num_bg () {return m_first_assumption;}
|
||||
|
||||
lbool itp_solver::check_sat (unsigned num_assumptions, expr * const *assumptions)
|
||||
{
|
||||
// -- remove any old assumptions
|
||||
if (m_assumptions.size () > m_first_assumption)
|
||||
{ m_assumptions.shrink(m_first_assumption); }
|
||||
|
||||
// -- replace theory literals in background assumptions with proxies
|
||||
mk_proxies (m_assumptions);
|
||||
// -- in case mk_proxies added new literals, they are all background
|
||||
m_first_assumption = m_assumptions.size ();
|
||||
|
||||
m_assumptions.append (num_assumptions, assumptions);
|
||||
m_is_proxied = mk_proxies (m_assumptions, m_first_assumption);
|
||||
|
||||
lbool res;
|
||||
res = m_solver.check_sat (m_assumptions.size (), m_assumptions.c_ptr ());
|
||||
set_status (res);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
app* itp_solver::def_manager::mk_proxy (expr *v)
|
||||
{
|
||||
app* r;
|
||||
if (m_expr2proxy.find(v, r)) { return r; }
|
||||
|
||||
ast_manager &m = m_parent.m;
|
||||
app_ref proxy(m);
|
||||
app_ref def(m);
|
||||
proxy = m_parent.fresh_proxy ();
|
||||
def = m.mk_or (m.mk_not (proxy), v);
|
||||
m_defs.push_back (def);
|
||||
m_expr2proxy.insert (v, proxy);
|
||||
m_proxy2def.insert (proxy, def);
|
||||
|
||||
m_parent.assert_expr (def.get ());
|
||||
return proxy;
|
||||
}
|
||||
|
||||
bool itp_solver::def_manager::is_proxy (app *k, app_ref &def)
|
||||
{
|
||||
app *r = nullptr;
|
||||
bool found = m_proxy2def.find (k, r);
|
||||
def = r;
|
||||
return found;
|
||||
}
|
||||
|
||||
void itp_solver::def_manager::reset ()
|
||||
{
|
||||
m_expr2proxy.reset ();
|
||||
m_proxy2def.reset ();
|
||||
m_defs.reset ();
|
||||
}
|
||||
|
||||
bool itp_solver::def_manager::is_proxy_def (expr *v)
|
||||
{
|
||||
// XXX This might not be the most robust way to check
|
||||
return m_defs.contains (v);
|
||||
}
|
||||
|
||||
bool itp_solver::is_proxy(expr *e, app_ref &def)
|
||||
{
|
||||
if (!is_uninterp_const(e)) { return false; }
|
||||
|
||||
app *a = to_app (e);
|
||||
|
||||
for (int i = m_defs.size (); i > 0; --i)
|
||||
if (m_defs[i-1].is_proxy (a, def))
|
||||
{ return true; }
|
||||
|
||||
if (m_base_defs.is_proxy (a, def))
|
||||
{ return true; }
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void itp_solver::collect_statistics (statistics &st) const
|
||||
{
|
||||
m_solver.collect_statistics (st);
|
||||
st.update ("time.itp_solver.itp_core", m_itp_watch.get_seconds ());
|
||||
}
|
||||
|
||||
void itp_solver::reset_statistics ()
|
||||
{
|
||||
m_itp_watch.reset ();
|
||||
}
|
||||
|
||||
void itp_solver::get_unsat_core (ptr_vector<expr> &core)
|
||||
{
|
||||
m_solver.get_unsat_core (core);
|
||||
undo_proxies_in_core (core);
|
||||
}
|
||||
void itp_solver::undo_proxies_in_core (ptr_vector<expr> &r)
|
||||
{
|
||||
app_ref e(m);
|
||||
expr_fast_mark1 bg;
|
||||
for (unsigned i = 0; i < m_first_assumption; ++i)
|
||||
{ bg.mark(m_assumptions.get(i)); }
|
||||
|
||||
// expand proxies
|
||||
unsigned j = 0;
|
||||
for (unsigned i = 0, sz = r.size(); i < sz; ++i) {
|
||||
// skip background assumptions
|
||||
if (bg.is_marked(r[i])) { continue; }
|
||||
|
||||
// -- undo proxies, but only if they were introduced in check_sat
|
||||
if (m_is_proxied && is_proxy(r[i], e)) {
|
||||
SASSERT (m.is_or (e));
|
||||
r[j] = e->get_arg (1);
|
||||
} else if (i != j) { r[j] = r[i]; }
|
||||
j++;
|
||||
}
|
||||
r.shrink (j);
|
||||
}
|
||||
|
||||
void itp_solver::undo_proxies (expr_ref_vector &r)
|
||||
{
|
||||
app_ref e(m);
|
||||
// expand proxies
|
||||
for (unsigned i = 0, sz = r.size (); i < sz; ++i)
|
||||
if (is_proxy(r.get(i), e)) {
|
||||
SASSERT (m.is_or (e));
|
||||
r[i] = e->get_arg (1);
|
||||
}
|
||||
}
|
||||
|
||||
void itp_solver::get_unsat_core (expr_ref_vector &_core)
|
||||
{
|
||||
ptr_vector<expr> core;
|
||||
get_unsat_core (core);
|
||||
_core.append (core.size (), core.c_ptr ());
|
||||
}
|
||||
|
||||
void itp_solver::elim_proxies (expr_ref_vector &v)
|
||||
{
|
||||
expr_ref f = mk_and (v);
|
||||
scoped_ptr<expr_replacer> rep = mk_expr_simp_replacer (m);
|
||||
rep->set_substitution (&m_elim_proxies_sub);
|
||||
(*rep) (f);
|
||||
v.reset ();
|
||||
flatten_and (f, v);
|
||||
}
|
||||
|
||||
void itp_solver::get_itp_core (expr_ref_vector &core)
|
||||
{
|
||||
scoped_watch _t_ (m_itp_watch);
|
||||
|
||||
typedef obj_hashtable<expr> expr_set;
|
||||
expr_set B;
|
||||
for (unsigned i = m_first_assumption, sz = m_assumptions.size(); i < sz; ++i) {
|
||||
expr *a = m_assumptions.get (i);
|
||||
app_ref def(m);
|
||||
if (is_proxy(a, def)) { B.insert(def.get()); }
|
||||
B.insert (a);
|
||||
}
|
||||
|
||||
proof_ref pr(m);
|
||||
pr = get_proof ();
|
||||
|
||||
if (!m_new_unsat_core) {
|
||||
// old code
|
||||
farkas_learner learner_old;
|
||||
learner_old.set_split_literals(m_split_literals);
|
||||
|
||||
learner_old.get_lemmas (pr, B, core);
|
||||
elim_proxies (core);
|
||||
simplify_bounds (core); // XXX potentially redundant
|
||||
} else {
|
||||
// new code
|
||||
unsat_core_learner learner(m);
|
||||
|
||||
if (m_farkas_optimized) {
|
||||
if (true) // TODO: proper options
|
||||
{
|
||||
unsat_core_plugin_farkas_lemma_optimized* plugin_farkas_lemma_optimized = alloc(unsat_core_plugin_farkas_lemma_optimized, learner,m);
|
||||
learner.register_plugin(plugin_farkas_lemma_optimized);
|
||||
}
|
||||
else
|
||||
{
|
||||
unsat_core_plugin_farkas_lemma_bounded* plugin_farkas_lemma_bounded = alloc(unsat_core_plugin_farkas_lemma_bounded, learner,m);
|
||||
learner.register_plugin(plugin_farkas_lemma_bounded);
|
||||
}
|
||||
|
||||
} else {
|
||||
unsat_core_plugin_farkas_lemma* plugin_farkas_lemma = alloc(unsat_core_plugin_farkas_lemma, learner, m_split_literals, m_farkas_a_const);
|
||||
learner.register_plugin(plugin_farkas_lemma);
|
||||
}
|
||||
|
||||
if (m_minimize_unsat_core) {
|
||||
unsat_core_plugin_min_cut* plugin_min_cut = alloc(unsat_core_plugin_min_cut, learner, m);
|
||||
learner.register_plugin(plugin_min_cut);
|
||||
} else {
|
||||
unsat_core_plugin_lemma* plugin_lemma = alloc(unsat_core_plugin_lemma, learner);
|
||||
learner.register_plugin(plugin_lemma);
|
||||
}
|
||||
|
||||
learner.compute_unsat_core(pr, B, core);
|
||||
|
||||
elim_proxies (core);
|
||||
simplify_bounds (core); // XXX potentially redundant
|
||||
|
||||
// // debug
|
||||
// expr_ref_vector core2(m);
|
||||
// unsat_core_learner learner2(m);
|
||||
//
|
||||
// unsat_core_plugin_farkas_lemma* plugin_farkas_lemma2 = alloc(unsat_core_plugin_farkas_lemma, learner2, m_split_literals);
|
||||
// learner2.register_plugin(plugin_farkas_lemma2);
|
||||
// unsat_core_plugin_lemma* plugin_lemma2 = alloc(unsat_core_plugin_lemma, learner2);
|
||||
// learner2.register_plugin(plugin_lemma2);
|
||||
// learner2.compute_unsat_core(pr, B, core2);
|
||||
//
|
||||
// elim_proxies (core2);
|
||||
// simplify_bounds (core2);
|
||||
//
|
||||
// IF_VERBOSE(2,
|
||||
// verbose_stream () << "Itp Core:\n"
|
||||
// << mk_pp (mk_and (core), m) << "\n";);
|
||||
// IF_VERBOSE(2,
|
||||
// verbose_stream () << "Itp Core2:\n"
|
||||
// << mk_pp (mk_and (core2), m) << "\n";);
|
||||
//SASSERT(mk_and (core) == mk_and (core2));
|
||||
}
|
||||
|
||||
IF_VERBOSE(2,
|
||||
verbose_stream () << "Itp Core:\n"
|
||||
<< mk_pp (mk_and (core), m) << "\n";);
|
||||
|
||||
}
|
||||
|
||||
void itp_solver::refresh ()
|
||||
{
|
||||
// only refresh in non-pushed state
|
||||
SASSERT (m_defs.size () == 0);
|
||||
expr_ref_vector assertions (m);
|
||||
for (unsigned i = 0, e = m_solver.get_num_assertions(); i < e; ++i) {
|
||||
expr* a = m_solver.get_assertion (i);
|
||||
if (!m_base_defs.is_proxy_def(a)) { assertions.push_back(a); }
|
||||
|
||||
}
|
||||
m_base_defs.reset ();
|
||||
NOT_IMPLEMENTED_YET ();
|
||||
// solver interface does not have a reset method. need to introduce it somewhere.
|
||||
// m_solver.reset ();
|
||||
for (unsigned i = 0, e = assertions.size (); i < e; ++i)
|
||||
{ m_solver.assert_expr(assertions.get(i)); }
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -1,172 +0,0 @@
|
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_itp_solver.h
|
||||
|
||||
Abstract:
|
||||
|
||||
A solver that produces interpolated unsat cores
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#ifndef SPACER_ITP_SOLVER_H_
|
||||
#define SPACER_ITP_SOLVER_H_
|
||||
|
||||
#include"solver/solver.h"
|
||||
#include"ast/expr_substitution.h"
|
||||
#include"util/stopwatch.h"
|
||||
namespace spacer {
|
||||
class itp_solver : public solver {
|
||||
private:
|
||||
struct def_manager {
|
||||
itp_solver &m_parent;
|
||||
obj_map<expr, app*> m_expr2proxy;
|
||||
obj_map<app, app*> m_proxy2def;
|
||||
|
||||
expr_ref_vector m_defs;
|
||||
|
||||
def_manager(itp_solver &parent) :
|
||||
m_parent(parent), m_defs(m_parent.m)
|
||||
{}
|
||||
|
||||
bool is_proxy(app *k, app_ref &v);
|
||||
app* mk_proxy(expr *v);
|
||||
void reset();
|
||||
bool is_proxy_def(expr *v);
|
||||
|
||||
};
|
||||
|
||||
friend struct def_manager;
|
||||
ast_manager &m;
|
||||
solver &m_solver;
|
||||
app_ref_vector m_proxies;
|
||||
unsigned m_num_proxies;
|
||||
vector<def_manager> m_defs;
|
||||
def_manager m_base_defs;
|
||||
expr_ref_vector m_assumptions;
|
||||
unsigned m_first_assumption;
|
||||
bool m_is_proxied;
|
||||
|
||||
stopwatch m_itp_watch;
|
||||
|
||||
expr_substitution m_elim_proxies_sub;
|
||||
bool m_split_literals;
|
||||
bool m_new_unsat_core;
|
||||
bool m_minimize_unsat_core;
|
||||
bool m_farkas_optimized;
|
||||
bool m_farkas_a_const;
|
||||
|
||||
bool is_proxy(expr *e, app_ref &def);
|
||||
void undo_proxies_in_core(ptr_vector<expr> &v);
|
||||
app* mk_proxy(expr *v);
|
||||
app* fresh_proxy();
|
||||
void elim_proxies(expr_ref_vector &v);
|
||||
public:
|
||||
itp_solver(solver &solver, bool new_unsat_core, bool minimize_unsat_core, bool farkas_optimized, bool farkas_a_const, bool split_literals = false) :
|
||||
m(solver.get_manager()),
|
||||
m_solver(solver),
|
||||
m_proxies(m),
|
||||
m_num_proxies(0),
|
||||
m_base_defs(*this),
|
||||
m_assumptions(m),
|
||||
m_first_assumption(0),
|
||||
m_is_proxied(false),
|
||||
m_elim_proxies_sub(m, false, true),
|
||||
m_split_literals(split_literals),
|
||||
m_new_unsat_core(new_unsat_core),
|
||||
m_minimize_unsat_core(minimize_unsat_core),
|
||||
m_farkas_optimized(farkas_optimized),
|
||||
m_farkas_a_const(farkas_a_const)
|
||||
{}
|
||||
|
||||
~itp_solver() override {}
|
||||
|
||||
/* itp solver specific */
|
||||
void get_unsat_core(expr_ref_vector &core) override;
|
||||
virtual void get_itp_core(expr_ref_vector &core);
|
||||
void set_split_literals(bool v) {m_split_literals = v;}
|
||||
bool mk_proxies(expr_ref_vector &v, unsigned from = 0);
|
||||
void undo_proxies(expr_ref_vector &v);
|
||||
|
||||
void push_bg(expr *e);
|
||||
void pop_bg(unsigned n);
|
||||
unsigned get_num_bg();
|
||||
|
||||
void get_full_unsat_core(ptr_vector<expr> &core)
|
||||
{m_solver.get_unsat_core(core);}
|
||||
|
||||
/* solver interface */
|
||||
|
||||
solver* translate(ast_manager &m, params_ref const &p) override { return m_solver.translate(m, p);}
|
||||
void updt_params(params_ref const &p) override { m_solver.updt_params(p);}
|
||||
void collect_param_descrs(param_descrs &r) override { m_solver.collect_param_descrs(r);}
|
||||
void set_produce_models(bool f) override { m_solver.set_produce_models(f);}
|
||||
void assert_expr_core(expr *t) override { m_solver.assert_expr(t);}
|
||||
void assert_expr_core2(expr *t, expr *a) override { NOT_IMPLEMENTED_YET();}
|
||||
expr_ref_vector cube(expr_ref_vector&, unsigned) override { return expr_ref_vector(m); }
|
||||
|
||||
void push() override;
|
||||
void pop(unsigned n) override;
|
||||
unsigned get_scope_level() const override
|
||||
{return m_solver.get_scope_level();}
|
||||
|
||||
lbool check_sat(unsigned num_assumptions, expr * const *assumptions) override;
|
||||
void set_progress_callback(progress_callback *callback) override
|
||||
{m_solver.set_progress_callback(callback);}
|
||||
unsigned get_num_assertions() const override
|
||||
{return m_solver.get_num_assertions();}
|
||||
expr * get_assertion(unsigned idx) const override
|
||||
{return m_solver.get_assertion(idx);}
|
||||
unsigned get_num_assumptions() const override
|
||||
{return m_solver.get_num_assumptions();}
|
||||
expr * get_assumption(unsigned idx) const override
|
||||
{return m_solver.get_assumption(idx);}
|
||||
std::ostream &display(std::ostream &out, unsigned n, expr* const* es) const override
|
||||
{ return m_solver.display(out, n, es); }
|
||||
|
||||
/* check_sat_result interface */
|
||||
|
||||
void collect_statistics(statistics &st) const override ;
|
||||
virtual void reset_statistics();
|
||||
|
||||
void get_unsat_core(ptr_vector<expr> &r) override;
|
||||
void get_model_core(model_ref &m) override {m_solver.get_model(m);}
|
||||
proof *get_proof() override {return m_solver.get_proof();}
|
||||
std::string reason_unknown() const override
|
||||
{return m_solver.reason_unknown();}
|
||||
void set_reason_unknown(char const* msg) override
|
||||
{m_solver.set_reason_unknown(msg);}
|
||||
void get_labels(svector<symbol> &r) override
|
||||
{m_solver.get_labels(r);}
|
||||
ast_manager &get_manager() const override {return m;}
|
||||
|
||||
virtual void refresh();
|
||||
|
||||
class scoped_mk_proxy {
|
||||
itp_solver &m_s;
|
||||
expr_ref_vector &m_v;
|
||||
public:
|
||||
scoped_mk_proxy(itp_solver &s, expr_ref_vector &v) : m_s(s), m_v(v)
|
||||
{m_s.mk_proxies(m_v);}
|
||||
~scoped_mk_proxy()
|
||||
{m_s.undo_proxies(m_v);}
|
||||
};
|
||||
|
||||
class scoped_bg {
|
||||
itp_solver &m_s;
|
||||
unsigned m_bg_sz;
|
||||
public:
|
||||
scoped_bg(itp_solver &s) : m_s(s), m_bg_sz(m_s.get_num_bg()) {}
|
||||
~scoped_bg()
|
||||
{if (m_s.get_num_bg() > m_bg_sz) { m_s.pop_bg(m_s.get_num_bg() - m_bg_sz); }}
|
||||
};
|
||||
};
|
||||
}
|
||||
#endif
src/muz/spacer/spacer_iuc_proof.cpp (new file, 280 lines)
@@ -0,0 +1,280 @@
|
|||
#include <unordered_map>
|
||||
#include "ast/ast_pp_dot.h"
|
||||
|
||||
#include "muz/spacer/spacer_iuc_proof.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "ast/array_decl_plugin.h"
|
||||
#include "ast/proofs/proof_utils.h"
|
||||
#include "muz/spacer/spacer_proof_utils.h"
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
namespace spacer {
|
||||
|
||||
/*
|
||||
* ====================================
|
||||
* init
|
||||
* ====================================
|
||||
*/
|
||||
iuc_proof::iuc_proof(ast_manager& m, proof* pr, const expr_set& core_lits) :
|
||||
m(m), m_pr(pr,m) {
|
||||
for (auto lit : core_lits) m_core_lits.insert(lit);
|
||||
// init A-marks and B-marks
|
||||
collect_core_symbols();
|
||||
compute_marks();
|
||||
}
|
||||
|
||||
iuc_proof::iuc_proof(ast_manager& m, proof* pr, const expr_ref_vector& core_lits) :
|
||||
m(m), m_pr(pr,m) {
|
||||
for (auto lit : core_lits) m_core_lits.insert(lit);
|
||||
// init A-marks and B-marks
|
||||
collect_core_symbols();
|
||||
compute_marks();
|
||||
}
|
||||
/*
|
||||
* ====================================
|
||||
* methods for computing symbol colors
|
||||
* ====================================
|
||||
*/
|
||||
class collect_pure_proc {
|
||||
func_decl_set& m_symbs;
|
||||
public:
|
||||
collect_pure_proc(func_decl_set& s):m_symbs(s) {}
|
||||
|
||||
void operator()(app* a) {
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
m_symbs.insert(a->get_decl());
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
void iuc_proof::collect_core_symbols()
|
||||
{
|
||||
expr_mark visited;
|
||||
collect_pure_proc proc(m_core_symbols);
|
||||
for (auto lit : m_core_lits)
|
||||
for_each_expr(proc, visited, lit);
|
||||
}
|
||||
|
||||
class is_pure_expr_proc {
|
||||
func_decl_set const& m_symbs;
|
||||
array_util m_au;
|
||||
public:
|
||||
struct non_pure {};
|
||||
|
||||
is_pure_expr_proc(func_decl_set const& s, ast_manager& m):
|
||||
m_symbs(s),
|
||||
m_au (m)
|
||||
{}
|
||||
|
||||
void operator()(app* a) {
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
if (!m_symbs.contains(a->get_decl())) {
|
||||
throw non_pure();
|
||||
}
|
||||
}
|
||||
else if (a->get_family_id () == m_au.get_family_id () &&
|
||||
a->is_app_of (a->get_family_id (), OP_ARRAY_EXT)) {
|
||||
throw non_pure();
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
bool iuc_proof::is_core_pure(expr* e) const
|
||||
{
|
||||
is_pure_expr_proc proc(m_core_symbols, m);
|
||||
try {
|
||||
for_each_expr(proc, e);
|
||||
}
|
||||
catch (is_pure_expr_proc::non_pure)
|
||||
{return false;}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void iuc_proof::compute_marks()
|
||||
{
|
||||
proof_post_order it(m_pr, m);
|
||||
while (it.hasNext())
|
||||
{
|
||||
proof* cur = it.next();
|
||||
if (m.get_num_parents(cur) == 0)
|
||||
{
|
||||
switch(cur->get_decl_kind())
|
||||
{
|
||||
case PR_ASSERTED:
|
||||
if (m_core_lits.contains(m.get_fact(cur)))
|
||||
m_b_mark.mark(cur, true);
|
||||
else
|
||||
m_a_mark.mark(cur, true);
|
||||
break;
|
||||
case PR_HYPOTHESIS:
|
||||
m_h_mark.mark(cur, true);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// collect from parents whether derivation of current node
|
||||
// contains A-axioms, B-axioms and hypothesis
|
||||
bool need_to_mark_a = false;
|
||||
bool need_to_mark_b = false;
|
||||
bool need_to_mark_h = false;
|
||||
|
||||
            for (unsigned i = 0; i < m.get_num_parents(cur); ++i)
            {
                SASSERT(m.is_proof(cur->get_arg(i)));
                proof* premise = to_app(cur->get_arg(i));

                need_to_mark_a |= m_a_mark.is_marked(premise);
                need_to_mark_b |= m_b_mark.is_marked(premise);
                need_to_mark_h |= m_h_mark.is_marked(premise);
            }

            // if current node is application of a lemma, then all
            // active hypotheses are removed
            if (cur->get_decl_kind() == PR_LEMMA) need_to_mark_h = false;

            // save results
            m_a_mark.mark(cur, need_to_mark_a);
            m_b_mark.mark(cur, need_to_mark_b);
            m_h_mark.mark(cur, need_to_mark_h);
        }
    }
}

/*
 * ====================================
 * statistics
 * ====================================
 */

// debug method
void iuc_proof::dump_farkas_stats()
{
    unsigned fl_total = 0;
    unsigned fl_lowcut = 0;

    proof_post_order it(m_pr, m);
    while (it.hasNext())
    {
        proof* cur = it.next();

        // if node is theory lemma
        if (is_farkas_lemma(m, cur))
        {
            fl_total++;

            // check whether farkas lemma is to be interpolated (could
            // potentially miss farkas lemmas, which are interpolated,
            // because we potentially don't want to use the lowest
            // cut)
            bool has_blue_nonred_parent = false;
            for (unsigned i = 0; i < m.get_num_parents(cur); ++i) {
                proof* premise = to_app(cur->get_arg(i));
                if (!is_a_marked(premise) && is_b_marked(premise)) {
                    has_blue_nonred_parent = true;
                    break;
                }
            }

            if (has_blue_nonred_parent && is_a_marked(cur))
            {
                SASSERT(is_b_marked(cur));
                fl_lowcut++;
            }
        }
    }

    IF_VERBOSE(1, verbose_stream()
               << "\n total farkas lemmas " << fl_total
               << " farkas lemmas in lowest cut " << fl_lowcut << "\n";);
}

void iuc_proof::display_dot(std::ostream& out) {
    out << "digraph proof { \n";

    std::unordered_map<unsigned, unsigned> ids;
    unsigned last_id = 0;

    proof_post_order it(m_pr, m);
    while (it.hasNext())
    {
        proof* curr = it.next();

        SASSERT(ids.count(curr->get_id()) == 0);
        ids.insert(std::make_pair(curr->get_id(), last_id));

        std::string color = "white";
        if (this->is_a_marked(curr) && !this->is_b_marked(curr))
            color = "red";
        else if (!this->is_a_marked(curr) && this->is_b_marked(curr))
            color = "blue";
        else if (this->is_a_marked(curr) && this->is_b_marked(curr))
            color = "purple";

        // compute node label
        std::ostringstream label_ostream;
        label_ostream << mk_epp(m.get_fact(curr), m) << "\n";
        std::string label = escape_dot(label_ostream.str());

        // compute edge-label
        std::string edge_label = "";
        if (m.get_num_parents(curr) == 0) {
            switch (curr->get_decl_kind())
            {
            case PR_ASSERTED:
                edge_label = "asserted:";
                break;
            case PR_HYPOTHESIS:
                edge_label = "hyp:";
                color = "grey";
                break;
            case PR_TH_LEMMA:
                if (is_farkas_lemma(m, curr))
                    edge_label = "th_axiom(farkas):";
                else if (is_arith_lemma(m, curr))
                    edge_label = "th_axiom(arith):";
                else
                    edge_label = "th_axiom:";
                break;
            default:
                edge_label = "unknown axiom:";
            }
        }
        else {
            if (curr->get_decl_kind() == PR_LEMMA)
                edge_label = "lemma:";
            else if (curr->get_decl_kind() == PR_TH_LEMMA) {
                if (is_farkas_lemma(m, curr))
                    edge_label = "th_lemma(farkas):";
                else if (is_arith_lemma(m, curr))
                    edge_label = "th_lemma(arith):";
                else
                    edge_label = "th_lemma(other):";
            }
        }

        // generate entry for node in dot-file
        out << "node_" << last_id << " " << "["
            << "shape=box,style=\"filled\","
            << "label=\"" << edge_label << " " << label << "\", "
            << "fillcolor=\"" << color << "\"" << "]\n";

        // add entry for each edge to that node
        for (unsigned i = m.get_num_parents(curr); i > 0 ; --i)
        {
            proof* premise = to_app(curr->get_arg(i-1));
            unsigned pid = ids.at(premise->get_id());
            out << "node_" << pid << " -> " << "node_" << last_id << ";\n";
        }

        ++last_id;
    }
    out << "\n}" << std::endl;
}
}
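For reference, display_dot emits a plain Graphviz digraph in which red nodes derive only from A, blue only from B (the core), purple from both, and grey marks hypotheses. A minimal sketch of how the output might be rendered outside a debugger (the wrapper function and file name are illustrative assumptions, not part of this commit):

    #include <fstream>

    // write the colored proof and render it with Graphviz,
    // e.g. `dot -Tpdf proof.dot -o proof.pdf`
    void dump_proof_graph(spacer::iuc_proof &pf) {
        std::ofstream out("proof.dot");   // hypothetical output path
        pf.display_dot(out);
    }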
67 src/muz/spacer/spacer_iuc_proof.h Normal file
@ -0,0 +1,67 @@
#ifndef IUC_PROOF_H_
#define IUC_PROOF_H_

#include <ostream>
#include "ast/ast.h"

namespace spacer {
typedef obj_hashtable<expr> expr_set;
typedef obj_hashtable<func_decl> func_decl_set;

/*
 * An iuc_proof is a proof together with information of the
 * coloring of the axioms.
 */
class iuc_proof
{
public:

    // Constructs an iuc_proof given an ast_manager, a proof, and a set of
    // literals core_lits that might be included in the unsat core
    iuc_proof(ast_manager& m, proof* pr, const expr_set& core_lits);
    iuc_proof(ast_manager& m, proof* pr, const expr_ref_vector &core_lits);

    // returns the proof object
    proof* get() {return m_pr.get();}

    // returns true if all uninterpreted symbols of e are from the core literals
    // requires that m_core_symbols has already been computed
    bool is_core_pure(expr* e) const;

    bool is_a_marked(proof* p) {return m_a_mark.is_marked(p);}
    bool is_b_marked(proof* p) {return m_b_mark.is_marked(p);}
    bool is_h_marked(proof* p) {return m_h_mark.is_marked(p);}

    bool is_b_pure (proof *p) {
        return !is_h_marked (p) && is_core_pure(m.get_fact (p));
    }

    void display_dot(std::ostream &out);
    // debug method
    void dump_farkas_stats();
private:
    ast_manager& m;
    proof_ref m_pr;

    ast_mark m_a_mark;
    ast_mark m_b_mark;
    ast_mark m_h_mark;

    // -- literals that are part of the core
    expr_set m_core_lits;

    // symbols that occur in any literals in the core
    func_decl_set m_core_symbols;

    // collect symbols occurring in B (the core)
    void collect_core_symbols();

    // compute for each formula of the proof whether it derives
    // from A or from B
    void compute_marks();
};

}

#endif /* IUC_PROOF_H_ */
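As a usage illustration of the interface above (a sketch only; it assumes an ast_manager m, a refutation pr, and an expr_ref_vector core_lits of candidate core literals already exist):

    // color the refutation with respect to the core (B) literals;
    // the A/B/hypothesis marks are computed in the constructor
    spacer::iuc_proof ipf(m, pr, core_lits);

    proof *root = ipf.get();
    bool from_a = ipf.is_a_marked(root);
    bool from_b = ipf.is_b_marked(root);

    // optional diagnostics
    ipf.dump_farkas_stats();
    ipf.display_dot(std::cout);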
435 src/muz/spacer/spacer_iuc_solver.cpp Normal file
@ -0,0 +1,435 @@
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_iuc_solver.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
A solver that produces interpolated unsat cores (IUCs)
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#include"muz/spacer/spacer_iuc_solver.h"
|
||||
#include"ast/ast.h"
|
||||
#include"muz/spacer/spacer_util.h"
|
||||
#include"ast/proofs/proof_utils.h"
|
||||
#include"muz/spacer/spacer_farkas_learner.h"
|
||||
#include"ast/rewriter/expr_replacer.h"
|
||||
#include"muz/spacer/spacer_unsat_core_learner.h"
|
||||
#include"muz/spacer/spacer_unsat_core_plugin.h"
|
||||
#include "muz/spacer/spacer_iuc_proof.h"
|
||||
|
||||
namespace spacer {
|
||||
void iuc_solver::push ()
|
||||
{
|
||||
m_defs.push_back (def_manager (*this));
|
||||
m_solver.push ();
|
||||
}
|
||||
|
||||
void iuc_solver::pop (unsigned n)
|
||||
{
|
||||
m_solver.pop (n);
|
||||
unsigned lvl = m_defs.size ();
|
||||
SASSERT (n <= lvl);
|
||||
unsigned new_lvl = lvl-n;
|
||||
while (m_defs.size() > new_lvl) {
|
||||
m_num_proxies -= m_defs.back ().m_defs.size ();
|
||||
m_defs.pop_back ();
|
||||
}
|
||||
}
|
||||
|
||||
app* iuc_solver::fresh_proxy ()
|
||||
{
|
||||
if (m_num_proxies == m_proxies.size()) {
|
||||
std::stringstream name;
|
||||
name << "spacer_proxy!" << m_proxies.size ();
|
||||
app_ref res(m);
|
||||
res = m.mk_const (symbol (name.str ().c_str ()),
|
||||
m.mk_bool_sort ());
|
||||
m_proxies.push_back (res);
|
||||
|
||||
// -- add the new proxy to proxy eliminator
|
||||
proof_ref pr(m);
|
||||
pr = m.mk_asserted (m.mk_true ());
|
||||
m_elim_proxies_sub.insert (res, m.mk_true (), pr);
|
||||
|
||||
}
|
||||
return m_proxies.get (m_num_proxies++);
|
||||
}
|
||||
|
||||
app* iuc_solver::mk_proxy (expr *v)
|
||||
{
|
||||
{
|
||||
expr *e = v;
|
||||
m.is_not (v, e);
|
||||
if (is_uninterp_const(e)) { return to_app(v); }
|
||||
}
|
||||
|
||||
def_manager &def = m_defs.size () > 0 ? m_defs.back () : m_base_defs;
|
||||
return def.mk_proxy (v);
|
||||
}
|
||||
|
||||
bool iuc_solver::mk_proxies (expr_ref_vector &v, unsigned from)
|
||||
{
|
||||
bool dirty = false;
|
||||
for (unsigned i = from, sz = v.size(); i < sz; ++i) {
|
||||
app *p = mk_proxy (v.get (i));
|
||||
dirty |= (v.get (i) != p);
|
||||
v[i] = p;
|
||||
}
|
||||
return dirty;
|
||||
}
|
||||
|
||||
void iuc_solver::push_bg (expr *e)
|
||||
{
|
||||
if (m_assumptions.size () > m_first_assumption)
|
||||
{ m_assumptions.shrink(m_first_assumption); }
|
||||
m_assumptions.push_back (e);
|
||||
m_first_assumption = m_assumptions.size ();
|
||||
}
|
||||
|
||||
void iuc_solver::pop_bg (unsigned n)
|
||||
{
|
||||
if (n == 0) { return; }
|
||||
|
||||
if (m_assumptions.size () > m_first_assumption) {
|
||||
m_assumptions.shrink(m_first_assumption);
|
||||
}
|
||||
m_first_assumption = m_first_assumption > n ? m_first_assumption - n : 0;
|
||||
m_assumptions.shrink (m_first_assumption);
|
||||
}
|
||||
|
||||
unsigned iuc_solver::get_num_bg () {return m_first_assumption;}
|
||||
|
||||
lbool iuc_solver::check_sat (unsigned num_assumptions, expr * const *assumptions)
|
||||
{
|
||||
// -- remove any old assumptions
|
||||
m_assumptions.shrink(m_first_assumption);
|
||||
|
||||
// -- replace theory literals in background assumptions with proxies
|
||||
mk_proxies (m_assumptions);
|
||||
// -- in case mk_proxies added new literals, they are all background
|
||||
m_first_assumption = m_assumptions.size ();
|
||||
|
||||
m_assumptions.append (num_assumptions, assumptions);
|
||||
m_is_proxied = mk_proxies (m_assumptions, m_first_assumption);
|
||||
|
||||
return set_status (m_solver.check_sat (m_assumptions));
|
||||
}
|
||||
|
||||
lbool iuc_solver::check_sat_cc(const expr_ref_vector &cube,
|
||||
vector<expr_ref_vector> const & clauses) {
|
||||
if (clauses.empty())
|
||||
return check_sat(cube.size(), cube.c_ptr());
|
||||
|
||||
// -- remove any old assumptions
|
||||
m_assumptions.shrink(m_first_assumption);
|
||||
|
||||
// -- replace theory literals in background assumptions with proxies
|
||||
mk_proxies(m_assumptions);
|
||||
// -- in case mk_proxies added new literals, they are all background
|
||||
m_first_assumption = m_assumptions.size();
|
||||
|
||||
m_assumptions.append(cube);
|
||||
m_is_proxied = mk_proxies(m_assumptions, m_first_assumption);
|
||||
|
||||
return set_status (m_solver.check_sat_cc(m_assumptions, clauses));
|
||||
}
|
||||
|
||||
|
||||
app* iuc_solver::def_manager::mk_proxy (expr *v)
|
||||
{
|
||||
app* r;
|
||||
if (m_expr2proxy.find(v, r))
|
||||
return r;
|
||||
|
||||
ast_manager &m = m_parent.m;
|
||||
app* proxy = m_parent.fresh_proxy ();
|
||||
app* def = m.mk_or (m.mk_not (proxy), v);
|
||||
m_defs.push_back (def);
|
||||
m_expr2proxy.insert (v, proxy);
|
||||
m_proxy2def.insert (proxy, def);
|
||||
|
||||
m_parent.assert_expr (def);
|
||||
return proxy;
|
||||
}
|
||||
|
||||
bool iuc_solver::def_manager::is_proxy (app *k, app_ref &def)
|
||||
{
|
||||
app *r = nullptr;
|
||||
bool found = m_proxy2def.find (k, r);
|
||||
def = r;
|
||||
return found;
|
||||
}
|
||||
|
||||
void iuc_solver::def_manager::reset ()
|
||||
{
|
||||
m_expr2proxy.reset ();
|
||||
m_proxy2def.reset ();
|
||||
m_defs.reset ();
|
||||
}
|
||||
|
||||
bool iuc_solver::def_manager::is_proxy_def (expr *v)
|
||||
{
|
||||
// XXX This might not be the most robust way to check
|
||||
return m_defs.contains (v);
|
||||
}
|
||||
|
||||
bool iuc_solver::is_proxy(expr *e, app_ref &def)
|
||||
{
|
||||
if (!is_uninterp_const(e))
|
||||
return false;
|
||||
|
||||
app* a = to_app (e);
|
||||
|
||||
for (int i = m_defs.size (); i-- > 0; )
|
||||
if (m_defs[i].is_proxy (a, def))
|
||||
return true;
|
||||
|
||||
return m_base_defs.is_proxy (a, def);
|
||||
}
|
||||
|
||||
void iuc_solver::collect_statistics (statistics &st) const
|
||||
{
|
||||
m_solver.collect_statistics (st);
|
||||
st.update ("time.iuc_solver.get_iuc", m_iuc_sw.get_seconds());
|
||||
st.update ("time.iuc_solver.get_iuc.hyp_reduce1", m_hyp_reduce1_sw.get_seconds());
|
||||
st.update ("time.iuc_solver.get_iuc.hyp_reduce2", m_hyp_reduce2_sw.get_seconds());
|
||||
st.update ("time.iuc_solver.get_iuc.learn_core", m_learn_core_sw.get_seconds());
|
||||
|
||||
st.update("iuc_solver.num_proxies", m_proxies.size());
|
||||
}
|
||||
|
||||
void iuc_solver::reset_statistics ()
|
||||
{
|
||||
m_iuc_sw.reset();
|
||||
m_hyp_reduce1_sw.reset();
|
||||
m_hyp_reduce2_sw.reset();
|
||||
m_learn_core_sw.reset();
|
||||
}
|
||||
|
||||
void iuc_solver::get_unsat_core (expr_ref_vector &core) {
|
||||
m_solver.get_unsat_core (core);
|
||||
undo_proxies_in_core (core);
|
||||
}
|
||||
|
||||
void iuc_solver::undo_proxies_in_core (expr_ref_vector &r)
|
||||
{
|
||||
app_ref e(m);
|
||||
expr_fast_mark1 bg;
|
||||
for (unsigned i = 0; i < m_first_assumption; ++i) {
|
||||
bg.mark(m_assumptions.get(i));
|
||||
}
|
||||
|
||||
// expand proxies
|
||||
unsigned j = 0;
|
||||
for (expr* rr : r) {
|
||||
// skip background assumptions
|
||||
if (bg.is_marked(rr))
|
||||
continue;
|
||||
|
||||
// -- undo proxies, but only if they were introduced in check_sat
|
||||
if (m_is_proxied && is_proxy(rr, e)) {
|
||||
SASSERT (m.is_or (e));
|
||||
r[j++] = e->get_arg (1);
|
||||
}
|
||||
else {
|
||||
r[j++] = rr;
|
||||
}
|
||||
}
|
||||
r.shrink (j);
|
||||
}
|
||||
|
||||
void iuc_solver::undo_proxies (expr_ref_vector &r)
|
||||
{
|
||||
app_ref e(m);
|
||||
// expand proxies
|
||||
for (unsigned i = 0, sz = r.size (); i < sz; ++i)
|
||||
if (is_proxy(r.get(i), e)) {
|
||||
SASSERT (m.is_or (e));
|
||||
r[i] = e->get_arg (1);
|
||||
}
|
||||
}
|
||||
|
||||
void iuc_solver::elim_proxies (expr_ref_vector &v)
|
||||
{
|
||||
expr_ref f = mk_and (v);
|
||||
scoped_ptr<expr_replacer> rep = mk_expr_simp_replacer (m);
|
||||
rep->set_substitution (&m_elim_proxies_sub);
|
||||
(*rep)(f);
|
||||
v.reset();
|
||||
flatten_and(f, v);
|
||||
}
|
||||
|
||||
void iuc_solver::get_iuc(expr_ref_vector &core)
|
||||
{
|
||||
scoped_watch _t_ (m_iuc_sw);
|
||||
|
||||
typedef obj_hashtable<expr> expr_set;
|
||||
expr_set core_lits;
|
||||
for (unsigned i = m_first_assumption, sz = m_assumptions.size(); i < sz; ++i) {
|
||||
expr *a = m_assumptions.get (i);
|
||||
app_ref def(m);
|
||||
if (is_proxy(a, def)) { core_lits.insert(def.get()); }
|
||||
core_lits.insert (a);
|
||||
}
|
||||
|
||||
if (m_iuc == 0) {
|
||||
// ORIGINAL PDR CODE
|
||||
// AG: deprecated
|
||||
proof_ref pr(m);
|
||||
pr = get_proof ();
|
||||
|
||||
farkas_learner learner_old;
|
||||
learner_old.set_split_literals(m_split_literals);
|
||||
|
||||
learner_old.get_lemmas (pr, core_lits, core);
|
||||
elim_proxies (core);
|
||||
simplify_bounds (core); // XXX potentially redundant
|
||||
}
|
||||
else {
|
||||
// NEW IUC
|
||||
proof_ref res(get_proof(), m);
|
||||
|
||||
// -- old hypothesis reducer while the new one is broken
|
||||
if (m_old_hyp_reducer) {
|
||||
scoped_watch _t_ (m_hyp_reduce1_sw);
|
||||
// AG: deprecated
|
||||
// pre-process proof in order to get a proof which is
|
||||
// better suited for unsat-core-extraction
|
||||
if (m_print_farkas_stats) {
|
||||
iuc_proof iuc_before(m, res.get(), core_lits);
|
||||
verbose_stream() << "\nOld reduce_hypotheses. Before:";
|
||||
iuc_before.dump_farkas_stats();
|
||||
}
|
||||
|
||||
proof_utils::reduce_hypotheses(res);
|
||||
proof_utils::permute_unit_resolution(res);
|
||||
|
||||
if (m_print_farkas_stats) {
|
||||
iuc_proof iuc_after(m, res.get(), core_lits);
|
||||
verbose_stream() << "Old reduce_hypothesis. After:";
|
||||
iuc_after.dump_farkas_stats();
|
||||
}
|
||||
}
|
||||
// -- new hypothesis reducer
|
||||
else
|
||||
{
|
||||
scoped_watch _t_ (m_hyp_reduce2_sw);
|
||||
|
||||
// pre-process proof for better iuc extraction
|
||||
if (m_print_farkas_stats) {
|
||||
iuc_proof iuc_before(m, res.get(), core_lits);
|
||||
verbose_stream() << "\n New hypothesis_reducer. Before:";
|
||||
iuc_before.dump_farkas_stats();
|
||||
}
|
||||
|
||||
proof_ref pr1(m);
|
||||
{
|
||||
scoped_watch _t_ (m_hyp_reduce1_sw);
|
||||
theory_axiom_reducer ta_reducer(m);
|
||||
pr1 = ta_reducer.reduce (res.get());
|
||||
}
|
||||
|
||||
proof_ref pr2(m);
|
||||
{
|
||||
scoped_watch _t_ (m_hyp_reduce2_sw);
|
||||
hypothesis_reducer hyp_reducer(m);
|
||||
pr2 = hyp_reducer.reduce(pr1);
|
||||
}
|
||||
|
||||
res = pr2;
|
||||
|
||||
if (m_print_farkas_stats) {
|
||||
iuc_proof iuc_after(m, res.get(), core_lits);
|
||||
verbose_stream() << "New hypothesis_reducer. After:";
|
||||
iuc_after.dump_farkas_stats();
|
||||
}
|
||||
}
|
||||
|
||||
iuc_proof iuc_pf(m, res, core_lits);
|
||||
|
||||
unsat_core_learner learner(m, iuc_pf);
|
||||
|
||||
unsat_core_plugin* plugin;
|
||||
// -- register iuc plugins
|
||||
switch (m_iuc_arith) {
|
||||
case 0:
|
||||
case 1:
|
||||
plugin =
|
||||
alloc(unsat_core_plugin_farkas_lemma,
|
||||
learner, m_split_literals,
|
||||
(m_iuc_arith == 1) /* use constants from A */);
|
||||
learner.register_plugin(plugin);
|
||||
break;
|
||||
case 2:
|
||||
SASSERT(false && "Broken");
|
||||
plugin = alloc(unsat_core_plugin_farkas_lemma_optimized, learner, m);
|
||||
learner.register_plugin(plugin);
|
||||
break;
|
||||
case 3:
|
||||
plugin = alloc(unsat_core_plugin_farkas_lemma_bounded, learner, m);
|
||||
learner.register_plugin(plugin);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
|
||||
switch (m_iuc) {
|
||||
case 1:
|
||||
// -- iuc based on the lowest cut in the proof
|
||||
plugin = alloc(unsat_core_plugin_lemma, learner);
|
||||
learner.register_plugin(plugin);
|
||||
break;
|
||||
case 2:
|
||||
// -- iuc based on the smallest cut in the proof
|
||||
plugin = alloc(unsat_core_plugin_min_cut, learner, m);
|
||||
learner.register_plugin(plugin);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
scoped_watch _t_ (m_learn_core_sw);
|
||||
// compute interpolating unsat core
|
||||
learner.compute_unsat_core(core);
|
||||
}
|
||||
|
||||
elim_proxies (core);
|
||||
// AG: this should be taken care of by minimizing the iuc cut
|
||||
simplify_bounds (core);
|
||||
}
|
||||
|
||||
IF_VERBOSE(2,
|
||||
verbose_stream () << "IUC Core:\n" << core << "\n";);
|
||||
}
|
||||
|
||||
void iuc_solver::refresh ()
|
||||
{
|
||||
// only refresh in non-pushed state
|
||||
SASSERT (m_defs.empty());
|
||||
expr_ref_vector assertions (m);
|
||||
for (unsigned i = 0, e = m_solver.get_num_assertions(); i < e; ++i) {
|
||||
expr* a = m_solver.get_assertion (i);
|
||||
if (!m_base_defs.is_proxy_def(a)) { assertions.push_back(a); }
|
||||
|
||||
}
|
||||
m_base_defs.reset ();
|
||||
NOT_IMPLEMENTED_YET ();
|
||||
// solver interface does not have a reset method. need to introduce it somewhere.
|
||||
// m_solver.reset ();
|
||||
for (unsigned i = 0, e = assertions.size (); i < e; ++i)
|
||||
{ m_solver.assert_expr(assertions.get(i)); }
|
||||
}
|
||||
|
||||
}
|
||||
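The proxy mechanism above deserves a note: mk_proxy maps a (possibly theory) literal v to a fresh Boolean constant p and asserts the definitional clause (or (not p) v), so that assumption-based solving only ever sees Boolean assumptions; undo_proxies_in_core later recovers v as the second disjunct of that definition. A stand-alone sketch of the same encoding (the names p and def are illustrative, not this commit's API):

    // given an ast_manager m, a solver s and a literal v
    app_ref  p(m.mk_const(symbol("proxy!0"), m.mk_bool_sort()), m); // fresh Boolean
    expr_ref def(m.mk_or(m.mk_not(p), v), m);                       // p implies v
    s.assert_expr(def);
    // pass p as an assumption; if it appears in the unsat core,
    // the original literal is to_app(def)->get_arg(1)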
181 src/muz/spacer/spacer_iuc_solver.h Normal file
@ -0,0 +1,181 @@
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_iuc_solver.h
|
||||
|
||||
Abstract:
|
||||
|
||||
A solver that produces interpolated unsat cores
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#ifndef SPACER_IUC_SOLVER_H_
|
||||
#define SPACER_IUC_SOLVER_H_
|
||||
|
||||
#include"solver/solver.h"
|
||||
#include"ast/expr_substitution.h"
|
||||
#include"util/stopwatch.h"
|
||||
namespace spacer {
|
||||
class iuc_solver : public solver {
|
||||
private:
|
||||
struct def_manager {
|
||||
iuc_solver & m_parent;
|
||||
expr_ref_vector m_defs;
|
||||
obj_map<expr, app*> m_expr2proxy;
|
||||
obj_map<app, app*> m_proxy2def;
|
||||
|
||||
def_manager(iuc_solver &parent) :
|
||||
m_parent(parent), m_defs(m_parent.m)
|
||||
{}
|
||||
|
||||
bool is_proxy(app *k, app_ref &v);
|
||||
app* mk_proxy(expr *v);
|
||||
void reset();
|
||||
bool is_proxy_def(expr *v);
|
||||
|
||||
};
|
||||
|
||||
friend struct def_manager;
|
||||
ast_manager& m;
|
||||
solver& m_solver;
|
||||
app_ref_vector m_proxies;
|
||||
unsigned m_num_proxies;
|
||||
vector<def_manager> m_defs;
|
||||
def_manager m_base_defs;
|
||||
expr_ref_vector m_assumptions;
|
||||
unsigned m_first_assumption;
|
||||
bool m_is_proxied;
|
||||
|
||||
stopwatch m_iuc_sw;
|
||||
stopwatch m_hyp_reduce1_sw;
|
||||
stopwatch m_hyp_reduce2_sw;
|
||||
stopwatch m_learn_core_sw;
|
||||
|
||||
expr_substitution m_elim_proxies_sub;
|
||||
bool m_split_literals;
|
||||
unsigned m_iuc;
|
||||
unsigned m_iuc_arith;
|
||||
bool m_print_farkas_stats;
|
||||
bool m_old_hyp_reducer;
|
||||
bool is_proxy(expr *e, app_ref &def);
|
||||
void undo_proxies_in_core(expr_ref_vector &v);
|
||||
app* mk_proxy(expr *v);
|
||||
app* fresh_proxy();
|
||||
void elim_proxies(expr_ref_vector &v);
|
||||
public:
|
||||
iuc_solver(solver &solver, unsigned iuc, unsigned iuc_arith,
|
||||
bool print_farkas_stats, bool old_hyp_reducer,
|
||||
bool split_literals = false) :
|
||||
m(solver.get_manager()),
|
||||
m_solver(solver),
|
||||
m_proxies(m),
|
||||
m_num_proxies(0),
|
||||
m_base_defs(*this),
|
||||
m_assumptions(m),
|
||||
m_first_assumption(0),
|
||||
m_is_proxied(false),
|
||||
m_elim_proxies_sub(m, false, true),
|
||||
m_split_literals(split_literals),
|
||||
m_iuc(iuc),
|
||||
m_iuc_arith(iuc_arith),
|
||||
m_print_farkas_stats(print_farkas_stats),
|
||||
m_old_hyp_reducer(old_hyp_reducer)
|
||||
{}
|
||||
|
||||
~iuc_solver() override {}
|
||||
|
||||
/* iuc solver specific */
|
||||
virtual void get_iuc(expr_ref_vector &core);
|
||||
void set_split_literals(bool v) { m_split_literals = v; }
|
||||
bool mk_proxies(expr_ref_vector &v, unsigned from = 0);
|
||||
void undo_proxies(expr_ref_vector &v);
|
||||
|
||||
void push_bg(expr *e);
|
||||
void pop_bg(unsigned n);
|
||||
unsigned get_num_bg();
|
||||
|
||||
void get_full_unsat_core(ptr_vector<expr> &core) {
|
||||
expr_ref_vector _core(m);
|
||||
m_solver.get_unsat_core(_core);
|
||||
core.append(_core.size(), _core.c_ptr());
|
||||
}
|
||||
|
||||
/* solver interface */
|
||||
|
||||
solver* translate(ast_manager &m, params_ref const &p) override {
|
||||
return m_solver.translate(m, p);
|
||||
}
|
||||
void updt_params(params_ref const &p) override { m_solver.updt_params(p); }
|
||||
void reset_params(params_ref const &p) override { m_solver.reset_params(p); }
|
||||
const params_ref &get_params() const override { return m_solver.get_params(); }
|
||||
void push_params() override { m_solver.push_params(); }
|
||||
void pop_params() override { m_solver.pop_params(); }
|
||||
void collect_param_descrs(param_descrs &r) override { m_solver.collect_param_descrs(r); }
|
||||
void set_produce_models(bool f) override { m_solver.set_produce_models(f); }
|
||||
void assert_expr_core(expr *t) override { m_solver.assert_expr(t); }
|
||||
void assert_expr_core2(expr *t, expr *a) override { NOT_IMPLEMENTED_YET(); }
|
||||
expr_ref_vector cube(expr_ref_vector&, unsigned) override { return expr_ref_vector(m); }
|
||||
|
||||
void push() override;
|
||||
void pop(unsigned n) override;
|
||||
unsigned get_scope_level() const override { return m_solver.get_scope_level(); }
|
||||
|
||||
lbool check_sat(unsigned num_assumptions, expr * const *assumptions) override;
|
||||
lbool check_sat_cc(const expr_ref_vector &cube, vector<expr_ref_vector> const & clauses) override;
|
||||
void set_progress_callback(progress_callback *callback) override {
|
||||
m_solver.set_progress_callback(callback);
|
||||
}
|
||||
unsigned get_num_assertions() const override { return m_solver.get_num_assertions(); }
|
||||
expr * get_assertion(unsigned idx) const override { return m_solver.get_assertion(idx); }
|
||||
unsigned get_num_assumptions() const override { return m_solver.get_num_assumptions(); }
|
||||
expr * get_assumption(unsigned idx) const override { return m_solver.get_assumption(idx); }
|
||||
std::ostream &display(std::ostream &out, unsigned n, expr* const* es) const override {
|
||||
return m_solver.display(out, n, es);
|
||||
}
|
||||
|
||||
/* check_sat_result interface */
|
||||
|
||||
void collect_statistics(statistics &st) const override ;
|
||||
virtual void reset_statistics();
|
||||
|
||||
void get_unsat_core(expr_ref_vector &r) override;
|
||||
void get_model_core(model_ref &m) override {m_solver.get_model(m);}
|
||||
proof *get_proof() override {return m_solver.get_proof();}
|
||||
std::string reason_unknown() const override { return m_solver.reason_unknown(); }
|
||||
void set_reason_unknown(char const* msg) override { m_solver.set_reason_unknown(msg); }
|
||||
void get_labels(svector<symbol> &r) override { m_solver.get_labels(r); }
|
||||
ast_manager& get_manager() const override { return m; }
|
||||
|
||||
virtual void refresh();
|
||||
|
||||
class scoped_mk_proxy {
|
||||
iuc_solver &m_s;
|
||||
expr_ref_vector &m_v;
|
||||
public:
|
||||
scoped_mk_proxy(iuc_solver &s, expr_ref_vector &v) : m_s(s), m_v(v) {
|
||||
m_s.mk_proxies(m_v);
|
||||
}
|
||||
~scoped_mk_proxy() { m_s.undo_proxies(m_v); }
|
||||
};
|
||||
|
||||
class scoped_bg {
|
||||
iuc_solver &m_s;
|
||||
unsigned m_bg_sz;
|
||||
public:
|
||||
scoped_bg(iuc_solver &s) : m_s(s), m_bg_sz(m_s.get_num_bg()) {}
|
||||
~scoped_bg() {
|
||||
if (m_s.get_num_bg() > m_bg_sz) {
|
||||
m_s.pop_bg(m_s.get_num_bg() - m_bg_sz);
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
}
|
||||
#endif
|
||||
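A sketch of how this wrapper is typically driven (assuming an ast_manager m, an underlying solver base_solver, a background formula and a vector lits of assumption literals; the parameter values are example choices, the calls are the ones declared above):

    // iuc = 1 selects the lowest-cut core, iuc_arith = 1 the farkas plugin
    // using constants from A
    spacer::iuc_solver iuc(*base_solver, 1, 1,
                           /*print_farkas_stats*/ false,
                           /*old_hyp_reducer*/ false);

    iuc.push_bg(background_fml);                      // background assumption
    lbool r = iuc.check_sat(lits.size(), lits.c_ptr());
    if (r == l_false) {
        expr_ref_vector core(m);
        iuc.get_iuc(core);                            // interpolating unsat core over lits
    }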
191 src/muz/spacer/spacer_json.cpp Normal file
@ -0,0 +1,191 @@
/**++
|
||||
Copyright (c) 2017 Microsoft Corporation and Matteo Marescotti
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_json.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
SPACER json marshalling support
|
||||
|
||||
Author:
|
||||
|
||||
Matteo Marescotti
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
|
||||
#include <iomanip>
|
||||
#include "spacer_context.h"
|
||||
#include "spacer_json.h"
|
||||
#include "spacer_util.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
static std::ostream &json_marshal(std::ostream &out, ast *t, ast_manager &m) {
|
||||
|
||||
mk_epp pp = mk_epp(t, m);
|
||||
std::ostringstream ss;
|
||||
ss << pp;
|
||||
out << "\"";
|
||||
for (auto &c:ss.str()) {
|
||||
switch (c) {
|
||||
case '"':
|
||||
out << "\\\"";
|
||||
break;
|
||||
case '\\':
|
||||
out << "\\\\";
|
||||
break;
|
||||
case '\b':
|
||||
out << "\\b";
|
||||
break;
|
||||
case '\f':
|
||||
out << "\\f";
|
||||
break;
|
||||
case '\n':
|
||||
out << "\\n";
|
||||
break;
|
||||
case '\r':
|
||||
out << "\\r";
|
||||
break;
|
||||
case '\t':
|
||||
out << "\\t";
|
||||
break;
|
||||
default:
|
||||
if ('\x00' <= c && c <= '\x1f') {
|
||||
out << "\\u"
|
||||
<< std::hex << std::setw(4) << std::setfill('0') << (int) c;
|
||||
} else {
|
||||
out << c;
|
||||
}
|
||||
}
|
||||
}
|
||||
out << "\"";
|
||||
return out;
|
||||
}
|
||||
|
||||
static std::ostream &json_marshal(std::ostream &out, lemma *l) {
|
||||
out << "{"
|
||||
<< R"("init_level":")" << l->init_level()
|
||||
<< R"(", "level":")" << l->level()
|
||||
<< R"(", "expr":)";
|
||||
json_marshal(out, l->get_expr(), l->get_ast_manager());
|
||||
out << "}";
|
||||
return out;
|
||||
}
|
||||
|
||||
static std::ostream &json_marshal(std::ostream &out, const lemma_ref_vector &lemmas) {
|
||||
|
||||
std::ostringstream ls;
|
||||
for (auto l:lemmas) {
|
||||
ls << ((unsigned)ls.tellp() == 0 ? "" : ",");
|
||||
json_marshal(ls, l);
|
||||
}
|
||||
out << "[" << ls.str() << "]";
|
||||
return out;
|
||||
}
|
||||
|
||||
|
||||
void json_marshaller::register_lemma(lemma *l) {
|
||||
if (l->has_pob()) {
|
||||
m_relations[&*l->get_pob()][l->get_pob()->depth()].push_back(l);
|
||||
}
|
||||
}
|
||||
|
||||
void json_marshaller::register_pob(pob *p) {
|
||||
m_relations[p];
|
||||
}
|
||||
|
||||
void json_marshaller::marshal_lemmas_old(std::ostream &out) const {
|
||||
unsigned pob_id = 0;
|
||||
for (auto &pob_map:m_relations) {
|
||||
std::ostringstream pob_lemmas;
|
||||
for (auto &depth_lemmas : pob_map.second) {
|
||||
pob_lemmas << ((unsigned)pob_lemmas.tellp() == 0 ? "" : ",")
|
||||
<< "\"" << depth_lemmas.first << "\":";
|
||||
json_marshal(pob_lemmas, depth_lemmas.second);
|
||||
}
|
||||
if (pob_lemmas.tellp()) {
|
||||
out << ((unsigned)out.tellp() == 0 ? "" : ",\n");
|
||||
out << "\"" << pob_id << "\":{" << pob_lemmas.str() << "}";
|
||||
}
|
||||
pob_id++;
|
||||
}
|
||||
}
|
||||
void json_marshaller::marshal_lemmas_new(std::ostream &out) const {
|
||||
unsigned pob_id = 0;
|
||||
for (auto &pob_map:m_relations) {
|
||||
std::ostringstream pob_lemmas;
|
||||
pob *n = pob_map.first;
|
||||
unsigned i = 0;
|
||||
for (auto *l : n->lemmas()) {
|
||||
pob_lemmas << ((unsigned)pob_lemmas.tellp() == 0 ? "" : ",")
|
||||
<< "\"" << i++ << "\":";
|
||||
lemma_ref_vector lemmas_vec;
|
||||
lemmas_vec.push_back(l);
|
||||
json_marshal(pob_lemmas, lemmas_vec);
|
||||
}
|
||||
|
||||
if (pob_lemmas.tellp()) {
|
||||
out << ((unsigned)out.tellp() == 0 ? "" : ",\n");
|
||||
out << "\"" << pob_id << "\":{" << pob_lemmas.str() << "}";
|
||||
}
|
||||
pob_id++;
|
||||
}
|
||||
}
|
||||
|
||||
std::ostream &json_marshaller::marshal(std::ostream &out) const {
|
||||
std::ostringstream nodes;
|
||||
std::ostringstream edges;
|
||||
std::ostringstream lemmas;
|
||||
|
||||
if (m_old_style)
|
||||
marshal_lemmas_old(lemmas);
|
||||
else
|
||||
marshal_lemmas_new(lemmas);
|
||||
|
||||
unsigned pob_id = 0;
|
||||
unsigned depth = 0;
|
||||
while (true) {
|
||||
double root_expand_time = m_ctx->get_root().get_expand_time(depth);
|
||||
bool a = false;
|
||||
pob_id = 0;
|
||||
for (auto &pob_map:m_relations) {
|
||||
pob *n = pob_map.first;
|
||||
double expand_time = n->get_expand_time(depth);
|
||||
if (expand_time > 0) {
|
||||
a = true;
|
||||
std::ostringstream pob_expr;
|
||||
json_marshal(pob_expr, n->post(), n->get_ast_manager());
|
||||
|
||||
nodes << ((unsigned)nodes.tellp() == 0 ? "" : ",\n") <<
|
||||
"{\"id\":\"" << depth << n <<
|
||||
"\",\"relative_time\":\"" << expand_time / root_expand_time <<
|
||||
"\",\"absolute_time\":\"" << std::setprecision(2) << expand_time <<
|
||||
"\",\"predicate\":\"" << n->pt().head()->get_name() <<
|
||||
"\",\"expr_id\":\"" << n->post()->get_id() <<
|
||||
"\",\"pob_id\":\"" << pob_id <<
|
||||
"\",\"depth\":\"" << depth <<
|
||||
"\",\"expr\":" << pob_expr.str() << "}";
|
||||
if (n->parent()) {
|
||||
edges << ((unsigned)edges.tellp() == 0 ? "" : ",\n") <<
|
||||
"{\"from\":\"" << depth << n->parent() <<
|
||||
"\",\"to\":\"" << depth << n << "\"}";
|
||||
}
|
||||
}
|
||||
pob_id++;
|
||||
}
|
||||
if (!a) {
|
||||
break;
|
||||
}
|
||||
depth++;
|
||||
}
|
||||
out << "{\n\"nodes\":[\n" << nodes.str() << "\n],\n";
|
||||
out << "\"edges\":[\n" << edges.str() << "\n],\n";
|
||||
out << "\"lemmas\":{\n" << lemmas.str() << "\n}\n}\n";
|
||||
return out;
|
||||
}
|
||||
|
||||
}
|
||||
61 src/muz/spacer/spacer_json.h Normal file
@ -0,0 +1,61 @@
/**++
Copyright (c) 2017 Microsoft Corporation and Matteo Marescotti

Module Name:

    spacer_json.h

Abstract:

    SPACER json marshalling support

Author:

    Matteo Marescotti

Notes:

--*/

#ifndef Z3_SPACER_JSON_H
#define Z3_SPACER_JSON_H

#include<iostream>
#include<map>
#include "util/ref.h"
#include "util/ref_vector.h"

class ast;

class ast_manager;

namespace spacer {

class lemma;
typedef sref_vector<lemma> lemma_ref_vector;
class context;
class pob;


class json_marshaller {
    context *m_ctx;
    bool m_old_style;
    std::map<pob*, std::map<unsigned, lemma_ref_vector>> m_relations;

    void marshal_lemmas_old(std::ostream &out) const;
    void marshal_lemmas_new(std::ostream &out) const;
public:
    json_marshaller(context *ctx, bool old_style = false) :
        m_ctx(ctx), m_old_style(old_style) {}

    void register_lemma(lemma *l);

    void register_pob(pob *p);

    std::ostream &marshal(std::ostream &out) const;
};

}

#endif //Z3_SPACER_JSON_H
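For orientation, the intended call pattern is small (a sketch; the context, pob and lemma objects are assumed to come from a running Spacer instance, and the output path is hypothetical):

    spacer::json_marshaller jm(ctx);            // ctx : spacer::context*
    jm.register_pob(p);                         // p   : spacer::pob*
    jm.register_lemma(l);                       // l   : spacer::lemma* attached to its pob
    std::ofstream out("spacer_trace.json");
    jm.marshal(out);                            // one JSON object: nodes, edges, lemmas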
|
|
@ -99,7 +99,7 @@ void qe_project(ast_manager& m, app_ref_vector& vars, expr_ref& fml, model_ref&
|
|||
);
|
||||
{
|
||||
scoped_no_proof _sp(m);
|
||||
qe::arith_project(*M, arith_vars, fml, map);
|
||||
spacer_qe::arith_project(*M, arith_vars, fml, map);
|
||||
}
|
||||
SASSERT(arith_vars.empty());
|
||||
TRACE("spacer",
|
||||
|
|
|
|||
|
|
@ -138,7 +138,6 @@ void model_evaluator::process_formula(app* e, ptr_vector<expr>& todo, ptr_vector
|
|||
case OP_FALSE:
|
||||
break;
|
||||
case OP_EQ:
|
||||
case OP_IFF:
|
||||
if (args[0] == args[1]) {
|
||||
SASSERT(v);
|
||||
// no-op
|
||||
|
|
@ -634,10 +633,6 @@ void model_evaluator::eval_basic(app* e)
|
|||
set_x(e);
|
||||
}
|
||||
break;
|
||||
case OP_IFF:
|
||||
VERIFY(m.is_iff(e, arg1, arg2));
|
||||
eval_eq(e, arg1, arg2);
|
||||
break;
|
||||
case OP_XOR:
|
||||
VERIFY(m.is_xor(e, arg1, arg2));
|
||||
eval_eq(e, arg1, arg2);
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ Revision History:
|
|||
#include "model/model_smt2_pp.h"
|
||||
#include "tactic/model_converter.h"
|
||||
|
||||
#include "smt/smt_solver.h"
|
||||
namespace spacer {
|
||||
|
||||
class collect_decls_proc {
|
||||
|
|
@ -168,167 +169,30 @@ void inductive_property::display(datalog::rule_manager& rm, ptr_vector<datalog::
|
|||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> manager::get_state_suffixes()
|
||||
{
|
||||
std::vector<std::string> res;
|
||||
res.push_back("_n");
|
||||
return res;
|
||||
}
|
||||
|
||||
manager::manager(unsigned max_num_contexts, ast_manager& manager) :
|
||||
m(manager),
|
||||
m_brwr(m),
|
||||
m_mux(m, get_state_suffixes()),
|
||||
m_background(m.mk_true(), m),
|
||||
m_contexts(m, max_num_contexts),
|
||||
m_contexts2(m, max_num_contexts),
|
||||
m_contexts3(m, max_num_contexts),
|
||||
m_next_unique_num(0)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void manager::add_new_state(func_decl * s)
|
||||
{
|
||||
SASSERT(s->get_arity() == 0); //we currently don't support non-constant states
|
||||
decl_vector vect;
|
||||
manager::manager(ast_manager& manager) : m(manager), m_mux(m) {}
|
||||
|
||||
SASSERT(o_index(0) == 1); //we assume this in the number of retrieved symbols
|
||||
m_mux.create_tuple(s, s->get_arity(), s->get_domain(), s->get_range(), 2, vect);
|
||||
m_o0_preds.push_back(vect[o_index(0)]);
|
||||
}
|
||||
|
||||
func_decl * manager::get_o_pred(func_decl* s, unsigned idx)
|
||||
{
|
||||
func_decl * res = m_mux.try_get_by_prefix(s, o_index(idx));
|
||||
if (res) { return res; }
|
||||
add_new_state(s);
|
||||
res = m_mux.try_get_by_prefix(s, o_index(idx));
|
||||
func_decl * manager::get_o_pred(func_decl* s, unsigned idx) {
|
||||
func_decl * res = m_mux.find_by_decl(s, o_index(idx));
|
||||
if (!res) {
|
||||
m_mux.register_decl(s);
|
||||
res = m_mux.find_by_decl(s, o_index(idx));
|
||||
}
|
||||
SASSERT(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
func_decl * manager::get_n_pred(func_decl* s)
|
||||
{
|
||||
func_decl * res = m_mux.try_get_by_prefix(s, n_index());
|
||||
if (res) { return res; }
|
||||
add_new_state(s);
|
||||
res = m_mux.try_get_by_prefix(s, n_index());
|
||||
func_decl * manager::get_n_pred(func_decl* s) {
|
||||
func_decl * res = m_mux.find_by_decl(s, n_index());
|
||||
if (!res) {
|
||||
m_mux.register_decl(s);
|
||||
res = m_mux.find_by_decl(s, n_index());
|
||||
}
|
||||
SASSERT(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
void manager::mk_model_into_cube(const expr_ref_vector & mdl, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_and(mdl.size(), mdl.c_ptr(), res);
|
||||
}
|
||||
|
||||
void manager::mk_core_into_cube(const expr_ref_vector & core, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_and(core.size(), core.c_ptr(), res);
|
||||
}
|
||||
|
||||
void manager::mk_cube_into_lemma(expr * cube, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_not(cube, res);
|
||||
}
|
||||
|
||||
void manager::mk_lemma_into_cube(expr * lemma, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_not(lemma, res);
|
||||
}
|
||||
|
||||
expr_ref manager::mk_and(unsigned sz, expr* const* exprs)
|
||||
{
|
||||
expr_ref result(m);
|
||||
m_brwr.mk_and(sz, exprs, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref manager::mk_or(unsigned sz, expr* const* exprs)
|
||||
{
|
||||
expr_ref result(m);
|
||||
m_brwr.mk_or(sz, exprs, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref manager::mk_not_and(expr_ref_vector const& conjs)
|
||||
{
|
||||
expr_ref result(m), e(m);
|
||||
expr_ref_vector es(conjs);
|
||||
flatten_and(es);
|
||||
for (unsigned i = 0; i < es.size(); ++i) {
|
||||
m_brwr.mk_not(es[i].get(), e);
|
||||
es[i] = e;
|
||||
}
|
||||
m_brwr.mk_or(es.size(), es.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
void manager::get_or(expr* e, expr_ref_vector& result)
|
||||
{
|
||||
result.push_back(e);
|
||||
for (unsigned i = 0; i < result.size();) {
|
||||
e = result[i].get();
|
||||
if (m.is_or(e)) {
|
||||
result.append(to_app(e)->get_num_args(), to_app(e)->get_args());
|
||||
result[i] = result.back();
|
||||
result.pop_back();
|
||||
} else {
|
||||
++i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool manager::try_get_state_and_value_from_atom(expr * atom0, app *& state, app_ref& value)
|
||||
{
|
||||
if (!is_app(atom0)) {
|
||||
return false;
|
||||
}
|
||||
app * atom = to_app(atom0);
|
||||
expr * arg1;
|
||||
expr * arg2;
|
||||
app * candidate_state;
|
||||
app_ref candidate_value(m);
|
||||
if (m.is_not(atom, arg1)) {
|
||||
if (!is_app(arg1)) {
|
||||
return false;
|
||||
}
|
||||
candidate_state = to_app(arg1);
|
||||
candidate_value = m.mk_false();
|
||||
} else if (m.is_eq(atom, arg1, arg2)) {
|
||||
if (!is_app(arg1) || !is_app(arg2)) {
|
||||
return false;
|
||||
}
|
||||
if (!m_mux.is_muxed(to_app(arg1)->get_decl())) {
|
||||
std::swap(arg1, arg2);
|
||||
}
|
||||
candidate_state = to_app(arg1);
|
||||
candidate_value = to_app(arg2);
|
||||
} else {
|
||||
candidate_state = atom;
|
||||
candidate_value = m.mk_true();
|
||||
}
|
||||
if (!m_mux.is_muxed(candidate_state->get_decl())) {
|
||||
return false;
|
||||
}
|
||||
state = candidate_state;
|
||||
value = candidate_value;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool manager::try_get_state_decl_from_atom(expr * atom, func_decl *& state)
|
||||
{
|
||||
app_ref dummy_value_holder(m);
|
||||
app * s;
|
||||
if (try_get_state_and_value_from_atom(atom, s, dummy_value_holder)) {
|
||||
state = s->get_decl();
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new skolem constant
|
||||
*/
|
||||
|
|
@ -340,22 +204,27 @@ app* mk_zk_const(ast_manager &m, unsigned idx, sort *s) {
|
|||
|
||||
namespace find_zk_const_ns {
|
||||
struct proc {
|
||||
int m_max;
|
||||
app_ref_vector &m_out;
|
||||
proc (app_ref_vector &out) : m_out(out) {}
|
||||
proc (app_ref_vector &out) : m_max(-1), m_out(out) {}
|
||||
void operator() (var const * n) const {}
|
||||
void operator() (app *n) const {
|
||||
if (is_uninterp_const(n) &&
|
||||
n->get_decl()->get_name().str().compare (0, 3, "sk!") == 0) {
|
||||
m_out.push_back (n);
|
||||
void operator() (app *n) {
|
||||
int idx;
|
||||
if (is_zk_const(n, idx)) {
|
||||
m_out.push_back(n);
|
||||
if (idx > m_max) {
|
||||
m_max = idx;
|
||||
}
|
||||
}
|
||||
}
|
||||
void operator() (quantifier const *n) const {}
|
||||
};
|
||||
}
|
||||
|
||||
void find_zk_const(expr *e, app_ref_vector &res) {
|
||||
int find_zk_const(expr *e, app_ref_vector &res) {
|
||||
find_zk_const_ns::proc p(res);
|
||||
for_each_expr (p, e);
|
||||
return p.m_max;
|
||||
}
|
||||
|
||||
namespace has_zk_const_ns {
|
||||
|
|
@ -363,8 +232,8 @@ struct found {};
|
|||
struct proc {
|
||||
void operator() (var const *n) const {}
|
||||
void operator() (app const *n) const {
|
||||
if (is_uninterp_const(n) &&
|
||||
n->get_decl()->get_name().str().compare(0, 3, "sk!") == 0) {
|
||||
int idx;
|
||||
if (is_zk_const(n, idx)) {
|
||||
throw found();
|
||||
}
|
||||
}
|
||||
|
|
@ -384,4 +253,26 @@ bool has_zk_const(expr *e){
|
|||
return false;
|
||||
}
|
||||
|
||||
bool is_zk_const (const app *a, int &n) {
|
||||
if (!is_uninterp_const(a)) return false;
|
||||
|
||||
const symbol &name = a->get_decl()->get_name();
|
||||
if (name.str().compare (0, 3, "sk!") != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
n = std::stoi(name.str().substr(3));
|
||||
return true;
|
||||
}
|
||||
bool sk_lt_proc::operator()(const app *a1, const app *a2) {
|
||||
if (a1 == a2) return false;
|
||||
int n1, n2;
|
||||
bool z1, z2;
|
||||
z1 = is_zk_const(a1, n1);
|
||||
z2 = is_zk_const(a2, n2);
|
||||
if (z1 && z2) return n1 < n2;
|
||||
if (z1 != z2) return z1;
|
||||
return ast_lt_proc()(a1, a2);
|
||||
}
|
||||
|
||||
}
|
||||
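The helpers above fix the naming convention for Skolem constants in quantified Spacer: a constant named sk!<n> is recognized by is_zk_const, which parses n from the suffix, and sk_lt_proc orders such constants by that index, placing them before all other constants. A small illustration of the convention (hypothetical constants, shown as comments only):

    //   is_zk_const(sk!0, n) -> true,  n == 0
    //   is_zk_const(sk!2, n) -> true,  n == 2
    //   is_zk_const(c,    n) -> false
    // hence sk_lt_proc orders: sk!0 < sk!2 < c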
|
|
|
|||
|
|
@ -13,6 +13,7 @@ Abstract:
|
|||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-25.
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
|
|
@ -34,12 +35,10 @@ Revision History:
|
|||
#include "muz/spacer/spacer_util.h"
|
||||
#include "muz/spacer/spacer_sym_mux.h"
|
||||
#include "muz/spacer/spacer_farkas_learner.h"
|
||||
#include "muz/spacer/spacer_smt_context_manager.h"
|
||||
#include "muz/base/dl_rule.h"
|
||||
|
||||
namespace smt {
|
||||
class context;
|
||||
}
|
||||
#include "solver/solver.h"
|
||||
#include "solver/solver_pool.h"
|
||||
namespace smt {class context;}
|
||||
|
||||
namespace spacer {
|
||||
|
||||
|
|
@ -67,280 +66,74 @@ public:
|
|||
m_relation_info(relations) {}
|
||||
|
||||
std::string to_string() const;
|
||||
|
||||
expr_ref to_expr() const;
|
||||
|
||||
void to_model(model_ref& md) const;
|
||||
|
||||
void display(datalog::rule_manager& rm, ptr_vector<datalog::rule> const& rules, std::ostream& out) const;
|
||||
void display(datalog::rule_manager& rm,
|
||||
ptr_vector<datalog::rule> const& rules,
|
||||
std::ostream& out) const;
|
||||
};
|
||||
|
||||
class manager {
|
||||
ast_manager& m;
|
||||
|
||||
mutable bool_rewriter m_brwr;
|
||||
|
||||
// manager of multiplexed names
|
||||
sym_mux m_mux;
|
||||
expr_ref m_background;
|
||||
decl_vector m_o0_preds;
|
||||
spacer::smt_context_manager m_contexts;
|
||||
spacer::smt_context_manager m_contexts2;
|
||||
spacer::smt_context_manager m_contexts3;
|
||||
|
||||
/** whenever we need an unique number, we get this one and increase */
|
||||
unsigned m_next_unique_num;
|
||||
|
||||
|
||||
static std::vector<std::string> get_state_suffixes();
|
||||
|
||||
unsigned n_index() const { return 0; }
|
||||
unsigned o_index(unsigned i) const { return i + 1; }
|
||||
|
||||
void add_new_state(func_decl * s);
|
||||
|
||||
public:
|
||||
manager(unsigned max_num_contexts, ast_manager & manager);
|
||||
manager(ast_manager & manager);
|
||||
|
||||
ast_manager& get_manager() const { return m; }
|
||||
bool_rewriter& get_brwr() const { return m_brwr; }
|
||||
|
||||
expr_ref mk_and(unsigned sz, expr* const* exprs);
|
||||
expr_ref mk_and(expr_ref_vector const& exprs)
|
||||
{
|
||||
return mk_and(exprs.size(), exprs.c_ptr());
|
||||
}
|
||||
expr_ref mk_and(expr* a, expr* b)
|
||||
{
|
||||
expr* args[2] = { a, b };
|
||||
return mk_and(2, args);
|
||||
}
|
||||
expr_ref mk_or(unsigned sz, expr* const* exprs);
|
||||
expr_ref mk_or(expr_ref_vector const& exprs)
|
||||
{
|
||||
return mk_or(exprs.size(), exprs.c_ptr());
|
||||
}
|
||||
|
||||
expr_ref mk_not_and(expr_ref_vector const& exprs);
|
||||
|
||||
void get_or(expr* e, expr_ref_vector& result);
|
||||
// management of mux names
|
||||
|
||||
//"o" predicates stand for the old states and "n" for the new states
|
||||
func_decl * get_o_pred(func_decl * s, unsigned idx);
|
||||
func_decl * get_n_pred(func_decl * s);
|
||||
|
||||
/**
|
||||
Marks symbol as non-model which means it will not appear in models collected by
|
||||
get_state_cube_from_model function.
|
||||
This is to take care of auxiliary symbols introduced by the disjunction relations
|
||||
to relativize lemmas coming from disjuncts.
|
||||
*/
|
||||
void mark_as_non_model(func_decl * p)
|
||||
{
|
||||
m_mux.mark_as_non_model(p);
|
||||
}
|
||||
|
||||
|
||||
func_decl * const * begin_o0_preds() const { return m_o0_preds.begin(); }
|
||||
func_decl * const * end_o0_preds() const { return m_o0_preds.end(); }
|
||||
|
||||
bool is_state_pred(func_decl * p) const { return m_mux.is_muxed(p); }
|
||||
func_decl * to_o0(func_decl * p) { return m_mux.conv(m_mux.get_primary(p), 0, o_index(0)); }
|
||||
|
||||
bool is_o(func_decl * p, unsigned idx) const
|
||||
{
|
||||
return m_mux.has_index(p, o_index(idx));
|
||||
}
|
||||
void get_o_index(func_decl* p, unsigned& idx) const
|
||||
{
|
||||
m_mux.try_get_index(p, idx);
|
||||
SASSERT(idx != n_index());
|
||||
idx--; // m_mux has indices starting at 1
|
||||
}
|
||||
bool is_o(expr* e, unsigned idx) const
|
||||
{
|
||||
return is_app(e) && is_o(to_app(e)->get_decl(), idx);
|
||||
}
|
||||
bool is_o(func_decl * p) const
|
||||
{
|
||||
unsigned idx;
|
||||
return m_mux.try_get_index(p, idx) && idx != n_index();
|
||||
}
|
||||
bool is_o(expr* e) const
|
||||
{
|
||||
return is_app(e) && is_o(to_app(e)->get_decl());
|
||||
}
|
||||
bool is_n(func_decl * p) const
|
||||
{
|
||||
return m_mux.has_index(p, n_index());
|
||||
}
|
||||
bool is_n(expr* e) const
|
||||
{
|
||||
return is_app(e) && is_n(to_app(e)->get_decl());
|
||||
}
|
||||
|
||||
/** true if p should not appear in models propagated into child relations */
|
||||
bool is_non_model_sym(func_decl * p) const
|
||||
{ return m_mux.is_non_model_sym(p); }
|
||||
|
||||
|
||||
/** true if f doesn't contain any n predicates */
|
||||
bool is_o_formula(expr * f) const
|
||||
{
|
||||
return !m_mux.contains(f, n_index());
|
||||
}
|
||||
|
||||
/** true if f contains only o state preds of index o_idx */
|
||||
bool is_o_formula(expr * f, unsigned o_idx) const
|
||||
{
|
||||
return m_mux.is_homogenous_formula(f, o_index(o_idx));
|
||||
}
|
||||
/** true if f doesn't contain any o predicates */
|
||||
bool is_n_formula(expr * f) const
|
||||
{
|
||||
return m_mux.is_homogenous_formula(f, n_index());
|
||||
}
|
||||
{return m_mux.is_homogenous_formula(f, n_index());}
|
||||
|
||||
func_decl * o2n(func_decl * p, unsigned o_idx) const
|
||||
{
|
||||
return m_mux.conv(p, o_index(o_idx), n_index());
|
||||
}
|
||||
{return m_mux.shift_decl(p, o_index(o_idx), n_index());}
|
||||
func_decl * o2o(func_decl * p, unsigned src_idx, unsigned tgt_idx) const
|
||||
{
|
||||
return m_mux.conv(p, o_index(src_idx), o_index(tgt_idx));
|
||||
}
|
||||
{return m_mux.shift_decl(p, o_index(src_idx), o_index(tgt_idx));}
|
||||
func_decl * n2o(func_decl * p, unsigned o_idx) const
|
||||
{
|
||||
return m_mux.conv(p, n_index(), o_index(o_idx));
|
||||
}
|
||||
{return m_mux.shift_decl(p, n_index(), o_index(o_idx));}
|
||||
|
||||
void formula_o2n(expr * f, expr_ref & result, unsigned o_idx, bool homogenous = true) const
|
||||
{ m_mux.conv_formula(f, o_index(o_idx), n_index(), result, homogenous); }
|
||||
void formula_o2n(expr * f, expr_ref & result, unsigned o_idx,
|
||||
bool homogenous = true) const
|
||||
{m_mux.shift_expr(f, o_index(o_idx), n_index(), result, homogenous);}
|
||||
|
||||
void formula_n2o(expr * f, expr_ref & result, unsigned o_idx, bool homogenous = true) const
|
||||
{ m_mux.conv_formula(f, n_index(), o_index(o_idx), result, homogenous); }
|
||||
void formula_n2o(expr * f, expr_ref & result, unsigned o_idx,
|
||||
bool homogenous = true) const
|
||||
{m_mux.shift_expr(f, n_index(), o_index(o_idx), result, homogenous);}
|
||||
|
||||
void formula_n2o(unsigned o_idx, bool homogenous, expr_ref & result) const
|
||||
{ m_mux.conv_formula(result.get(), n_index(), o_index(o_idx), result, homogenous); }
|
||||
{m_mux.shift_expr(result.get(), n_index(), o_index(o_idx),
|
||||
result, homogenous);}
|
||||
|
||||
void formula_o2o(expr * src, expr_ref & tgt, unsigned src_idx, unsigned tgt_idx, bool homogenous = true) const
|
||||
{ m_mux.conv_formula(src, o_index(src_idx), o_index(tgt_idx), tgt, homogenous); }
|
||||
void formula_o2o(expr * src, expr_ref & tgt, unsigned src_idx,
|
||||
unsigned tgt_idx, bool homogenous = true) const
|
||||
{m_mux.shift_expr(src, o_index(src_idx), o_index(tgt_idx),
|
||||
tgt, homogenous);}
|
||||
|
||||
/**
|
||||
Return true if all state symbols which e contains are of one kind (either "n" or one of "o").
|
||||
*/
|
||||
bool is_homogenous_formula(expr * e) const
|
||||
{
|
||||
return m_mux.is_homogenous_formula(e);
|
||||
}
|
||||
|
||||
/**
|
||||
Collect indices used in expression.
|
||||
*/
|
||||
void collect_indices(expr* e, unsigned_vector& indices) const
|
||||
{
|
||||
m_mux.collect_indices(e, indices);
|
||||
}
|
||||
|
||||
/**
|
||||
Collect used variables of each index.
|
||||
*/
|
||||
void collect_variables(expr* e, vector<ptr_vector<app> >& vars) const
|
||||
{
|
||||
m_mux.collect_variables(e, vars);
|
||||
}
|
||||
|
||||
/**
|
||||
Return true iff both s1 and s2 are either "n" or "o" of the same index.
|
||||
If one (or both) of them are not state symbol, return false.
|
||||
*/
|
||||
bool have_different_state_kinds(func_decl * s1, func_decl * s2) const
|
||||
{
|
||||
unsigned i1, i2;
|
||||
return m_mux.try_get_index(s1, i1) && m_mux.try_get_index(s2, i2) && i1 != i2;
|
||||
}
|
||||
|
||||
/**
|
||||
Increase indexes of state symbols in formula by dist.
|
||||
The 'N' index becomes 'O' index with number dist-1.
|
||||
*/
|
||||
void formula_shift(expr * src, expr_ref & tgt, unsigned dist) const
|
||||
{
|
||||
SASSERT(n_index() == 0);
|
||||
SASSERT(o_index(0) == 1);
|
||||
m_mux.shift_formula(src, dist, tgt);
|
||||
}
|
||||
|
||||
void mk_model_into_cube(const expr_ref_vector & mdl, expr_ref & res);
|
||||
void mk_core_into_cube(const expr_ref_vector & core, expr_ref & res);
|
||||
void mk_cube_into_lemma(expr * cube, expr_ref & res);
|
||||
void mk_lemma_into_cube(expr * lemma, expr_ref & res);
|
||||
|
||||
/**
|
||||
Remove from vec all atoms that do not have an "o" state.
|
||||
The order of elements in vec may change.
|
||||
An assumption is that atoms having "o" state of given index
|
||||
do not have "o" states of other indexes or "n" states.
|
||||
*/
|
||||
void filter_o_atoms(expr_ref_vector& vec, unsigned o_idx) const
|
||||
{ m_mux.filter_idx(vec, o_index(o_idx)); }
|
||||
void filter_n_atoms(expr_ref_vector& vec) const
|
||||
{ m_mux.filter_idx(vec, n_index()); }
|
||||
|
||||
/**
|
||||
Partition literals into o_lits and others.
|
||||
*/
|
||||
void partition_o_atoms(expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other,
|
||||
unsigned o_idx) const
|
||||
{
|
||||
m_mux.partition_o_idx(lits, o_lits, other, o_index(o_idx));
|
||||
}
|
||||
|
||||
void filter_out_non_model_atoms(expr_ref_vector& vec) const
|
||||
{ m_mux.filter_non_model_lits(vec); }
|
||||
|
||||
bool try_get_state_and_value_from_atom(expr * atom, app *& state, app_ref& value);
|
||||
bool try_get_state_decl_from_atom(expr * atom, func_decl *& state);
|
||||
|
||||
|
||||
std::string pp_model(const model_core & mdl) const
|
||||
{ return m_mux.pp_model(mdl); }
|
||||
|
||||
|
||||
void set_background(expr* b) { m_background = b; }
|
||||
|
||||
expr* get_background() const { return m_background; }
|
||||
|
||||
unsigned get_unique_num() { return m_next_unique_num++; }
|
||||
|
||||
solver* mk_fresh() {return m_contexts.mk_fresh();}
|
||||
smt_params& fparams() { return m_contexts.fparams(); }
|
||||
solver* mk_fresh2() {return m_contexts2.mk_fresh();}
|
||||
smt_params &fparams2() { return m_contexts2.fparams(); }
|
||||
solver* mk_fresh3() {return m_contexts3.mk_fresh();}
|
||||
smt_params &fparams3() {return m_contexts3.fparams();}
|
||||
|
||||
|
||||
|
||||
void collect_statistics(statistics& st) const
|
||||
{
|
||||
m_contexts.collect_statistics(st);
|
||||
m_contexts2.collect_statistics(st);
|
||||
m_contexts3.collect_statistics(st);
|
||||
}
|
||||
|
||||
void reset_statistics()
|
||||
{
|
||||
m_contexts.reset_statistics();
|
||||
m_contexts2.reset_statistics();
|
||||
m_contexts3.reset_statistics();
|
||||
}
|
||||
};
|
||||
|
||||
/** Skolem constants for quantified spacer */
|
||||
app* mk_zk_const (ast_manager &m, unsigned idx, sort *s);
|
||||
void find_zk_const(expr* e, app_ref_vector &out);
|
||||
int find_zk_const(expr* e, app_ref_vector &out);
|
||||
inline int find_zk_const(expr_ref_vector const &v, app_ref_vector &out)
|
||||
{return find_zk_const (mk_and(v), out);}
|
||||
|
||||
bool has_zk_const(expr* e);
|
||||
bool is_zk_const (const app *a, int &n);
|
||||
|
||||
struct sk_lt_proc {bool operator()(const app* a1, const app* a2);};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
|||
102 src/muz/spacer/spacer_mbc.cpp Normal file
@ -0,0 +1,102 @@
#include <climits>
|
||||
|
||||
#include "muz/spacer/spacer_mbc.h"
|
||||
#include "ast/rewriter/rewriter.h"
|
||||
#include "ast/rewriter/rewriter_def.h"
|
||||
#include "ast/rewriter/th_rewriter.h"
|
||||
#include "ast/scoped_proof.h"
|
||||
#include "model/model_evaluator.h"
|
||||
|
||||
|
||||
namespace spacer {
|
||||
|
||||
mbc::mbc(ast_manager &m) : m(m) {}
|
||||
|
||||
namespace {
|
||||
class mbc_rewriter_cfg : public default_rewriter_cfg {
|
||||
|
||||
ast_manager &m;
|
||||
const mbc::partition_map &m_pmap;
|
||||
obj_map<expr,expr*> &m_subs;
|
||||
model &m_mdl;
|
||||
model_evaluator m_mev;
|
||||
vector<expr_ref_vector> &m_parts;
|
||||
unsigned m_current_part;
|
||||
|
||||
public:
|
||||
mbc_rewriter_cfg(ast_manager &m, const mbc::partition_map &pmap,
|
||||
obj_map<expr,expr*> &subs,
|
||||
model &mdl, vector<expr_ref_vector> &parts) :
|
||||
m(m), m_pmap(pmap), m_subs(subs), m_mdl(mdl), m_mev(m_mdl),
|
||||
m_parts(parts), m_current_part(UINT_MAX)
|
||||
{m_mev.set_model_completion(true);}
|
||||
|
||||
bool get_subst(expr *s, expr * & t, proof * & t_pr) {
|
||||
if (!is_app(s)) return false;
|
||||
unsigned part = UINT_MAX;
|
||||
|
||||
// not in partition map
|
||||
if (!m_pmap.find (to_app(s)->get_decl(), part)) return false;
|
||||
|
||||
// first part element, remember it
|
||||
if (!found_partition()) {
|
||||
set_partition(part);
|
||||
return false;
|
||||
}
|
||||
|
||||
// already in our substitution map
|
||||
expr *tmp = nullptr;
|
||||
if (m_subs.find(s, tmp)) {
|
||||
t = tmp;
|
||||
return true;
|
||||
}
|
||||
|
||||
// decide value based on model
|
||||
expr_ref val(m);
|
||||
|
||||
// eval in the model
|
||||
m_mev.eval(s, val, true);
|
||||
|
||||
// store decided equality (also keeps ref to s and val)
|
||||
m_parts[part].push_back(m.mk_eq(s, val));
|
||||
// store substitution
|
||||
m_subs.insert(s, val);
|
||||
t = val;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void reset() {reset_partition();};
|
||||
void reset_partition() {m_current_part = UINT_MAX;}
|
||||
unsigned partition() {return m_current_part;}
|
||||
bool found_partition() {return m_current_part < UINT_MAX;}
|
||||
void set_partition(unsigned v) {m_current_part = v;}
|
||||
};
|
||||
}
|
||||
|
||||
void mbc::operator()(const partition_map &pmap, expr_ref_vector &lits,
|
||||
model &mdl, vector<expr_ref_vector> &res) {
|
||||
scoped_no_proof _sp (m);
|
||||
|
||||
obj_map<expr,expr*> subs;
|
||||
mbc_rewriter_cfg cfg(m, pmap, subs, mdl, res);
|
||||
rewriter_tpl<mbc_rewriter_cfg> rw(m, false, cfg);
|
||||
th_rewriter thrw(m);
|
||||
|
||||
for (auto *lit : lits) {
|
||||
expr_ref new_lit(m);
|
||||
rw.reset();
|
||||
rw(lit, new_lit);
|
||||
thrw(new_lit);
|
||||
if (cfg.found_partition()) {
|
||||
SASSERT(cfg.partition() < res.size());
|
||||
res[cfg.partition()].push_back(new_lit);
|
||||
}
|
||||
}
|
||||
|
||||
TRACE("mbc", tout << "Input: " << lits << "\n"
|
||||
<< "Output: \n";
|
||||
for (auto &vec : res) tout << vec << "\n==================\n";);
|
||||
}
|
||||
|
||||
}
|
||||
45 src/muz/spacer/spacer_mbc.h Normal file
@ -0,0 +1,45 @@
/*++
Copyright (c) 2018 Arie Gurfinkel

Module Name:

    spacer_mbc.h

Abstract:

    Model-Based Cartesian Decomposition

Author:

    Arie Gurfinkel

Revision History:

--*/

#ifndef _SPACER_MBC_H_
#define _SPACER_MBC_H_

#include "ast/ast.h"
#include "util/obj_hashtable.h"
#include "model/model.h"

namespace spacer {

class mbc {
    ast_manager &m;
public:
    mbc(ast_manager &m);

    typedef obj_map<func_decl, unsigned> partition_map;

    /**
       \Brief Model Based Cartesian projection of lits
    */
    void operator()(const partition_map &pmap, expr_ref_vector &lits, model &mdl,
                    vector<expr_ref_vector> &res);
};

}
#endif
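A sketch of how the decomposition is invoked (the partition indices, literal vector and model are assumptions of the example; the operator() signature is the one declared above, and res must have one slot per partition):

    spacer::mbc::partition_map pmap;
    pmap.insert(x_decl, 0);                     // symbols owned by partition 0
    pmap.insert(z_decl, 1);                     // symbols owned by partition 1

    vector<expr_ref_vector> parts;
    parts.push_back(expr_ref_vector(m));
    parts.push_back(expr_ref_vector(m));

    spacer::mbc mbc(m);
    mbc(pmap, lits, mdl, parts);                // lits split across parts, glued by model values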
375 src/muz/spacer/spacer_pdr.cpp Normal file
@ -0,0 +1,375 @@
/**++
|
||||
Copyright (c) 2018 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_pdr.h
|
||||
|
||||
Abstract:
|
||||
|
||||
SPACER gPDR strategy implementation
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Based on muz/pdr
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#include "muz/spacer/spacer_pdr.h"
|
||||
#include "muz/base/dl_context.h"
|
||||
#include "muz/spacer/spacer_mbc.h"
|
||||
|
||||
namespace spacer {
|
||||
model_node::model_node(model_node* parent, class pob *pob):
|
||||
m_pob(pob), m_parent(parent), m_next(nullptr), m_prev(nullptr),
|
||||
m_orig_level(m_pob->level()), m_depth(0),
|
||||
m_closed(false) {
|
||||
SASSERT(m_pob);
|
||||
if (m_parent) m_parent->add_child(this);
|
||||
}
|
||||
|
||||
void model_node::add_child(model_node* kid) {
|
||||
m_children.push_back(kid);
|
||||
SASSERT(level() == kid->level() + 1);
|
||||
SASSERT(level() > 0);
|
||||
kid->m_depth = m_depth + 1;
|
||||
if (is_closed()) set_open();
|
||||
}
|
||||
|
||||
unsigned model_node::index_in_parent() const {
|
||||
if (!m_parent) return 0;
|
||||
for (unsigned i = 0, sz = m_parent->children().size(); i < sz; ++i) {
|
||||
if (this == m_parent->children().get(i)) return i;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void model_node::check_pre_closed() {
|
||||
for (auto *kid : m_children) {if (kid->is_open()) return;}
|
||||
|
||||
set_pre_closed();
|
||||
model_node* p = m_parent;
|
||||
while (p && p->is_1closed()) {
|
||||
p->set_pre_closed();
|
||||
p = p->parent();
|
||||
}
|
||||
}
|
||||
void model_node::set_open() {
|
||||
SASSERT(m_closed);
|
||||
m_closed = false;
|
||||
model_node* p = parent();
|
||||
while (p && p->is_closed()) {
|
||||
p->m_closed = false;
|
||||
p = p->parent();
|
||||
}
|
||||
}
|
||||
|
||||
void model_node::detach(model_node*& qhead) {
|
||||
SASSERT(in_queue());
|
||||
SASSERT(children().empty());
|
||||
if (this == m_next) {
|
||||
SASSERT(m_prev == this);
|
||||
SASSERT(this == qhead);
|
||||
qhead = nullptr;
|
||||
}
|
||||
else {
|
||||
m_next->m_prev = m_prev;
|
||||
m_prev->m_next = m_next;
|
||||
if (this == qhead) qhead = m_next;
|
||||
}
|
||||
|
||||
// detach
|
||||
m_prev = nullptr;
|
||||
m_next = nullptr;
|
||||
}
|
||||
|
||||
|
||||
// insert node n after this in the queue
|
||||
// requires: this is in a queue or this == n
|
||||
void model_node::insert_after(model_node* n) {
|
||||
SASSERT(this == n || in_queue());
|
||||
SASSERT(n);
|
||||
SASSERT(!n->in_queue());
|
||||
if (this == n) {
|
||||
m_next = n;
|
||||
m_prev = n;
|
||||
}
|
||||
else {
|
||||
n->m_next = m_next;
|
||||
m_next->m_prev = n;
|
||||
m_next = n;
|
||||
n->m_prev = this;
|
||||
}
|
||||
}
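The queue discipline used by detach() and insert_after() is easy to get wrong, so here is a small self-contained sketch, in ordinary C++ rather than spacer code, of the same intrusive circular-queue invariants: a node is in a queue iff both links are set, a singleton queue is a self-loop, and detaching restores both links to null.

    // Minimal standalone sketch of the intrusive circular queue used by model_node.
    struct qnode {
        qnode *next = nullptr, *prev = nullptr;
        bool in_queue() const { return next && prev; }

        // insert n right after this; requires this to be in a queue, or this == n
        void insert_after(qnode *n) {
            if (this == n) { next = prev = n; return; }   // first element: self-loop
            n->next = next; next->prev = n;
            next = n; n->prev = this;
        }

        // remove this node, updating the queue head if necessary
        void detach(qnode *&head) {
            if (next == this) { head = nullptr; }         // last remaining element
            else {
                next->prev = prev; prev->next = next;
                if (this == head) head = next;
            }
            next = prev = nullptr;                        // mark as not in a queue
        }
    };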
|
||||
|
||||
void model_search::reset() {
|
||||
if (m_root) {
|
||||
erase_children(*m_root, false);
|
||||
remove_node(m_root, false);
|
||||
dealloc(m_root);
|
||||
m_root = nullptr;
|
||||
}
|
||||
m_cache.reset();
|
||||
}
|
||||
|
||||
model_node* model_search::pop_front() {
|
||||
model_node *res = m_qhead;
|
||||
if (res) {
|
||||
res->detach(m_qhead);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void model_search::add_leaf(model_node* _n) {
|
||||
model_node& n = *_n;
|
||||
SASSERT(n.children().empty());
|
||||
model_nodes ns;
|
||||
model_nodes& nodes = cache(n).insert_if_not_there2(n.post(), ns)->get_data().m_value;
|
||||
if (nodes.contains(&n)) return;
|
||||
|
||||
nodes.push_back(_n);
|
||||
if (nodes.size() == 1) {
|
||||
SASSERT(n.is_open());
|
||||
enqueue_leaf(n);
|
||||
}
|
||||
else {
|
||||
n.set_pre_closed();
|
||||
}
|
||||
}
|
||||
|
||||
void model_search::enqueue_leaf(model_node& n) {
|
||||
SASSERT(n.is_open());
|
||||
SASSERT(!n.in_queue());
|
||||
// queue is empty, initialize it with n
|
||||
if (!m_qhead) {
|
||||
m_qhead = &n;
|
||||
m_qhead->insert_after(m_qhead);
|
||||
}
|
||||
// insert n after m_qhead
|
||||
else if (m_bfs) {
|
||||
m_qhead->insert_after(&n);
|
||||
}
|
||||
// insert n after m_qhead()->next()
|
||||
else {
|
||||
m_qhead->next()->insert_after(&n);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void model_search::set_root(model_node* root) {
|
||||
reset();
|
||||
m_root = root;
|
||||
SASSERT(m_root);
|
||||
SASSERT(m_root->children().empty());
|
||||
add_leaf(root);
|
||||
}
|
||||
|
||||
void model_search::backtrack_level(bool uses_level, model_node& n) {
|
||||
SASSERT(m_root);
|
||||
if (uses_level) {NOT_IMPLEMENTED_YET();}
|
||||
if (uses_level && m_root->level() > n.level()) {
|
||||
n.increase_level();
|
||||
enqueue_leaf(n);
|
||||
}
|
||||
else {
|
||||
model_node* p = n.parent();
|
||||
if (p) {
|
||||
erase_children(*p, true);
|
||||
enqueue_leaf(*p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
obj_map<expr, ptr_vector<model_node> >& model_search::cache(model_node const& n) {
|
||||
unsigned l = n.orig_level();
|
||||
if (l >= m_cache.size()) m_cache.resize(l + 1);
|
||||
return m_cache[l];
|
||||
}
|
||||
|
||||
void model_search::erase_children(model_node& n, bool backtrack) {
|
||||
ptr_vector<model_node> todo, nodes;
|
||||
todo.append(n.children());
|
||||
// detach n from queue
|
||||
if (n.in_queue()) n.detach(m_qhead);
|
||||
n.reset_children();
|
||||
while (!todo.empty()) {
|
||||
model_node* m = todo.back();
|
||||
todo.pop_back();
|
||||
nodes.push_back(m);
|
||||
todo.append(m->children());
|
||||
remove_node(m, backtrack);
|
||||
}
|
||||
std::for_each(nodes.begin(), nodes.end(), delete_proc<model_node>());
|
||||
}
|
||||
|
||||
// removes node from the search tree and from the cache
|
||||
void model_search::remove_node(model_node* _n, bool backtrack) {
|
||||
model_node& n = *_n;
|
||||
model_nodes& nodes = cache(n).find(n.post());
|
||||
nodes.erase(_n);
|
||||
if (n.in_queue()) n.detach(m_qhead);
|
||||
// TBD: siblings would also fail if n is not a goal.
|
||||
if (!nodes.empty() && backtrack &&
|
||||
nodes[0]->children().empty() && nodes[0]->is_closed()) {
|
||||
model_node* n1 = nodes[0];
|
||||
n1->set_open();
|
||||
enqueue_leaf(*n1);
|
||||
}
|
||||
|
||||
if (nodes.empty()) cache(n).remove(n.post());
|
||||
}
|
||||
|
||||
|
||||
lbool context::gpdr_solve_core() {
|
||||
scoped_watch _w_(m_solve_watch);
|
||||
//if there is no query predicate, abort
|
||||
if (!m_rels.find(m_query_pred, m_query)) { return l_false; }
|
||||
|
||||
model_search ms(m_pdr_bfs);
|
||||
unsigned lvl = 0;
|
||||
unsigned max_level = m_max_level;
|
||||
for (lvl = 0; lvl < max_level; ++lvl) {
|
||||
checkpoint();
|
||||
IF_VERBOSE(1,verbose_stream() << "GPDR Entering level "<< lvl << "\n";);
|
||||
STRACE("spacer.expand-add", tout << "\n* LEVEL " << lvl << "\n";);
|
||||
m_expanded_lvl = infty_level();
|
||||
m_stats.m_max_query_lvl = lvl;
|
||||
if (gpdr_check_reachability(lvl, ms)) {return l_true;}
|
||||
if (lvl > 0) {
|
||||
if (propagate(m_expanded_lvl, lvl, UINT_MAX)) {return l_false;}
|
||||
}
|
||||
}
|
||||
|
||||
// communicate failure to datalog::context
|
||||
if (m_context) {
|
||||
m_context->set_status(datalog::BOUNDED);
|
||||
}
|
||||
return l_undef;
|
||||
}
|
||||
|
||||
bool context::gpdr_check_reachability(unsigned lvl, model_search &ms) {
|
||||
pob_ref root_pob = m_query->mk_pob(nullptr, lvl, 0, m.mk_true());
|
||||
model_node *root_node = alloc(model_node, nullptr, root_pob.get());
|
||||
|
||||
ms.set_root(root_node);
|
||||
pob_ref_buffer new_pobs;
|
||||
|
||||
while (model_node *node = ms.pop_front()) {
|
||||
IF_VERBOSE(2, verbose_stream() << "Expand node: "
|
||||
<< node->level() << "\n";);
|
||||
new_pobs.reset();
|
||||
checkpoint();
|
||||
pred_transformer &pt = node->pt();
|
||||
|
||||
// check reachable cache
|
||||
if (pt.is_must_reachable(node->pob()->post(), nullptr)) {
|
||||
TRACE("spacer",
|
||||
tout << "must-reachable: " << pt.head()->get_name() << " level: "
|
||||
<< node->level() << " depth: " << node->depth () << "\n";
|
||||
tout << mk_pp(node->pob()->post(), m) << "\n";);
|
||||
|
||||
node->set_closed();
|
||||
if (node == root_node) return true;
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (expand_pob(*node->pob(), new_pobs)){
|
||||
case l_true:
|
||||
node->set_closed();
|
||||
if (node == root_node) return true;
|
||||
break;
|
||||
case l_false:
|
||||
ms.backtrack_level(false, *node);
|
||||
if (node == root_node) return false;
|
||||
break;
|
||||
case l_undef:
|
||||
SASSERT(!new_pobs.empty());
|
||||
for (auto pob : new_pobs) {
|
||||
TRACE("spacer_pdr",
|
||||
tout << "looking at pob at level " << pob->level() << " "
|
||||
<< mk_pp(pob->post(), m) << "\n";);
|
||||
if (pob != node->pob())
|
||||
ms.add_leaf(alloc(model_node, node, pob));
|
||||
}
|
||||
node->check_pre_closed();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return root_node->is_closed();
|
||||
}
|
||||
|
||||
bool context::gpdr_create_split_children(pob &n, const datalog::rule &r,
|
||||
expr *trans,
|
||||
model_ref &mdl,
|
||||
pob_ref_buffer &out) {
|
||||
pred_transformer &pt = n.pt();
|
||||
ptr_vector<func_decl> preds;
|
||||
pt.find_predecessors(r, preds);
|
||||
SASSERT(preds.size() > 1);
|
||||
|
||||
ptr_vector<pred_transformer> ppts;
|
||||
for (auto *p : preds) ppts.push_back(&get_pred_transformer(p));
|
||||
|
||||
mbc::partition_map pmap;
|
||||
for (unsigned i = 0, sz = preds.size(); i < sz; ++i) {
|
||||
func_decl *p = preds.get(i);
|
||||
pred_transformer &ppt = *ppts.get(i);
|
||||
for (unsigned j = 0, jsz = p->get_arity(); j < jsz; ++j) {
|
||||
pmap.insert(m_pm.o2o(ppt.sig(j), 0, i), i);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
spacer::mbc _mbc(m);
|
||||
expr_ref_vector lits(m);
|
||||
flatten_and(trans, lits);
|
||||
vector<expr_ref_vector> res(preds.size(), expr_ref_vector(m));
|
||||
_mbc(pmap, lits, *mdl.get(), res);
|
||||
|
||||
// pick an order to process children
|
||||
unsigned_vector kid_order;
|
||||
kid_order.resize(preds.size(), 0);
|
||||
for (unsigned i = 0, sz = preds.size(); i < sz; ++i) kid_order[i] = i;
|
||||
if (m_children_order == CO_REV_RULE) {
|
||||
kid_order.reverse();
|
||||
}
|
||||
else if (m_children_order == CO_RANDOM) {
|
||||
shuffle(kid_order.size(), kid_order.c_ptr(), m_random);
|
||||
}
|
||||
|
||||
|
||||
for (unsigned i = 0, sz = res.size(); i < sz; ++i) {
|
||||
unsigned j = kid_order[i];
|
||||
expr_ref post(m);
|
||||
pred_transformer &ppt = *ppts.get(j);
|
||||
post = mk_and(res.get(j));
|
||||
m_pm.formula_o2n(post.get(), post, j, true);
|
||||
pob * k = ppt.mk_pob(&n, prev_level(n.level()), n.depth(), post);
|
||||
out.push_back(k);
|
||||
IF_VERBOSE (1, verbose_stream()
|
||||
<< "\n\tcreate_child: " << k->pt().head()->get_name()
|
||||
<< " (" << k->level() << ", " << k->depth() << ") "
|
||||
<< (k->use_farkas_generalizer() ? "FAR " : "SUB ")
|
||||
<< k->post()->get_id();
|
||||
verbose_stream().flush(););
|
||||
TRACE ("spacer",
|
||||
tout << "create-pob: " << k->pt().head()->get_name()
|
||||
<< " level: " << k->level()
|
||||
<< " depth: " << k->depth ()
|
||||
<< " fvsz: " << k->get_free_vars_size() << "\n"
|
||||
<< mk_pp(k->post(), m) << "\n";);
|
||||
|
||||
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
} // spacer
|
||||
107 src/muz/spacer/spacer_pdr.h Normal file

@ -0,0 +1,107 @@
|
|||
/**++
|
||||
Copyright (c) 2018 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_pdr.h
|
||||
|
||||
Abstract:
|
||||
|
||||
SPACER gPDR strategy implementation
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Based on muz/pdr
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#ifndef _SPACER_PDR_H_
|
||||
#define _SPACER_PDR_H_
|
||||
|
||||
#include "muz/spacer/spacer_context.h"
|
||||
|
||||
namespace spacer {
|
||||
// structure for counter-example search.
|
||||
class model_node {
|
||||
pob_ref m_pob; // proof obligation
|
||||
model_node* m_parent; // parent in the search tree
|
||||
ptr_vector<model_node> m_children; // children in the search tree
|
||||
model_node* m_next; // next element of an in-place circular queue
|
||||
model_node* m_prev; // prev element of an in-place circular queue
|
||||
unsigned m_orig_level; // level at which this search node was created
|
||||
unsigned m_depth; //
|
||||
bool m_closed; // whether the pob is derivable
|
||||
public:
|
||||
model_node(model_node* parent, pob* pob);
|
||||
void add_child(model_node* kid);
|
||||
|
||||
expr *post() const { return m_pob->post(); }
|
||||
unsigned level() const { return m_pob->level(); }
|
||||
unsigned orig_level() const { return m_orig_level; }
|
||||
unsigned depth() const { return m_depth; }
|
||||
void increase_level() { m_pob->inc_level(); }
|
||||
pob_ref &pob() { return m_pob; }
|
||||
ptr_vector<model_node> const& children() { return m_children; }
|
||||
pred_transformer& pt() const { return m_pob->pt(); }
|
||||
model_node* parent() const { return m_parent; }
|
||||
// order in children of the parent
|
||||
unsigned index_in_parent() const;
|
||||
|
||||
bool is_closed() const { return m_closed; }
|
||||
bool is_open() const { return !is_closed(); }
|
||||
|
||||
// closed or has children and they are all closed
|
||||
bool is_1closed() {
|
||||
if (is_closed()) return true;
|
||||
if (m_children.empty()) return false;
|
||||
for (auto kid : m_children)
|
||||
if (kid->is_open()) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
void check_pre_closed();
|
||||
void set_pre_closed() { m_closed = true; }
|
||||
|
||||
void set_closed() { m_closed = true; }
|
||||
void set_open();
|
||||
void reset_children() { m_children.reset(); }
|
||||
|
||||
/// queue
|
||||
|
||||
// remove this node from the given queue
|
||||
void detach(model_node*& qhead);
|
||||
void insert_after(model_node* n);
|
||||
model_node* next() const { return m_next; }
|
||||
bool in_queue() { return m_next && m_prev; }
|
||||
};
|
||||
|
||||
class model_search {
|
||||
typedef ptr_vector<model_node> model_nodes;
|
||||
bool m_bfs;
|
||||
model_node* m_root;
|
||||
model_node* m_qhead;
|
||||
vector<obj_map<expr, model_nodes > > m_cache;
|
||||
obj_map<expr, model_nodes>& cache(model_node const& n);
|
||||
void erase_children(model_node& n, bool backtrack);
|
||||
void remove_node(model_node* _n, bool backtrack);
|
||||
|
||||
public:
|
||||
model_search(bool bfs): m_bfs(bfs), m_root(nullptr), m_qhead(nullptr) {}
|
||||
~model_search() {reset();}
|
||||
|
||||
void set_root(model_node* n);
|
||||
|
||||
void reset();
|
||||
model_node* pop_front();
|
||||
void add_leaf(model_node* n); // add fresh node.
|
||||
model_node& get_root() const { return *m_root; }
|
||||
void backtrack_level(bool uses_level, model_node& n);
|
||||
void remove_goal(model_node& n);
|
||||
|
||||
void enqueue_leaf(model_node &n);
|
||||
};
|
||||
}
|
||||
#endif
|
||||
555 src/muz/spacer/spacer_proof_utils.cpp Normal file

@ -0,0 +1,555 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_proof_utils.cpp
|
||||
|
||||
Abstract:
|
||||
Utilities to traverse and manipulate proofs
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include "util/params.h"
|
||||
#include "ast/ast_pp.h"
|
||||
#include "ast/ast_util.h"
|
||||
#include "ast/proofs/proof_checker.h"
|
||||
#include "muz/base/dl_util.h"
|
||||
#include "muz/spacer/spacer_iuc_proof.h"
|
||||
|
||||
#include "ast/proofs/proof_utils.h"
|
||||
#include "muz/spacer/spacer_proof_utils.h"
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
// arithmetic lemma recognizer
|
||||
bool is_arith_lemma(ast_manager& m, proof* pr)
|
||||
{
|
||||
// arith lemmas: second parameter specifies exact type of lemma,
|
||||
// could be "farkas", "triangle-eq", "eq-propagate",
|
||||
// "assign-bounds", maybe also something else
|
||||
if (pr->get_decl_kind() == PR_TH_LEMMA) {
|
||||
func_decl* d = pr->get_decl();
|
||||
symbol sym;
|
||||
return d->get_num_parameters() >= 1 &&
|
||||
d->get_parameter(0).is_symbol(sym) &&
|
||||
sym == "arith";
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// farkas lemma recognizer
|
||||
bool is_farkas_lemma(ast_manager& m, proof* pr)
|
||||
{
|
||||
if (pr->get_decl_kind() == PR_TH_LEMMA)
|
||||
{
|
||||
func_decl* d = pr->get_decl();
|
||||
symbol sym;
|
||||
return d->get_num_parameters() >= 2 &&
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" &&
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas";
|
||||
}
|
||||
return false;
|
||||
}
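For illustration, one natural way to use the two recognizers is to test the more specific one first; the sketch below assumes a hypothetical call site that already has an ast_manager m and a proof step pr to inspect.

    // Sketch: classify a proof step using the recognizers above.
    void classify_step(ast_manager &m, proof *pr) {
        if (spacer::is_farkas_lemma(m, pr)) {
            // PR_TH_LEMMA tagged ("arith", "farkas", ...); the remaining
            // declaration parameters carry the Farkas coefficients
        }
        else if (spacer::is_arith_lemma(m, pr)) {
            // some other arithmetic lemma, e.g. "triangle-eq" or "assign-bounds"
        }
    }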
|
||||
|
||||
|
||||
/*
|
||||
* ====================================
|
||||
* methods for transforming proofs
|
||||
* ====================================
|
||||
*/
|
||||
|
||||
void theory_axiom_reducer::reset() {
|
||||
m_cache.reset();
|
||||
m_pinned.reset();
|
||||
}
|
||||
|
||||
// -- rewrite theory axioms into theory lemmas
|
||||
proof_ref theory_axiom_reducer::reduce(proof* pr) {
|
||||
proof_post_order pit(pr, m);
|
||||
while (pit.hasNext()) {
|
||||
proof* p = pit.next();
|
||||
|
||||
if (m.get_num_parents(p) == 0 && is_arith_lemma(m, p)) {
|
||||
// we have an arith-theory-axiom and want to get rid of it
|
||||
// we need to replace the axiom with
|
||||
// (a) corresponding hypothesis,
|
||||
// (b) a theory lemma, and
|
||||
// (c) a lemma.
|
||||
// Furthermore update data-structures
|
||||
app *fact = to_app(m.get_fact(p));
|
||||
ptr_buffer<expr> cls;
|
||||
if (m.is_or(fact)) {
|
||||
for (unsigned i = 0, sz = fact->get_num_args(); i < sz; ++i)
|
||||
cls.push_back(fact->get_arg(i));
|
||||
}
|
||||
else
|
||||
cls.push_back(fact);
|
||||
|
||||
// (a) create hypothesis
|
||||
ptr_buffer<proof> hyps;
|
||||
for (unsigned i = 0, sz = cls.size(); i < sz; ++i) {
|
||||
expr *c;
|
||||
expr_ref hyp_fact(m);
|
||||
if (m.is_not(cls[i], c))
|
||||
hyp_fact = c;
|
||||
else
|
||||
hyp_fact = m.mk_not (cls[i]);
|
||||
|
||||
proof* hyp = m.mk_hypothesis(hyp_fact);
|
||||
m_pinned.push_back(hyp);
|
||||
hyps.push_back(hyp);
|
||||
}
|
||||
|
||||
// (b) create farkas lemma. Rebuild parameters since
|
||||
// mk_th_lemma() adds tid as first parameter
|
||||
unsigned num_params = p->get_decl()->get_num_parameters();
|
||||
parameter const* params = p->get_decl()->get_parameters();
|
||||
vector<parameter> parameters;
|
||||
for (unsigned i = 1; i < num_params; ++i) parameters.push_back(params[i]);
|
||||
|
||||
SASSERT(params[0].is_symbol());
|
||||
family_id tid = m.mk_family_id(params[0].get_symbol());
|
||||
SASSERT(tid != null_family_id);
|
||||
|
||||
proof* th_lemma = m.mk_th_lemma(tid, m.mk_false(),
|
||||
hyps.size(), hyps.c_ptr(),
|
||||
num_params-1, parameters.c_ptr());
|
||||
m_pinned.push_back(th_lemma);
|
||||
SASSERT(is_arith_lemma(m, th_lemma));
|
||||
|
||||
// (c) create lemma
|
||||
proof* res = m.mk_lemma(th_lemma, fact);
|
||||
m_pinned.push_back(res);
|
||||
m_cache.insert(p, res);
|
||||
|
||||
SASSERT(m.get_fact(res) == m.get_fact(p));
|
||||
}
|
||||
else {
|
||||
// proof is dirty, if a sub-proof of one of its premises
|
||||
// has been transformed
|
||||
bool dirty = false;
|
||||
|
||||
ptr_buffer<expr> args;
|
||||
for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) {
|
||||
proof *pp, *tmp;
|
||||
pp = m.get_parent(p, i);
|
||||
VERIFY(m_cache.find(pp, tmp));
|
||||
args.push_back(tmp);
|
||||
dirty |= (pp != tmp);
|
||||
}
|
||||
// if not dirty just use the old step
|
||||
if (!dirty) m_cache.insert(p, p);
|
||||
// otherwise create new proof with the corresponding proofs
|
||||
// of the premises
|
||||
else {
|
||||
if (m.has_fact(p)) args.push_back(m.get_fact(p));
|
||||
|
||||
SASSERT(p->get_decl()->get_arity() == args.size());
|
||||
|
||||
proof* res = m.mk_app(p->get_decl(),
|
||||
args.size(), (expr * const*)args.c_ptr());
|
||||
m_pinned.push_back(res);
|
||||
m_cache.insert(p, res);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
proof* res;
|
||||
VERIFY(m_cache.find(pr,res));
|
||||
DEBUG_CODE(
|
||||
proof_checker pc(m);
|
||||
expr_ref_vector side(m);
|
||||
SASSERT(pc.check(res, side));
|
||||
);
|
||||
|
||||
return proof_ref(res, m);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------------- */
|
||||
/* hypothesis_reducer */
|
||||
/* ------------------------------------------------------------------------- */
|
||||
|
||||
proof_ref hypothesis_reducer::reduce(proof* pr) {
|
||||
compute_hypsets(pr);
|
||||
collect_units(pr);
|
||||
|
||||
proof_ref res(reduce_core(pr), m);
|
||||
SASSERT(res);
|
||||
reset();
|
||||
|
||||
DEBUG_CODE(proof_checker pc(m);
|
||||
expr_ref_vector side(m);
|
||||
SASSERT(pc.check(res, side)););
|
||||
return res;
|
||||
}
|
||||
|
||||
void hypothesis_reducer::reset() {
|
||||
m_active_hyps.reset();
|
||||
m_units.reset();
|
||||
m_cache.reset();
|
||||
for (auto t : m_pinned_active_hyps) dealloc(t);
|
||||
m_pinned_active_hyps.reset();
|
||||
m_pinned.reset();
|
||||
m_hyp_mark.reset();
|
||||
m_open_mark.reset();
|
||||
m_visited.reset();
|
||||
}
|
||||
|
||||
void hypothesis_reducer::compute_hypsets(proof *pr) {
|
||||
ptr_buffer<proof> todo;
|
||||
todo.push_back(pr);
|
||||
|
||||
while (!todo.empty()) {
|
||||
proof* p = todo.back();
|
||||
|
||||
if (m_visited.is_marked(p)) {
|
||||
todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
|
||||
unsigned todo_sz = todo.size();
|
||||
for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) {
|
||||
SASSERT(m.is_proof(p->get_arg(i)));
|
||||
proof *parent = to_app(p->get_arg(i));
|
||||
|
||||
if (!m_visited.is_marked(parent))
|
||||
todo.push_back(parent);
|
||||
}
|
||||
if (todo.size() > todo_sz) continue;
|
||||
|
||||
todo.pop_back();
|
||||
|
||||
m_visited.mark(p);
|
||||
|
||||
|
||||
proof_ptr_vector* active_hyps = nullptr;
|
||||
// fill both sets
|
||||
if (m.is_hypothesis(p)) {
|
||||
// create active_hyps-set for step p
|
||||
proof_ptr_vector* active_hyps = alloc(proof_ptr_vector);
|
||||
m_pinned_active_hyps.insert(active_hyps);
|
||||
m_active_hyps.insert(p, active_hyps);
|
||||
active_hyps->push_back(p);
|
||||
m_open_mark.mark(p);
|
||||
m_hyp_mark.mark(m.get_fact(p));
|
||||
continue;
|
||||
}
|
||||
|
||||
ast_fast_mark1 seen;
|
||||
|
||||
active_hyps = alloc(proof_ptr_vector);
|
||||
for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) {
|
||||
proof* parent = m.get_parent(p, i);
|
||||
// lemmas clear all hypotheses above them
|
||||
if (m.is_lemma(p)) continue;
|
||||
for (auto *x : *m_active_hyps.find(parent)) {
|
||||
if (!seen.is_marked(x)) {
|
||||
seen.mark(x);
|
||||
active_hyps->push_back(x);
|
||||
m_open_mark.mark(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (active_hyps->empty()) {
|
||||
dealloc(active_hyps);
|
||||
m_active_hyps.insert(p, &m_empty_vector);
|
||||
}
|
||||
else {
|
||||
m_pinned_active_hyps.push_back(active_hyps);
|
||||
m_active_hyps.insert(p, active_hyps);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// collect all units that are hyp-free and are used as hypotheses somewhere
|
||||
// requires that m_active_hyps has been computed
|
||||
void hypothesis_reducer::collect_units(proof* pr) {
|
||||
|
||||
proof_post_order pit(pr, m);
|
||||
while (pit.hasNext()) {
|
||||
proof* p = pit.next();
|
||||
if (!m.is_hypothesis(p)) {
|
||||
// collect units that are hyp-free and are used as
|
||||
// hypotheses in the proof pr
|
||||
if (!m_open_mark.is_marked(p) && m.has_fact(p) &&
|
||||
m_hyp_mark.is_marked(m.get_fact(p)))
|
||||
m_units.insert(m.get_fact(p), p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
\brief returns true if p is an ancestor of q
|
||||
*/
|
||||
bool hypothesis_reducer::is_ancestor(proof *p, proof *q) {
|
||||
if (p == q) return true;
|
||||
ptr_vector<proof> todo;
|
||||
todo.push_back(q);
|
||||
|
||||
expr_mark visited;
|
||||
while (!todo.empty()) {
|
||||
proof *cur;
|
||||
cur = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
if (visited.is_marked(cur)) continue;
|
||||
|
||||
if (cur == p) return true;
|
||||
visited.mark(cur);
|
||||
|
||||
for (unsigned i = 0, sz = m.get_num_parents(cur); i < sz; ++i) {
|
||||
todo.push_back(m.get_parent(cur, i));
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
proof* hypothesis_reducer::reduce_core(proof* pf) {
|
||||
SASSERT(m.is_false(m.get_fact(pf)));
|
||||
|
||||
proof *res = NULL;
|
||||
|
||||
ptr_vector<proof> todo;
|
||||
todo.push_back(pf);
|
||||
ptr_buffer<proof> args;
|
||||
bool dirty = false;
|
||||
|
||||
while (true) {
|
||||
proof *p, *tmp, *pp;
|
||||
unsigned todo_sz;
|
||||
|
||||
p = todo.back();
|
||||
if (m_cache.find(p, tmp)) {
|
||||
todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
|
||||
dirty = false;
|
||||
args.reset();
|
||||
todo_sz = todo.size();
|
||||
for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) {
|
||||
pp = m.get_parent(p, i);
|
||||
if (m_cache.find(pp, tmp)) {
|
||||
args.push_back(tmp);
|
||||
dirty |= pp != tmp;
|
||||
} else {
|
||||
todo.push_back(pp);
|
||||
}
|
||||
}
|
||||
|
||||
if (todo_sz < todo.size()) continue;
|
||||
|
||||
todo.pop_back();
|
||||
|
||||
// transform the current proof node
|
||||
|
||||
if (m.is_hypothesis(p)) {
|
||||
// if possible, replace a hypothesis by a unit derivation
|
||||
if (m_units.find(m.get_fact(p), tmp)) {
|
||||
// use already transformed proof of the unit if it is available
|
||||
proof* proof_of_unit;
|
||||
if (!m_cache.find(tmp, proof_of_unit)) {
|
||||
proof_of_unit = tmp;
|
||||
}
|
||||
|
||||
// make sure hypsets for the unit are computed
|
||||
// AG: is this needed?
|
||||
compute_hypsets(proof_of_unit);
|
||||
|
||||
// if the transformation doesn't create a cycle, perform it
|
||||
if (!is_ancestor(p, proof_of_unit)) {
|
||||
res = proof_of_unit;
|
||||
}
|
||||
else {
|
||||
// -- failed to transform the proof, perhaps bad
|
||||
// -- choice of the proof of unit
|
||||
res = p;
|
||||
}
|
||||
}
|
||||
else {
|
||||
// -- no unit found to replace the hypothesis
|
||||
res = p;
|
||||
}
|
||||
}
|
||||
|
||||
else if (!dirty) {res = p;}
|
||||
|
||||
else if (m.is_lemma(p)) {
|
||||
// lemma: reduce the premise; remove reduced consequences
|
||||
// from conclusion
|
||||
SASSERT(args.size() == 1);
|
||||
res = mk_lemma_core(args[0], m.get_fact(p));
|
||||
// -- re-compute hypsets
|
||||
compute_hypsets(res);
|
||||
}
|
||||
else if (m.is_unit_resolution(p)) {
|
||||
// unit: reduce units; reduce the first premise; rebuild
|
||||
// unit resolution
|
||||
res = mk_unit_resolution_core(p, args);
|
||||
// -- re-compute hypsets
|
||||
compute_hypsets(res);
|
||||
}
|
||||
else {
|
||||
res = mk_proof_core(p, args);
|
||||
// -- re-compute hypsets
|
||||
compute_hypsets(res);
|
||||
}
|
||||
|
||||
SASSERT(res);
|
||||
m_cache.insert(p, res);
|
||||
|
||||
// bail out as soon as a sub-proof of false is found
|
||||
if (!m_open_mark.is_marked(res) && m.has_fact(res) && m.is_false(m.get_fact(res)))
|
||||
return res;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
proof* hypothesis_reducer::mk_lemma_core(proof* premise, expr *fact) {
|
||||
SASSERT(m.is_false(m.get_fact(premise)));
|
||||
SASSERT(m_active_hyps.contains(premise));
|
||||
|
||||
proof_ptr_vector* active_hyps = m_active_hyps.find(premise);
|
||||
|
||||
// if there is no active hypothesis return the premise
|
||||
if (!m_open_mark.is_marked(premise)) {
|
||||
// XXX just in case premise might go away
|
||||
m_pinned.push_back(premise);
|
||||
return premise;
|
||||
}
|
||||
|
||||
// add some stability
|
||||
std::stable_sort(active_hyps->begin(), active_hyps->end(), ast_lt_proc());
|
||||
// otherwise, build a disjunction of the negated active hypotheses
|
||||
// and add a lemma proof step
|
||||
expr_ref_buffer args(m);
|
||||
for (auto hyp : *active_hyps) {
|
||||
expr *hyp_fact, *t;
|
||||
hyp_fact = m.get_fact(hyp);
|
||||
if (m.is_not(hyp_fact, t))
|
||||
args.push_back(t);
|
||||
else
|
||||
args.push_back(m.mk_not(hyp_fact));
|
||||
}
|
||||
|
||||
expr_ref lemma(m);
|
||||
lemma = mk_or(m, args.size(), args.c_ptr());
|
||||
|
||||
proof* res;
|
||||
res = m.mk_lemma(premise, lemma);
|
||||
m_pinned.push_back(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
proof* hypothesis_reducer::mk_unit_resolution_core(proof *ures,
|
||||
ptr_buffer<proof>& args) {
|
||||
// if any literal is false, we don't need a unit resolution step
|
||||
// This can be the case due to some previous transformations
|
||||
for (unsigned i = 1, sz = args.size(); i < sz; ++i) {
|
||||
if (m.is_false(m.get_fact(args[i]))) {
|
||||
// XXX pin just in case
|
||||
m_pinned.push_back(args[i]);
|
||||
return args[i];
|
||||
}
|
||||
}
|
||||
|
||||
proof* arg0 = args[0];
|
||||
app *fact0 = to_app(m.get_fact(arg0));
|
||||
|
||||
|
||||
ptr_buffer<proof> pf_args;
|
||||
ptr_buffer<expr> pf_fact;
|
||||
pf_args.push_back(arg0);
|
||||
|
||||
// compute literals to be resolved
|
||||
ptr_buffer<expr> lits;
|
||||
|
||||
// fact0 is a literal whenever the original resolution was a
|
||||
// binary resolution to an empty clause
|
||||
if (m.get_num_parents(ures) == 2 && m.is_false(m.get_fact(ures))) {
|
||||
lits.push_back(fact0);
|
||||
}
|
||||
// fact0 is a literal unless it is a disjunction
|
||||
else if (!m.is_or(fact0)) {
|
||||
lits.push_back(fact0);
|
||||
}
|
||||
// fact0 is a literal only if it appears as a literal in the
|
||||
// original resolution
|
||||
else {
|
||||
lits.reset();
|
||||
app* ures_fact = to_app(m.get_fact(m.get_parent(ures, 0)));
|
||||
for (unsigned i = 0, sz = ures_fact->get_num_args(); i < sz; ++i) {
|
||||
if (ures_fact->get_arg(i) == fact0) {
|
||||
lits.push_back(fact0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (lits.empty()) {
|
||||
lits.append(fact0->get_num_args(), fact0->get_args());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// -- find all literals that are resolved on
|
||||
for (unsigned i = 0, sz = lits.size(); i < sz; ++i) {
|
||||
bool found = false;
|
||||
for (unsigned j = 1; j < args.size(); ++j) {
|
||||
if (m.is_complement(lits.get(i), m.get_fact(args[j]))) {
|
||||
found = true;
|
||||
pf_args.push_back(args[j]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found) {pf_fact.push_back(lits.get(i));}
|
||||
}
|
||||
|
||||
// unit resolution got reduced to noop
|
||||
if (pf_args.size() == 1) {
|
||||
// XXX pin just in case
|
||||
m_pinned.push_back(arg0);
|
||||
|
||||
return arg0;
|
||||
}
|
||||
|
||||
// make unit resolution proof step
|
||||
// expr_ref tmp(m);
|
||||
// tmp = mk_or(m, pf_fact.size(), pf_fact.c_ptr());
|
||||
// proof* res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr(), tmp);
|
||||
proof *res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr());
|
||||
m_pinned.push_back(res);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
proof* hypothesis_reducer::mk_proof_core(proof* old, ptr_buffer<proof>& args) {
|
||||
// if any of the literals are false, we don't need a step
|
||||
for (unsigned i = 0; i < args.size(); ++i) {
|
||||
if (m.is_false(m.get_fact(args[i]))) {
|
||||
// XXX just in case
|
||||
m_pinned.push_back(args[i]);
|
||||
return args[i];
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise build step
|
||||
// BUG: I guess this doesn't work with quantifiers (since they are not apps)
|
||||
args.push_back(to_app(m.get_fact(old)));
|
||||
|
||||
SASSERT(old->get_decl()->get_arity() == args.size());
|
||||
|
||||
proof* res = m.mk_app(old->get_decl(), args.size(),
|
||||
(expr * const*)args.c_ptr());
|
||||
m_pinned.push_back(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
};
|
||||
105 src/muz/spacer/spacer_proof_utils.h Normal file

@ -0,0 +1,105 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_proof_utils.h
|
||||
|
||||
Abstract:
|
||||
Utilities to traverse and manipulate proofs
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_PROOF_UTILS_H_
|
||||
#define _SPACER_PROOF_UTILS_H_
|
||||
#include "ast/ast.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
bool is_arith_lemma(ast_manager& m, proof* pr);
|
||||
bool is_farkas_lemma(ast_manager& m, proof* pr);
|
||||
|
||||
/// rewrites theory axioms into theory lemmas
|
||||
class theory_axiom_reducer {
|
||||
public:
|
||||
theory_axiom_reducer(ast_manager& m) : m(m), m_pinned(m) {}
|
||||
|
||||
// reduce theory axioms and return transformed proof
|
||||
proof_ref reduce(proof* pr);
|
||||
|
||||
private:
|
||||
ast_manager &m;
|
||||
|
||||
// tracking all created expressions
|
||||
expr_ref_vector m_pinned;
|
||||
|
||||
// maps each proof of a clause to the transformed subproof of
|
||||
// that clause
|
||||
obj_map<proof, proof*> m_cache;
|
||||
|
||||
void reset();
|
||||
};
|
||||
|
||||
/// reduces the number of hypotheses in a proof
|
||||
class hypothesis_reducer
|
||||
{
|
||||
public:
|
||||
hypothesis_reducer(ast_manager &m) : m(m), m_pinned(m) {}
|
||||
~hypothesis_reducer() {reset();}
|
||||
|
||||
// reduce hypothesis and return transformed proof
|
||||
proof_ref reduce(proof* pf);
|
||||
|
||||
private:
|
||||
typedef ptr_vector<proof> proof_ptr_vector;
|
||||
|
||||
ast_manager &m;
|
||||
|
||||
proof_ptr_vector m_empty_vector;
|
||||
|
||||
// created expressions
|
||||
expr_ref_vector m_pinned;
|
||||
|
||||
// created sets of active hypothesis
|
||||
ptr_vector<proof_ptr_vector> m_pinned_active_hyps;
|
||||
|
||||
// maps a proof to the transformed proof
|
||||
obj_map<proof, proof*> m_cache;
|
||||
|
||||
// maps a unit literal to its derivation
|
||||
obj_map<expr, proof*> m_units;
|
||||
|
||||
// maps a proof node to the set of its active (i.e., in scope) hypotheses
|
||||
obj_map<proof, proof_ptr_vector*> m_active_hyps;
|
||||
|
||||
/// marks if an expression is ever used as a hypothesis in a proof
|
||||
expr_mark m_hyp_mark;
|
||||
/// marks a proof as open, i.e., has a non-discharged hypothesis as ancestor
|
||||
expr_mark m_open_mark;
|
||||
expr_mark m_visited;
|
||||
|
||||
void reset();
|
||||
|
||||
/// true if p is an ancestor of q
|
||||
bool is_ancestor(proof *p, proof *q);
|
||||
// compute active_hyps and parent_hyps for a given proof node and
|
||||
// all its ancestors
|
||||
void compute_hypsets(proof* pr);
|
||||
// compute m_units
|
||||
void collect_units(proof* pr);
|
||||
|
||||
// -- rewrite proof to reduce number of hypotheses used
|
||||
proof* reduce_core(proof* pf);
|
||||
|
||||
proof* mk_lemma_core(proof *pf, expr *fact);
|
||||
proof* mk_unit_resolution_core(proof* ures, ptr_buffer<proof>& args);
|
||||
proof* mk_proof_core(proof* old, ptr_buffer<proof>& args);
|
||||
};
|
||||
}
|
||||
#endif
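A plausible way to chain the two passes declared in this header is sketched below. It assumes a refutation pr (a proof of false, as required by hypothesis_reducer::reduce) and an ast_manager m at the call site; it is only a sketch of the intended composition, not a verbatim excerpt of the calling code.

    // Sketch: first rewrite theory axioms into lemmas, then shrink hypotheses.
    spacer::theory_axiom_reducer ta(m);
    proof_ref no_axioms = ta.reduce(pr);

    spacer::hypothesis_reducer hr(m);
    proof_ref reduced = hr.reduce(no_axioms.get());
    // `reduced` proves the same fact as `pr` with fewer in-scope hypotheses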
|
||||
|
|
@ -35,13 +35,15 @@ Revision History:
|
|||
#include "muz/spacer/spacer_farkas_learner.h"
|
||||
#include "muz/spacer/spacer_prop_solver.h"
|
||||
|
||||
#include "muz/base/fixedpoint_params.hpp"
|
||||
#include "model/model_evaluator.h"
|
||||
#include "muz/base/fp_params.hpp"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
prop_solver::prop_solver(manager& pm, fixedpoint_params const& p, symbol const& name) :
|
||||
m(pm.get_manager()),
|
||||
m_pm(pm),
|
||||
prop_solver::prop_solver(ast_manager &m,
|
||||
solver *solver0, solver *solver1,
|
||||
fp_params const& p, symbol const& name) :
|
||||
m(m),
|
||||
m_name(name),
|
||||
m_ctx(nullptr),
|
||||
m_pos_level_atoms(m),
|
||||
|
|
@ -54,17 +56,21 @@ prop_solver::prop_solver(manager& pm, fixedpoint_params const& p, symbol const&
|
|||
m_use_push_bg(p.spacer_keep_proxy())
|
||||
{
|
||||
|
||||
m_solvers[0] = pm.mk_fresh();
|
||||
m_fparams[0] = &pm.fparams();
|
||||
m_solvers[0] = solver0;
|
||||
m_solvers[1] = solver1;
|
||||
|
||||
m_solvers[1] = pm.mk_fresh2();
|
||||
m_fparams[1] = &pm.fparams2();
|
||||
|
||||
m_contexts[0] = alloc(spacer::itp_solver, *(m_solvers[0]), p.spacer_new_unsat_core(), p.spacer_minimize_unsat_core(), p.spacer_farkas_optimized(), p.spacer_farkas_a_const(), p.spacer_split_farkas_literals());
|
||||
m_contexts[1] = alloc(spacer::itp_solver, *(m_solvers[1]), p.spacer_new_unsat_core(), p.spacer_minimize_unsat_core(), p.spacer_farkas_optimized(), p.spacer_farkas_a_const(), p.spacer_split_farkas_literals());
|
||||
|
||||
for (unsigned i = 0; i < 2; ++i)
|
||||
{ m_contexts[i]->assert_expr(m_pm.get_background()); }
|
||||
m_contexts[0] = alloc(spacer::iuc_solver, *(m_solvers[0]),
|
||||
p.spacer_iuc(),
|
||||
p.spacer_iuc_arith(),
|
||||
p.spacer_iuc_print_farkas_stats(),
|
||||
p.spacer_iuc_old_hyp_reducer(),
|
||||
p.spacer_iuc_split_farkas_literals());
|
||||
m_contexts[1] = alloc(spacer::iuc_solver, *(m_solvers[1]),
|
||||
p.spacer_iuc(),
|
||||
p.spacer_iuc_arith(),
|
||||
p.spacer_iuc_print_farkas_stats(),
|
||||
p.spacer_iuc_old_hyp_reducer(),
|
||||
p.spacer_iuc_split_farkas_literals());
|
||||
}
|
||||
|
||||
void prop_solver::add_level()
|
||||
|
|
@ -119,6 +125,8 @@ void prop_solver::assert_expr(expr * form)
|
|||
|
||||
void prop_solver::assert_expr(expr * form, unsigned level)
|
||||
{
|
||||
if (is_infty_level(level)) {assert_expr(form);return;}
|
||||
|
||||
ensure_level(level);
|
||||
app * lev_atom = m_pos_level_atoms[level].get();
|
||||
app_ref lform(m.mk_or(form, lev_atom), m);
|
||||
|
|
@ -126,18 +134,109 @@ void prop_solver::assert_expr(expr * form, unsigned level)
|
|||
}
|
||||
|
||||
|
||||
/// Local model guided maxsmt
|
||||
lbool prop_solver::mss(expr_ref_vector &hard, expr_ref_vector &soft) {
|
||||
// replace expressions by assumption literals
|
||||
iuc_solver::scoped_mk_proxy _p_(*m_ctx, hard);
|
||||
unsigned hard_sz = hard.size();
|
||||
|
||||
lbool res = m_ctx->check_sat(hard.size(), hard.c_ptr());
|
||||
// bail out if hard constraints are not sat, or if there are no
|
||||
// soft constraints
|
||||
if (res != l_true || soft.empty()) {return res;}
|
||||
|
||||
// the main loop
|
||||
|
||||
model_ref mdl;
|
||||
m_ctx->get_model(mdl);
|
||||
|
||||
// don't proxy soft literals. Assume that they are propositional.
|
||||
hard.append(soft);
|
||||
soft.reset();
|
||||
|
||||
|
||||
// hard is divided into 4 regions
|
||||
// x < hard_sz ---> hard constraints
|
||||
// hard_sz <= x < i ---> sat soft constraints
|
||||
// i <= x < j ---> backbones (unsat soft constraints)
|
||||
// j <= x < hard.size() ---> unprocessed soft constraints
|
||||
unsigned i, j;
|
||||
i = hard_sz;
|
||||
j = hard_sz;
|
||||
|
||||
while (j < hard.size()) {
|
||||
model_evaluator mev(*mdl);
|
||||
|
||||
// move all true soft constraints to [hard_sz, i)
|
||||
for (unsigned k = j; k < hard.size(); ++k) {
|
||||
expr_ref e(m);
|
||||
e = hard.get(k);
|
||||
if (!mev.is_false(e) /* true or unset */) {
|
||||
expr_ref tmp(m);
|
||||
tmp = hard.get(i);
|
||||
hard[i] = e;
|
||||
if (i < j) {
|
||||
// tmp is a backbone, put it at j
|
||||
if (j == k) {hard[j] = tmp;}
|
||||
else /* j < k */ {
|
||||
e = hard.get(j);
|
||||
hard[j] = tmp;
|
||||
hard[k] = e;
|
||||
}
|
||||
j++;
|
||||
}
|
||||
else {
|
||||
// there are no backbone literals
|
||||
hard[k] = tmp;
|
||||
j++;
|
||||
}
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
// done with the model. Reset to avoid confusion in debugging
|
||||
mdl.reset();
|
||||
|
||||
// -- grow the set of backbone literals
|
||||
for (;j < hard.size(); ++j) {
|
||||
res = m_ctx->check_sat(j+1, hard.c_ptr());
|
||||
if (res == l_false) {
|
||||
// -- flip non-true literal to be false
|
||||
hard[j] = mk_not(m, hard.get(j));
|
||||
}
|
||||
else if (res == l_true) {
|
||||
// -- get the model for the next iteration of the outer loop
|
||||
m_ctx->get_model(mdl);
|
||||
break;
|
||||
}
|
||||
else if (res == l_undef) {
|
||||
// -- conservatively bail out
|
||||
hard.resize(hard_sz);
|
||||
return l_undef;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// move sat soft constraints to the output vector
|
||||
for (unsigned k = i; k < j; ++k) { soft.push_back(hard.get(k)); }
|
||||
// cleanup hard constraints
|
||||
hard.resize(hard_sz);
|
||||
return l_true;
|
||||
}
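The index juggling in mss() is easiest to see on a plain array. The following standalone illustration, in ordinary C++ with a hypothetical `holds` predicate standing in for "true in the current model", maintains the same four regions while promoting satisfied elements; it is an illustration of the bookkeeping only, not solver code.

    // Region layout maintained below (same as in mss()):
    //   [0, hard_sz)      hard constraints
    //   [hard_sz, i)      soft constraints satisfied by the model
    //   [i, j)            backbones (soft constraints forced to be false)
    //   [j, v.size())     soft constraints not processed yet
    #include <cstddef>
    #include <utility>
    #include <vector>

    void promote_true(std::vector<int> &v, std::size_t &i, std::size_t &j,
                      bool (*holds)(int)) {
        // initially i == j == hard_sz; the hard prefix [0, hard_sz) is never touched
        for (std::size_t k = j; k < v.size(); ++k) {
            if (!holds(v[k])) continue;         // stays in the unprocessed region
            std::swap(v[i], v[k]);              // promote the satisfied element to i
            if (i < j) std::swap(v[j], v[k]);   // keep the displaced backbone at j
            ++i; ++j;                           // both regions shift right by one
        }
    }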
|
||||
|
||||
/// Poor man's maxsat. No guarantees of maximum solution
|
||||
/// Runs maxsat loop on m_ctx. Returns l_false if hard is unsat,
|
||||
/// otherwise reduces soft such that hard & soft is sat.
|
||||
lbool prop_solver::maxsmt(expr_ref_vector &hard, expr_ref_vector &soft)
|
||||
lbool prop_solver::maxsmt(expr_ref_vector &hard, expr_ref_vector &soft,
|
||||
vector<expr_ref_vector> const & clauses)
|
||||
{
|
||||
// replace expressions by assumption literals
|
||||
itp_solver::scoped_mk_proxy _p_(*m_ctx, hard);
|
||||
iuc_solver::scoped_mk_proxy _p_(*m_ctx, hard);
|
||||
unsigned hard_sz = hard.size();
|
||||
// assume soft constraints are propositional literals (no need to proxy)
|
||||
hard.append(soft);
|
||||
|
||||
lbool res = m_ctx->check_sat(hard.size(), hard.c_ptr());
|
||||
lbool res = m_ctx->check_sat_cc(hard, clauses);
|
||||
// if hard constraints alone are unsat or there are no soft
|
||||
// constraints, we are done
|
||||
if (res != l_false || soft.empty()) { return res; }
|
||||
|
|
@ -146,7 +245,7 @@ lbool prop_solver::maxsmt(expr_ref_vector &hard, expr_ref_vector &soft)
|
|||
soft.reset();
|
||||
|
||||
expr_ref saved(m);
|
||||
ptr_vector<expr> core;
|
||||
expr_ref_vector core(m);
|
||||
m_ctx->get_unsat_core(core);
|
||||
|
||||
// while there are soft constraints
|
||||
|
|
@ -171,7 +270,7 @@ lbool prop_solver::maxsmt(expr_ref_vector &hard, expr_ref_vector &soft)
|
|||
}
|
||||
|
||||
// check that the NEW constraints became sat
|
||||
res = m_ctx->check_sat(hard.size(), hard.c_ptr());
|
||||
res = m_ctx->check_sat_cc(hard, clauses);
|
||||
if (res != l_false) { break; }
|
||||
// still unsat, update the core and repeat
|
||||
core.reset();
|
||||
|
|
@ -189,17 +288,21 @@ lbool prop_solver::maxsmt(expr_ref_vector &hard, expr_ref_vector &soft)
|
|||
return res;
|
||||
}
|
||||
|
||||
lbool prop_solver::internal_check_assumptions(
|
||||
expr_ref_vector& hard_atoms,
|
||||
expr_ref_vector& soft_atoms)
|
||||
lbool prop_solver::internal_check_assumptions(expr_ref_vector &hard_atoms,
|
||||
expr_ref_vector &soft_atoms,
|
||||
vector<expr_ref_vector> const & clauses)
|
||||
{
|
||||
// XXX Turn model generation on if m_model != 0
|
||||
SASSERT(m_ctx);
|
||||
SASSERT(m_ctx_fparams);
|
||||
flet<bool> _model(m_ctx_fparams->m_model, m_model != nullptr);
|
||||
|
||||
params_ref p;
|
||||
if (m_model != nullptr) {
|
||||
p.set_bool("produce_models", true);
|
||||
m_ctx->updt_params(p);
|
||||
}
|
||||
|
||||
if (m_in_level) { assert_level_atoms(m_current_level); }
|
||||
lbool result = maxsmt(hard_atoms, soft_atoms);
|
||||
lbool result = maxsmt(hard_atoms, soft_atoms, clauses);
|
||||
if (result != l_false && m_model) { m_ctx->get_model(*m_model); }
|
||||
|
||||
SASSERT(result != l_false || soft_atoms.empty());
|
||||
|
|
@ -226,15 +329,22 @@ lbool prop_solver::internal_check_assumptions(
|
|||
}
|
||||
|
||||
if (result == l_false && m_core && m.proofs_enabled() && !m_subset_based_core) {
|
||||
TRACE("spacer", tout << "theory core\n";);
|
||||
TRACE("spacer", tout << "Using IUC core\n";);
|
||||
m_core->reset();
|
||||
m_ctx->get_itp_core(*m_core);
|
||||
m_ctx->get_iuc(*m_core);
|
||||
} else if (result == l_false && m_core) {
|
||||
m_core->reset();
|
||||
m_ctx->get_unsat_core(*m_core);
|
||||
// manually undo proxies because maxsmt() call above manually adds proxies
|
||||
// AG: don't think this is needed. maxsmt() undoes the proxies already
|
||||
m_ctx->undo_proxies(*m_core);
|
||||
}
|
||||
|
||||
if (m_model != nullptr) {
|
||||
p.set_bool("produce_models", false);
|
||||
m_ctx->updt_params(p);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
@ -242,9 +352,11 @@ lbool prop_solver::internal_check_assumptions(
|
|||
|
||||
lbool prop_solver::check_assumptions(const expr_ref_vector & _hard,
|
||||
expr_ref_vector& soft,
|
||||
const expr_ref_vector &clause,
|
||||
unsigned num_bg, expr * const * bg,
|
||||
unsigned solver_id)
|
||||
{
|
||||
expr_ref cls(m);
|
||||
// current clients expect that flattening of HARD is
|
||||
// done implicitly during check_assumptions
|
||||
expr_ref_vector hard(m);
|
||||
|
|
@ -252,12 +364,11 @@ lbool prop_solver::check_assumptions(const expr_ref_vector & _hard,
|
|||
flatten_and(hard);
|
||||
|
||||
m_ctx = m_contexts [solver_id == 0 ? 0 : 0 /* 1 */].get();
|
||||
m_ctx_fparams = m_fparams [solver_id == 0 ? 0 : 0 /* 1 */];
|
||||
|
||||
// can be disabled if use_push_bg == true
|
||||
// solver::scoped_push _s_(*m_ctx);
|
||||
if (!m_use_push_bg) { m_ctx->push(); }
|
||||
itp_solver::scoped_bg _b_(*m_ctx);
|
||||
if (!m_use_push_bg) {m_ctx->push();}
|
||||
iuc_solver::scoped_bg _b_(*m_ctx);
|
||||
|
||||
for (unsigned i = 0; i < num_bg; ++i)
|
||||
if (m_use_push_bg) { m_ctx->push_bg(bg [i]); }
|
||||
|
|
@ -265,7 +376,9 @@ lbool prop_solver::check_assumptions(const expr_ref_vector & _hard,
|
|||
|
||||
unsigned soft_sz = soft.size();
|
||||
(void) soft_sz;
|
||||
lbool res = internal_check_assumptions(hard, soft);
|
||||
vector<expr_ref_vector> clauses;
|
||||
if (!clause.empty()) clauses.push_back(clause);
|
||||
lbool res = internal_check_assumptions(hard, soft, clauses);
|
||||
if (!m_use_push_bg) { m_ctx->pop(1); }
|
||||
|
||||
TRACE("psolve_verbose",
|
||||
|
|
|
|||
|
|
@ -29,25 +29,23 @@ Revision History:
|
|||
#include "smt/smt_kernel.h"
|
||||
#include "util/util.h"
|
||||
#include "util/vector.h"
|
||||
#include "muz/spacer/spacer_manager.h"
|
||||
#include "muz/spacer/spacer_smt_context_manager.h"
|
||||
#include "muz/spacer/spacer_itp_solver.h"
|
||||
#include "solver/solver.h"
|
||||
#include "muz/spacer/spacer_iuc_solver.h"
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
|
||||
struct fixedpoint_params;
|
||||
struct fp_params;
|
||||
|
||||
namespace spacer {
|
||||
typedef ptr_vector<func_decl> decl_vector;
|
||||
|
||||
class prop_solver {
|
||||
|
||||
private:
|
||||
ast_manager& m;
|
||||
manager& m_pm;
|
||||
symbol m_name;
|
||||
smt_params* m_fparams[2];
|
||||
solver* m_solvers[2];
|
||||
scoped_ptr<itp_solver> m_contexts[2];
|
||||
itp_solver * m_ctx;
|
||||
smt_params * m_ctx_fparams;
|
||||
ref<solver> m_solvers[2];
|
||||
scoped_ptr<iuc_solver> m_contexts[2];
|
||||
iuc_solver * m_ctx;
|
||||
decl_vector m_level_preds;
|
||||
app_ref_vector m_pos_level_atoms; // atoms used to identify level
|
||||
app_ref_vector m_neg_level_atoms; //
|
||||
|
|
@ -68,13 +66,17 @@ private:
|
|||
void ensure_level(unsigned lvl);
|
||||
|
||||
lbool internal_check_assumptions(expr_ref_vector &hard,
|
||||
expr_ref_vector &soft);
|
||||
expr_ref_vector &soft,
|
||||
vector<expr_ref_vector> const & clause);
|
||||
|
||||
lbool maxsmt(expr_ref_vector &hard, expr_ref_vector &soft);
|
||||
lbool maxsmt(expr_ref_vector &hard, expr_ref_vector &soft,
|
||||
vector<expr_ref_vector> const & clauses);
|
||||
lbool mss(expr_ref_vector &hard, expr_ref_vector &soft);
|
||||
|
||||
|
||||
public:
|
||||
prop_solver(spacer::manager& pm, fixedpoint_params const& p, symbol const& name);
|
||||
prop_solver(ast_manager &m, solver *solver0, solver* solver1,
|
||||
fp_params const& p, symbol const& name);
|
||||
|
||||
|
||||
void set_core(expr_ref_vector* core) { m_core = core; }
|
||||
|
|
@ -91,11 +93,19 @@ public:
|
|||
void assert_expr(expr * form);
|
||||
void assert_expr(expr * form, unsigned level);
|
||||
|
||||
void assert_exprs(const expr_ref_vector &fmls) {
|
||||
for (auto *f : fmls) assert_expr(f);
|
||||
}
|
||||
void assert_exprs(const expr_ref_vector &fmls, unsigned level) {
|
||||
for (auto *f : fmls) assert_expr(f, level);
|
||||
}
|
||||
|
||||
/**
|
||||
* check assumptions with a background formula
|
||||
*/
|
||||
lbool check_assumptions(const expr_ref_vector & hard,
|
||||
expr_ref_vector & soft,
|
||||
const expr_ref_vector &clause,
|
||||
unsigned num_bg = 0,
|
||||
expr * const *bg = nullptr,
|
||||
unsigned solver_id = 0);
|
||||
|
|
@ -136,7 +146,22 @@ public:
|
|||
~scoped_delta_level() {m_delta = false;}
|
||||
};
|
||||
|
||||
class scoped_weakness {
|
||||
public:
|
||||
solver *sol;
|
||||
scoped_weakness(prop_solver &ps, unsigned solver_id, unsigned weakness)
|
||||
: sol(nullptr) {
|
||||
sol = ps.m_solvers[solver_id == 0 ? 0 : 0 /* 1 */].get();
|
||||
if (!sol) return;
|
||||
sol->push_params();
|
||||
|
||||
params_ref p;
|
||||
p.set_bool("arith.ignore_int", weakness < 1);
|
||||
p.set_bool("array.weak", weakness < 2);
|
||||
sol->updt_params(p);
|
||||
}
|
||||
~scoped_weakness() {if (sol) {sol->pop_params();}}
|
||||
};
|
||||
};
|
||||
}
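For reference, a hypothetical call site for the new scoped_weakness guard might look as follows; ps is an existing prop_solver, and the solver id and weakness value are illustrative only.

    // Hypothetical use (RAII): parameters are pushed on construction and popped
    // when the guard leaves scope. With weakness == 1 the guard sets
    // arith.ignore_int = false and array.weak = true on the selected solver.
    {
        spacer::prop_solver::scoped_weakness _w(ps, 0 /* solver_id */, 1 /* weakness */);
        // ... issue queries through ps here ...
    }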
|
||||
|
||||
|
|
|
|||
|
|
@ -41,7 +41,7 @@ Revision History:
|
|||
#include "muz/spacer/spacer_mev_array.h"
|
||||
#include "muz/spacer/spacer_qe_project.h"
|
||||
|
||||
namespace
|
||||
namespace spacer_qe
|
||||
{
|
||||
bool is_partial_eq (app* a);
|
||||
|
||||
|
|
@ -186,7 +186,7 @@ bool is_partial_eq (app* a) {
|
|||
}
|
||||
|
||||
|
||||
namespace qe {
|
||||
namespace spacer_qe {
|
||||
|
||||
class is_relevant_default : public i_expr_pred {
|
||||
public:
|
||||
|
|
@ -195,7 +195,7 @@ namespace qe {
|
|||
}
|
||||
};
|
||||
|
||||
class mk_atom_default : public i_nnf_atom {
|
||||
class mk_atom_default : public qe::i_nnf_atom {
|
||||
public:
|
||||
void operator()(expr* e, bool pol, expr_ref& result) override {
|
||||
if (pol) result = e;
|
||||
|
|
@ -2254,7 +2254,7 @@ namespace qe {
|
|||
void arith_project(model& mdl, app_ref_vector& vars, expr_ref& fml) {
|
||||
ast_manager& m = vars.get_manager();
|
||||
arith_project_util ap(m);
|
||||
atom_set pos_lits, neg_lits;
|
||||
qe::atom_set pos_lits, neg_lits;
|
||||
is_relevant_default is_relevant;
|
||||
mk_atom_default mk_atom;
|
||||
get_nnf (fml, is_relevant, mk_atom, pos_lits, neg_lits);
|
||||
|
|
@ -2264,7 +2264,7 @@ namespace qe {
|
|||
void arith_project(model& mdl, app_ref_vector& vars, expr_ref& fml, expr_map& map) {
|
||||
ast_manager& m = vars.get_manager();
|
||||
arith_project_util ap(m);
|
||||
atom_set pos_lits, neg_lits;
|
||||
qe::atom_set pos_lits, neg_lits;
|
||||
is_relevant_default is_relevant;
|
||||
mk_atom_default mk_atom;
|
||||
get_nnf (fml, is_relevant, mk_atom, pos_lits, neg_lits);
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ Notes:
|
|||
#include "model/model.h"
|
||||
#include "ast/expr_map.h"
|
||||
|
||||
namespace qe {
|
||||
namespace spacer_qe {
|
||||
/**
|
||||
Loos-Weispfenning model-based projection for a basic conjunction.
|
||||
Lits is a vector of literals.
|
||||
|
|
|
|||
671 src/muz/spacer/spacer_quant_generalizer.cpp Normal file

@ -0,0 +1,671 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_quant_generalizer.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Quantified lemma generalizer.
|
||||
|
||||
Author:
|
||||
|
||||
|
||||
Yakir Vizel
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
|
||||
#include "muz/spacer/spacer_context.h"
|
||||
#include "muz/spacer/spacer_generalizers.h"
|
||||
#include "muz/spacer/spacer_manager.h"
|
||||
#include "ast/expr_abstract.h"
|
||||
#include "ast/rewriter/var_subst.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "ast/factor_equivs.h"
|
||||
#include "ast/rewriter/expr_safe_replace.h"
|
||||
#include "ast/substitution/matcher.h"
|
||||
#include "ast/expr_functors.h"
|
||||
|
||||
#include "muz/spacer/spacer_sem_matcher.h"
|
||||
|
||||
using namespace spacer;
|
||||
|
||||
namespace {
|
||||
struct index_lt_proc : public std::binary_function<app*, app *, bool> {
|
||||
arith_util m_arith;
|
||||
index_lt_proc(ast_manager &m) : m_arith(m) {}
|
||||
bool operator() (app *a, app *b) {
|
||||
// XXX This order is a bit strange.
|
||||
// XXX It does the job in our current application, but only because
|
||||
// XXX we assume that we only compare expressions of the form (B + k),
|
||||
// XXX where B is fixed and k is a number.
|
||||
// XXX Might be better to specialize just for that specific use case.
|
||||
rational val1, val2;
|
||||
bool is_num1 = m_arith.is_numeral(a, val1);
|
||||
bool is_num2 = m_arith.is_numeral(b, val2);
|
||||
|
||||
if (is_num1 && is_num2) {
|
||||
return val1 < val2;
|
||||
}
|
||||
else if (is_num1 != is_num2) {
|
||||
return is_num1;
|
||||
}
|
||||
|
||||
is_num1 = false;
|
||||
is_num2 = false;
|
||||
// compare the first numeric argument of a to first numeric argument of b
|
||||
// if available
|
||||
for (unsigned i = 0, sz = a->get_num_args(); !is_num1 && i < sz; ++i) {
|
||||
is_num1 = m_arith.is_numeral (a->get_arg(i), val1);
|
||||
}
|
||||
for (unsigned i = 0, sz = b->get_num_args(); !is_num2 && i < sz; ++i) {
|
||||
is_num2 = m_arith.is_numeral(b->get_arg(i), val2);
|
||||
}
|
||||
|
||||
if (is_num1 && is_num2) {
|
||||
return val1 < val2;
|
||||
}
|
||||
else if (is_num1 != is_num2) {
|
||||
return is_num1;
|
||||
}
|
||||
else {
|
||||
return a->get_id() < b->get_id();
|
||||
}
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
namespace spacer {
|
||||
|
||||
lemma_quantifier_generalizer::lemma_quantifier_generalizer(context &ctx,
|
||||
bool normalize_cube) :
|
||||
lemma_generalizer(ctx), m(ctx.get_ast_manager()), m_arith(m), m_cube(m),
|
||||
m_normalize_cube(normalize_cube), m_offset(0) {}
|
||||
|
||||
void lemma_quantifier_generalizer::collect_statistics(statistics &st) const
|
||||
{
|
||||
st.update("time.spacer.solve.reach.gen.quant", m_st.watch.get_seconds());
|
||||
st.update("quantifier gen", m_st.count);
|
||||
st.update("quantifier gen failures", m_st.num_failures);
|
||||
}
|
||||
|
||||
/**
|
||||
Finds candidates terms to be existentially abstracted.
|
||||
A term t is a candidate if
|
||||
(a) t is ground
|
||||
|
||||
(b) t appears in an expression of the form (select A t) for some array A
|
||||
|
||||
(c) t appears in an expression of the form (select A (+ t v))
|
||||
where v is ground
|
||||
|
||||
The goal is to pick candidates that might result in a lemma in the
|
||||
essentially uninterpreted fragment of FOL or its extensions.
|
||||
*/
|
||||
void lemma_quantifier_generalizer::find_candidates(expr *e,
|
||||
app_ref_vector &candidates) {
|
||||
if (!contains_selects(e, m)) return;
|
||||
|
||||
app_ref_vector indices(m);
|
||||
get_select_indices(e, indices, m);
|
||||
|
||||
app_ref_vector extra(m);
|
||||
expr_sparse_mark marked_args;
|
||||
|
||||
// Make sure not to try and quantify already-quantified indices
|
||||
for (unsigned idx=0, sz = indices.size(); idx < sz; idx++) {
|
||||
// skip expressions that already contain a quantified variable
|
||||
if (has_zk_const(indices.get(idx))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
app *index = indices.get(idx);
|
||||
TRACE ("spacer_qgen", tout << "Candidate: "<< mk_pp(index, m)
|
||||
<< " in " << mk_pp(e, m) << "\n";);
|
||||
extra.push_back(index);
|
||||
if (m_arith.is_add(index)) {
|
||||
for (expr * arg : *index) {
|
||||
if (!is_app(arg) || marked_args.is_marked(arg)) {continue;}
|
||||
marked_args.mark(arg);
|
||||
candidates.push_back (to_app(arg));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::sort(candidates.c_ptr(), candidates.c_ptr() + candidates.size(),
|
||||
index_lt_proc(m));
|
||||
// keep actual select indices in the order found at the back of
|
||||
// candidate list. There is no particular reason for this order
|
||||
candidates.append(extra);
|
||||
}
|
||||
|
||||
|
||||
/// returns true if expression e contains a sub-expression of the form (select A idx) where
|
||||
/// idx contains exactly one skolem from zks. Returns idx and the skolem
|
||||
bool lemma_quantifier_generalizer::match_sk_idx(expr *e, app_ref_vector const &zks, expr *&idx, app *&sk) {
|
||||
if (zks.size() != 1) return false;
|
||||
contains_app has_zk(m, zks.get(0));
|
||||
|
||||
if (!contains_selects(e, m)) return false;
|
||||
app_ref_vector indicies(m);
|
||||
get_select_indices(e, indicies, m);
|
||||
if (indicies.size() > 2) return false;
|
||||
|
||||
unsigned i=0;
|
||||
if (indicies.size() == 1) {
|
||||
if (!has_zk(indicies.get(0))) return false;
|
||||
}
|
||||
else {
|
||||
if (has_zk(indicies.get(0)) && !has_zk(indicies.get(1)))
|
||||
i = 0;
|
||||
else if (!has_zk(indicies.get(0)) && has_zk(indicies.get(1)))
|
||||
i = 1;
|
||||
else if (!has_zk(indicies.get(0)) && !has_zk(indicies.get(1)))
|
||||
return false;
|
||||
}
|
||||
|
||||
idx = indicies.get(i);
|
||||
sk = zks.get(0);
|
||||
return true;
|
||||
}
|
||||
|
||||
namespace {
|
||||
expr* times_minus_one(expr *e, arith_util &arith) {
|
||||
expr *r;
|
||||
if (arith.is_times_minus_one (e, r)) { return r; }
|
||||
|
||||
return arith.mk_mul(arith.mk_numeral(rational(-1), arith.is_int(get_sort(e))), e);
|
||||
}
|
||||
}
|
||||
|
||||
/** Attempts to rewrite a cube so that the quantified variable appears as
|
||||
a top-level argument of a select term
|
||||
|
||||
Find sub-expression of the form (select A (+ sk!0 t)) and replaces
|
||||
(+ sk!0 t) --> sk!0 and sk!0 --> (+ sk!0 (* (- 1) t))
|
||||
|
||||
Current implementation is an ugly hack for one special case
|
||||
*/
|
||||
void lemma_quantifier_generalizer::cleanup(expr_ref_vector &cube, app_ref_vector const &zks, expr_ref &bind) {
|
||||
if (zks.size() != 1) return;
|
||||
|
||||
arith_util arith(m);
|
||||
expr *idx = nullptr;
|
||||
app *sk = nullptr;
|
||||
expr_ref rep(m);
|
||||
|
||||
for (expr *e : cube) {
|
||||
if (match_sk_idx(e, zks, idx, sk)) {
|
||||
CTRACE("spacer_qgen", idx != sk,
|
||||
tout << "Possible cleanup of " << mk_pp(idx, m) << " in "
|
||||
<< mk_pp(e, m) << " on " << mk_pp(sk, m) << "\n";);
|
||||
|
||||
if (!arith.is_add(idx)) continue;
|
||||
app *a = to_app(idx);
|
||||
bool found = false;
|
||||
expr_ref_vector kids(m);
|
||||
expr_ref_vector kids_bind(m);
|
||||
for (expr* arg : *a) {
|
||||
if (arg == sk) {
|
||||
found = true;
|
||||
kids.push_back(arg);
|
||||
kids_bind.push_back(bind);
|
||||
}
|
||||
else {
|
||||
kids.push_back (times_minus_one(arg, arith));
|
||||
kids_bind.push_back (times_minus_one(arg, arith));
|
||||
}
|
||||
}
|
||||
if (!found) continue;
|
||||
|
||||
rep = arith.mk_add(kids.size(), kids.c_ptr());
|
||||
bind = arith.mk_add(kids_bind.size(), kids_bind.c_ptr());
|
||||
TRACE("spacer_qgen",
|
||||
tout << "replace " << mk_pp(idx, m) << " with " << mk_pp(rep, m) << "\n";);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (rep) {
|
||||
expr_safe_replace rw(m);
|
||||
rw.insert(sk, rep);
|
||||
rw.insert(idx, sk);
|
||||
rw(cube);
|
||||
TRACE("spacer_qgen",
|
||||
tout << "Cleaned cube to: " << mk_and(cube) << "\n";);
|
||||
}
|
||||
}

/**
   Create an abstract cube by abstracting a given term with a given variable.
   On return,
     gnd_cube contains all ground literals from m_cube
     abs_cube contains all newly quantified literals from m_cube
     lb contains an expression determining the lower bound on the variable
     ub contains an expression determining the upper bound on the variable

   The conjunction of gnd_cube and abs_cube is the new quantified cube.

   lb and ub are null if no bound was found.
*/
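// Illustrative sketch (not from the original source): abstracting term (+ i 1)
// with variable v in the cube { (= (select a (+ i 1)) 0), (>= (+ i 1) N) } would give
//   gnd_cube = {}                                  (no literal stays ground)
//   abs_cube = { (= (select a v) 0), (>= v N) }
// with lb set to (>= v N) and ub left null, to be guessed later by generalize().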
|
||||
void lemma_quantifier_generalizer::mk_abs_cube(lemma_ref &lemma, app *term, var *var,
|
||||
expr_ref_vector &gnd_cube,
|
||||
expr_ref_vector &abs_cube,
|
||||
expr *&lb, expr *&ub, unsigned &stride) {
|
||||
|
||||
// create an abstraction function that maps candidate term to variables
|
||||
expr_safe_replace sub(m);
|
||||
// term -> var
|
||||
sub.insert(term , var);
|
||||
rational val;
|
||||
if (m_arith.is_numeral(term, val)) {
|
||||
bool is_int = val.is_int();
|
||||
expr_ref minus_one(m);
|
||||
minus_one = m_arith.mk_numeral(rational(-1), is_int);
|
||||
|
||||
// term+1 -> var+1 if term is a number
|
||||
sub.insert(
|
||||
m_arith.mk_numeral(val + 1, is_int),
|
||||
m_arith.mk_add(var, m_arith.mk_numeral(rational(1), is_int)));
|
||||
// -term-1 -> -1*var + -1 if term is a number
|
||||
sub.insert(
|
||||
m_arith.mk_numeral(-1*val + -1, is_int),
|
||||
m_arith.mk_add (m_arith.mk_mul (minus_one, var), minus_one));
|
||||
}
|
||||
|
||||
lb = nullptr;
|
||||
ub = nullptr;
|
||||
|
||||
for (expr *lit : m_cube) {
|
||||
expr_ref abs_lit(m);
|
||||
sub (lit, abs_lit);
|
||||
if (lit == abs_lit) {
|
||||
gnd_cube.push_back(lit);
|
||||
}
|
||||
else {
|
||||
expr *e1, *e2;
|
||||
// generalize v=num into v>=num
|
||||
if (m.is_eq(abs_lit, e1, e2) && (e1 == var || e2 == var)) {
|
||||
if (m_arith.is_numeral(e1)) {
|
||||
abs_lit = m_arith.mk_ge (var, e1);
|
||||
} else if (m_arith.is_numeral(e2)) {
|
||||
abs_lit = m_arith.mk_ge(var, e2);
|
||||
}
|
||||
}
|
||||
abs_cube.push_back(abs_lit);
|
||||
if (contains_selects(abs_lit, m)) {
|
||||
expr_ref_vector pob_cube(m);
|
||||
flatten_and(lemma->get_pob()->post(), pob_cube);
|
||||
find_stride(pob_cube, abs_lit, stride);
|
||||
}
|
||||
|
||||
if (!lb && is_lb(var, abs_lit)) {
|
||||
lb = abs_lit;
|
||||
}
|
||||
else if (!ub && is_ub(var, abs_lit)) {
|
||||
ub = abs_lit;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// -- returns true if e is an upper bound for var
|
||||
bool lemma_quantifier_generalizer::is_ub(var *var, expr *e) {
|
||||
expr *e1, *e2;
|
||||
// var <= e2
|
||||
if ((m_arith.is_le (e, e1, e2) || m_arith.is_lt(e, e1, e2)) && var == e1) {
|
||||
return true;
|
||||
}
|
||||
// e1 >= var
|
||||
if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) && var == e2) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// t <= -1*var
|
||||
if ((m_arith.is_le (e, e1, e2) || m_arith.is_lt(e, e1, e2))
|
||||
&& m_arith.is_times_minus_one(e2, e2) && e2 == var) {
|
||||
return true;
|
||||
}
|
||||
// -1*var >= t
|
||||
if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) &&
|
||||
m_arith.is_times_minus_one(e1, e1) && e1 == var) {
|
||||
return true;
|
||||
}
|
||||
// ! (var >= e2)
|
||||
if (m.is_not (e, e1) && is_lb(var, e1)) {
|
||||
return true;
|
||||
}
|
||||
// var + t1 <= t2
|
||||
if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) &&
|
||||
m_arith.is_add(e1)) {
|
||||
app *a = to_app(e1);
|
||||
for (expr* arg : *a) {
|
||||
if (arg == var) return true;
|
||||
}
|
||||
}
|
||||
// t1 <= t2 + -1*var
|
||||
if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) &&
|
||||
m_arith.is_add(e2)) {
|
||||
app *a = to_app(e2);
|
||||
for (expr* arg : *a) {
|
||||
if (m_arith.is_times_minus_one(arg, arg) && arg == var)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// t1 >= t2 + var
|
||||
if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) &&
|
||||
m_arith.is_add(e2)) {
|
||||
app *a = to_app(e2);
|
||||
for (expr * arg : *a) {
|
||||
if (arg == var) return true;
|
||||
}
|
||||
}
|
||||
// -1*var + t1 >= t2
|
||||
if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) &&
|
||||
m_arith.is_add(e1)) {
|
||||
app *a = to_app(e1);
|
||||
for (expr * arg : *a) {
|
||||
if (m_arith.is_times_minus_one(arg, arg) && arg == var)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// -- returns true if e is a lower bound for var
|
||||
bool lemma_quantifier_generalizer::is_lb(var *var, expr *e) {
|
||||
expr *e1, *e2;
|
||||
// var >= e2
|
||||
if ((m_arith.is_ge (e, e1, e2) || m_arith.is_gt(e, e1, e2)) && var == e1) {
|
||||
return true;
|
||||
}
|
||||
// e1 <= var
|
||||
if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) && var == e2) {
|
||||
return true;
|
||||
}
|
||||
// t >= -1*var
|
||||
if ((m_arith.is_ge (e, e1, e2) || m_arith.is_gt(e, e1, e2))
|
||||
&& m_arith.is_times_minus_one(e2, e2) && e2 == var) {
|
||||
return true;
|
||||
}
|
||||
// -1*var <= t
|
||||
if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) &&
|
||||
m_arith.is_times_minus_one(e1, e1) && e1 == var) {
|
||||
return true;
|
||||
}
|
||||
// ! (var <= e2)
|
||||
if (m.is_not (e, e1) && is_ub(var, e1)) {
|
||||
return true;
|
||||
}
|
||||
// var + t1 >= t2
|
||||
if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) &&
|
||||
m_arith.is_add(e1)) {
|
||||
app *a = to_app(e1);
|
||||
for (expr * arg : *a) {
|
||||
if (arg == var) return true;
|
||||
}
|
||||
}
|
||||
// t1 >= t2 + -1*var
|
||||
if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) &&
|
||||
m_arith.is_add(e2)) {
|
||||
app *a = to_app(e2);
|
||||
for (expr * arg : *a) {
|
||||
if (m_arith.is_times_minus_one(arg, arg) && arg == var)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
// t1 <= t2 + var
|
||||
if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) &&
|
||||
m_arith.is_add(e2)) {
|
||||
app *a = to_app(e2);
|
||||
for (expr * arg : *a) {
|
||||
if (arg == var) return true;
|
||||
}
|
||||
}
|
||||
// -1*var + t1 <= t2
|
||||
if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) &&
|
||||
m_arith.is_add(e1)) {
|
||||
app *a = to_app(e1);
|
||||
for (expr * arg : *a) {
|
||||
if (m_arith.is_times_minus_one(arg, arg) && arg == var)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool lemma_quantifier_generalizer::generalize (lemma_ref &lemma, app *term) {
|
||||
|
||||
expr *lb = nullptr, *ub = nullptr;
|
||||
unsigned stride = 1;
|
||||
expr_ref_vector gnd_cube(m);
|
||||
expr_ref_vector abs_cube(m);
|
||||
|
||||
var_ref var(m);
|
||||
var = m.mk_var (m_offset, get_sort(term));
|
||||
|
||||
mk_abs_cube(lemma, term, var, gnd_cube, abs_cube, lb, ub, stride);
|
||||
if (abs_cube.empty()) {return false;}
|
||||
|
||||
TRACE("spacer_qgen",
|
||||
tout << "abs_cube is: " << mk_and(abs_cube) << "\n";
|
||||
tout << "lb = ";
|
||||
if (lb) tout << mk_pp(lb, m); else tout << "none";
|
||||
tout << "\n";
|
||||
tout << "ub = ";
|
||||
if (ub) tout << mk_pp(ub, m); else tout << "none";
|
||||
tout << "\n";);
|
||||
|
||||
if (!lb && !ub)
|
||||
return false;
|
||||
|
||||
// -- guess lower or upper bound if missing
|
||||
if (!lb) {
|
||||
abs_cube.push_back (m_arith.mk_ge (var, term));
|
||||
lb = abs_cube.back();
|
||||
}
|
||||
if (!ub) {
|
||||
abs_cube.push_back (m_arith.mk_lt(var, term));
|
||||
ub = abs_cube.back();
|
||||
}
|
||||
|
||||
rational init;
|
||||
expr_ref constant(m);
|
||||
if (is_var(to_app(lb)->get_arg(0)))
|
||||
constant = to_app(lb)->get_arg(1);
|
||||
else
|
||||
constant = to_app(lb)->get_arg(0);
|
||||
|
||||
if (stride > 1 && m_arith.is_numeral(constant, init)) {
|
||||
unsigned mod = init.get_unsigned() % stride;
|
||||
TRACE("spacer_qgen",
|
||||
tout << "mod=" << mod << " init=" << init << " stride=" << stride << "\n";
|
||||
tout.flush(););
|
||||
abs_cube.push_back(m.mk_eq(
|
||||
m_arith.mk_mod(var, m_arith.mk_numeral(rational(stride), true)),
|
||||
m_arith.mk_numeral(rational(mod), true)));
|
||||
}
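    // Illustrative note (not from the original source): with init = 10 and
    // stride = 4 the constraint added above is (= (mod var 4) 2), restricting
    // the quantified index to the arithmetic progression 2, 6, 10, ...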
|
||||
|
||||
// skolemize
|
||||
expr_ref gnd(m);
|
||||
app_ref_vector zks(m);
|
||||
ground_expr(mk_and(abs_cube), gnd, zks);
|
||||
flatten_and(gnd, gnd_cube);
|
||||
|
||||
TRACE("spacer_qgen",
|
||||
tout << "New CUBE is: " << gnd_cube << "\n";);
|
||||
|
||||
// check if the result is a true lemma
|
||||
unsigned uses_level = 0;
|
||||
pred_transformer &pt = lemma->get_pob()->pt();
|
||||
if (pt.check_inductive(lemma->level(), gnd_cube, uses_level, lemma->weakness())) {
|
||||
TRACE("spacer_qgen",
|
||||
tout << "Quantifier Generalization Succeeded!\n"
|
||||
<< "New CUBE is: " << gnd_cube << "\n";);
|
||||
SASSERT(zks.size() >= static_cast<unsigned>(m_offset));
|
||||
|
||||
// lift quantified variables to top of select
|
||||
expr_ref ext_bind(m);
|
||||
ext_bind = term;
|
||||
cleanup(gnd_cube, zks, ext_bind);
|
||||
|
||||
// XXX better do that check before changing bind in cleanup()
|
||||
// XXX Or not because substitution might introduce _n variable into bind
|
||||
if (m_ctx.get_manager().is_n_formula(ext_bind)) {
|
||||
// XXX this creates an instance, but not necessarily the needed one
|
||||
|
||||
// XXX This is sound because any instance of
|
||||
// XXX universal quantifier is sound
|
||||
|
||||
// XXX needs better long term solution. leave
|
||||
// comment here for the future
|
||||
m_ctx.get_manager().formula_n2o(ext_bind, ext_bind, 0);
|
||||
}
|
||||
|
||||
lemma->update_cube(lemma->get_pob(), gnd_cube);
|
||||
lemma->set_level(uses_level);
|
||||
|
||||
SASSERT(var->get_idx() < zks.size());
|
||||
SASSERT(is_app(ext_bind));
|
||||
lemma->add_skolem(zks.get(var->get_idx()), to_app(ext_bind));
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool lemma_quantifier_generalizer::find_stride(expr_ref_vector &c, expr_ref &pattern, unsigned &stride) {
|
||||
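    // Descriptive note (not from the original source): the stride is computed as
    // the gap between the two smallest concrete index instances that match the
    // select pattern; e.g. if matching indices 0, 4 and 8 occur in the pob cube,
    // the resulting stride is 4.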
expr_ref tmp(m);
|
||||
tmp = mk_and(c);
|
||||
normalize(tmp, tmp, false, true);
|
||||
c.reset();
|
||||
flatten_and(tmp, c);
|
||||
|
||||
app_ref_vector indices(m);
|
||||
get_select_indices(pattern, indices, m);
|
||||
|
||||
// TODO
|
||||
if (indices.size() > 1)
|
||||
return false;
|
||||
|
||||
app *p_index = indices.get(0);
|
||||
if (is_var(p_index)) return false;
|
||||
|
||||
std::vector<unsigned> instances;
|
||||
for (expr* lit : c) {
|
||||
|
||||
if (!contains_selects(lit, m))
|
||||
continue;
|
||||
|
||||
indices.reset();
|
||||
get_select_indices(lit, indices, m);
|
||||
|
||||
// TODO:
|
||||
if (indices.size() > 1)
|
||||
continue;
|
||||
|
||||
app *candidate = indices.get(0);
|
||||
|
||||
unsigned size = p_index->get_num_args();
|
||||
unsigned matched = 0;
|
||||
for (unsigned p=0; p < size; p++) {
|
||||
expr *arg = p_index->get_arg(p);
|
||||
if (is_var(arg)) {
|
||||
rational val;
|
||||
if (p < candidate->get_num_args() &&
|
||||
m_arith.is_numeral(candidate->get_arg(p), val) &&
|
||||
val.is_unsigned()) {
|
||||
instances.push_back(val.get_unsigned());
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (expr* cand : *candidate) {
|
||||
if (cand == arg) {
|
||||
matched++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (matched < size - 1)
|
||||
continue;
|
||||
|
||||
if (candidate->get_num_args() == matched)
|
||||
instances.push_back(0);
|
||||
|
||||
TRACE("spacer_qgen",
|
||||
tout << "Match succeeded!\n";);
|
||||
}
|
||||
|
||||
if (instances.size() <= 1)
|
||||
return false;
|
||||
|
||||
std::sort(instances.begin(), instances.end());
|
||||
|
||||
stride = instances[1]-instances[0];
|
||||
TRACE("spacer_qgen", tout << "Index Stride is: " << stride << "\n";);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void lemma_quantifier_generalizer::operator()(lemma_ref &lemma) {
|
||||
if (lemma->get_cube().empty()) return;
|
||||
if (!lemma->has_pob()) return;
|
||||
|
||||
m_st.count++;
|
||||
scoped_watch _w_(m_st.watch);
|
||||
|
||||
TRACE("spacer_qgen",
|
||||
tout << "initial cube: " << mk_and(lemma->get_cube()) << "\n";);
|
||||
|
||||
// setup the cube
|
||||
m_cube.reset();
|
||||
m_cube.append(lemma->get_cube());
|
||||
|
||||
if (m_normalize_cube) {
|
||||
// -- re-normalize the cube
|
||||
expr_ref c(m);
|
||||
c = mk_and(m_cube);
|
||||
normalize(c, c, false, true);
|
||||
m_cube.reset();
|
||||
flatten_and(c, m_cube);
|
||||
TRACE("spacer_qgen",
|
||||
tout << "normalized cube:\n" << mk_and(m_cube) << "\n";);
|
||||
}
|
||||
|
||||
// first unused free variable
|
||||
m_offset = lemma->get_pob()->get_free_vars_size();
|
||||
|
||||
// for every literal, find a candidate term to abstract
|
||||
for (unsigned i=0; i < m_cube.size(); i++) {
|
||||
expr *r = m_cube.get(i);
|
||||
|
||||
// generate candidates for abstraction
|
||||
app_ref_vector candidates(m);
|
||||
find_candidates(r, candidates);
|
||||
if (candidates.empty()) continue;
|
||||
|
||||
// for every candidate
|
||||
for (unsigned arg=0, sz = candidates.size(); arg < sz; arg++) {
|
||||
if (generalize (lemma, candidates.get(arg))) {
|
||||
return;
|
||||
}
|
||||
else {
|
||||
++m_st.num_failures;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
178  src/muz/spacer/spacer_sat_answer.cpp  Normal file
@@ -0,0 +1,178 @@
#include "muz/spacer/spacer_sat_answer.h"
|
||||
#include "muz/base/dl_context.h"
|
||||
#include "muz/base/dl_rule.h"
|
||||
|
||||
#include "smt/smt_solver.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
struct ground_sat_answer_op::frame {
|
||||
reach_fact *m_rf;
|
||||
pred_transformer &m_pt;
|
||||
expr_ref_vector m_gnd_subst;
|
||||
expr_ref m_gnd_eq;
|
||||
expr_ref m_fact;
|
||||
unsigned m_visit;
|
||||
expr_ref_vector m_kids;
|
||||
|
||||
frame(reach_fact *rf, pred_transformer &pt, const expr_ref_vector &gnd_subst) :
|
||||
m_rf(rf), m_pt(pt),
|
||||
m_gnd_subst(gnd_subst),
|
||||
m_gnd_eq(pt.get_ast_manager()),
|
||||
m_fact(pt.get_ast_manager()),
|
||||
m_visit(0),
|
||||
m_kids(pt.get_ast_manager()) {
|
||||
|
||||
ast_manager &m = pt.get_ast_manager();
|
||||
spacer::manager &pm = pt.get_manager();
|
||||
|
||||
m_fact = m.mk_app(head(), m_gnd_subst.size(), m_gnd_subst.c_ptr());
|
||||
if (pt.head()->get_arity() == 0)
|
||||
m_gnd_eq = m.mk_true();
|
||||
else {
|
||||
SASSERT(m_gnd_subst.size() == pt.head()->get_arity());
|
||||
for (unsigned i = 0, sz = pt.sig_size(); i < sz; ++i) {
|
||||
m_gnd_eq = m.mk_eq(m.mk_const(pm.o2n(pt.sig(i), 0)),
|
||||
m_gnd_subst.get(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func_decl* head() {return m_pt.head();}
|
||||
expr* fact() {return m_fact;}
|
||||
const datalog::rule &rule() {return m_rf->get_rule();}
|
||||
pred_transformer &pt() {return m_pt;}
|
||||
};
|
||||
|
||||
ground_sat_answer_op::ground_sat_answer_op(context &ctx) :
|
||||
m_ctx(ctx), m(m_ctx.get_ast_manager()), m_pm(m_ctx.get_manager()),
|
||||
m_pinned(m) {
|
||||
m_solver = mk_smt_solver(m, params_ref::get_empty(), symbol::null);
|
||||
}
|
||||
|
||||
proof_ref ground_sat_answer_op::operator()(pred_transformer &query) {
|
||||
|
||||
|
||||
vector<frame> todo, new_todo;
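    // Descriptive note (not from the original source): the loop below is a
    // post-order walk over reachability facts. A frame is expanded into child
    // frames on its first visit (m_visit == 0) and turned into a hyper-resolution
    // proof step on the second visit; finished proofs are cached in m_cache,
    // keyed by the corresponding ground fact.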
|
||||
|
||||
// -- find substitution for a query if query is not nullary
|
||||
expr_ref_vector qsubst(m);
|
||||
if (query.head()->get_arity() > 0) {
|
||||
solver::scoped_push _s_(*m_solver);
|
||||
m_solver->assert_expr(query.get_last_rf()->get());
|
||||
lbool res = m_solver->check_sat(0, nullptr);
|
||||
(void)res;
|
||||
SASSERT(res == l_true);
|
||||
model_ref mdl;
|
||||
m_solver->get_model(mdl);
|
||||
for (unsigned i = 0, sz = query.sig_size(); i < sz; ++i) {
|
||||
expr_ref arg(m), val(m);
|
||||
arg = m.mk_const(m_pm.o2n(query.sig(i), 0));
|
||||
mdl->eval(arg, val, true);
|
||||
qsubst.push_back(val);
|
||||
}
|
||||
}
|
||||
|
||||
todo.push_back(frame(query.get_last_rf(), query, qsubst));
|
||||
expr_ref root_fact(m);
|
||||
root_fact = todo.back().fact();
|
||||
|
||||
while (!todo.empty()) {
|
||||
frame &curr = todo.back();
|
||||
if (m_cache.contains(curr.fact())) {
|
||||
todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
|
||||
if (curr.m_visit == 0) {
|
||||
new_todo.reset();
|
||||
mk_children(curr, new_todo);
|
||||
curr.m_visit = 1;
|
||||
// curr becomes invalid
|
||||
todo.append(new_todo);
|
||||
}
|
||||
else {
|
||||
proof* pf = mk_proof_step(curr);
|
||||
m_pinned.push_back(curr.fact());
|
||||
m_cache.insert(curr.fact(), pf);
|
||||
todo.pop_back();
|
||||
}
|
||||
}
|
||||
return proof_ref(m_cache.find(root_fact), m);
|
||||
}
|
||||
|
||||
|
||||
void ground_sat_answer_op::mk_children(frame &fr, vector<frame> &todo) {
|
||||
const datalog::rule &r = fr.rule();
|
||||
ptr_vector<func_decl> preds;
|
||||
fr.pt().find_predecessors(r, preds);
|
||||
|
||||
if (preds.empty()) return;
|
||||
|
||||
const reach_fact_ref_vector &kid_rfs = fr.m_rf->get_justifications();
|
||||
solver::scoped_push _s_(*m_solver);
|
||||
m_solver->assert_expr(fr.m_gnd_eq);
|
||||
unsigned ut_sz = r.get_uninterpreted_tail_size();
|
||||
for (unsigned i = 0; i < ut_sz; ++i) {
|
||||
expr_ref f(m);
|
||||
m_pm.formula_n2o(kid_rfs.get(i)->get(), f, i);
|
||||
m_solver->assert_expr(f);
|
||||
}
|
||||
m_solver->assert_expr(fr.pt().transition());
|
||||
m_solver->assert_expr(fr.pt().rule2tag(&r));
|
||||
|
||||
lbool res = m_solver->check_sat(0, nullptr);
|
||||
(void)res;
|
||||
VERIFY(res == l_true);
|
||||
|
||||
model_ref mdl;
|
||||
m_solver->get_model(mdl);
|
||||
expr_ref_vector subst(m);
|
||||
for (unsigned i = 0, sz = preds.size(); i < sz; ++i) {
|
||||
subst.reset();
|
||||
mk_child_subst_from_model(preds.get(i), i, mdl, subst);
|
||||
todo.push_back(frame(kid_rfs.get(i),
|
||||
m_ctx.get_pred_transformer(preds.get(i)), subst));
|
||||
fr.m_kids.push_back(todo.back().fact());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void ground_sat_answer_op::mk_child_subst_from_model(func_decl *pred,
|
||||
unsigned j, model_ref &mdl,
|
||||
expr_ref_vector &subst) {
|
||||
pred_transformer &pt = m_ctx.get_pred_transformer(pred);
|
||||
for (unsigned i = 0, sz = pt.sig_size(); i < sz; ++i) {
|
||||
expr_ref arg(m), val(m);
|
||||
arg = m.mk_const(m_pm.o2o(pt.sig(i), 0, j));
|
||||
mdl->eval(arg, val, true);
|
||||
subst.push_back(val);
|
||||
}
|
||||
}
|
||||
|
||||
proof *ground_sat_answer_op::mk_proof_step(frame &fr) {
|
||||
svector<std::pair<unsigned, unsigned>> positions;
|
||||
vector<expr_ref_vector> substs;
|
||||
|
||||
proof_ref_vector premises(m);
|
||||
datalog::rule_manager &rm = m_ctx.get_datalog_context().get_rule_manager();
|
||||
expr_ref rule_fml(m);
|
||||
rm.to_formula(fr.rule(), rule_fml);
|
||||
// premises.push_back(fr.rule().get_proof());
|
||||
premises.push_back(m.mk_asserted(rule_fml));
|
||||
for (auto &k : fr.m_kids) {premises.push_back(m_cache.find(k));}
|
||||
|
||||
for (unsigned i = 0; i < premises.size(); i++) {
|
||||
positions.push_back(std::make_pair(0,i));
|
||||
}
|
||||
for (unsigned i = 0; i <= premises.size(); i++) {
|
||||
substs.push_back(expr_ref_vector(m));
|
||||
}
|
||||
m_pinned.push_back(m.mk_hyper_resolve(premises.size(),
|
||||
premises.c_ptr(),
|
||||
fr.fact(),
|
||||
positions, substs));
|
||||
return to_app(m_pinned.back());
|
||||
}
|
||||
|
||||
}
|
||||
55  src/muz/spacer/spacer_sat_answer.h  Normal file
@@ -0,0 +1,55 @@
/*++
|
||||
Copyright (c) 2018 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_sat_answer.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Compute refutation proof for CHC
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_SAT_ANSWER_H_
|
||||
#define _SPACER_SAT_ANSWER_H_
|
||||
|
||||
#include "muz/spacer/spacer_context.h"
|
||||
#include "ast/ast.h"
|
||||
#include "util/obj_hashtable.h"
|
||||
#include "model/model.h"
|
||||
#include "solver/solver.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class ground_sat_answer_op {
|
||||
context &m_ctx;
|
||||
ast_manager &m;
|
||||
manager &m_pm;
|
||||
|
||||
expr_ref_vector m_pinned;
|
||||
obj_map<expr, proof*> m_cache;
|
||||
|
||||
ref<solver> m_solver;
|
||||
|
||||
struct frame;
|
||||
|
||||
proof *mk_proof_step(frame &fr);
|
||||
void mk_children(frame &fr, vector<frame> &todo);
|
||||
void mk_child_subst_from_model(func_decl *pred, unsigned i,
|
||||
model_ref &mdl, expr_ref_vector &subst);
|
||||
|
||||
public:
|
||||
ground_sat_answer_op(context &ctx);
|
||||
|
||||
proof_ref operator() (pred_transformer &query);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
147  src/muz/spacer/spacer_sem_matcher.cpp  Normal file
@@ -0,0 +1,147 @@
/*++
|
||||
Copyright (c) 2006 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
sem_matcher.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
<abstract>
|
||||
|
||||
Author:
|
||||
|
||||
Leonardo de Moura (leonardo) 2008-02-02.
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
#include "muz/spacer/spacer_sem_matcher.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
sem_matcher::sem_matcher(ast_manager &man) : m(man), m_arith(m), m_pinned(m) {}
|
||||
|
||||
bool sem_matcher::match_var (var *v, expr *e) {
|
||||
expr_offset r;
|
||||
if (m_subst->find(v, 0, r)) {
|
||||
if (!m.are_equal(r.get_expr(), e)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else {
|
||||
m_subst->insert(v, 0, expr_offset(e, 1));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
bool sem_matcher::operator()(expr * e1, expr * e2, substitution & s, bool &pos) {
|
||||
reset();
|
||||
m_subst = &s;
|
||||
m_todo.push_back(expr_pair(e1, e2));
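    // Descriptive note (not from the original source): the work-list below matches
    // corresponding sub-terms of e1 and e2 top-down. Variables in e1 are bound via
    // match_var; when head symbols differ, a few normalizations are tried
    // (negation stripping at the top level, the <=/> and >=/< dualities, and
    // matching (+ x num1) against a numeral num2 by binding x to num2 - num1).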
|
||||
|
||||
// true on the first run through the loop
|
||||
bool top = true;
|
||||
pos = true;
|
||||
while (!m_todo.empty()) {
|
||||
expr_pair const & p = m_todo.back();
|
||||
|
||||
if (is_var(p.first)) {
|
||||
if (!match_var(to_var(p.first), p.second)) {
|
||||
return false;
|
||||
}
|
||||
m_todo.pop_back();
|
||||
top = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
if (is_var(p.second))
|
||||
return false;
|
||||
if (!is_app(p.first))
|
||||
return false;
|
||||
if (!is_app(p.second))
|
||||
return false;
|
||||
|
||||
app * n1 = to_app(p.first);
|
||||
app * n2 = to_app(p.second);
|
||||
|
||||
expr *t = nullptr;
|
||||
|
||||
// strip negation
|
||||
if (top && n1->get_decl() != n2->get_decl()) {
|
||||
if (m.is_not(n1, t) && !m.is_not(n2) && is_app(t) &&
|
||||
to_app(t)->get_decl() == n2->get_decl()) {
|
||||
pos = false;
|
||||
n1 = to_app(t);
|
||||
}
|
||||
else if (!m.is_not(n1) && m.is_not(n2, t) && is_app(t) &&
|
||||
to_app(t)->get_decl() == n1->get_decl()) {
|
||||
pos = false;
|
||||
n2 = to_app(t);
|
||||
}
|
||||
}
|
||||
top = false;
|
||||
|
||||
if (n1->get_decl() != n2->get_decl()) {
|
||||
expr *e1 = nullptr, *e2 = nullptr;
|
||||
rational val1, val2;
|
||||
|
||||
// x<=y == !(x>y)
|
||||
if (m_arith.is_le(n1) && m.is_not(n2, t) && m_arith.is_gt(t)) {
|
||||
n2 = to_app(t);
|
||||
}
|
||||
else if (m_arith.is_le(n2) && m.is_not(n1, t) && m_arith.is_gt(t)) {
|
||||
n1 = to_app(t);
|
||||
}
|
||||
// x>=y == !(x<y)
|
||||
if (m_arith.is_ge(n1) && m.is_not(n2, t) && m_arith.is_lt(t)) {
|
||||
n2 = to_app(t);
|
||||
}
|
||||
else if (m_arith.is_ge(n2) && m.is_not(n1, t) && m_arith.is_lt(t)) {
|
||||
n1 = to_app(t);
|
||||
}
|
||||
// x+val1 matched to val2, where x is a variable, and
|
||||
// val1, val2 are numerals
|
||||
if (m_arith.is_numeral(n2, val2) && m_arith.is_add(n1, e1, e2) &&
|
||||
m_arith.is_numeral(e2, val1) && is_var(e1)) {
|
||||
val1 = val2 - val1;
|
||||
|
||||
expr_ref num1(m);
|
||||
num1 = m_arith.mk_numeral (val1, val1.is_int());
|
||||
m_pinned.push_back(num1);
|
||||
if (!match_var (to_var(e1), num1)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
m_todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned num_args1 = n1->get_num_args();
|
||||
if (num_args1 != n2->get_num_args())
|
||||
return false;
|
||||
|
||||
m_todo.pop_back();
|
||||
|
||||
if (num_args1 == 0)
|
||||
continue;
|
||||
|
||||
unsigned j = num_args1;
|
||||
while (j > 0) {
|
||||
--j;
|
||||
m_todo.push_back(expr_pair(n1->get_arg(j), n2->get_arg(j)));
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void sem_matcher::reset() {
|
||||
m_todo.reset();
|
||||
m_pinned.reset();
|
||||
}
|
||||
}
|
||||
69  src/muz/spacer/spacer_sem_matcher.h  Normal file
@@ -0,0 +1,69 @@
/*++
|
||||
Copyright (c) 2006 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
sem_matcher.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Semantic matcher
|
||||
|
||||
Author:
|
||||
|
||||
Leonardo de Moura (leonardo) 2008-02-02.
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
#ifndef SPACER_SEM_MATCHER_H_
|
||||
#define SPACER_SEM_MATCHER_H_
|
||||
|
||||
#include "ast/substitution/substitution.h"
|
||||
#include "ast/arith_decl_plugin.h"
|
||||
#include "util/hashtable.h"
|
||||
|
||||
namespace spacer {
|
||||
/**
|
||||
\brief Functor for matching expressions.
|
||||
*/
|
||||
class sem_matcher {
|
||||
typedef std::pair<expr *, expr *> expr_pair;
|
||||
typedef pair_hash<obj_ptr_hash<expr>, obj_ptr_hash<expr> > expr_pair_hash;
|
||||
typedef hashtable<expr_pair, expr_pair_hash, default_eq<expr_pair> > cache;
|
||||
|
||||
ast_manager &m;
|
||||
arith_util m_arith;
|
||||
expr_ref_vector m_pinned;
|
||||
substitution * m_subst;
|
||||
svector<expr_pair> m_todo;
|
||||
|
||||
void reset();
|
||||
|
||||
bool match_var(var *v, expr *e);
|
||||
public:
|
||||
sem_matcher(ast_manager &man);
|
||||
|
||||
/**
|
||||
\brief Return true if e2 is an instance of e1.
|
||||
In case of success (the result is true), it stores the substitution that makes e1 equal to e2 into s.
|
||||
Sets pos to true if the match is positive and to false if it is negative (i.e., e1 equals !e2)
|
||||
|
||||
For example:
|
||||
1) e1 = f(g(x), x), e2 = f(g(h(a)), h(a))
|
||||
The result is true, and s will contain x -> h(a)
|
||||
|
||||
2) e1 = f(a, x) e2 = f(x, a)
|
||||
The result is false.
|
||||
|
||||
3) e1 = f(x, x) e2 = f(y, a)
|
||||
The result is false
|
||||
|
||||
4) e1 = f(x, y) e2 = f(h(z), a)
|
||||
The result is true, and s contains x->h(z) and y->a
|
||||
*/
|
||||
bool operator()(expr * e1, expr * e2, substitution & s, bool &pos);
|
||||
};
|
||||
}
|
||||
#endif /* SPACER_SEM_MATCHER_H_ */
|
||||
|
|
@@ -1,79 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_smt_context_manager.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Manager of smt contexts
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-26.
|
||||
Arie Gurfinkel
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
|
||||
#include "ast/ast_pp.h"
|
||||
#include "ast/ast_pp_util.h"
|
||||
#include "ast/ast_smt_pp.h"
|
||||
|
||||
#include "smt/smt_context.h"
|
||||
#include "smt/params/smt_params.h"
|
||||
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
#include "muz/spacer/spacer_smt_context_manager.h"
|
||||
namespace spacer {
|
||||
|
||||
|
||||
|
||||
|
||||
smt_context_manager::smt_context_manager(ast_manager &m,
|
||||
unsigned max_num_contexts,
|
||||
const params_ref &p) :
|
||||
m_fparams(p),
|
||||
m(m),
|
||||
m_max_num_contexts(max_num_contexts),
|
||||
m_num_contexts(0) { m_stats.reset();}
|
||||
|
||||
|
||||
smt_context_manager::~smt_context_manager()
|
||||
{
|
||||
std::for_each(m_solvers.begin(), m_solvers.end(),
|
||||
delete_proc<spacer::virtual_solver_factory>());
|
||||
}
|
||||
|
||||
virtual_solver* smt_context_manager::mk_fresh()
|
||||
{
|
||||
++m_num_contexts;
|
||||
virtual_solver_factory *solver_factory = nullptr;
|
||||
|
||||
if (m_max_num_contexts == 0 || m_solvers.size() < m_max_num_contexts) {
|
||||
m_solvers.push_back(alloc(spacer::virtual_solver_factory, m, m_fparams));
|
||||
solver_factory = m_solvers.back();
|
||||
} else
|
||||
{ solver_factory = m_solvers[(m_num_contexts - 1) % m_max_num_contexts]; }
|
||||
|
||||
return solver_factory->mk_solver();
|
||||
}
|
||||
|
||||
void smt_context_manager::collect_statistics(statistics& st) const
|
||||
{
|
||||
for (unsigned i = 0; i < m_solvers.size(); ++i) {
|
||||
m_solvers[i]->collect_statistics(st);
|
||||
}
|
||||
}
|
||||
|
||||
void smt_context_manager::reset_statistics()
|
||||
{
|
||||
for (unsigned i = 0; i < m_solvers.size(); ++i) {
|
||||
m_solvers[i]->reset_statistics();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
|
@@ -1,68 +0,0 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_smt_context_manager.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Manager of smt contexts
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-26.
|
||||
Arie Gurfinkel
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_SMT_CONTEXT_MANAGER_H_
|
||||
#define _SPACER_SMT_CONTEXT_MANAGER_H_
|
||||
|
||||
#include "util/stopwatch.h"
|
||||
|
||||
#include "smt/smt_kernel.h"
|
||||
#include "muz/base/dl_util.h"
|
||||
#include "muz/spacer/spacer_virtual_solver.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class smt_context_manager {
|
||||
|
||||
struct stats {
|
||||
unsigned m_num_smt_checks;
|
||||
unsigned m_num_sat_smt_checks;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
smt_params m_fparams;
|
||||
ast_manager& m;
|
||||
unsigned m_max_num_contexts;
|
||||
ptr_vector<virtual_solver_factory> m_solvers;
|
||||
unsigned m_num_contexts;
|
||||
|
||||
|
||||
stats m_stats;
|
||||
stopwatch m_check_watch;
|
||||
stopwatch m_check_sat_watch;
|
||||
|
||||
public:
|
||||
smt_context_manager(ast_manager& m, unsigned max_num_contexts = 1,
|
||||
const params_ref &p = params_ref::get_empty());
|
||||
|
||||
~smt_context_manager();
|
||||
virtual_solver* mk_fresh();
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
|
||||
void updt_params(params_ref const &p) { m_fparams.updt_params(p); }
|
||||
smt_params& fparams() {return m_fparams;}
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
@@ -1,5 +1,5 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
Copyright (c) 2018 Arie Gurfinkel and Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
|
|
@@ -7,10 +7,12 @@ Module Name:
|
|||
|
||||
Abstract:
|
||||
|
||||
A symbol multiplexer that helps with having multiple versions of each of a set of symbols.
|
||||
A symbol multiplexer that helps with having multiple versions of
|
||||
each of a set of symbols.
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
Krystof Hoder (t-khoder) 2011-9-8.
|
||||
|
||||
Revision History:
|
||||
|
|
@@ -22,345 +24,123 @@ Revision History:
|
|||
#include "ast/rewriter/rewriter.h"
|
||||
#include "ast/rewriter/rewriter_def.h"
|
||||
|
||||
#include "model/model.h"
|
||||
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
#include "muz/spacer/spacer_sym_mux.h"
|
||||
|
||||
using namespace spacer;
|
||||
|
||||
sym_mux::sym_mux(ast_manager & m, const std::vector<std::string> & suffixes)
|
||||
: m(m), m_ref_holder(m), m_next_sym_suffix_idx(0), m_suffixes(suffixes)
|
||||
{
|
||||
for (std::string const& s : m_suffixes) {
|
||||
m_used_suffixes.insert(symbol(s.c_str()));
|
||||
sym_mux::sym_mux(ast_manager & m) : m(m) {}
|
||||
sym_mux::~sym_mux() {
|
||||
for (auto &entry : m_entries) {
|
||||
dealloc(entry.m_value);
|
||||
}
|
||||
}
|
||||
|
||||
std::string sym_mux::get_suffix(unsigned i) const
|
||||
{
|
||||
while (m_suffixes.size() <= i) {
|
||||
std::string new_suffix;
|
||||
symbol new_syffix_sym;
|
||||
do {
|
||||
std::stringstream stm;
|
||||
stm << '_' << m_next_sym_suffix_idx;
|
||||
m_next_sym_suffix_idx++;
|
||||
new_suffix = stm.str();
|
||||
new_syffix_sym = symbol(new_suffix.c_str());
|
||||
} while (m_used_suffixes.contains(new_syffix_sym));
|
||||
m_used_suffixes.insert(new_syffix_sym);
|
||||
m_suffixes.push_back(new_suffix);
|
||||
}
|
||||
return m_suffixes[i];
|
||||
func_decl_ref sym_mux::mk_variant(func_decl *fdecl, unsigned i) const {
|
||||
func_decl_ref v(m);
|
||||
std::string name = fdecl->get_name().str();
|
||||
std::string suffix = "_";
|
||||
suffix += i == 0 ? "n" : std::to_string(i - 1);
|
||||
name += suffix;
|
||||
v = m.mk_func_decl(symbol(name.c_str()), fdecl->get_arity(),
|
||||
fdecl->get_domain(), fdecl->get_range());
|
||||
return v;
|
||||
}
|
||||
|
||||
void sym_mux::create_tuple(func_decl* prefix, unsigned arity, sort * const * domain, sort * range,
|
||||
unsigned tuple_length, decl_vector & tuple)
|
||||
{
|
||||
SASSERT(tuple_length > 0);
|
||||
while (tuple.size() < tuple_length) {
|
||||
tuple.push_back(0);
|
||||
}
|
||||
SASSERT(tuple.size() == tuple_length);
|
||||
std::string pre = prefix->get_name().str();
|
||||
for (unsigned i = 0; i < tuple_length; i++) {
|
||||
void sym_mux::register_decl(func_decl *fdecl) {
|
||||
sym_mux_entry *entry = alloc(sym_mux_entry, m);
|
||||
entry->m_main = fdecl;
|
||||
entry->m_variants.push_back(mk_variant(fdecl, 0));
|
||||
entry->m_variants.push_back(mk_variant(fdecl, 1));
|
||||
|
||||
if (tuple[i] != 0) {
|
||||
SASSERT(tuple[i]->get_arity() == arity);
|
||||
SASSERT(tuple[i]->get_range() == range);
|
||||
//domain should match as well, but we won't bother checking an array equality
|
||||
} else {
|
||||
std::string name = pre + get_suffix(i);
|
||||
tuple[i] = m.mk_func_decl(symbol(name.c_str()), arity, domain, range);
|
||||
}
|
||||
m_ref_holder.push_back(tuple[i]);
|
||||
m_sym2idx.insert(tuple[i], i);
|
||||
m_sym2prim.insert(tuple[i], tuple[0]);
|
||||
}
|
||||
|
||||
m_prim2all.insert(tuple[0], tuple);
|
||||
m_prefix2prim.insert(prefix, tuple[0]);
|
||||
m_prim2prefix.insert(tuple[0], prefix);
|
||||
m_prim_preds.push_back(tuple[0]);
|
||||
m_ref_holder.push_back(prefix);
|
||||
m_entries.insert(fdecl, entry);
|
||||
m_muxes.insert(entry->m_variants.get(0), std::make_pair(entry, 0));
|
||||
m_muxes.insert(entry->m_variants.get(1), std::make_pair(entry, 1));
|
||||
}
|
||||
|
||||
void sym_mux::ensure_tuple_size(func_decl * prim, unsigned sz) const
|
||||
{
|
||||
SASSERT(m_prim2all.contains(prim));
|
||||
decl_vector& tuple = m_prim2all.find_core(prim)->get_data().m_value;
|
||||
SASSERT(tuple[0] == prim);
|
||||
|
||||
if (sz <= tuple.size()) { return; }
|
||||
|
||||
func_decl * prefix;
|
||||
TRUSTME(m_prim2prefix.find(prim, prefix));
|
||||
std::string prefix_name = prefix->get_name().bare_str();
|
||||
for (unsigned i = tuple.size(); i < sz; ++i) {
|
||||
std::string name = prefix_name + get_suffix(i);
|
||||
func_decl * new_sym = m.mk_func_decl(symbol(name.c_str()), prefix->get_arity(),
|
||||
prefix->get_domain(), prefix->get_range());
|
||||
|
||||
tuple.push_back(new_sym);
|
||||
m_ref_holder.push_back(new_sym);
|
||||
m_sym2idx.insert(new_sym, i);
|
||||
m_sym2prim.insert(new_sym, prim);
|
||||
void sym_mux::ensure_capacity(sym_mux_entry &entry, unsigned sz) const {
|
||||
while (entry.m_variants.size() < sz) {
|
||||
unsigned idx = entry.m_variants.size();
|
||||
entry.m_variants.push_back (mk_variant(entry.m_main, idx));
|
||||
m_muxes.insert(entry.m_variants.back(), std::make_pair(&entry, idx));
|
||||
}
|
||||
}
|
||||
|
||||
func_decl * sym_mux::conv(func_decl * sym, unsigned src_idx, unsigned tgt_idx) const
|
||||
{
|
||||
if (src_idx == tgt_idx) { return sym; }
|
||||
func_decl * prim = (src_idx == 0) ? sym : get_primary(sym);
|
||||
if (tgt_idx > src_idx) {
|
||||
ensure_tuple_size(prim, tgt_idx + 1);
|
||||
}
|
||||
decl_vector & sym_vect = m_prim2all.find_core(prim)->get_data().m_value;
|
||||
SASSERT(sym_vect[src_idx] == sym);
|
||||
return sym_vect[tgt_idx];
|
||||
bool sym_mux::find_idx(func_decl * sym, unsigned & idx) const {
|
||||
std::pair<sym_mux_entry *, unsigned> entry;
|
||||
if (m_muxes.find(sym, entry)) {idx = entry.second; return true;}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
func_decl * sym_mux::get_or_create_symbol_by_prefix(func_decl* prefix, unsigned idx,
|
||||
unsigned arity, sort * const * domain, sort * range)
|
||||
{
|
||||
func_decl * prim = try_get_primary_by_prefix(prefix);
|
||||
if (prim) {
|
||||
SASSERT(prim->get_arity() == arity);
|
||||
SASSERT(prim->get_range() == range);
|
||||
//domain should match as well, but we won't bother checking an array equality
|
||||
|
||||
return conv(prim, 0, idx);
|
||||
func_decl * sym_mux::find_by_decl(func_decl* fdecl, unsigned idx) const {
|
||||
sym_mux_entry *entry = nullptr;
|
||||
if (m_entries.find(fdecl, entry)) {
|
||||
ensure_capacity(*entry, idx+1);
|
||||
return entry->m_variants.get(idx);
|
||||
}
|
||||
|
||||
decl_vector syms;
|
||||
create_tuple(prefix, arity, domain, range, idx + 1, syms);
|
||||
return syms[idx];
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool sym_mux::is_muxed_lit(expr * e, unsigned idx) const
|
||||
{
|
||||
if (!is_app(e)) { return false; }
|
||||
app * a = to_app(e);
|
||||
if (m.is_not(a) && is_app(a->get_arg(0))) {
|
||||
a = to_app(a->get_arg(0));
|
||||
func_decl * sym_mux::shift_decl(func_decl * decl,
|
||||
unsigned src_idx, unsigned tgt_idx) const {
|
||||
std::pair<sym_mux_entry*,unsigned> entry;
|
||||
if (m_muxes.find(decl, entry)) {
|
||||
SASSERT(entry.second == src_idx);
|
||||
ensure_capacity(*entry.first, tgt_idx + 1);
|
||||
return entry.first->m_variants.get(tgt_idx);
|
||||
}
|
||||
return is_muxed(a->get_decl());
|
||||
UNREACHABLE();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
namespace {
|
||||
struct formula_checker {
|
||||
formula_checker(const sym_mux & parent, unsigned idx) :
|
||||
m_parent(parent), m_idx(idx), m_found(false) {}
|
||||
|
||||
struct sym_mux::formula_checker {
|
||||
formula_checker(const sym_mux & parent, bool all, unsigned idx) :
|
||||
m_parent(parent), m_all(all), m_idx(idx),
|
||||
m_found_what_needed(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (m_found_what_needed || !is_app(e)) { return; }
|
||||
void operator()(expr * e) {
|
||||
if (m_found || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned sym_idx;
|
||||
if (!m_parent.try_get_index(sym, sym_idx)) { return; }
|
||||
if (!m_parent.find_idx(sym, sym_idx)) { return; }
|
||||
|
||||
bool have_idx = sym_idx == m_idx;
|
||||
|
||||
if (m_all ? (!have_idx) : have_idx) {
|
||||
m_found_what_needed = true;
|
||||
}
|
||||
|
||||
m_found = !have_idx;
|
||||
}
|
||||
|
||||
bool all_have_idx() const
|
||||
{
|
||||
SASSERT(m_all); //we were looking for the queried property
|
||||
return !m_found_what_needed;
|
||||
}
|
||||
|
||||
bool some_with_idx() const
|
||||
{
|
||||
SASSERT(!m_all); //we were looking for the queried property
|
||||
return m_found_what_needed;
|
||||
}
|
||||
bool all_have_idx() const {return !m_found;}
|
||||
|
||||
private:
|
||||
const sym_mux & m_parent;
|
||||
bool m_all;
|
||||
unsigned m_idx;
|
||||
|
||||
/**
|
||||
    When checking whether all muxed symbols have a given index, we look for
    counter-examples; when checking whether the formula contains a muxed symbol
    of a given index, we look for a symbol of index m_idx.
|
||||
*/
|
||||
bool m_found_what_needed;
|
||||
bool m_found;
|
||||
};
|
||||
|
||||
bool sym_mux::contains(expr * e, unsigned idx) const
|
||||
{
|
||||
formula_checker chck(*this, false, idx);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return chck.some_with_idx();
|
||||
}
|
||||
|
||||
bool sym_mux::is_homogenous_formula(expr * e, unsigned idx) const
|
||||
{
|
||||
formula_checker chck(*this, true, idx);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return chck.all_have_idx();
|
||||
bool sym_mux::is_homogenous_formula(expr * e, unsigned idx) const {
|
||||
expr_mark visited;
|
||||
formula_checker fck(*this, idx);
|
||||
for_each_expr(fck, visited, e);
|
||||
return fck.all_have_idx();
|
||||
}
|
||||
|
||||
bool sym_mux::is_homogenous(const expr_ref_vector & vect, unsigned idx) const
|
||||
{
|
||||
expr * const * begin = vect.c_ptr();
|
||||
expr * const * end = begin + vect.size();
|
||||
for (expr * const * it = begin; it != end; it++) {
|
||||
if (!is_homogenous_formula(*it, idx)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
class sym_mux::index_collector {
|
||||
sym_mux const& m_parent;
|
||||
svector<bool> m_indices;
|
||||
public:
|
||||
index_collector(sym_mux const& s):
|
||||
m_parent(s) {}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (is_app(e)) {
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned idx;
|
||||
if (m_parent.try_get_index(sym, idx)) {
|
||||
SASSERT(idx > 0);
|
||||
--idx;
|
||||
if (m_indices.size() <= idx) {
|
||||
m_indices.resize(idx + 1, false);
|
||||
}
|
||||
m_indices[idx] = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void extract(unsigned_vector& indices)
|
||||
{
|
||||
for (unsigned i = 0; i < m_indices.size(); ++i) {
|
||||
if (m_indices[i]) {
|
||||
indices.push_back(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
void sym_mux::collect_indices(expr* e, unsigned_vector& indices) const
|
||||
{
|
||||
indices.reset();
|
||||
index_collector collector(*this);
|
||||
for_each_expr(collector, m_visited, e);
|
||||
m_visited.reset();
|
||||
collector.extract(indices);
|
||||
}
|
||||
|
||||
class sym_mux::variable_collector {
|
||||
sym_mux const& m_parent;
|
||||
vector<ptr_vector<app> >& m_vars;
|
||||
public:
|
||||
variable_collector(sym_mux const& s, vector<ptr_vector<app> >& vars):
|
||||
m_parent(s), m_vars(vars) {}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (is_app(e)) {
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned idx;
|
||||
if (m_parent.try_get_index(sym, idx)) {
|
||||
SASSERT(idx > 0);
|
||||
--idx;
|
||||
if (m_vars.size() <= idx) {
|
||||
m_vars.resize(idx + 1, ptr_vector<app>());
|
||||
}
|
||||
m_vars[idx].push_back(to_app(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::collect_variables(expr* e, vector<ptr_vector<app> >& vars) const
|
||||
{
|
||||
vars.reset();
|
||||
variable_collector collector(*this, vars);
|
||||
for_each_expr(collector, m_visited, e);
|
||||
m_visited.reset();
|
||||
}
|
||||
|
||||
class sym_mux::hmg_checker {
|
||||
const sym_mux & m_parent;
|
||||
|
||||
bool m_found_idx;
|
||||
unsigned m_idx;
|
||||
bool m_multiple_indexes;
|
||||
|
||||
public:
|
||||
hmg_checker(const sym_mux & parent) :
|
||||
m_parent(parent), m_found_idx(false), m_multiple_indexes(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (m_multiple_indexes || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned sym_idx;
|
||||
if (!m_parent.try_get_index(sym, sym_idx)) { return; }
|
||||
|
||||
if (!m_found_idx) {
|
||||
m_found_idx = true;
|
||||
m_idx = sym_idx;
|
||||
return;
|
||||
}
|
||||
if (m_idx == sym_idx) { return; }
|
||||
m_multiple_indexes = true;
|
||||
}
|
||||
|
||||
bool has_multiple_indexes() const
|
||||
{
|
||||
return m_multiple_indexes;
|
||||
}
|
||||
};
|
||||
|
||||
bool sym_mux::is_homogenous_formula(expr * e) const
|
||||
{
|
||||
hmg_checker chck(*this);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return !chck.has_multiple_indexes();
|
||||
}
|
||||
|
||||
|
||||
struct sym_mux::conv_rewriter_cfg : public default_rewriter_cfg {
|
||||
namespace {
|
||||
struct conv_rewriter_cfg : public default_rewriter_cfg {
|
||||
private:
|
||||
ast_manager & m;
|
||||
const sym_mux & m_parent;
|
||||
unsigned m_from_idx;
|
||||
unsigned m_to_idx;
|
||||
bool m_homogenous;
|
||||
expr_ref_vector m_pinned;
|
||||
public:
|
||||
conv_rewriter_cfg(const sym_mux & parent, unsigned from_idx, unsigned to_idx, bool homogenous)
|
||||
conv_rewriter_cfg(const sym_mux & parent, unsigned from_idx,
|
||||
unsigned to_idx, bool homogenous)
|
||||
: m(parent.get_manager()),
|
||||
m_parent(parent),
|
||||
m_from_idx(from_idx),
|
||||
m_to_idx(to_idx),
|
||||
m_homogenous(homogenous) {}
|
||||
m_homogenous(homogenous), m_pinned(m) {(void) m_homogenous;}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr)
|
||||
{
|
||||
|
|
@@ -368,241 +148,23 @@ public:
|
|||
app * a = to_app(s);
|
||||
func_decl * sym = a->get_decl();
|
||||
if (!m_parent.has_index(sym, m_from_idx)) {
|
||||
(void) m_homogenous;
|
||||
SASSERT(!m_homogenous || !m_parent.is_muxed(sym));
|
||||
return false;
|
||||
}
|
||||
func_decl * tgt = m_parent.conv(sym, m_from_idx, m_to_idx);
|
||||
|
||||
func_decl * tgt = m_parent.shift_decl(sym, m_from_idx, m_to_idx);
|
||||
t = m.mk_app(tgt, a->get_args());
|
||||
m_pinned.push_back(t);
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::conv_formula(expr * f, unsigned src_idx, unsigned tgt_idx, expr_ref & res, bool homogenous) const
|
||||
{
|
||||
if (src_idx == tgt_idx) {
|
||||
res = f;
|
||||
return;
|
||||
}
|
||||
conv_rewriter_cfg r_cfg(*this, src_idx, tgt_idx, homogenous);
|
||||
rewriter_tpl<conv_rewriter_cfg> rwr(m, false, r_cfg);
|
||||
rwr(f, res);
|
||||
}
|
||||
|
||||
struct sym_mux::shifting_rewriter_cfg : public default_rewriter_cfg {
|
||||
private:
|
||||
ast_manager & m;
|
||||
const sym_mux & m_parent;
|
||||
int m_shift;
|
||||
public:
|
||||
shifting_rewriter_cfg(const sym_mux & parent, int shift)
|
||||
: m(parent.get_manager()),
|
||||
m_parent(parent),
|
||||
m_shift(shift) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr)
|
||||
{
|
||||
if (!is_app(s)) { return false; }
|
||||
app * a = to_app(s);
|
||||
func_decl * sym = a->get_decl();
|
||||
|
||||
unsigned idx;
|
||||
if (!m_parent.try_get_index(sym, idx)) {
|
||||
return false;
|
||||
}
|
||||
SASSERT(static_cast<int>(idx) + m_shift >= 0);
|
||||
func_decl * tgt = m_parent.conv(sym, idx, idx + m_shift);
|
||||
t = m.mk_app(tgt, a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::shift_formula(expr * f, int dist, expr_ref & res) const
|
||||
{
|
||||
if (dist == 0) {
|
||||
res = f;
|
||||
return;
|
||||
}
|
||||
shifting_rewriter_cfg r_cfg(*this, dist);
|
||||
rewriter_tpl<shifting_rewriter_cfg> rwr(m, false, r_cfg);
|
||||
rwr(f, res);
|
||||
}
|
||||
|
||||
void sym_mux::conv_formula_vector(const expr_ref_vector & vect, unsigned src_idx, unsigned tgt_idx,
|
||||
expr_ref_vector & res) const
|
||||
{
|
||||
res.reset();
|
||||
expr * const * begin = vect.c_ptr();
|
||||
expr * const * end = begin + vect.size();
|
||||
for (expr * const * it = begin; it != end; it++) {
|
||||
expr_ref converted(m);
|
||||
conv_formula(*it, src_idx, tgt_idx, converted);
|
||||
res.push_back(converted);
|
||||
void sym_mux::shift_expr(expr * f, unsigned src_idx, unsigned tgt_idx,
|
||||
expr_ref & res, bool homogenous) const {
|
||||
if (src_idx == tgt_idx) {res = f;}
|
||||
else {
|
||||
conv_rewriter_cfg r_cfg(*this, src_idx, tgt_idx, homogenous);
|
||||
rewriter_tpl<conv_rewriter_cfg> rwr(m, false, r_cfg);
|
||||
rwr(f, res);
|
||||
}
|
||||
}
|
||||
|
||||
void sym_mux::filter_idx(expr_ref_vector & vect, unsigned idx) const
|
||||
{
|
||||
unsigned i = 0;
|
||||
while (i < vect.size()) {
|
||||
expr* e = vect[i].get();
|
||||
if (contains(e, idx) && is_homogenous_formula(e, idx)) {
|
||||
i++;
|
||||
} else {
|
||||
//we don't allow mixing states inside vector elements
|
||||
SASSERT(!contains(e, idx));
|
||||
vect[i] = vect.back();
|
||||
vect.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void sym_mux::partition_o_idx(
|
||||
expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other, unsigned idx) const
|
||||
{
|
||||
|
||||
for (unsigned i = 0; i < lits.size(); ++i) {
|
||||
if (contains(lits[i], idx) && is_homogenous_formula(lits[i], idx)) {
|
||||
o_lits.push_back(lits[i]);
|
||||
} else {
|
||||
other.push_back(lits[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
class sym_mux::nonmodel_sym_checker {
|
||||
const sym_mux & m_parent;
|
||||
|
||||
bool m_found;
|
||||
public:
|
||||
nonmodel_sym_checker(const sym_mux & parent) :
|
||||
m_parent(parent), m_found(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (m_found || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
|
||||
if (m_parent.is_non_model_sym(sym)) {
|
||||
m_found = true;
|
||||
}
|
||||
}
|
||||
|
||||
bool found() const
|
||||
{
|
||||
return m_found;
|
||||
}
|
||||
};
|
||||
|
||||
bool sym_mux::has_nonmodel_symbol(expr * e) const
|
||||
{
|
||||
nonmodel_sym_checker chck(*this);
|
||||
for_each_expr(chck, e);
|
||||
return chck.found();
|
||||
}
|
||||
|
||||
void sym_mux::filter_non_model_lits(expr_ref_vector & vect) const
|
||||
{
|
||||
unsigned i = 0;
|
||||
while (i < vect.size()) {
|
||||
if (!has_nonmodel_symbol(vect[i].get())) {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
vect[i] = vect.back();
|
||||
vect.pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
class sym_mux::decl_idx_comparator {
|
||||
const sym_mux & m_parent;
|
||||
public:
|
||||
decl_idx_comparator(const sym_mux & parent)
|
||||
: m_parent(parent)
|
||||
{ }
|
||||
|
||||
bool operator()(func_decl * sym1, func_decl * sym2)
|
||||
{
|
||||
unsigned idx1, idx2;
|
||||
if (!m_parent.try_get_index(sym1, idx1)) { idx1 = UINT_MAX; }
|
||||
if (!m_parent.try_get_index(sym2, idx2)) { idx2 = UINT_MAX; }
|
||||
|
||||
if (idx1 != idx2) { return idx1 < idx2; }
|
||||
return lt(sym1->get_name(), sym2->get_name());
|
||||
}
|
||||
};
|
||||
|
||||
std::string sym_mux::pp_model(const model_core & mdl) const
|
||||
{
|
||||
decl_vector consts;
|
||||
unsigned sz = mdl.get_num_constants();
|
||||
for (unsigned i = 0; i < sz; i++) {
|
||||
func_decl * d = mdl.get_constant(i);
|
||||
consts.push_back(d);
|
||||
}
|
||||
|
||||
std::sort(consts.begin(), consts.end(), decl_idx_comparator(*this));
|
||||
|
||||
std::stringstream res;
|
||||
|
||||
decl_vector::iterator end = consts.end();
|
||||
for (decl_vector::iterator it = consts.begin(); it != end; it++) {
|
||||
func_decl * d = *it;
|
||||
std::string name = d->get_name().str();
|
||||
const char * arrow = " -> ";
|
||||
res << name << arrow;
|
||||
unsigned indent = static_cast<unsigned>(name.length() + strlen(arrow));
|
||||
res << mk_pp(mdl.get_const_interp(d), m, indent) << "\n";
|
||||
|
||||
if (it + 1 != end) {
|
||||
unsigned idx1, idx2;
|
||||
if (!try_get_index(*it, idx1)) { idx1 = UINT_MAX; }
|
||||
if (!try_get_index(*(it + 1), idx2)) { idx2 = UINT_MAX; }
|
||||
if (idx1 != idx2) { res << "\n"; }
|
||||
}
|
||||
}
|
||||
return res.str();
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
|
||||
class sym_mux::index_renamer_cfg : public default_rewriter_cfg {
|
||||
const sym_mux & m_parent;
|
||||
unsigned m_idx;
|
||||
|
||||
public:
|
||||
index_renamer_cfg(const sym_mux & p, unsigned idx) : m_parent(p), m_idx(idx) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr)
|
||||
{
|
||||
if (!is_app(s)) { return false; }
|
||||
app * a = to_app(s);
|
||||
if (a->get_family_id() != null_family_id) {
|
||||
return false;
|
||||
}
|
||||
func_decl * sym = a->get_decl();
|
||||
unsigned idx;
|
||||
if (!m_parent.try_get_index(sym, idx)) {
|
||||
return false;
|
||||
}
|
||||
if (m_idx == idx) {
|
||||
return false;
|
||||
}
|
||||
ast_manager& m = m_parent.get_manager();
|
||||
symbol name = symbol((sym->get_name().str() + "!").c_str());
|
||||
func_decl * tgt = m.mk_func_decl(name, sym->get_arity(), sym->get_domain(), sym->get_range());
|
||||
t = m.mk_app(tgt, a->get_num_args(), a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,5 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
Copyright (c) 2018 Arie Gurfinkel and Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
|
|
@@ -7,10 +7,12 @@ Module Name:
|
|||
|
||||
Abstract:
|
||||
|
||||
A symbol multiplexer that helps with having multiple versions of each of a set of symbols.
|
||||
A symbol multiplexer that helps with having multiple versions of
|
||||
each of a set of symbols.
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
Krystof Hoder (t-khoder) 2011-9-8.
|
||||
|
||||
Revision History:
|
||||
|
|
@@ -20,236 +22,72 @@ Revision History:
|
|||
#ifndef _SYM_MUX_H_
|
||||
#define _SYM_MUX_H_
|
||||
|
||||
#include <string>
|
||||
|
||||
#include "ast/ast.h"
|
||||
#include "util/map.h"
|
||||
#include "util/vector.h"
|
||||
#include <vector>
|
||||
|
||||
class model_core;
|
||||
|
||||
namespace spacer {
|
||||
class sym_mux {
|
||||
public:
|
||||
typedef ptr_vector<app> app_vector;
|
||||
typedef ptr_vector<func_decl> decl_vector;
|
||||
private:
|
||||
typedef obj_map<func_decl, unsigned> sym2u;
|
||||
typedef obj_map<func_decl, decl_vector> sym2dv;
|
||||
typedef obj_map<func_decl, func_decl *> sym2sym;
|
||||
typedef obj_map<func_decl, func_decl *> sym2pred;
|
||||
typedef hashtable<symbol, symbol_hash_proc, symbol_eq_proc> symbols;
|
||||
class sym_mux_entry {
|
||||
public:
|
||||
func_decl_ref m_main;
|
||||
func_decl_ref_vector m_variants;
|
||||
sym_mux_entry(ast_manager &m) : m_main(m), m_variants(m) {};
|
||||
};
|
||||
|
||||
ast_manager & m;
|
||||
mutable ast_ref_vector m_ref_holder;
|
||||
mutable expr_mark m_visited;
|
||||
typedef obj_map<func_decl, sym_mux_entry*> decl2entry_map;
|
||||
typedef obj_map<func_decl, std::pair<sym_mux_entry*, unsigned> > mux2entry_map;
|
||||
|
||||
mutable unsigned m_next_sym_suffix_idx;
|
||||
mutable symbols m_used_suffixes;
|
||||
/** Here we have default suffixes for each of the variants */
|
||||
mutable std::vector<std::string> m_suffixes;
|
||||
ast_manager &m;
|
||||
mutable decl2entry_map m_entries;
|
||||
mutable mux2entry_map m_muxes;
|
||||
|
||||
func_decl_ref mk_variant(func_decl *fdecl, unsigned i) const;
|
||||
void ensure_capacity(sym_mux_entry &entry, unsigned sz) const;
|
||||
|
||||
/**
|
||||
Primary symbol is the 0-th variant. This member maps from primary symbol
|
||||
to vector of all its variants (including the primary variant).
|
||||
*/
|
||||
sym2dv m_prim2all;
|
||||
|
||||
/**
|
||||
For each symbol contains its variant index
|
||||
*/
|
||||
mutable sym2u m_sym2idx;
|
||||
/**
|
||||
For each symbol contains its primary variant
|
||||
*/
|
||||
mutable sym2sym m_sym2prim;
|
||||
|
||||
/**
|
||||
Maps prefixes passed to the create_tuple to
|
||||
the primary symbol created from it.
|
||||
*/
|
||||
sym2pred m_prefix2prim;
|
||||
|
||||
/**
|
||||
Maps primary symbols to the prefixes that were used to create them.
|
||||
*/
|
||||
sym2sym m_prim2prefix;
|
||||
|
||||
decl_vector m_prim_preds;
|
||||
|
||||
obj_hashtable<func_decl> m_non_model_syms;
|
||||
|
||||
struct formula_checker;
|
||||
struct conv_rewriter_cfg;
|
||||
struct shifting_rewriter_cfg;
|
||||
class decl_idx_comparator;
|
||||
class hmg_checker;
|
||||
class nonmodel_sym_checker;
|
||||
class index_renamer_cfg;
|
||||
class index_collector;
|
||||
class variable_collector;
|
||||
|
||||
std::string get_suffix(unsigned i) const;
|
||||
void ensure_tuple_size(func_decl * prim, unsigned sz) const;
|
||||
|
||||
expr_ref isolate_o_idx(expr* e, unsigned idx) const;
|
||||
public:
|
||||
sym_mux(ast_manager & m, const std::vector<std::string> & suffixes);
|
||||
|
||||
sym_mux(ast_manager & m);
|
||||
~sym_mux();
|
||||
ast_manager & get_manager() const { return m; }
|
||||
|
||||
bool is_muxed(func_decl * sym) const { return m_sym2idx.contains(sym); }
|
||||
|
||||
bool try_get_index(func_decl * sym, unsigned & idx) const
|
||||
{
|
||||
return m_sym2idx.find(sym, idx);
|
||||
}
|
||||
|
||||
void register_decl(func_decl *fdecl);
|
||||
bool find_idx(func_decl * sym, unsigned & idx) const;
|
||||
bool has_index(func_decl * sym, unsigned idx) const
|
||||
{
|
||||
unsigned actual_idx;
|
||||
return try_get_index(sym, actual_idx) && idx == actual_idx;
|
||||
}
|
||||
{unsigned v; return find_idx(sym, v) && idx == v;}
|
||||
|
||||
/** Return primary symbol. sym must be muxed. */
|
||||
func_decl * get_primary(func_decl * sym) const
|
||||
{
|
||||
func_decl * prim;
|
||||
TRUSTME(m_sym2prim.find(sym, prim));
|
||||
return prim;
|
||||
}
|
||||
bool is_muxed(func_decl *fdecl) const {return m_muxes.contains(fdecl);}
|
||||
|
||||
/**
|
||||
Return primary symbol created from prefix, or 0 if the prefix was never used.
|
||||
\brief Return symbol created from prefix, or 0 if the prefix
|
||||
was never used.
|
||||
*/
|
||||
func_decl * try_get_primary_by_prefix(func_decl* prefix) const
|
||||
{
|
||||
func_decl * res;
|
||||
if(!m_prefix2prim.find(prefix, res)) {
|
||||
return nullptr;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
func_decl * find_by_decl(func_decl* fdecl, unsigned idx) const;
|
||||
|
||||
/**
|
||||
Return symbol created from prefix, or 0 if the prefix was never used.
|
||||
*/
|
||||
func_decl * try_get_by_prefix(func_decl* prefix, unsigned idx) const
|
||||
{
|
||||
func_decl * prim = try_get_primary_by_prefix(prefix);
|
||||
if(!prim) {
|
||||
return nullptr;
|
||||
}
|
||||
return conv(prim, 0, idx);
|
||||
}
|
||||
|
||||
/**
|
||||
Marks symbol as non-model which means it will not appear in models collected by
|
||||
get_muxed_cube_from_model function.
|
||||
This is to take care of auxiliary symbols introduced by the disjunction relations
|
||||
to relativize lemmas coming from disjuncts.
|
||||
*/
|
||||
void mark_as_non_model(func_decl * sym)
|
||||
{
|
||||
SASSERT(is_muxed(sym));
|
||||
m_non_model_syms.insert(get_primary(sym));
|
||||
}
|
||||
|
||||
func_decl * get_or_create_symbol_by_prefix(func_decl* prefix, unsigned idx,
|
||||
unsigned arity, sort * const * domain, sort * range);
|
||||
|
||||
|
||||
|
||||
bool is_muxed_lit(expr * e, unsigned idx) const;
|
||||
|
||||
bool is_non_model_sym(func_decl * s) const
|
||||
{
|
||||
return is_muxed(s) && m_non_model_syms.contains(get_primary(s));
|
||||
}
|
||||
|
||||
/**
|
||||
Create a multiplexed tuple of propositional constants.
|
||||
Symbols may be supplied in the tuple vector,
|
||||
those beyond the size of the array and those with corresponding positions
|
||||
assigned to zero will be created using prefix.
|
||||
Tuple length must be at least one.
|
||||
*/
|
||||
void create_tuple(func_decl* prefix, unsigned arity, sort * const * domain, sort * range,
|
||||
unsigned tuple_length, decl_vector & tuple);
|
||||
|
||||
/**
\brief Return true if the only multiplexed symbols which e contains are
of index idx.
*/
|
||||
bool is_homogenous_formula(expr * e, unsigned idx) const;
|
||||
bool is_homogenous(const expr_ref_vector & vect, unsigned idx) const;
|
||||
|
||||
/**
|
||||
Return true if all multiplexed symbols which e contains are of one index.
|
||||
*/
|
||||
bool is_homogenous_formula(expr * e) const;
|
||||
|
||||
/**
|
||||
Return true if expression e contains a muxed symbol of index idx.
|
||||
*/
|
||||
bool contains(expr * e, unsigned idx) const;
|
||||
|
||||
/**
|
||||
Collect indices used in expression.
|
||||
*/
|
||||
void collect_indices(expr* e, unsigned_vector& indices) const;
|
||||
|
||||
/**
|
||||
Collect used variables of each index.
|
||||
*/
|
||||
void collect_variables(expr* e, vector<ptr_vector<app> >& vars) const;
|
||||
|
||||
/**
|
||||
Convert symbol sym which has to be of src_idx variant into variant tgt_idx.
|
||||
*/
|
||||
func_decl * conv(func_decl * sym, unsigned src_idx, unsigned tgt_idx) const;
|
||||
|
||||
|
||||
/**
\brief Convert src_idx symbols in formula f into variant tgt_idx.
If homogenous is true, the formula cannot contain symbols of other variants.
*/
|
||||
void conv_formula(expr * f, unsigned src_idx, unsigned tgt_idx, expr_ref & res, bool homogenous = true) const;
|
||||
void conv_formula_vector(const expr_ref_vector & vect, unsigned src_idx, unsigned tgt_idx,
|
||||
expr_ref_vector & res) const;
|
||||
func_decl * shift_decl(func_decl * sym, unsigned src_idx, unsigned tgt_idx) const;
|
||||
|
||||
/**
\brief Shift the muxed symbols in f by dist. dist can be negative, but it
should never shift a symbol index to a negative value.

shift_expr converts src_idx symbols in formula f into tgt_idx. If homogenous
is true, the formula cannot contain symbols of other variants.
*/
|
||||
void shift_formula(expr * f, int dist, expr_ref & res) const;
|
||||
void shift_expr(expr * f, unsigned src_idx, unsigned tgt_idx,
|
||||
expr_ref & res, bool homogenous = true) const;
|
||||
|
||||
/**
|
||||
Remove from vect literals (atoms or negations of atoms) of symbols
|
||||
that contain multiplexed symbols with indexes other than idx.
|
||||
|
||||
Each of the literals can contain only symbols multiplexed with one index
|
||||
(this trivially holds if the literals are propositional).
|
||||
|
||||
Order of elements in vect may be modified by this function
|
||||
*/
|
||||
void filter_idx(expr_ref_vector & vect, unsigned idx) const;
|
||||
|
||||
/**
|
||||
Partition literals into o_literals and others.
|
||||
*/
|
||||
void partition_o_idx(expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other, unsigned idx) const;
|
||||
|
||||
bool has_nonmodel_symbol(expr * e) const;
|
||||
void filter_non_model_lits(expr_ref_vector & vect) const;
|
||||
|
||||
func_decl * const * begin_prim_preds() const { return m_prim_preds.begin(); }
|
||||
func_decl * const * end_prim_preds() const { return m_prim_preds.end(); }
|
||||
|
||||
void get_muxed_cube_from_model(const model_core & model, expr_ref_vector & res) const;
|
||||
|
||||
std::string pp_model(const model_core & mdl) const;
|
||||
};
|
||||
}
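For orientation, here is a minimal usage sketch of the multiplexer declared above. It is illustrative only: it assumes the create_tuple/conv_formula interface shown in this header, and the predicate name "p" and the helper function sym_mux_example are made up for the example.

    // Sketch: create two variants p_0 (primary) and p_1 of a unary predicate,
    // build a formula over variant 0 and rename it into variant 1.
    void sym_mux_example(ast_manager &m) {
        arith_util a(m);
        sym_mux mux(m);

        sort *domain[1] = { a.mk_int() };
        func_decl_ref prefix(m.mk_func_decl(symbol("p"), 1, domain, m.mk_bool_sort()), m);

        // tuple[0] is the primary variant, tuple[1] its index-1 copy
        decl_vector tuple;
        mux.create_tuple(prefix, 1, domain, m.mk_bool_sort(), 2, tuple);

        expr_ref arg(a.mk_int(0), m);
        expr_ref f(m.mk_app(tuple[0], arg.get()), m);   // p_0(0)

        expr_ref g(m);
        mux.conv_formula(f, 0, 1, g);                    // g is p_1(0)
    }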
@@ -17,111 +17,47 @@ Revision History:
--*/
|
||||
#include <unordered_map>
|
||||
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "ast/proofs/proof_utils.h"
|
||||
#include "muz/spacer/spacer_unsat_core_learner.h"
|
||||
#include "muz/spacer/spacer_unsat_core_plugin.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
|
||||
namespace spacer
|
||||
{
|
||||
#include "muz/spacer/spacer_iuc_proof.h"
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
|
||||
|
||||
unsat_core_learner::~unsat_core_learner()
|
||||
{
|
||||
namespace spacer {
|
||||
|
||||
unsat_core_learner::~unsat_core_learner() {
|
||||
std::for_each(m_plugins.begin(), m_plugins.end(), delete_proc<unsat_core_plugin>());
|
||||
|
||||
}
|
||||
|
||||
void unsat_core_learner::register_plugin(unsat_core_plugin* plugin)
|
||||
{
|
||||
void unsat_core_learner::register_plugin(unsat_core_plugin* plugin) {
|
||||
m_plugins.push_back(plugin);
|
||||
}
|
||||
|
||||
void unsat_core_learner::compute_unsat_core(proof *root, expr_set& asserted_b, expr_ref_vector& unsat_core)
|
||||
{
|
||||
// transform proof in order to get a proof which is better suited for unsat-core-extraction
|
||||
proof_ref pr(root, m);
|
||||
|
||||
reduce_hypotheses(pr);
|
||||
STRACE("spacer.unsat_core_learner",
|
||||
verbose_stream() << "Reduced proof:\n" << mk_ismt2_pp(pr, m) << "\n";
|
||||
);
|
||||
|
||||
// compute symbols occurring in B
|
||||
collect_symbols_b(asserted_b);
|
||||
|
||||
void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) {
|
||||
// traverse proof
|
||||
proof_post_order it(root, m);
|
||||
while (it.hasNext())
|
||||
{
|
||||
proof_post_order it(m_pr.get(), m);
|
||||
while (it.hasNext()) {
|
||||
proof* currentNode = it.next();
|
||||
|
||||
if (m.get_num_parents(currentNode) == 0)
|
||||
{
|
||||
switch(currentNode->get_decl_kind())
|
||||
{
|
||||
|
||||
case PR_ASSERTED: // currentNode is an axiom
|
||||
{
|
||||
if (asserted_b.contains(m.get_fact(currentNode)))
|
||||
{
|
||||
m_b_mark.mark(currentNode, true);
|
||||
}
|
||||
else
|
||||
{
|
||||
m_a_mark.mark(currentNode, true);
|
||||
}
|
||||
break;
|
||||
}
|
||||
// currentNode is a hypothesis:
|
||||
case PR_HYPOTHESIS:
|
||||
{
|
||||
m_h_mark.mark(currentNode, true);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// collect from parents whether the derivation of the current node contains A-axioms, B-axioms and hypotheses
|
||||
bool need_to_mark_a = false;
|
||||
bool need_to_mark_b = false;
|
||||
bool need_to_mark_h = false;
|
||||
if (m.get_num_parents(currentNode) > 0) {
|
||||
bool need_to_mark_closed = true;
|
||||
|
||||
for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i)
|
||||
{
|
||||
SASSERT(m.is_proof(currentNode->get_arg(i)));
|
||||
proof* premise = to_app(currentNode->get_arg(i));
|
||||
|
||||
need_to_mark_a = need_to_mark_a || m_a_mark.is_marked(premise);
|
||||
need_to_mark_b = need_to_mark_b || m_b_mark.is_marked(premise);
|
||||
need_to_mark_h = need_to_mark_h || m_h_mark.is_marked(premise);
|
||||
need_to_mark_closed = need_to_mark_closed && (!m_b_mark.is_marked(premise) || m_closed.is_marked(premise));
|
||||
for (proof* premise : m.get_parents(currentNode)) {
|
||||
need_to_mark_closed &= (!m_pr.is_b_marked(premise) || m_closed.is_marked(premise));
|
||||
}
|
||||
|
||||
// if the current node is an application of a lemma, we know that all hypotheses are removed
|
||||
if(currentNode->get_decl_kind() == PR_LEMMA)
|
||||
{
|
||||
need_to_mark_h = false;
|
||||
}
|
||||
|
||||
// save results
|
||||
m_a_mark.mark(currentNode, need_to_mark_a);
|
||||
m_b_mark.mark(currentNode, need_to_mark_b);
|
||||
m_h_mark.mark(currentNode, need_to_mark_h);
|
||||
// save result
|
||||
m_closed.mark(currentNode, need_to_mark_closed);
|
||||
}
|
||||
|
||||
// we have now collected all necessary information, so we can visit the node
|
||||
// if the node mixes A-reasoning and B-reasoning and contains non-closed premises
|
||||
if (m_a_mark.is_marked(currentNode) && m_b_mark.is_marked(currentNode) && !m_closed.is_marked(currentNode))
|
||||
{
|
||||
if (m_pr.is_a_marked(currentNode) &&
|
||||
m_pr.is_b_marked(currentNode) &&
|
||||
!m_closed.is_marked(currentNode)) {
|
||||
compute_partial_core(currentNode); // then we need to compute a partial core
|
||||
// SASSERT(!(m_a_mark.is_marked(currentNode) && m_b_mark.is_marked(currentNode)) || m_closed.is_marked(currentNode)); TODO: doesn't hold anymore if we do the mincut-thing!
|
||||
}
|
||||
}
|
||||
@@ -130,226 +66,39 @@ void unsat_core_learner::compute_unsat_core(proof *root, expr_set& asserted_b, e
|
||||
// TODO: remove duplicates from unsat core?
|
||||
|
||||
bool debug_proof = false;
|
||||
if(debug_proof)
|
||||
{
|
||||
// print proof for debugging
|
||||
verbose_stream() << "\n\nProof:\n";
|
||||
std::unordered_map<unsigned, unsigned> id_to_small_id;
|
||||
unsigned counter = 0;
|
||||
|
||||
proof_post_order it2(root, m);
|
||||
while (it2.hasNext())
|
||||
{
|
||||
proof* currentNode = it2.next();
|
||||
|
||||
SASSERT(id_to_small_id.find(currentNode->get_id()) == id_to_small_id.end());
|
||||
id_to_small_id.insert(std::make_pair(currentNode->get_id(), counter));
|
||||
|
||||
verbose_stream() << counter << " ";
|
||||
verbose_stream() << "[";
|
||||
if (is_a_marked(currentNode))
|
||||
{
|
||||
verbose_stream() << "a";
|
||||
}
|
||||
if (is_b_marked(currentNode))
|
||||
{
|
||||
verbose_stream() << "b";
|
||||
}
|
||||
if (is_h_marked(currentNode))
|
||||
{
|
||||
verbose_stream() << "h";
|
||||
}
|
||||
if (is_closed(currentNode))
|
||||
{
|
||||
verbose_stream() << "c";
|
||||
}
|
||||
verbose_stream() << "] ";
|
||||
|
||||
if (m.get_num_parents(currentNode) == 0)
|
||||
{
|
||||
switch (currentNode->get_decl_kind())
|
||||
{
|
||||
case PR_ASSERTED:
|
||||
verbose_stream() << "asserted";
|
||||
break;
|
||||
case PR_HYPOTHESIS:
|
||||
verbose_stream() << "hypothesis";
|
||||
break;
|
||||
default:
|
||||
verbose_stream() << "unknown axiom-type";
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (currentNode->get_decl_kind() == PR_LEMMA)
|
||||
{
|
||||
verbose_stream() << "lemma";
|
||||
}
|
||||
else if (currentNode->get_decl_kind() == PR_TH_LEMMA)
|
||||
{
|
||||
verbose_stream() << "th_lemma";
|
||||
func_decl* d = currentNode->get_decl();
|
||||
symbol sym;
|
||||
if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas")
|
||||
{
|
||||
verbose_stream() << "(farkas)";
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << "(other)";
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << "step";
|
||||
}
|
||||
verbose_stream() << " from ";
|
||||
for (int i = m.get_num_parents(currentNode) - 1; i >= 0 ; --i)
|
||||
{
|
||||
proof* premise = to_app(currentNode->get_arg(i));
|
||||
unsigned premise_small_id = id_to_small_id[premise->get_id()];
|
||||
if (i > 0)
|
||||
{
|
||||
verbose_stream() << premise_small_id << ", ";
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << premise_small_id;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
if (currentNode->get_decl_kind() == PR_TH_LEMMA || (is_a_marked(currentNode) && is_b_marked(currentNode)) || is_h_marked(currentNode) || (!is_a_marked(currentNode) && !is_b_marked(currentNode)))
|
||||
{
|
||||
verbose_stream() << std::endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << ": " << mk_pp(m.get_fact(currentNode), m) << std::endl;
|
||||
}
|
||||
++counter;
|
||||
}
|
||||
}
|
||||
// move all lemmas into vector
|
||||
for (expr* const* it = m_unsat_core.begin(); it != m_unsat_core.end(); ++it)
|
||||
{
|
||||
unsat_core.push_back(*it);
|
||||
for (expr* e : m_unsat_core) {
|
||||
unsat_core.push_back(e);
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_learner::compute_partial_core(proof* step)
|
||||
{
|
||||
for (unsat_core_plugin** it=m_plugins.begin(), **end = m_plugins.end (); it != end && !m_closed.is_marked(step); ++it)
|
||||
{
|
||||
unsat_core_plugin* plugin = *it;
|
||||
void unsat_core_learner::compute_partial_core(proof* step) {
|
||||
for (unsat_core_plugin* plugin : m_plugins) {
|
||||
if (m_closed.is_marked(step)) break;
|
||||
plugin->compute_partial_core(step);
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_learner::finalize()
|
||||
{
|
||||
for (unsat_core_plugin** it=m_plugins.begin(); it != m_plugins.end(); ++it)
|
||||
{
|
||||
unsat_core_plugin* plugin = *it;
|
||||
void unsat_core_learner::finalize() {
|
||||
for (unsat_core_plugin* plugin : m_plugins) {
|
||||
plugin->finalize();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool unsat_core_learner::is_a_marked(proof* p)
|
||||
{
|
||||
return m_a_mark.is_marked(p);
|
||||
}
|
||||
bool unsat_core_learner::is_b_marked(proof* p)
|
||||
{
|
||||
return m_b_mark.is_marked(p);
|
||||
}
|
||||
bool unsat_core_learner::is_h_marked(proof* p)
|
||||
{
|
||||
return m_h_mark.is_marked(p);
|
||||
}
|
||||
bool unsat_core_learner::is_closed(proof*p)
|
||||
{
|
||||
bool unsat_core_learner::is_closed(proof* p) {
|
||||
return m_closed.is_marked(p);
|
||||
}
|
||||
void unsat_core_learner::set_closed(proof* p, bool value)
|
||||
{
|
||||
|
||||
void unsat_core_learner::set_closed(proof* p, bool value) {
|
||||
m_closed.mark(p, value);
|
||||
}
|
||||
|
||||
void unsat_core_learner::add_lemma_to_core(expr* lemma)
|
||||
{
|
||||
m_unsat_core.push_back(lemma);
|
||||
}
|
||||
|
||||
|
||||
class collect_pure_proc {
|
||||
func_decl_set& m_symbs;
|
||||
public:
|
||||
collect_pure_proc(func_decl_set& s):m_symbs(s) {}
|
||||
|
||||
void operator()(app* a) {
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
m_symbs.insert(a->get_decl());
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
void unsat_core_learner::collect_symbols_b(const expr_set& axioms_b)
|
||||
{
|
||||
expr_mark visited;
|
||||
collect_pure_proc proc(m_symbols_b);
|
||||
for (expr_set::iterator it = axioms_b.begin(); it != axioms_b.end(); ++it)
|
||||
{
|
||||
for_each_expr(proc, visited, *it);
|
||||
}
|
||||
bool unsat_core_learner::is_b_open(proof *p) {
|
||||
return m_pr.is_b_marked(p) && !is_closed (p);
|
||||
}
|
||||
|
||||
class is_pure_expr_proc {
|
||||
func_decl_set const& m_symbs;
|
||||
array_util m_au;
|
||||
public:
|
||||
struct non_pure {};
|
||||
|
||||
is_pure_expr_proc(func_decl_set const& s, ast_manager& m):
|
||||
m_symbs(s),
|
||||
m_au (m)
|
||||
{}
|
||||
|
||||
void operator()(app* a) {
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
if (!m_symbs.contains(a->get_decl())) {
|
||||
throw non_pure();
|
||||
}
|
||||
}
|
||||
else if (a->get_family_id () == m_au.get_family_id () &&
|
||||
a->is_app_of (a->get_family_id (), OP_ARRAY_EXT)) {
|
||||
throw non_pure();
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
bool unsat_core_learner::only_contains_symbols_b(expr* expr) const
|
||||
{
|
||||
is_pure_expr_proc proc(m_symbols_b, m);
|
||||
try {
|
||||
for_each_expr(proc, expr);
|
||||
}
|
||||
catch (is_pure_expr_proc::non_pure)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
void unsat_core_learner::add_lemma_to_core(expr* lemma) {
|
||||
m_unsat_core.push_back(lemma);
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
@@ -6,7 +6,8 @@ Module Name:
spacer_unsat_core_learner.h
|
||||
|
||||
Abstract:
|
||||
itp cores
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
@@ -20,21 +21,23 @@ Revision History:
|
||||
#include "ast/ast.h"
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
#include "ast/proofs/proof_utils.h"
|
||||
#include "muz/spacer/spacer_proof_utils.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
|
||||
class unsat_core_plugin;
|
||||
class unsat_core_learner
|
||||
{
|
||||
class iuc_proof;
|
||||
class unsat_core_learner {
|
||||
typedef obj_hashtable<expr> expr_set;
|
||||
|
||||
public:
|
||||
unsat_core_learner(ast_manager& m) : m(m), m_unsat_core(m) {};
|
||||
unsat_core_learner(ast_manager& m, iuc_proof& pr) :
|
||||
m(m), m_pr(pr), m_unsat_core(m) {};
|
||||
virtual ~unsat_core_learner();
|
||||
|
||||
ast_manager& m;
|
||||
iuc_proof& m_pr;
|
||||
|
||||
/*
|
||||
* register a plugin for computation of partial unsat cores
|
||||
@@ -45,51 +48,33 @@ namespace spacer {
/*
|
||||
* compute unsat core using the registered unsat-core-plugins
|
||||
*/
|
||||
void compute_unsat_core(proof* root, expr_set& asserted_b, expr_ref_vector& unsat_core);
|
||||
void compute_unsat_core(expr_ref_vector& unsat_core);
|
||||
|
||||
/*
|
||||
* getter/setter methods for data structures exposed to plugins
|
||||
* the following invariants can be assumed and need to be maintained by the plugins:
|
||||
* - a node is a-marked iff it is derived using at least one asserted proof step from A.
|
||||
* - a node is b-marked iff its derivation contains no asserted proof steps from A and
|
||||
* no hypotheses (with the additional complication that lemmas conceptually remove hypotheses)
|
||||
* - a node is h-marked, iff it is derived using at least one hypothesis
|
||||
* the following invariant can be assumed and needs to be maintained by the plugins:
|
||||
* - a node is closed, iff it has already been interpolated, i.e. its contribution is
|
||||
* already covered by the unsat-core.
|
||||
*/
|
||||
bool is_a_marked(proof* p);
|
||||
bool is_b_marked(proof* p);
|
||||
bool is_h_marked(proof* p);
|
||||
bool is_closed(proof* p);
|
||||
void set_closed(proof* p, bool value);
|
||||
|
||||
bool is_b_open (proof *p);
|
||||
|
||||
/*
|
||||
* adds a lemma to the unsat core
|
||||
*/
|
||||
void add_lemma_to_core(expr* lemma);
|
||||
|
||||
/*
|
||||
* helper method, which can be used by plugins
|
||||
* returns true iff all symbols of expr occur in some b-asserted formula.
|
||||
* must only be called after a call to collect_symbols_b.
|
||||
*/
|
||||
bool only_contains_symbols_b(expr* expr) const;
|
||||
bool is_b_pure (proof *p)
|
||||
{return !is_h_marked (p) && only_contains_symbols_b (m.get_fact (p));}
|
||||
bool is_b_open (proof *p)
|
||||
{ return is_b_marked (p) && !is_closed (p); }
|
||||
|
||||
private:
|
||||
ptr_vector<unsat_core_plugin> m_plugins;
|
||||
func_decl_set m_symbols_b; // symbols, which occur in any b-asserted formula
|
||||
void collect_symbols_b(const expr_set& axioms_b);
|
||||
|
||||
ast_mark m_a_mark;
|
||||
ast_mark m_b_mark;
|
||||
ast_mark m_h_mark;
|
||||
ast_mark m_closed;
|
||||
|
||||
expr_ref_vector m_unsat_core; // collects the lemmas of the unsat-core, will at the end be inserted into unsat_core.
|
||||
/*
|
||||
* collects the lemmas of the unsat-core
|
||||
* will at the end be inserted into unsat_core.
|
||||
*/
|
||||
expr_ref_vector m_unsat_core;
|
||||
|
||||
/*
|
||||
* computes partial core for step by delegating computation to plugins
|
||||
@@ -101,7 +86,6 @@ namespace spacer {
*/
|
||||
void finalize();
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
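As a reading aid for the learner/plugin interface above, the following is a minimal hypothetical plugin. It is not part of Z3; it mirrors the shape of unsat_core_plugin_lemma and only uses members visible in this diff (the refactored base class with its m and m_learner members, is_b_open, set_closed, add_lemma_to_core, and m_pr.is_b_pure).

    // Sketch: close a mixed A/B step by adding the facts of its open, B-pure
    // premises to the core.
    class unsat_core_plugin_naive : public unsat_core_plugin {
    public:
        unsat_core_plugin_naive(unsat_core_learner& learner) : unsat_core_plugin(learner) {}

        void compute_partial_core(proof* step) override {
            for (proof* premise : m.get_parents(step)) {
                // only premises whose B-contribution is not yet covered matter
                if (m_learner.is_b_open(premise) && m_learner.m_pr.is_b_pure(premise)) {
                    m_learner.add_lemma_to_core(m.get_fact(premise));
                    m_learner.set_closed(premise, true);
                }
            }
            m_learner.set_closed(step, true);
        }
    };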
@@ -6,7 +6,7 @@ Module Name:
spacer_unsat_core_plugin.cpp
|
||||
|
||||
Abstract:
|
||||
plugin for itp cores
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
@@ -30,311 +30,248 @@ Revision History:
#include "muz/spacer/spacer_matrix.h"
|
||||
#include "muz/spacer/spacer_unsat_core_plugin.h"
|
||||
#include "muz/spacer/spacer_unsat_core_learner.h"
|
||||
#include "muz/spacer/spacer_iuc_proof.h"
|
||||
|
||||
namespace spacer
|
||||
{
|
||||
namespace spacer {
|
||||
|
||||
|
||||
void unsat_core_plugin_lemma::compute_partial_core(proof* step)
|
||||
{
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
unsat_core_plugin::unsat_core_plugin(unsat_core_learner& learner):
|
||||
m(learner.m), m_learner(learner) {};
|
||||
|
||||
void unsat_core_plugin_lemma::compute_partial_core(proof* step) {
|
||||
SASSERT(m_learner.m_pr.is_a_marked(step));
|
||||
SASSERT(m_learner.m_pr.is_b_marked(step));
|
||||
|
||||
for (proof* premise : m.get_parents(step)) {
|
||||
|
||||
if (m_learner.is_b_open (premise)) {
|
||||
// by IH, premises that are AB marked are already closed
|
||||
SASSERT(!m_learner.m_pr.is_a_marked(premise));
|
||||
add_lowest_split_to_core(premise);
|
||||
}
|
||||
}
|
||||
m_learner.set_closed(step, true);
|
||||
}
|
||||
|
||||
void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof* premise = to_app(step->get_arg(i));
|
||||
|
||||
if (m_learner.is_b_open (premise))
|
||||
{
|
||||
// by IH, premises that are AB marked are already closed
|
||||
SASSERT(!m_learner.is_a_marked(premise));
|
||||
add_lowest_split_to_core(premise);
|
||||
SASSERT(m_learner.is_b_open(step));
|
||||
|
||||
ptr_buffer<proof> todo;
|
||||
todo.push_back(step);
|
||||
|
||||
while (!todo.empty()) {
|
||||
proof* pf = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
// if current step hasn't been processed,
|
||||
if (!m_learner.is_closed(pf)) {
|
||||
m_learner.set_closed(pf, true);
|
||||
// the step is b-marked and not closed.
|
||||
// by I.H. the step must be already visited
|
||||
// so if it is also a-marked, it must be closed
|
||||
SASSERT(m_learner.m_pr.is_b_marked(pf));
|
||||
SASSERT(!m_learner.m_pr.is_a_marked(pf));
|
||||
|
||||
// the current step needs to be interpolated:
|
||||
expr* fact = m.get_fact(pf);
|
||||
// if we trust the current step and we are able to use it
|
||||
if (m_learner.m_pr.is_b_pure (pf) &&
|
||||
(m.is_asserted(pf) || is_literal(m, fact))) {
|
||||
// just add it to the core
|
||||
m_learner.add_lemma_to_core(fact);
|
||||
}
|
||||
// otherwise recurse on premises
|
||||
else {
|
||||
for (proof* premise : m.get_parents(pf))
|
||||
if (m_learner.is_b_open(premise))
|
||||
todo.push_back(premise);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
m_learner.set_closed(step, true);
|
||||
}
|
||||
|
||||
void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const
|
||||
{
|
||||
SASSERT(m_learner.is_b_open(step));
|
||||
ast_manager &m = m_learner.m;
|
||||
|
||||
ptr_vector<proof> todo;
|
||||
todo.push_back(step);
|
||||
|
||||
while (!todo.empty())
|
||||
void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step)
|
||||
{
|
||||
proof* pf = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
// if current step hasn't been processed,
|
||||
if (!m_learner.is_closed(pf))
|
||||
{
|
||||
m_learner.set_closed(pf, true);
|
||||
// the step is b-marked and not closed.
|
||||
// by I.H. the step must be already visited
|
||||
// so if it is also a-marked, it must be closed
|
||||
SASSERT(m_learner.is_b_marked(pf));
|
||||
SASSERT(!m_learner.is_a_marked(pf));
|
||||
|
||||
// the current step needs to be interpolated:
|
||||
expr* fact = m_learner.m.get_fact(pf);
|
||||
// if we trust the current step and we are able to use it
|
||||
if (m_learner.is_b_pure (pf) &&
|
||||
(m.is_asserted(pf) || is_literal(m, fact)))
|
||||
{
|
||||
// just add it to the core
|
||||
m_learner.add_lemma_to_core(fact);
|
||||
SASSERT(m_learner.m_pr.is_a_marked(step));
|
||||
SASSERT(m_learner.m_pr.is_b_marked(step));
|
||||
// XXX this assertion should be true so there is no need to check for it
|
||||
SASSERT (!m_learner.is_closed (step));
|
||||
func_decl* d = step->get_decl();
|
||||
symbol sym;
|
||||
if (!m_learner.is_closed(step) && // if step is not already interpolated
|
||||
is_farkas_lemma(m, step)) {
|
||||
// weaker check: d->get_num_parameters() >= m.get_num_parents(step) + 2
|
||||
|
||||
SASSERT(d->get_num_parameters() == m.get_num_parents(step) + 2);
|
||||
SASSERT(m.has_fact(step));
|
||||
|
||||
coeff_lits_t coeff_lits;
|
||||
expr_ref_vector pinned(m);
|
||||
|
||||
/* The farkas lemma represents a subproof starting from premise(-set)s A, BNP and BP(ure) and
|
||||
* ending in a disjunction D. We need to compute the contribution of BP, i.e. a formula, which
|
||||
* is entailed by BP and together with A and BNP entails D.
|
||||
*
|
||||
* Let Fark(F) be the farkas coefficient for F. We can use the fact that
|
||||
* (A*Fark(A) + BNP*Fark(BNP) + BP*Fark(BP) + (neg D)*Fark(D)) => false. (E1)
|
||||
* We further have that A+B => C implies (A \land B) => C. (E2)
|
||||
*
|
||||
* Alternative 1:
|
||||
* From (E1) we immediately get that BP*Fark(BP) is a solution.
*
* Alternative 2:
* We can use (E2) to rewrite (E1) into
* (BP*Fark(BP)) => (neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D))) (E3)
* and since we can derive (A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) from
* A, BNP and D, we also know that it is inconsistent. Therefore
|
||||
* neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) is a solution.
|
||||
*
|
||||
* Finally we also need the following workaround:
|
||||
* 1) Although we know from theory, that the Farkas coefficients are always nonnegative,
|
||||
* the Farkas coefficients provided by arith_core are sometimes negative (must be a bug)
|
||||
* as workaround we take the absolute value of the provided coefficients.
|
||||
*/
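// Illustrative example (not from the code): if the lemma has an A-premise
// x >= 1 with Fark(A) = 1 and a B-pure premise x <= 0 with Fark(BP) = 1
// (conclusion D = false), then Alternative 1 keeps only the B-pure part, so the
// contribution added to the core is 1*(x <= 0), i.e. x <= 0: it is entailed by B
// and together with the A-premise x >= 1 it is inconsistent, as required.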
|
||||
parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient
|
||||
|
||||
STRACE("spacer.farkas",
|
||||
verbose_stream() << "Farkas input: "<< "\n";
|
||||
for (unsigned i = 0; i < m.get_num_parents(step); ++i) {
|
||||
proof * prem = m.get_parent(step, i);
|
||||
rational coef = params[i].get_rational();
|
||||
bool b_pure = m_learner.m_pr.is_b_pure (prem);
|
||||
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n";
|
||||
}
|
||||
);
|
||||
|
||||
bool can_be_closed = true;
|
||||
|
||||
for (unsigned i = 0; i < m.get_num_parents(step); ++i) {
|
||||
proof * premise = m.get_parent(step, i);
|
||||
|
||||
if (m_learner.is_b_open (premise)) {
|
||||
SASSERT(!m_learner.m_pr.is_a_marked(premise));
|
||||
|
||||
if (m_learner.m_pr.is_b_pure (step)) {
|
||||
if (!m_use_constant_from_a) {
|
||||
rational coefficient = params[i].get_rational();
|
||||
coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise)));
|
||||
}
|
||||
}
|
||||
else {
|
||||
can_be_closed = false;
|
||||
|
||||
if (m_use_constant_from_a) {
|
||||
rational coefficient = params[i].get_rational();
|
||||
coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise)));
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (m_use_constant_from_a) {
|
||||
rational coefficient = params[i].get_rational();
|
||||
coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise)));
|
||||
}
|
||||
}
|
||||
}
|
||||
// otherwise recurse on premises
|
||||
else
|
||||
{
|
||||
for (unsigned i = 0, sz = m_learner.m.get_num_parents(pf);
|
||||
i < sz; ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(pf->get_arg(i)));
|
||||
proof* premise = m.get_parent (pf, i);
|
||||
if (m_learner.is_b_open(premise)) {
|
||||
todo.push_back(premise);
|
||||
|
||||
if (m_use_constant_from_a) {
|
||||
params += m.get_num_parents(step); // point to the first Farkas coefficient, which corresponds to a formula in the conclusion
|
||||
|
||||
// the conclusion can either be a single formula or a disjunction of several formulas; we have to deal with both situations
|
||||
if (m.get_num_parents(step) + 2 < d->get_num_parameters()) {
|
||||
unsigned num_args = 1;
|
||||
expr* conclusion = m.get_fact(step);
|
||||
expr* const* args = &conclusion;
|
||||
if (m.is_or(conclusion)) {
|
||||
app* _or = to_app(conclusion);
|
||||
num_args = _or->get_num_args();
|
||||
args = _or->get_args();
|
||||
}
|
||||
SASSERT(m.get_num_parents(step) + 2 + num_args == d->get_num_parameters());
|
||||
|
||||
bool_rewriter brw(m);
|
||||
for (unsigned i = 0; i < num_args; ++i) {
|
||||
expr* premise = args[i];
|
||||
|
||||
expr_ref negatedPremise(m);
|
||||
brw.mk_not(premise, negatedPremise);
|
||||
pinned.push_back(negatedPremise);
|
||||
rational coefficient = params[i].get_rational();
|
||||
coeff_lits.push_back(std::make_pair(abs(coefficient), to_app(negatedPremise)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step)
|
||||
{
|
||||
ast_manager &m = m_learner.m;
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
// XXX this assertion should be true so there is no need to check for it
|
||||
SASSERT (!m_learner.is_closed (step));
|
||||
func_decl* d = step->get_decl();
|
||||
symbol sym;
|
||||
if(!m_learner.is_closed(step) && // if step is not already interpolated
|
||||
step->get_decl_kind() == PR_TH_LEMMA && // and step is a Farkas lemma
|
||||
d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas" &&
|
||||
d->get_num_parameters() >= m_learner.m.get_num_parents(step) + 2) // the following parameters are the Farkas coefficients
|
||||
{
|
||||
SASSERT(m_learner.m.has_fact(step));
|
||||
|
||||
ptr_vector<app> literals;
|
||||
vector<rational> coefficients;
|
||||
|
||||
/* The farkas lemma represents a subproof starting from premise(-set)s A, BNP and BP(ure) and
|
||||
* ending in a disjunction D. We need to compute the contribution of BP, i.e. a formula, which
|
||||
* is entailed by BP and together with A and BNP entails D.
|
||||
*
|
||||
* Let Fark(F) be the farkas coefficient for F. We can use the fact that
|
||||
* (A*Fark(A) + BNP*Fark(BNP) + BP*Fark(BP) + (neg D)*Fark(D)) => false. (E1)
|
||||
* We further have that A+B => C implies (A \land B) => C. (E2)
|
||||
*
|
||||
* Alternative 1:
|
||||
* From (E1) we immediately get that BP*Fark(BP) is a solution.
*
* Alternative 2:
* We can use (E2) to rewrite (E1) into
* (BP*Fark(BP)) => (neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D))) (E3)
* and since we can derive (A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) from
* A, BNP and D, we also know that it is inconsistent. Therefore
|
||||
* neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) is a solution.
|
||||
*
|
||||
* Finally we also need the following workaround:
|
||||
* 1) Although we know from theory, that the Farkas coefficients are always nonnegative,
|
||||
* the Farkas coefficients provided by arith_core are sometimes negative (must be a bug)
|
||||
* as workaround we take the absolute value of the provided coefficients.
|
||||
*/
|
||||
parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient
|
||||
|
||||
STRACE("spacer.farkas",
|
||||
verbose_stream() << "Farkas input: "<< "\n";
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof *prem = m.get_parent (step, i);
|
||||
|
||||
rational coef;
|
||||
VERIFY(params[i].is_rational(coef));
|
||||
|
||||
bool b_pure = m_learner.is_b_pure (prem);
|
||||
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n";
|
||||
}
|
||||
);
|
||||
|
||||
bool can_be_closed = true;
|
||||
|
||||
for(unsigned i = 0; i < m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof * premise = m.get_parent (step, i);
|
||||
|
||||
if (m_learner.is_b_open (premise))
|
||||
{
|
||||
SASSERT(!m_learner.is_a_marked(premise));
|
||||
|
||||
if (m_learner.is_b_pure (step))
|
||||
{
|
||||
if (!m_use_constant_from_a)
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
literals.push_back(to_app(m_learner.m.get_fact(premise)));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
can_be_closed = false;
|
||||
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
literals.push_back(to_app(m_learner.m.get_fact(premise)));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
literals.push_back(to_app(m_learner.m.get_fact(premise)));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
// only if all b-premises can be used directly, add the farkas core and close the step
|
||||
if (can_be_closed) {
|
||||
m_learner.set_closed(step, true);
|
||||
|
||||
expr_ref res = compute_linear_combination(coeff_lits);
|
||||
|
||||
m_learner.add_lemma_to_core(res);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
params += m_learner.m.get_num_parents(step); // point to the first Farkas coefficient, which corresponds to a formula in the conclusion
|
||||
expr_ref unsat_core_plugin_farkas_lemma::compute_linear_combination(const coeff_lits_t& coeff_lits)
|
||||
{
|
||||
|
||||
// the conclusion can either be a single formula or a disjunction of several formulas; we have to deal with both situations
|
||||
if (m_learner.m.get_num_parents(step) + 2 < d->get_num_parameters())
|
||||
{
|
||||
unsigned num_args = 1;
|
||||
expr* conclusion = m_learner.m.get_fact(step);
|
||||
expr* const* args = &conclusion;
|
||||
if (m_learner.m.is_or(conclusion))
|
||||
{
|
||||
app* _or = to_app(conclusion);
|
||||
num_args = _or->get_num_args();
|
||||
args = _or->get_args();
|
||||
}
|
||||
SASSERT(m_learner.m.get_num_parents(step) + 2 + num_args == d->get_num_parameters());
|
||||
|
||||
bool_rewriter brw(m_learner.m);
|
||||
for (unsigned i = 0; i < num_args; ++i)
|
||||
{
|
||||
expr* premise = args[i];
|
||||
|
||||
expr_ref negatedPremise(m_learner.m);
|
||||
brw.mk_not(premise, negatedPremise);
|
||||
literals.push_back(to_app(negatedPremise));
|
||||
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
}
|
||||
smt::farkas_util util(m);
|
||||
if (m_use_constant_from_a) {
|
||||
util.set_split_literals (m_split_literals); // small optimization: if flag m_split_literals is set, then preserve diff constraints
|
||||
}
|
||||
|
||||
// only if all b-premises can be used directly, add the farkas core and close the step
|
||||
if (can_be_closed)
|
||||
{
|
||||
m_learner.set_closed(step, true);
|
||||
|
||||
expr_ref res(m_learner.m);
|
||||
compute_linear_combination(coefficients, literals, res);
|
||||
|
||||
m_learner.add_lemma_to_core(res);
|
||||
for (auto& p : coeff_lits) {
|
||||
util.add(p.first, p.second);
|
||||
}
|
||||
if (m_use_constant_from_a) {
|
||||
return util.get();
|
||||
}
|
||||
else {
|
||||
return expr_ref(mk_not(m, util.get()), m);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res)
|
||||
{
|
||||
SASSERT(literals.size() == coefficients.size());
|
||||
|
||||
ast_manager& m = res.get_manager();
|
||||
smt::farkas_util util(m);
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
util.set_split_literals (m_split_literals); // small optimization: if flag m_split_literals is set, then preserve diff constraints
|
||||
}
|
||||
for(unsigned i = 0; i < literals.size(); ++i)
|
||||
{
|
||||
util.add(coefficients[i], literals[i]);
|
||||
}
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
res = util.get();
|
||||
}
|
||||
else
|
||||
{
|
||||
expr_ref negated_linear_combination = util.get();
|
||||
res = mk_not(m, negated_linear_combination);
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_optimized::compute_partial_core(proof* step)
|
||||
{
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
SASSERT(m_learner.m_pr.is_a_marked(step));
|
||||
SASSERT(m_learner.m_pr.is_b_marked(step));
|
||||
|
||||
func_decl* d = step->get_decl();
|
||||
symbol sym;
|
||||
if(!m_learner.is_closed(step) && // if step is not already interpolated
|
||||
step->get_decl_kind() == PR_TH_LEMMA && // and step is a Farkas lemma
|
||||
d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas" &&
|
||||
d->get_num_parameters() >= m_learner.m.get_num_parents(step) + 2) // the following parameters are the Farkas coefficients
|
||||
{
|
||||
SASSERT(m_learner.m.has_fact(step));
|
||||
is_farkas_lemma(m, step)) {
|
||||
SASSERT(d->get_num_parameters() == m.get_num_parents(step) + 2);
|
||||
SASSERT(m.has_fact(step));
|
||||
|
||||
vector<std::pair<app*,rational> > linear_combination; // collects all summands of the linear combination
|
||||
coeff_lits_t linear_combination; // collects all summands of the linear combination
|
||||
|
||||
parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient
|
||||
|
||||
STRACE("spacer.farkas",
|
||||
verbose_stream() << "Farkas input: "<< "\n";
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof *prem = m.get_parent (step, i);
|
||||
|
||||
rational coef;
|
||||
VERIFY(params[i].is_rational(coef));
|
||||
|
||||
bool b_pure = m_learner.is_b_pure (prem);
|
||||
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n";
|
||||
}
|
||||
);
|
||||
verbose_stream() << "Farkas input: "<< "\n";
|
||||
for (unsigned i = 0; i < m.get_num_parents(step); ++i) {
|
||||
proof * prem = m.get_parent(step, i);
|
||||
rational coef = params[i].get_rational();
|
||||
bool b_pure = m_learner.m_pr.is_b_pure (prem);
|
||||
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m_learner.m) << "\n";
|
||||
}
|
||||
);
|
||||
|
||||
bool can_be_closed = true;
|
||||
for(unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof * premise = m.get_parent (step, i);
|
||||
for (unsigned i = 0; i < m.get_num_parents(step); ++i) {
|
||||
proof * premise = m.get_parent(step, i);
|
||||
|
||||
if (m_learner.is_b_marked(premise) && !m_learner.is_closed(premise))
|
||||
if (m_learner.m_pr.is_b_marked(premise) && !m_learner.is_closed(premise))
|
||||
{
|
||||
SASSERT(!m_learner.is_a_marked(premise));
|
||||
SASSERT(!m_learner.m_pr.is_a_marked(premise));
|
||||
|
||||
if (m_learner.only_contains_symbols_b(m_learner.m.get_fact(premise)) && !m_learner.is_h_marked(premise))
|
||||
if (m_learner.m_pr.is_b_pure(premise))
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
linear_combination.push_back(std::make_pair(to_app(m_learner.m.get_fact(premise)), abs(coefficient)));
|
||||
rational coefficient = params[i].get_rational();
|
||||
linear_combination.push_back
|
||||
(std::make_pair(abs(coefficient), to_app(m.get_fact(premise))));
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -357,99 +294,21 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
|
||||
struct farkas_optimized_less_than_pairs
|
||||
{
|
||||
inline bool operator() (const std::pair<app*,rational>& pair1, const std::pair<app*,rational>& pair2) const
|
||||
inline bool operator() (const std::pair<rational, app*>& pair1, const std::pair<rational, app*>& pair2) const
|
||||
{
|
||||
return (pair1.first->get_id() < pair2.first->get_id());
|
||||
return (pair1.second->get_id() < pair2.second->get_id());
|
||||
}
|
||||
};
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_optimized::finalize()
|
||||
{
|
||||
if(m_linear_combinations.empty())
|
||||
if (m_linear_combinations.empty())
|
||||
{
|
||||
return;
|
||||
}
|
||||
DEBUG_CODE(
|
||||
for (auto& linear_combination : m_linear_combinations) {
|
||||
SASSERT(linear_combination.size() > 0);
|
||||
});
|
||||
|
||||
// 1. construct ordered basis
|
||||
ptr_vector<app> ordered_basis;
|
||||
obj_map<app, unsigned> map;
|
||||
unsigned counter = 0;
|
||||
for (const auto& linear_combination : m_linear_combinations)
|
||||
{
|
||||
for (const auto& pair : linear_combination)
|
||||
{
|
||||
if (!map.contains(pair.first))
|
||||
{
|
||||
ordered_basis.push_back(pair.first);
|
||||
map.insert(pair.first, counter++);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2. populate matrix
|
||||
spacer_matrix matrix(m_linear_combinations.size(), ordered_basis.size());
|
||||
|
||||
for (unsigned i=0; i < m_linear_combinations.size(); ++i)
|
||||
{
|
||||
auto linear_combination = m_linear_combinations[i];
|
||||
for (const auto& pair : linear_combination)
|
||||
{
|
||||
matrix.set(i, map[pair.first], pair.second);
|
||||
}
|
||||
}
|
||||
|
||||
// 3. perform gaussian elimination
|
||||
unsigned i = matrix.perform_gaussian_elimination();
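// (Descriptive note: rows 0..i-1 are the nonzero rows left by the elimination; they
// span the same space as all collected combinations, so only these independent
// combinations are turned into core lemmas below instead of one lemma per proof step.)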
|
||||
|
||||
// 4. extract linear combinations from matrix and add result to core
|
||||
for (unsigned k=0; k < i; k++)// i points to the row after the last row which is non-zero
|
||||
{
|
||||
ptr_vector<app> literals;
|
||||
vector<rational> coefficients;
|
||||
for (unsigned l=0; l < matrix.num_cols(); ++l)
|
||||
{
|
||||
if (!matrix.get(k,l).is_zero())
|
||||
{
|
||||
literals.push_back(ordered_basis[l]);
|
||||
coefficients.push_back(matrix.get(k,l));
|
||||
}
|
||||
}
|
||||
SASSERT(literals.size() > 0);
|
||||
expr_ref linear_combination(m);
|
||||
compute_linear_combination(coefficients, literals, linear_combination);
|
||||
|
||||
m_learner.add_lemma_to_core(linear_combination);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_optimized::compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res)
|
||||
{
|
||||
SASSERT(literals.size() == coefficients.size());
|
||||
|
||||
ast_manager& m = res.get_manager();
|
||||
smt::farkas_util util(m);
|
||||
for(unsigned i = 0; i < literals.size(); ++i)
|
||||
{
|
||||
util.add(coefficients[i], literals[i]);
|
||||
}
|
||||
expr_ref negated_linear_combination = util.get();
|
||||
SASSERT(m.is_not(negated_linear_combination));
|
||||
res = mk_not(m, negated_linear_combination); //TODO: rewrite the get-method to return nonnegated stuff?
|
||||
}
|
||||
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_bounded::finalize() {
|
||||
if (m_linear_combinations.empty()) {
|
||||
return;
|
||||
}
|
||||
DEBUG_CODE(
|
||||
for (auto& linear_combination : m_linear_combinations) {
|
||||
SASSERT(linear_combination.size() > 0);
|
||||
SASSERT(!linear_combination.empty());
|
||||
});
|
||||
|
||||
// 1. construct ordered basis
|
||||
@@ -458,9 +317,72 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
unsigned counter = 0;
|
||||
for (const auto& linear_combination : m_linear_combinations) {
|
||||
for (const auto& pair : linear_combination) {
|
||||
if (!map.contains(pair.first)) {
|
||||
ordered_basis.push_back(pair.first);
|
||||
map.insert(pair.first, counter++);
|
||||
if (!map.contains(pair.second)) {
|
||||
ordered_basis.push_back(pair.second);
|
||||
map.insert(pair.second, counter++);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2. populate matrix
|
||||
spacer_matrix matrix(m_linear_combinations.size(), ordered_basis.size());
|
||||
|
||||
for (unsigned i = 0; i < m_linear_combinations.size(); ++i) {
|
||||
auto linear_combination = m_linear_combinations[i];
|
||||
for (const auto& pair : linear_combination) {
|
||||
matrix.set(i, map[pair.second], pair.first);
|
||||
}
|
||||
}
|
||||
|
||||
// 3. perform gaussian elimination
|
||||
unsigned i = matrix.perform_gaussian_elimination();
|
||||
|
||||
// 4. extract linear combinations from matrix and add result to core
|
||||
for (unsigned k = 0; k < i; ++k)// i points to the row after the last row which is non-zero
|
||||
{
|
||||
coeff_lits_t coeff_lits;
|
||||
for (unsigned l = 0; l < matrix.num_cols(); ++l) {
|
||||
if (!matrix.get(k,l).is_zero()) {
|
||||
coeff_lits.push_back(std::make_pair(matrix.get(k, l), ordered_basis[l]));
|
||||
}
|
||||
}
|
||||
SASSERT(!coeff_lits.empty());
|
||||
expr_ref linear_combination = compute_linear_combination(coeff_lits);
|
||||
|
||||
m_learner.add_lemma_to_core(linear_combination);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
expr_ref unsat_core_plugin_farkas_lemma_optimized::compute_linear_combination(const coeff_lits_t& coeff_lits) {
|
||||
smt::farkas_util util(m);
|
||||
for (auto const & p : coeff_lits) {
|
||||
util.add(p.first, p.second);
|
||||
}
|
||||
expr_ref negated_linear_combination = util.get();
|
||||
SASSERT(m.is_not(negated_linear_combination));
|
||||
return expr_ref(mk_not(m, negated_linear_combination), m);
|
||||
//TODO: rewrite the get-method to return nonnegated stuff?
|
||||
}
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_bounded::finalize() {
|
||||
if (m_linear_combinations.empty()) {
|
||||
return;
|
||||
}
|
||||
DEBUG_CODE(
|
||||
for (auto& linear_combination : m_linear_combinations) {
|
||||
SASSERT(!linear_combination.empty());
|
||||
});
|
||||
|
||||
// 1. construct ordered basis
|
||||
ptr_vector<app> ordered_basis;
|
||||
obj_map<app, unsigned> map;
|
||||
unsigned counter = 0;
|
||||
for (const auto& linear_combination : m_linear_combinations) {
|
||||
for (const auto& pair : linear_combination) {
|
||||
if (!map.contains(pair.second)) {
|
||||
ordered_basis.push_back(pair.second);
|
||||
map.insert(pair.second, counter++);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -471,7 +393,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
for (unsigned i=0; i < m_linear_combinations.size(); ++i) {
|
||||
auto linear_combination = m_linear_combinations[i];
|
||||
for (const auto& pair : linear_combination) {
|
||||
matrix.set(i, map[pair.first], pair.second);
|
||||
matrix.set(i, map[pair.second], pair.first);
|
||||
}
|
||||
}
|
||||
matrix.print_matrix();
|
||||
@@ -483,13 +405,12 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
arith_util util(m);
|
||||
|
||||
vector<expr_ref_vector> coeffs;
|
||||
for (unsigned i=0; i < matrix.num_rows(); ++i) {
|
||||
for (unsigned i = 0; i < matrix.num_rows(); ++i) {
|
||||
coeffs.push_back(expr_ref_vector(m));
|
||||
}
|
||||
|
||||
vector<expr_ref_vector> bounded_vectors;
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
for (unsigned j = 0; j < matrix.num_cols(); ++j) {
|
||||
bounded_vectors.push_back(expr_ref_vector(m));
|
||||
}
|
||||
@@ -498,87 +419,67 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
{
|
||||
params_ref p;
|
||||
p.set_bool("model", true);
|
||||
scoped_ptr<solver> s = mk_smt_solver(m, p, symbol::null); // TODO: incremental version?
|
||||
solver_ref s = mk_smt_solver(m, p, symbol::null); // TODO: incremental version?
|
||||
|
||||
// add new variables w_in,
|
||||
for (unsigned i=0; i < matrix.num_rows(); ++i)
|
||||
{
|
||||
for (unsigned i = 0; i < matrix.num_rows(); ++i) {
|
||||
std::string name = "w_" + std::to_string(i) + std::to_string(n);
|
||||
|
||||
func_decl_ref decl(m);
|
||||
decl = m.mk_func_decl(symbol(name.c_str()), 0, (sort*const*)nullptr, util.mk_int());
|
||||
coeffs[i].push_back(m.mk_const(decl));
|
||||
coeffs[i].push_back(m.mk_const(name, util.mk_int()));
|
||||
}
|
||||
|
||||
// we need s_jn
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
for (unsigned j = 0; j < matrix.num_cols(); ++j) {
|
||||
std::string name = "s_" + std::to_string(j) + std::to_string(n);
|
||||
|
||||
func_decl_ref decl(m);
|
||||
decl = m.mk_func_decl(symbol(name.c_str()), 0, (sort*const*)nullptr, util.mk_int());
|
||||
|
||||
expr_ref s_jn(m);
|
||||
s_jn = m.mk_const(decl);
|
||||
|
||||
bounded_vectors[j].push_back(s_jn);
|
||||
bounded_vectors[j].push_back(m.mk_const(name, util.mk_int()));
|
||||
}
|
||||
|
||||
// assert bounds for all s_jn
|
||||
for (unsigned l=0; l < n; ++l) {
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j) {
|
||||
for (unsigned l = 0; l < n; ++l) {
|
||||
for (unsigned j = 0; j < matrix.num_cols(); ++j) {
|
||||
expr* s_jn = bounded_vectors[j][l].get();
|
||||
|
||||
expr_ref lb(util.mk_le(util.mk_int(0), s_jn), m);
|
||||
expr_ref ub(util.mk_le(s_jn, util.mk_int(1)), m);
|
||||
s->assert_expr(lb);
|
||||
s->assert_expr(ub);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// assert: forall i,j: a_ij = sum_k w_ik * s_jk
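// (Descriptive note: the constraint looks for a factorization A = W * S^T of the
// coefficient matrix in which S has entries in {0,1}; each of the n columns of S
// selects a subset of the basis literals, and W rebuilds every original linear
// combination from those n subset-sums. Only the subset-sums are then added to
// the core below.)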
|
||||
for (unsigned i=0; i < matrix.num_rows(); ++i)
|
||||
{
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
for (unsigned i = 0; i < matrix.num_rows(); ++i) {
|
||||
for (unsigned j = 0; j < matrix.num_cols(); ++j) {
|
||||
SASSERT(matrix.get(i, j).is_int());
|
||||
app_ref a_ij(util.mk_numeral(matrix.get(i,j), true),m);
|
||||
|
||||
app_ref sum(m);
|
||||
sum = util.mk_int(0);
|
||||
for (unsigned k=0; k < n; ++k)
|
||||
{
|
||||
app_ref a_ij(util.mk_numeral(matrix.get(i,j), true), m);
|
||||
|
||||
app_ref sum(util.mk_int(0), m);
|
||||
for (unsigned k = 0; k < n; ++k) {
|
||||
sum = util.mk_add(sum, util.mk_mul(coeffs[i][k].get(), bounded_vectors[j][k].get()));
|
||||
}
|
||||
expr_ref eq(m.mk_eq(a_ij, sum),m);
|
||||
s->assert_expr(eq);
|
||||
}
|
||||
expr_ref eq(m.mk_eq(a_ij, sum), m);
|
||||
s->assert_expr(eq);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// check result
|
||||
lbool res = s->check_sat(0,nullptr);
|
||||
lbool res = s->check_sat(0, nullptr);
|
||||
|
||||
// if sat extract model and add corresponding linear combinations to core
|
||||
if (res == lbool::l_true) {
|
||||
model_ref model;
|
||||
s->get_model(model);
|
||||
|
||||
for (unsigned k=0; k < n; ++k) {
|
||||
ptr_vector<app> literals;
|
||||
vector<rational> coefficients;
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j) {
|
||||
|
||||
for (unsigned k = 0; k < n; ++k) {
|
||||
coeff_lits_t coeff_lits;
|
||||
for (unsigned j = 0; j < matrix.num_cols(); ++j) {
|
||||
expr_ref evaluation(m);
|
||||
|
||||
|
||||
model.get()->eval(bounded_vectors[j][k].get(), evaluation, false);
|
||||
if (!util.is_zero(evaluation)) {
|
||||
literals.push_back(ordered_basis[j]);
|
||||
coefficients.push_back(rational(1));
|
||||
coeff_lits.push_back(std::make_pair(rational(1), ordered_basis[j]));
|
||||
}
|
||||
}
|
||||
SASSERT(!literals.empty()); // since then previous outer loop would have found solution already
|
||||
expr_ref linear_combination(m);
|
||||
compute_linear_combination(coefficients, literals, linear_combination);
|
||||
|
||||
SASSERT(!coeff_lits.empty()); // otherwise the previous outer loop would already have found a solution
|
||||
expr_ref linear_combination = compute_linear_combination(coeff_lits);
|
||||
|
||||
m_learner.add_lemma_to_core(linear_combination);
|
||||
}
|
||||
return;
|
||||
@@ -586,7 +487,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
}
|
||||
}
|
||||
|
||||
unsat_core_plugin_min_cut::unsat_core_plugin_min_cut(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner), m(m){}
|
||||
unsat_core_plugin_min_cut::unsat_core_plugin_min_cut(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner) {}
|
||||
|
||||
/*
|
||||
* traverses proof rooted in step and constructs graph, which corresponds to the proof-DAG, with the following differences:
|
||||
@@ -603,8 +504,8 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
{
|
||||
ptr_vector<proof> todo;
|
||||
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
SASSERT(m_learner.m_pr.is_a_marked(step));
|
||||
SASSERT(m_learner.m_pr.is_b_marked(step));
|
||||
SASSERT(m.get_num_parents(step) > 0);
|
||||
SASSERT(!m_learner.is_closed(step));
|
||||
todo.push_back(step);
|
||||
@@ -621,7 +522,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
// add an edge from current to each leaf of that subproof
|
||||
// add the leaves to todo
|
||||
advance_to_lowest_partial_cut(current, todo);
|
||||
|
||||
|
||||
m_visited.mark(current, true);
|
||||
|
||||
}
|
||||
@@ -629,22 +530,16 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
m_learner.set_closed(step, true);
|
||||
}
|
||||
|
||||
|
||||
|
||||
void unsat_core_plugin_min_cut::advance_to_lowest_partial_cut(proof* step, ptr_vector<proof>& todo)
|
||||
{
|
||||
bool is_sink = true;
|
||||
|
||||
ast_manager &m = m_learner.m;
|
||||
ptr_vector<proof> todo_subproof;
|
||||
ptr_buffer<proof> todo_subproof;
|
||||
|
||||
for (unsigned i = 0, sz = m.get_num_parents(step); i < sz; ++i)
|
||||
{
|
||||
proof* premise = m.get_parent (step, i);
|
||||
{
|
||||
if (m_learner.is_b_marked(premise))
|
||||
{
|
||||
todo_subproof.push_back(premise);
|
||||
}
|
||||
for (proof* premise : m.get_parents(step)) {
|
||||
if (m_learner.m_pr.is_b_marked(premise)) {
|
||||
todo_subproof.push_back(premise);
|
||||
}
|
||||
}
|
||||
while (!todo_subproof.empty())
|
||||
@@ -655,19 +550,19 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
// if we need to deal with the node
|
||||
if (!m_learner.is_closed(current))
|
||||
{
|
||||
SASSERT(!m_learner.is_a_marked(current)); // by I.H. the step must be already visited
|
||||
SASSERT(!m_learner.m_pr.is_a_marked(current)); // by I.H. the step must be already visited
|
||||
|
||||
// and the current step needs to be interpolated:
|
||||
if (m_learner.is_b_marked(current))
|
||||
if (m_learner.m_pr.is_b_marked(current))
|
||||
{
|
||||
// if we trust the current step and we are able to use it
|
||||
if (m_learner.is_b_pure (current) &&
|
||||
if (m_learner.m_pr.is_b_pure (current) &&
|
||||
(m.is_asserted(current) ||
|
||||
is_literal(m, m.get_fact(current))))
|
||||
{
|
||||
// we found a leaf of the subproof, so
|
||||
// 1) we add corresponding edges
|
||||
if (m_learner.is_a_marked(step))
|
||||
if (m_learner.m_pr.is_a_marked(step))
|
||||
{
|
||||
add_edge(nullptr, current); // current is sink
|
||||
}
|
||||
@@ -682,10 +577,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
// otherwise continue search for leaves of subproof
|
||||
else
|
||||
{
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(current); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(current->get_arg(i)));
|
||||
proof* premise = m.get_parent (current, i);
|
||||
for (proof* premise : m.get_parents(current)) {
|
||||
todo_subproof.push_back(premise);
|
||||
}
|
||||
}
|
||||
@@ -707,6 +599,8 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
*/
|
||||
void unsat_core_plugin_min_cut::add_edge(proof* i, proof* j)
|
||||
{
|
||||
SASSERT(i != nullptr || j != nullptr);
|
||||
|
||||
unsigned node_i;
|
||||
unsigned node_j;
|
||||
if (i == nullptr)
|
||||
@@ -735,7 +629,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
m_node_to_formula[node_other] = m.get_fact(i);
|
||||
m_node_to_formula[node_i] = m.get_fact(i);
|
||||
|
||||
m_min_cut.add_edge(node_other, node_i);
|
||||
m_min_cut.add_edge(node_other, node_i, 1);
|
||||
}
|
||||
}
|
||||
|
@@ -765,26 +659,32 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rat
|||
m_node_to_formula[node_j] = m.get_fact(j);
|
||||
m_node_to_formula[node_other] = m.get_fact(j);
|
||||
|
||||
m_min_cut.add_edge(node_j, node_other);
|
||||
m_min_cut.add_edge(node_j, node_other, 1);
|
||||
}
|
||||
}
|
||||
|
||||
// finally connect nodes
|
||||
m_min_cut.add_edge(node_i, node_j);
|
||||
// finally connect nodes (if there is not already a connection i -> j (only relevant if i is the supersource))
|
||||
if (!(i == nullptr && m_connected_to_s.is_marked(j)))
|
||||
{
|
||||
m_min_cut.add_edge(node_i, node_j, 1);
|
||||
}
|
||||
|
||||
if (i == nullptr)
|
||||
{
|
||||
m_connected_to_s.mark(j, true);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* computes min-cut on the graph constructed by compute_partial_core-method
|
||||
* and adds the corresponding lemmas to the core
|
||||
*/
|
||||
void unsat_core_plugin_min_cut::finalize()
|
||||
{
|
||||
void unsat_core_plugin_min_cut::finalize() {
|
||||
unsigned_vector cut_nodes;
|
||||
m_min_cut.compute_min_cut(cut_nodes);
|
||||
|
||||
for (unsigned cut_node : cut_nodes)
|
||||
{
|
||||
|
||||
for (unsigned cut_node : cut_nodes) {
|
||||
m_learner.add_lemma_to_core(m_node_to_formula[cut_node]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
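Background note (not part of the commit): the plugin above reduces core minimization to a min-cut problem, with a supersource, per-step graph nodes, unit-capacity edges, and finally a min cut whose node formulas become the lemmas of the core. The following is a minimal, self-contained C++ sketch of that underlying graph computation under the usual max-flow/min-cut duality; it does not use Z3's internal min_cut class, and all names here (graph, min_cut_source_side) are hypothetical illustration only.

// Illustrative sketch: unit-capacity max-flow via BFS augmentation; the min cut
// separates the nodes reachable from the source in the residual graph from the rest.
#include <cstdio>
#include <queue>
#include <vector>

struct graph {
    int n;
    std::vector<std::vector<int>> cap;                 // residual capacities
    explicit graph(int n) : n(n), cap(n, std::vector<int>(n, 0)) {}
    void add_edge(int u, int v, int c) { cap[u][v] += c; }

    // find one augmenting path s -> t by BFS, recording parents
    bool bfs(int s, int t, std::vector<int>& parent) {
        std::vector<bool> seen(n, false);
        std::queue<int> q; q.push(s); seen[s] = true; parent.assign(n, -1);
        while (!q.empty()) {
            int u = q.front(); q.pop();
            for (int v = 0; v < n; ++v)
                if (!seen[v] && cap[u][v] > 0) {
                    seen[v] = true; parent[v] = u;
                    if (v == t) return true;
                    q.push(v);
                }
        }
        return false;
    }

    // saturate augmenting paths (unit capacities), then return the source side of the cut
    std::vector<int> min_cut_source_side(int s, int t) {
        std::vector<int> parent;
        while (bfs(s, t, parent))
            for (int v = t; v != s; v = parent[v]) {
                --cap[parent[v]][v];                   // push one unit of flow
                ++cap[v][parent[v]];                   // add residual edge
            }
        std::vector<bool> seen(n, false);
        std::queue<int> q; q.push(s); seen[s] = true;
        std::vector<int> side;
        while (!q.empty()) {
            int u = q.front(); q.pop(); side.push_back(u);
            for (int v = 0; v < n; ++v)
                if (!seen[v] && cap[u][v] > 0) { seen[v] = true; q.push(v); }
        }
        return side;                                   // cut edges leave this set
    }
};

int main() {
    graph g(4);                                        // 0 = supersource, 3 = sink
    g.add_edge(0, 1, 1); g.add_edge(1, 3, 1);
    g.add_edge(0, 2, 1); g.add_edge(2, 3, 1);
    for (int u : g.min_cut_source_side(0, 3)) std::printf("%d ", u);
    std::printf("\n");
    return 0;
}

In the plugin, the analogue of min_cut_source_side is m_min_cut.compute_min_cut, and the formulas attached to the cut nodes are what gets added to the unsat core.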
@@ -23,84 +23,73 @@ Revision History:

namespace spacer {

    class unsat_core_learner;
class unsat_core_learner;


    class unsat_core_plugin {

    public:
        unsat_core_plugin(unsat_core_learner& learner) : m_learner(learner){};
        virtual ~unsat_core_plugin(){};
        virtual void compute_partial_core(proof* step) = 0;
        virtual void finalize(){};

        unsat_core_learner& m_learner;
    };


    class unsat_core_plugin_lemma : public unsat_core_plugin {

    public:
        unsat_core_plugin_lemma(unsat_core_learner& learner) : unsat_core_plugin(learner){};

        void compute_partial_core(proof* step) override;

    private:
        void add_lowest_split_to_core(proof* step) const;
    };


    class unsat_core_plugin_farkas_lemma : public unsat_core_plugin {

    public:
        unsat_core_plugin_farkas_lemma(unsat_core_learner& learner, bool split_literals, bool use_constant_from_a=true) : unsat_core_plugin(learner), m_split_literals(split_literals), m_use_constant_from_a(use_constant_from_a) {};

        void compute_partial_core(proof* step) override;

    private:
        bool m_split_literals;
        bool m_use_constant_from_a;
        /*
         * compute linear combination of literals 'literals' having coefficients 'coefficients' and save result in res
         */
        void compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res);
    };

    class unsat_core_plugin_farkas_lemma_optimized : public unsat_core_plugin {

    public:
        unsat_core_plugin_farkas_lemma_optimized(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner), m(m) {};

        void compute_partial_core(proof* step) override;
        void finalize() override;

    class unsat_core_plugin {
    protected:
        vector<vector<std::pair<app*, rational> > > m_linear_combinations;
        typedef vector<std::pair<rational, app*>> coeff_lits_t;
        ast_manager& m;
    public:
        unsat_core_plugin(unsat_core_learner& learner);
        virtual ~unsat_core_plugin() {};
        virtual void compute_partial_core(proof* step) = 0;
        virtual void finalize(){};

        unsat_core_learner& m_learner;
    };

    class unsat_core_plugin_lemma : public unsat_core_plugin {
    public:
        unsat_core_plugin_lemma(unsat_core_learner& learner) : unsat_core_plugin(learner){};
        void compute_partial_core(proof* step) override;
    private:
        void add_lowest_split_to_core(proof* step) const;
    };

    class unsat_core_plugin_farkas_lemma : public unsat_core_plugin {
    public:
        unsat_core_plugin_farkas_lemma(unsat_core_learner& learner,
                                       bool split_literals,
                                       bool use_constant_from_a=true) :
            unsat_core_plugin(learner),
            m_split_literals(split_literals),
            m_use_constant_from_a(use_constant_from_a) {};
        void compute_partial_core(proof* step) override;
    private:
        bool m_split_literals;
        bool m_use_constant_from_a;
        /*
         * compute linear combination of literals 'literals' having coefficients 'coefficients' and save result in res
         */
        void compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res);
        expr_ref compute_linear_combination(const coeff_lits_t& coeff_lits);
    };

    class unsat_core_plugin_farkas_lemma_optimized : public unsat_core_plugin {
    public:
        unsat_core_plugin_farkas_lemma_optimized(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner) {};
        void compute_partial_core(proof* step) override;
        void finalize() override;
    protected:
        vector<coeff_lits_t> m_linear_combinations;
        /*
         * compute linear combination of literals 'literals' having coefficients 'coefficients' and save result in res
         */
        expr_ref compute_linear_combination(const coeff_lits_t& coeff_lits);
    };

    class unsat_core_plugin_farkas_lemma_bounded : public unsat_core_plugin_farkas_lemma_optimized {

    public:
        unsat_core_plugin_farkas_lemma_bounded(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin_farkas_lemma_optimized(learner, m) {};

        void finalize() override;
    };

    class unsat_core_plugin_min_cut : public unsat_core_plugin {

    public:
        unsat_core_plugin_min_cut(unsat_core_learner& learner, ast_manager& m);

        void compute_partial_core(proof* step) override;
        void finalize() override;
    private:
        ast_manager& m;

        ast_mark m_visited; // saves for each node i whether the subproof with root i has already been added to the min-cut-problem
        obj_map<proof, unsigned> m_proof_to_node_minus; // maps proof-steps to the corresponding minus-nodes (the ones which are closer to source)
        obj_map<proof, unsigned> m_proof_to_node_plus; // maps proof-steps to the corresponding plus-nodes (the ones which are closer to sink)

@@ -108,6 +97,7 @@ private:
        void add_edge(proof* i, proof* j);

        vector<expr*> m_node_to_formula; // maps each node to the corresponding formula in the original proof
        ast_mark m_connected_to_s; // remember which nodes have already been connected to the supersource, in order to avoid multiple edges.

        min_cut m_min_cut;
    };
File diff suppressed because it is too large

@@ -44,129 +44,120 @@ class model_evaluator;

namespace spacer {

inline unsigned infty_level () {return UINT_MAX;}

inline bool is_infty_level(unsigned lvl)
{ return lvl == infty_level (); }

inline unsigned next_level(unsigned lvl)
{ return is_infty_level(lvl)?lvl:(lvl+1); }

inline unsigned prev_level (unsigned lvl)
{
    if(is_infty_level(lvl)) { return infty_level(); }
    if(lvl == 0) { return 0; }
    return lvl -1;
}

struct pp_level {
    unsigned m_level;
    pp_level(unsigned l): m_level(l) {}
};

inline std::ostream& operator<<(std::ostream& out, pp_level const& p)
{
    if (is_infty_level(p.m_level)) {
        return out << "oo";
    } else {
        return out << p.m_level;
inline unsigned infty_level () {
    return UINT_MAX;
}
}

inline bool is_infty_level(unsigned lvl) {
    return lvl == infty_level ();
}

inline unsigned next_level(unsigned lvl) {
    return is_infty_level(lvl)?lvl:(lvl+1);
}

inline unsigned prev_level (unsigned lvl) {
    if (is_infty_level(lvl)) return infty_level();
    if (lvl == 0) return 0;
    return lvl - 1;
}

typedef ptr_vector<app> app_vector;
typedef ptr_vector<func_decl> decl_vector;
typedef obj_hashtable<func_decl> func_decl_set;
struct pp_level {
    unsigned m_level;
    pp_level(unsigned l): m_level(l) {}
};

inline std::ostream& operator<<(std::ostream& out, pp_level const& p) {
    if (is_infty_level(p.m_level)) {
        return out << "oo";
    } else {
        return out << p.m_level;
    }
}

class model_evaluator_util {
    ast_manager& m;
    model_ref m_model;
    model_evaluator* m_mev;
typedef ptr_vector<app> app_vector;
typedef ptr_vector<func_decl> decl_vector;
typedef obj_hashtable<func_decl> func_decl_set;

// TBD: deprecate
class model_evaluator_util {
    ast_manager& m;
    model_ref m_model;
    model_evaluator* m_mev;

    /// initialize with a given model. All previous state is lost. model can be NULL
    void reset (model *model);
public:
    model_evaluator_util(ast_manager& m);
    ~model_evaluator_util();

    void set_model(model &model) {reset (&model);}
    model_ref &get_model() {return m_model;}
    ast_manager& get_ast_manager() const {return m;}

public:
    bool is_true (const expr_ref_vector &v);
    bool is_false(expr* x);
    bool is_true(expr* x);

    bool eval (const expr_ref_vector &v, expr_ref &result, bool model_completion);
    /// evaluates an expression
    bool eval (expr *e, expr_ref &result, bool model_completion);
    // expr_ref eval(expr* e, bool complete=true);
};

    /// initialize with a given model. All previous state is lost. model can be NULL
    void reset (model *model);
public:
    model_evaluator_util(ast_manager& m);
    ~model_evaluator_util();
    /**
       \brief hoist non-boolean if expressions.
    */

    void to_mbp_benchmark(std::ostream &out, const expr* fml, const app_ref_vector &vars);

    void set_model(model &model) {reset (&model);}
    model_ref &get_model() {return m_model;}
    ast_manager& get_ast_manager() const {return m;}

    // TBD: deprecate by qe::mbp
    /**
     * do the following in sequence
     * 1. use qe_lite to cheaply eliminate vars
     * 2. for remaining boolean vars, substitute using M
     * 3. use MBP for remaining array and arith variables
     * 4. for any remaining arith variables, substitute using M
     */
    void qe_project (ast_manager& m, app_ref_vector& vars, expr_ref& fml,
                     const model_ref& M, bool reduce_all_selects=false, bool native_mbp=false,
                     bool dont_sub=false);

public:
    bool is_true (const expr_ref_vector &v);
    bool is_false(expr* x);
    bool is_true(expr* x);

    bool eval (const expr_ref_vector &v, expr_ref &result, bool model_completion);
    /// evaluates an expression
    bool eval (expr *e, expr_ref &result, bool model_completion);
    // expr_ref eval(expr* e, bool complete=true);
};
/**
   \brief replace variables that are used in many disequalities by
   an equality using the model.

   Assumption: the model satisfies the conjunctions.
*/
void reduce_disequalities(model& model, unsigned threshold, expr_ref& fml);

/**
   \brief hoist non-boolean if expressions.
*/
void hoist_non_bool_if(expr_ref& fml);

bool is_difference_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls);

bool is_utvpi_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls);

/**
 * do the following in sequence
 * 1. use qe_lite to cheaply eliminate vars
 * 2. for remaining boolean vars, substitute using M
 * 3. use MBP for remaining array and arith variables
 * 4. for any remaining arith variables, substitute using M
 */
void qe_project (ast_manager& m, app_ref_vector& vars, expr_ref& fml,
                 const model_ref& M, bool reduce_all_selects=false, bool native_mbp=false,
                 bool dont_sub=false);

void qe_project (ast_manager& m, app_ref_vector& vars, expr_ref& fml, model_ref& M, expr_map& map);

void expand_literals(ast_manager &m, expr_ref_vector& conjs);
void compute_implicant_literals (model_evaluator_util &mev,
                                 expr_ref_vector &formula, expr_ref_vector &res);
void simplify_bounds (expr_ref_vector &lemmas);
void normalize(expr *e, expr_ref &out, bool use_simplify_bounds = true, bool factor_eqs = false);

/** ground expression by replacing all free variables by skolem constants */
void ground_expr (expr *e, expr_ref &out, app_ref_vector &vars);

void mbqi_project (model &M, app_ref_vector &vars, expr_ref &fml);

bool contains_selects (expr* fml, ast_manager& m);
void get_select_indices (expr* fml, app_ref_vector& indices, ast_manager& m);

void find_decls (expr* fml, app_ref_vector& decls, std::string& prefix);

/** extended pretty-printer
 * used for debugging
 * disables aliasing of common sub-expressions
 */
struct mk_epp : public mk_pp {
    params_ref m_epp_params;
    expr_ref m_epp_expr;
void qe_project (ast_manager& m, app_ref_vector& vars, expr_ref& fml, model_ref& M, expr_map& map);

// TBD: sort out
void expand_literals(ast_manager &m, expr_ref_vector& conjs);
void compute_implicant_literals (model_evaluator_util &mev, expr_ref_vector &formula, expr_ref_vector &res);
void simplify_bounds (expr_ref_vector &lemmas);
void normalize(expr *e, expr_ref &out, bool use_simplify_bounds = true, bool factor_eqs = false);

/**
 * Ground expression by replacing all free variables by skolem
 * constants. On return, out is the resulting expression, and vars is
 * a map from variable ids to corresponding skolem constants.
 */
void ground_expr (expr *e, expr_ref &out, app_ref_vector &vars);

void mbqi_project (model &M, app_ref_vector &vars, expr_ref &fml);

bool contains_selects (expr* fml, ast_manager& m);
void get_select_indices (expr* fml, app_ref_vector& indices, ast_manager& m);

void find_decls (expr* fml, app_ref_vector& decls, std::string& prefix);

/**
 * extended pretty-printer
 * used for debugging
 * disables aliasing of common sub-expressions
 */
struct mk_epp : public mk_pp {
    params_ref m_epp_params;
    expr_ref m_epp_expr;
    mk_epp(ast *t, ast_manager &m, unsigned indent = 0, unsigned num_vars = 0, char const * var_prefix = nullptr);
    void rw(expr *e, expr_ref &out);

};

    void rw(expr *e, expr_ref &out);
};
}

#endif
@@ -1,354 +0,0 @@
/**
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_virtual_solver.cpp

Abstract:

    multi-solver view of a single smt::kernel

Author:

    Arie Gurfinkel

Notes:

--*/

#include "muz/spacer/spacer_virtual_solver.h"
#include "ast/ast_util.h"
#include "ast/ast_pp_util.h"
#include "muz/spacer/spacer_util.h"
#include "ast/rewriter/bool_rewriter.h"

#include "ast/proofs/proof_checker.h"
#include "ast/proofs/proof_utils.h"

#include "ast/scoped_proof.h"

namespace spacer {
virtual_solver::virtual_solver(virtual_solver_factory &factory,
                               smt::kernel &context, app* pred) :
    solver_na2as(context.m()),
    m_factory(factory),
    m(context.m()),
    m_context(context),
    m_pred(pred, m),
    m_virtual(!m.is_true(pred)),
    m_assertions(m),
    m_head(0),
    m_flat(m),
    m_pushed(false),
    m_in_delay_scope(false),
    m_dump_benchmarks(factory.fparams().m_dump_benchmarks),
    m_dump_counter(0),
    m_proof(m)
{
    // -- insert m_pred->true background assumption this will not
    // -- change m_context, but will add m_pred to
    // -- the private field solver_na2as::m_assumptions
    if (m_virtual)
    { solver_na2as::assert_expr_core2(m.mk_true(), m_pred); }
}

virtual_solver::~virtual_solver()
{
    SASSERT(!m_pushed || get_scope_level() > 0);
    if (m_pushed) { pop(get_scope_level()); }

    if (m_virtual) {
        m_pred = m.mk_not(m_pred);
        m_context.assert_expr(m_pred);
    }
}

namespace {

// TBD: move to ast/proofs/elim_aux_assertions

}

proof *virtual_solver::get_proof()
{
    scoped_watch _t_(m_factory.m_proof_watch);

    if (!m_proof.get()) {
        elim_aux_assertions pc(m_pred);
        m_proof = m_context.get_proof();
        pc(m, m_proof.get(), m_proof);
    }
    return m_proof.get();
}

bool virtual_solver::is_aux_predicate(expr *p)
{return is_app(p) && to_app(p) == m_pred.get();}

lbool virtual_solver::check_sat_core(unsigned num_assumptions,
                                     expr *const * assumptions)
{
    SASSERT(!m_pushed || get_scope_level() > 0);
    m_proof.reset();
    scoped_watch _t_(m_factory.m_check_watch);
    m_factory.m_stats.m_num_smt_checks++;

    stopwatch sw;
    sw.start();
    internalize_assertions();
    if (false) {
        std::stringstream file_name;
        file_name << "virt_solver";
        if (m_virtual) { file_name << "_" << m_pred->get_decl()->get_name(); }
        file_name << "_" << (m_dump_counter++) << ".smt2";

        verbose_stream() << "Dumping SMT2 benchmark: " << file_name.str() << "\n";

        std::ofstream out(file_name.str().c_str());

        to_smt2_benchmark(out, m_context, num_assumptions, assumptions,
                          "virt_solver");

        out << "(exit)\n";
        out.close();
    }
    lbool res = m_context.check(num_assumptions, assumptions);
    sw.stop();
    if (res == l_true) {
        m_factory.m_check_sat_watch.add(sw);
        m_factory.m_stats.m_num_sat_smt_checks++;
    } else if (res == l_undef) {
        m_factory.m_check_undef_watch.add(sw);
        m_factory.m_stats.m_num_undef_smt_checks++;
    }
    set_status(res);

    if (m_dump_benchmarks &&
        sw.get_seconds() >= m_factory.fparams().m_dump_min_time) {
        std::stringstream file_name;
        file_name << "virt_solver";
        if (m_virtual) { file_name << "_" << m_pred->get_decl()->get_name(); }
        file_name << "_" << (m_dump_counter++) << ".smt2";

        std::ofstream out(file_name.str().c_str());

        out << "(set-info :status ";
        if (res == l_true) { out << "sat"; }
        else if (res == l_false) { out << "unsat"; }
        else { out << "unknown"; }
        out << ")\n";

        to_smt2_benchmark(out, m_context, num_assumptions, assumptions,
                          "virt_solver");

        out << "(exit)\n";
        ::statistics st;
        m_context.collect_statistics(st);
        st.update("time", sw.get_seconds());
        st.display_smt2(out);

        out.close();

        if (m_factory.fparams().m_dump_recheck) {
            scoped_no_proof _no_proof_(m);
            smt_params p;
            stopwatch sw2;
            smt::kernel kernel(m, p);
            for (unsigned i = 0, sz = m_context.size(); i < sz; ++i)
            { kernel.assert_expr(m_context.get_formula(i)); }
            sw2.start();
            kernel.check(num_assumptions, assumptions);
            sw2.stop();
            verbose_stream() << file_name.str() << " :orig "
                             << sw.get_seconds() << " :new " << sw2.get_seconds();
        }
    }

    return res;
}

void virtual_solver::push_core()
{
    SASSERT(!m_pushed || get_scope_level() > 0);
    if (m_in_delay_scope) {
        // second push
        internalize_assertions();
        m_context.push();
        m_pushed = true;
        m_in_delay_scope = false;
    }

    if (!m_pushed) { m_in_delay_scope = true; }
    else {
        SASSERT(m_pushed);
        SASSERT(!m_in_delay_scope);
        m_context.push();
    }
}
void virtual_solver::pop_core(unsigned n) {
    SASSERT(!m_pushed || get_scope_level() > 0);
    if (m_pushed) {
        SASSERT(!m_in_delay_scope);
        m_context.pop(n);
        m_pushed = get_scope_level() - n > 0;
    }
    else {
        m_in_delay_scope = get_scope_level() - n > 0;
    }
}

void virtual_solver::get_unsat_core(ptr_vector<expr> &r)
{
    for (unsigned i = 0, sz = m_context.get_unsat_core_size(); i < sz; ++i) {
        expr *core = m_context.get_unsat_core_expr(i);
        if (is_aux_predicate(core)) { continue; }
        r.push_back(core);
    }
}

void virtual_solver::assert_expr_core(expr *e)
{
    SASSERT(!m_pushed || get_scope_level() > 0);
    if (m.is_true(e)) { return; }
    if (m_in_delay_scope) {
        internalize_assertions();
        m_context.push();
        m_pushed = true;
        m_in_delay_scope = false;
    }

    if (m_pushed)
    { m_context.assert_expr(e); }
    else {
        m_flat.push_back(e);
        flatten_and(m_flat);
        m_assertions.append(m_flat);
        m_flat.reset();
    }
}
void virtual_solver::internalize_assertions()
{
    SASSERT(!m_pushed || m_head == m_assertions.size());
    for (unsigned sz = m_assertions.size(); m_head < sz; ++m_head) {
        expr_ref f(m);
        f = m.mk_implies(m_pred, (m_assertions.get(m_head)));
        m_context.assert_expr(f);
    }
}
void virtual_solver::refresh()
{
    SASSERT(!m_pushed);
    m_head = 0;
}

void virtual_solver::reset()
{
    SASSERT(!m_pushed);
    m_head = 0;
    m_assertions.reset();
    m_factory.refresh();
}

void virtual_solver::get_labels(svector<symbol> &r)
{
    r.reset();
    buffer<symbol> tmp;
    m_context.get_relevant_labels(nullptr, tmp);
    r.append(tmp.size(), tmp.c_ptr());
}

solver* virtual_solver::translate(ast_manager& m, params_ref const& p)
{
    UNREACHABLE();
    return nullptr;
}
void virtual_solver::updt_params(params_ref const &p) { m_factory.updt_params(p); }
void virtual_solver::collect_param_descrs(param_descrs &r) { m_factory.collect_param_descrs(r); }
void virtual_solver::set_produce_models(bool f) { m_factory.set_produce_models(f); }
smt_params &virtual_solver::fparams() {return m_factory.fparams();}

void virtual_solver::to_smt2_benchmark(std::ostream &out,
                                       smt::kernel &context,
                                       unsigned num_assumptions,
                                       expr * const * assumptions,
                                       char const * name,
                                       symbol const &logic,
                                       char const * status,
                                       char const * attributes)
{
    ast_pp_util pp(m);
    expr_ref_vector asserts(m);

    for (unsigned i = 0, sz = context.size(); i < sz; ++i) {
        asserts.push_back(context.get_formula(i));
        pp.collect(asserts.back());
    }
    pp.collect(num_assumptions, assumptions);
    pp.display_decls(out);
    pp.display_asserts(out, asserts);
    out << "(check-sat ";
    for (unsigned i = 0; i < num_assumptions; ++i)
    { out << mk_pp(assumptions[i], m) << " "; }
    out << ")\n";
}


virtual_solver_factory::virtual_solver_factory(ast_manager &mgr, smt_params &fparams) :
    m_fparams(fparams), m(mgr), m_context(m, m_fparams)
{
    m_stats.reset();
}

virtual_solver* virtual_solver_factory::mk_solver()
{
    std::stringstream name;
    name << "vsolver#" << m_solvers.size();
    app_ref pred(m);
    pred = m.mk_const(symbol(name.str().c_str()), m.mk_bool_sort());
    SASSERT(m_context.get_scope_level() == 0);
    m_solvers.push_back(alloc(virtual_solver, *this, m_context, pred));
    return m_solvers.back();
}

void virtual_solver_factory::collect_statistics(statistics &st) const
{
    m_context.collect_statistics(st);
    st.update("time.virtual_solver.smt.total", m_check_watch.get_seconds());
    st.update("time.virtual_solver.smt.total.sat", m_check_sat_watch.get_seconds());
    st.update("time.virtual_solver.smt.total.undef", m_check_undef_watch.get_seconds());
    st.update("time.virtual_solver.proof", m_proof_watch.get_seconds());
    st.update("virtual_solver.checks", m_stats.m_num_smt_checks);
    st.update("virtual_solver.checks.sat", m_stats.m_num_sat_smt_checks);
    st.update("virtual_solver.checks.undef", m_stats.m_num_undef_smt_checks);
}
void virtual_solver_factory::reset_statistics()
{
    m_context.reset_statistics();
    m_stats.reset();
    m_check_sat_watch.reset();
    m_check_undef_watch.reset();
    m_check_watch.reset();
    m_proof_watch.reset();
}

void virtual_solver_factory::refresh()
{
    m_context.reset();
    for (unsigned i = 0, e = m_solvers.size(); i < e; ++i)
    { m_solvers [i]->refresh(); }
}

virtual_solver_factory::~virtual_solver_factory()
{
    for (unsigned i = 0, e = m_solvers.size(); i < e; ++i)
    { dealloc(m_solvers [i]); }
}

}
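Background note (not part of the commit): the "virtual solver" deleted above multiplexes several logical solvers over one smt::kernel by guarding each client's assertions with a fresh activation literal (see internalize_assertions, which asserts m_pred => assertion, and the destructor, which asserts !m_pred). The same trick can be reproduced with the public Z3 C++ API; the sketch below is an illustration of the technique only, not the removed internal classes, and the literal names echo the factory's "vsolver#" naming purely for flavor.

// Minimal sketch with z3++.h: two "virtual" solvers sharing one z3::solver.
#include <iostream>
#include "z3++.h"

int main() {
    z3::context c;
    z3::solver  s(c);                          // one shared solver

    z3::expr p1 = c.bool_const("vsolver#0");   // activation literal of solver 1
    z3::expr p2 = c.bool_const("vsolver#1");   // activation literal of solver 2
    z3::expr x  = c.int_const("x");

    s.add(z3::implies(p1, x > 0));             // assertion owned by solver 1
    s.add(z3::implies(p2, x < 0));             // assertion owned by solver 2

    z3::expr_vector a1(c); a1.push_back(p1);
    z3::expr_vector a2(c); a2.push_back(p2);
    std::cout << s.check(a1) << " " << s.check(a2) << "\n";   // each view is sat

    s.add(!p1);                                // retire solver 1, as the destructor did
    return 0;
}

Checking under the assumption literal keeps the guarded assertions active only for that query, which is what lets many short-lived solvers share one kernel without push/pop interference.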
@@ -1,154 +0,0 @@
/**
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_virtual_solver.h

Abstract:

    multi-solver view of a single smt::kernel

Author:

    Arie Gurfinkel

Notes:

--*/
#ifndef SPACER_VIRTUAL_SOLVER_H_
#define SPACER_VIRTUAL_SOLVER_H_
#include"ast/ast.h"
#include"util/params.h"
#include"solver/solver_na2as.h"
#include"smt/smt_kernel.h"
#include"smt/params/smt_params.h"
#include"util/stopwatch.h"
namespace spacer {
class virtual_solver_factory;

class virtual_solver : public solver_na2as {
    friend class virtual_solver_factory;

private:
    virtual_solver_factory &m_factory;
    ast_manager &m;
    smt::kernel &m_context;
    app_ref m_pred;

    bool m_virtual;
    expr_ref_vector m_assertions;
    unsigned m_head;
    // temporary to flatten conjunction
    expr_ref_vector m_flat;

    bool m_pushed;
    bool m_in_delay_scope;
    bool m_dump_benchmarks;
    unsigned m_dump_counter;

    proof_ref m_proof;

    virtual_solver(virtual_solver_factory &factory, smt::kernel &context, app* pred);

    bool is_aux_predicate(expr *p);
    void internalize_assertions();
    void to_smt2_benchmark(std::ostream &out,
                           smt::kernel &context,
                           unsigned num_assumptions,
                           expr * const * assumptions,
                           char const * name = "benchmarks",
                           symbol const &logic = symbol::null,
                           char const * status = "unknown",
                           char const * attributes = "");

    void refresh();

public:
    ~virtual_solver() override;
    unsigned get_num_assumptions() const override
    {
        unsigned sz = solver_na2as::get_num_assumptions();
        return m_virtual ? sz - 1 : sz;
    }
    expr* get_assumption(unsigned idx) const override
    {
        if(m_virtual) { idx++; }
        return solver_na2as::get_assumption(idx);
    }

    void get_unsat_core(ptr_vector<expr> &r) override;
    void assert_expr_core(expr *e) override;
    void collect_statistics(statistics &st) const override {}
    void get_model_core(model_ref &m) override {m_context.get_model(m);}
    proof* get_proof() override;
    std::string reason_unknown() const override
    {return m_context.last_failure_as_string();}
    void set_reason_unknown(char const *msg) override
    {m_context.set_reason_unknown(msg);}
    ast_manager& get_manager() const override {return m;}
    void get_labels(svector<symbol> &r) override;
    void set_produce_models(bool f) override;
    smt_params &fparams();
    void reset();
    expr_ref_vector cube(expr_ref_vector&, unsigned) override { return expr_ref_vector(m); }
    void set_progress_callback(progress_callback *callback) override {UNREACHABLE();}

    solver *translate(ast_manager &m, params_ref const &p) override;

    void updt_params(params_ref const &p) override;
    void collect_param_descrs(param_descrs &r) override;

protected:
    lbool check_sat_core(unsigned num_assumptions, expr *const * assumptions) override;
    void push_core() override;
    void pop_core(unsigned n) override;
};

/// multi-solver abstraction on top of a single smt::kernel
class virtual_solver_factory {
    friend class virtual_solver;
private:
    smt_params &m_fparams;
    ast_manager &m;
    smt::kernel m_context;
    /// solvers managed by this factory
    ptr_vector<virtual_solver> m_solvers;

    struct stats {
        unsigned m_num_smt_checks;
        unsigned m_num_sat_smt_checks;
        unsigned m_num_undef_smt_checks;
        stats() { reset(); }
        void reset() { memset(this, 0, sizeof(*this)); }
    };

    stats m_stats;
    stopwatch m_check_watch;
    stopwatch m_check_sat_watch;
    stopwatch m_check_undef_watch;
    stopwatch m_proof_watch;

    void refresh();

    smt_params &fparams() { return m_fparams; }

public:
    virtual_solver_factory(ast_manager &mgr, smt_params &fparams);
    virtual ~virtual_solver_factory();
    virtual_solver* mk_solver();
    void collect_statistics(statistics &st) const;
    void reset_statistics();
    void updt_params(params_ref const &p) { m_fparams.updt_params(p); }
    void collect_param_descrs(param_descrs &r) { /* empty */ }
    void set_produce_models(bool f) { m_fparams.m_model = f; }
    bool get_produce_models() { return m_fparams.m_model; }
};

}

#endif
@@ -30,7 +30,7 @@ Revision History:
#include "ast/for_each_expr.h"
#include "ast/substitution/matcher.h"
#include "ast/scoped_proof.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
#include "ast/ast_util.h"

namespace tb {

@@ -42,7 +42,7 @@ namespace datalog {
    }

    bool mk_array_blast::is_store_def(expr* e, expr*& x, expr*& y) {
        if (m.is_iff(e, x, y) || m.is_eq(e, x, y)) {
        if (m.is_eq(e, x, y)) {
            if (!a.is_store(y)) {
                std::swap(x,y);
            }

@@ -20,7 +20,7 @@ Revision History:
#include "ast/expr_abstract.h"
#include "muz/base/dl_context.h"
#include "muz/base/dl_context.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
#include "muz/transforms/dl_mk_array_eq_rewrite.h"
#include "ast/factor_equivs.h"

@@ -23,7 +23,7 @@ Revision History:
#include "muz/base/dl_context.h"
#include "ast/rewriter/expr_safe_replace.h"
#include "ast/expr_abstract.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"

namespace datalog {

@@ -24,7 +24,7 @@ Revision History:
#include "ast/rewriter/expr_safe_replace.h"
#include "tactic/generic_model_converter.h"
#include "muz/transforms/dl_mk_interp_tail_simplifier.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
#include "ast/scoped_proof.h"
#include "model/model_v2_pp.h"
@@ -32,9 +32,9 @@ namespace datalog {

    //
    // P(v) :- Q(extract[1:1]v ++ 0), R(1 ++ extract[0:0]v).
    // ->
    // ->
    // P(bv(x,y)) :- Q(bv(x,0)), R(bv(1,y)) .
    //
    //
    // Introduce P_bv:
    // P_bv(x,y) :- Q_bv(x,0), R_bv(1,y)
    // P(bv(x,y)) :- P_bv(x,y)

@@ -51,7 +51,7 @@ namespace datalog {
        bit_blast_model_converter(ast_manager& m):
            m(m),
            m_bv(m),
            m_old_funcs(m),
            m_old_funcs(m),
            m_new_funcs(m) {}

        void insert(func_decl* old_f, func_decl* new_f) {

@@ -73,7 +73,7 @@ namespace datalog {
                func_decl* q = m_old_funcs[i].get();
                func_interp* f = model->get_func_interp(p);
                if (!f) continue;
                expr_ref body(m);
                expr_ref body(m);
                unsigned arity_q = q->get_arity();
                TRACE("dl",
                      model_v2_pp(tout, *model);

@@ -87,10 +87,10 @@ namespace datalog {
                if (f) {
                    body = f->get_interp();
                    SASSERT(!f->is_partial());
                    SASSERT(body);
                    SASSERT(body);
                }
                else {
                    body = m.mk_false();
                    body = m.mk_false();
                }
                unsigned idx = 0;
                expr_ref arg(m), proj(m);

@@ -104,18 +104,18 @@ namespace datalog {
                        for (unsigned k = 0; k < sz; ++k) {
                            parameter p(k);
                            proj = m.mk_app(m_bv.get_family_id(), OP_BIT2BOOL, 1, &p, 1, &t);
                            sub.insert(m.mk_var(idx++, m.mk_bool_sort()), proj);
                            sub.insert(m.mk_var(idx++, m.mk_bool_sort()), proj);
                        }
                    }
                    else {
                        sub.insert(m.mk_var(idx++, s), arg);
                    }
                }
                sub(body);
                sub(body);
                g->set_else(body);
                model->register_decl(q, g);
            }
        }
    }
    }
    };

    class expand_mkbv_cfg : public default_rewriter_cfg {

@@ -134,10 +134,10 @@ namespace datalog {
    public:

        expand_mkbv_cfg(context& ctx):
            m_context(ctx),
            m_context(ctx),
            m(ctx.get_manager()),
            m_util(m),
            m_args(m),
            m_args(m),
            m_f_vars(m),
            m_g_vars(m),
            m_old_funcs(m),

@@ -152,8 +152,8 @@ namespace datalog {
        void set_dst(rule_set* dst) { m_dst = dst; }
        func_decl_ref_vector const& old_funcs() const { return m_old_funcs; }
        func_decl_ref_vector const& new_funcs() const { return m_new_funcs; }

        br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) {

        br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) {
            if (num == 0) {
                if (m_src->is_output_predicate(f))
                    m_dst->set_output_predicate(f);

@@ -165,9 +165,9 @@ namespace datalog {
                return BR_FAILED;
            }

            //
            //
            // f(mk_bv(args),...)
            //
            //
            m_args.reset();
            m_g_vars.reset();
            m_f_vars.reset();

@@ -191,9 +191,9 @@ namespace datalog {
                }
            }
            func_decl* g = nullptr;

            if (!m_pred2blast.find(f, g)) {

                ptr_vector<sort> domain;
                for (unsigned i = 0; i < m_args.size(); ++i) {
                    domain.push_back(m.get_sort(m_args[i].get()));

@@ -262,7 +262,7 @@ namespace datalog {
            m_params.set_bool("blast_quant", true);
            m_blaster.updt_params(m_params);
        }

        rule_set * operator()(rule_set const & source) {
            // TODO pc
            if (!m_context.xform_bit_blast()) {

@@ -270,8 +270,8 @@ namespace datalog {
            }
            rule_manager& rm = m_context.get_rule_manager();
            unsigned sz = source.get_num_rules();
            expr_ref fml(m);
            rule_set * result = alloc(rule_set, m_context);
            expr_ref fml(m);
            rule_set * result = alloc(rule_set, m_context);
            m_rewriter.m_cfg.set_src(&source);
            m_rewriter.m_cfg.set_dst(result);
            for (unsigned i = 0; !m_context.canceled() && i < sz; ++i) {

@@ -299,8 +299,8 @@ namespace datalog {
                if (!source.contains(*I))
                    result->set_output_predicate(*I);
            }

            if (m_context.get_model_converter()) {

            if (m_context.get_model_converter()) {
                generic_model_converter* fmc = alloc(generic_model_converter, m, "dl_mk_bit_blast");
                bit_blast_model_converter* bvmc = alloc(bit_blast_model_converter, m);
                func_decl_ref_vector const& old_funcs = m_rewriter.m_cfg.old_funcs();

@@ -311,7 +311,7 @@ namespace datalog {
                }
                m_context.add_model_converter(concat(bvmc, fmc));
            }

            return result;
        }
    };

@@ -326,6 +326,6 @@ namespace datalog {

    rule_set * mk_bit_blast::operator()(rule_set const & source) {
        return (*m_impl)(source);
    }
}

};
@@ -27,7 +27,7 @@ Revision History:
#include "muz/transforms/dl_mk_interp_tail_simplifier.h"
#include "ast/ast_util.h"

#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
namespace datalog {

    // -----------------------------------

@@ -46,7 +46,7 @@ namespace datalog {
    bool mk_interp_tail_simplifier::rule_substitution::unify(expr * e1, expr * e2) {
        SASSERT(m_rule);

        //we need to apply the current substitution in order to ensure the unifier
        //we need to apply the current substitution in order to ensure the unifier
        //works in an incremental way
        expr_ref e1_s(m);
        expr_ref e2_s(m);

@@ -268,7 +268,7 @@ namespace datalog {
                    if (neq) {
                        have_pair = false;
                        v[prev_pair_idx] = neq;

                        read_idx++;
                        continue;
                    }

@@ -294,7 +294,7 @@ namespace datalog {

        //bool detect_same_variable_conj_pairs

        br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result,
        br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result,
                             proof_ref & result_pr)
        {
            if (m.is_not(f) && (m.is_and(args[0]) || m.is_or(args[0]))) {

@@ -307,15 +307,15 @@ namespace datalog {
                    m_app_args.push_back(tmp);
                }
                if (m.is_and(args[0])) {
                    result = mk_or(m_app_args);
                    result = mk_or(m_app_args);
                }
                else {
                    result = mk_and(m_app_args);
                    result = mk_and(m_app_args);
                }
                return BR_REWRITE2;
            }
            if (!m.is_and(f) && !m.is_or(f)) {
                return BR_FAILED;
            if (!m.is_and(f) && !m.is_or(f)) {
                return BR_FAILED;
            }
            if (num == 0) {
                if (m.is_and(f)) {

@@ -375,7 +375,7 @@ namespace datalog {
        m_simp(ctx.get_rewriter()),
        a(m),
        m_rule_subst(ctx),
        m_tail(m),
        m_tail(m),
        m_itail_members(m),
        m_conj(m) {
        m_cfg = alloc(normalizer_cfg, m);

@@ -386,7 +386,7 @@ namespace datalog {
        dealloc(m_rw);
        dealloc(m_cfg);
    }

    void mk_interp_tail_simplifier::simplify_expr(app * a, expr_ref& res)
    {

@@ -537,7 +537,7 @@ namespace datalog {
        simplify_expr(itail.get(), simp_res);

        modified |= itail.get() != simp_res.get();

        if (m.is_false(simp_res)) {
            TRACE("dl", r->display(m_context, tout << "rule is infeasible\n"););
            return false;

@@ -568,7 +568,7 @@ namespace datalog {

        rule_ref pro_var_eq_result(m_context.get_rule_manager());
        if (propagate_variable_equivalences(res, pro_var_eq_result)) {
            SASSERT(rule_counter().get_max_rule_var(*r.get())==0 ||
            SASSERT(rule_counter().get_max_rule_var(*r.get())==0 ||
                    rule_counter().get_max_rule_var(*r.get()) > rule_counter().get_max_rule_var(*pro_var_eq_result.get()));
            r = pro_var_eq_result;
            goto start;

@@ -607,8 +607,8 @@ namespace datalog {
        rule_set * res = alloc(rule_set, m_context);
        if (transform_rules(source, *res)) {
            res->inherit_predicates(source);
            TRACE("dl",
                  source.display(tout);
            TRACE("dl",
                  source.display(tout);
                  res->display(tout););
        } else {
            dealloc(res);

@@ -616,6 +616,5 @@ namespace datalog {
        }
        return res;
    }

};

};
@@ -11,7 +11,7 @@ Abstract:

Author:

    Ken McMillan
    Ken McMillan
    Andrey Rybalchenko
    Nikolaj Bjorner (nbjorner) 2013-04-02

@@ -23,13 +23,12 @@ Revision History:
#include "muz/base/dl_context.h"
#include "ast/rewriter/expr_safe_replace.h"
#include "ast/expr_abstract.h"
#include "muz/base/fixedpoint_params.hpp"

namespace datalog {

    // model converter:
    // model converter:
    // Given model for P^(x, y, i, a[i])
    // create model: P(x,y,a) == forall i . P^(x,y,i,a[i])
    // requires substitution and list of bound variables.

@@ -55,7 +54,7 @@ namespace datalog {

        void display(std::ostream& out) override { display_add(out, m); }

        void get_units(obj_map<expr, bool>& units) override { units.reset(); }
        void get_units(obj_map<expr, bool>& units) override { units.reset(); }

        void insert(func_decl* old_p, func_decl* new_p, expr_ref_vector& sub, sort_ref_vector& sorts, svector<bool> const& bound) {
            m_old_funcs.push_back(old_p);

@@ -74,7 +73,7 @@ namespace datalog {
                sort_ref_vector const& sorts = m_sorts[i];
                svector<bool> const& is_bound = m_bound[i];
                func_interp* f = old_model->get_func_interp(p);
                expr_ref body(m);
                expr_ref body(m);
                unsigned arity_q = q->get_arity();
                SASSERT(0 < p->get_arity());
                func_interp* g = alloc(func_interp, m, arity_q);

@@ -82,7 +81,7 @@ namespace datalog {
                if (f) {
                    body = f->get_interp();
                    SASSERT(!f->is_partial());
                    SASSERT(body);
                    SASSERT(body);
                }
                else {
                    expr_ref_vector args(m);

@@ -94,7 +93,7 @@ namespace datalog {
                // Create quantifier wrapper around body.

                TRACE("dl", tout << mk_pp(body, m) << "\n";);
                // 1. replace variables by the compound terms from
                // 1. replace variables by the compound terms from
                //    the original predicate.
                expr_safe_replace rep(m);
                for (unsigned i = 0; i < sub.size(); ++i) {

@@ -121,7 +120,7 @@ namespace datalog {
                        _free.push_back(consts.back());
                    }
                }
                rep(body);
                rep(body);
                rep.reset();

                TRACE("dl", tout << mk_pp(body, m) << "\n";);

@@ -130,18 +129,18 @@ namespace datalog {
                body = m.mk_forall(names.size(), bound_sorts.c_ptr(), names.c_ptr(), body);

                TRACE("dl", tout << mk_pp(body, m) << "\n";);
                // 4. replace remaining constants by variables.
                // 4. replace remaining constants by variables.
                for (unsigned i = 0; i < _free.size(); ++i) {
                    rep.insert(_free[i].get(), m.mk_var(i, m.get_sort(_free[i].get())));
                }
                rep(body);
                rep(body);
                g->set_else(body);
                TRACE("dl", tout << mk_pp(body, m) << "\n";);

                new_model->register_decl(q, g);
            }
            }
            old_model = new_model;
        }
    }
    };

    mk_quantifier_abstraction::mk_quantifier_abstraction(

@@ -154,7 +153,7 @@ namespace datalog {
        m_mc(nullptr) {
    }

    mk_quantifier_abstraction::~mk_quantifier_abstraction() {
    mk_quantifier_abstraction::~mk_quantifier_abstraction() {
    }

    func_decl* mk_quantifier_abstraction::declare_pred(rule_set const& rules, rule_set& dst, func_decl* old_p) {

@@ -178,7 +177,7 @@ namespace datalog {
        func_decl* new_p = nullptr;
        if (!m_old2new.find(old_p, new_p)) {
            expr_ref_vector sub(m), vars(m);
            svector<bool> bound;
            svector<bool> bound;
            sort_ref_vector domain(m), sorts(m);
            expr_ref arg(m);
            for (unsigned i = 0; i < sz; ++i) {

@@ -208,7 +207,7 @@ namespace datalog {
                    bound.push_back(false);
                    sub.push_back(arg);
                    sorts.push_back(s0);
                }
            }
            SASSERT(old_p->get_range() == m.mk_bool_sort());
            new_p = m.mk_func_decl(old_p->get_name(), domain.size(), domain.c_ptr(), old_p->get_range());
            m_refs.push_back(new_p);

@@ -242,12 +241,12 @@ namespace datalog {
            }
            args.push_back(arg);
        }
        TRACE("dl",
        TRACE("dl",
              tout << mk_pp(new_p, m) << "\n";
              for (unsigned i = 0; i < args.size(); ++i) {
                  tout << mk_pp(args[i].get(), m) << "\n";
              });
        return app_ref(m.mk_app(new_p, args.size(), args.c_ptr()), m);
        return app_ref(m.mk_app(new_p, args.size(), args.c_ptr()), m);
    }

    app_ref mk_quantifier_abstraction::mk_tail(rule_set const& rules, rule_set& dst, app* p) {

@@ -272,7 +271,7 @@ namespace datalog {
        for (unsigned i = 0; i < sz; ++i) {
            arg = ps->get_arg(i);
            sort* s = m.get_sort(arg);
            bool is_pattern = false;
            bool is_pattern = false;
            while (a.is_array(s)) {
                is_pattern = true;
                unsigned arity = get_array_arity(s);

@@ -304,9 +303,9 @@ namespace datalog {
        ptr_vector<expr> args2;
        args2.push_back(arg);
        args2.append(num_args, args);
        return a.mk_select(args2.size(), args2.c_ptr());
        return a.mk_select(args2.size(), args2.c_ptr());
    }

    rule_set * mk_quantifier_abstraction::operator()(rule_set const & source) {
        if (!m_ctx.quantify_arrays()) {
            return nullptr;

@@ -334,10 +333,10 @@ namespace datalog {
        }
        rule_set * result = alloc(rule_set, m_ctx);

        for (unsigned i = 0; i < sz; ++i) {
        for (unsigned i = 0; i < sz; ++i) {
            tail.reset();
            rule & r = *source.get_rule(i);
            TRACE("dl", r.display(m_ctx, tout); );
            TRACE("dl", r.display(m_ctx, tout); );
            unsigned cnt = vc.get_max_rule_var(r)+1;
            unsigned utsz = r.get_uninterpreted_tail_size();
            unsigned tsz = r.get_tail_size();

@@ -352,8 +351,8 @@ namespace datalog {
            proof_ref pr(m);
            rm.mk_rule(fml, pr, *result, r.name());
            TRACE("dl", result->last()->display(m_ctx, tout););
        }

        }

        // proof converter: proofs are not necessarily preserved using this transformation.

        if (m_old2new.empty()) {

@@ -371,5 +370,3 @@ namespace datalog {

};
@@ -180,7 +180,7 @@ namespace datalog {
            }
            m_terms[n] = e;
            visited.mark(e);
            if (m.is_eq(e, e1, e2) || m.is_iff(e, e1, e2)) {
            if (m.is_eq(e, e1, e2)) {
                m_uf.merge(e1->get_id(), e2->get_id());
            }
            if (is_app(e)) {

@@ -18,7 +18,7 @@ Revision History:
    Added linear_inline 2012-9-10 (nbjorner)

    Disable inliner for quantified rules 2012-10-31 (nbjorner)

Notes:

Resolution transformation (resolve):

@@ -27,7 +27,7 @@ Resolution transformation (resolve):
    --------------------------------------------------
             P(x) :- R(z), phi(x,y), psi(y,z)

    Proof converter:
    Proof converter:

       replace assumption (*) by rule and upper assumptions.

@@ -37,9 +37,9 @@ Subsumption transformation (remove rule):
        P(x) :- Q(y), phi(x,y)      Rules
    ---------------------------------
        Rules

    Model converter:

    Model converter:

       P(x) := P(x) or (exists y . Q(y) & phi(x,y))

@@ -52,7 +52,7 @@ Subsumption transformation (remove rule):
#include "ast/rewriter/rewriter.h"
#include "ast/rewriter/rewriter_def.h"
#include "muz/transforms/dl_mk_rule_inliner.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"

namespace datalog {

@@ -67,15 +67,15 @@ namespace datalog {
        unsigned var_cnt = std::max(vc.get_max_rule_var(tgt), vc.get_max_rule_var(src))+1;
        m_subst.reset();
        m_subst.reserve(2, var_cnt);

        m_ready = m_unif(tgt.get_tail(tgt_idx), src.get_head(), m_subst);

        if (m_ready) {
            m_deltas[0] = 0;
            m_deltas[1] = var_cnt;
            TRACE("dl",
                  output_predicate(m_context, src.get_head(), tout << "unify rules ");
                  output_predicate(m_context, tgt.get_head(), tout << "\n");
            TRACE("dl",
                  output_predicate(m_context, src.get_head(), tout << "unify rules ");
                  output_predicate(m_context, tgt.get_head(), tout << "\n");
                  tout << "\n";);
        }
        return m_ready;

@@ -90,7 +90,7 @@ namespace datalog {
    }

    void rule_unifier::apply(
        rule const& r, bool is_tgt, unsigned skipped_index,
        rule const& r, bool is_tgt, unsigned skipped_index,
        app_ref_vector& res, svector<bool>& res_neg) {
        unsigned rule_len = r.get_tail_size();
        for (unsigned i = 0; i < rule_len; i++) {

@@ -127,7 +127,7 @@ namespace datalog {
              );

        if (m_normalize) {
            m_rm.fix_unbound_vars(res, true);
            m_rm.fix_unbound_vars(res, true);
            if (m_interp_simplifier.transform_rule(res.get(), simpl_rule)) {
                res = simpl_rule;
                return true;

@@ -150,8 +150,8 @@ namespace datalog {
        for (unsigned i = 0; i < sorts.size(); ++i) {
            v = m.mk_var(i, sorts[i]);
            m_subst.apply(2, m_deltas, expr_offset(v, is_tgt?0:1), w);
            result.push_back(w);
        }
            result.push_back(w);
        }
        return result;
    }

@@ -184,7 +184,7 @@ namespace datalog {
                expr_ref_vector s2 = m_unifier.get_rule_subst(src, false);
                datalog::resolve_rule(m_rm, tgt, src, tail_index, s1, s2, *res.get());
            }
            return true;
            return true;
        }
        else {
            TRACE("dl", res->display(m_context, tout << "interpreted tail is unsat\n"););

@@ -240,12 +240,12 @@ namespace datalog {
            return false;
        }

        //
        // these conditions are optional, they avoid possible exponential increase
        //
        // these conditions are optional, they avoid possible exponential increase
        // in the size of the problem
        //
        //

        return
        return
            //m_head_pred_non_empty_tails_ctr.get(pred)<=1
            m_head_pred_ctr.get(pred) <= 1
            || (m_tail_pred_ctr.get(pred) <= 1 && m_head_pred_ctr.get(pred) <= 4)

@@ -253,7 +253,7 @@ namespace datalog {
    }

    /** Caller has to dealloc the returned object */
    rule_set * mk_rule_inliner::create_allowed_rule_set(rule_set const & orig)
    rule_set * mk_rule_inliner::create_allowed_rule_set(rule_set const & orig)
    {
        rule_set * res = alloc(rule_set, m_context);
        for (rule * r : orig) {

@@ -268,7 +268,7 @@ namespace datalog {

    /**
    Try to make the set of inlined predicates acyclic by forbidding inlining of one
    predicate from each strongly connected component. Return true if we did forbide some
    predicate from each strongly connected component. Return true if we did forbide some
    predicate, and false if the set of rules is already acyclic.
    */
    bool mk_rule_inliner::forbid_preds_from_cycles(rule_set const & r)

@@ -276,7 +276,7 @@ namespace datalog {
        SASSERT(r.is_closed());

        bool something_forbidden = false;

        const rule_stratifier::comp_vector& comps = r.get_stratifier().get_strats();

        for (rule_stratifier::item_set * stratum : comps) {

@@ -293,12 +293,12 @@ namespace datalog {
        return something_forbidden;
    }

    bool mk_rule_inliner::forbid_multiple_multipliers(const rule_set & orig,
    bool mk_rule_inliner::forbid_multiple_multipliers(const rule_set & orig,
                                                      rule_set const & proposed_inlined_rules) {

        bool something_forbidden = false;

        const rule_stratifier::comp_vector& comps =
        const rule_stratifier::comp_vector& comps =
            proposed_inlined_rules.get_stratifier().get_strats();

        for (rule_stratifier::item_set * stratum : comps) {

@@ -332,7 +332,7 @@ namespace datalog {
                }
                else {
                    is_multi_head_pred = true;
                    m_head_pred_ctr.get(head_pred) =
                    m_head_pred_ctr.get(head_pred) =
                        m_head_pred_ctr.get(head_pred)*tail_pred_head_cnt;
                }
            }
@ -379,7 +379,7 @@ namespace datalog {
|
|||
void mk_rule_inliner::plan_inlining(rule_set const & orig)
|
||||
{
|
||||
count_pred_occurrences(orig);
|
||||
|
||||
|
||||
scoped_ptr<rule_set> candidate_inlined_set = create_allowed_rule_set(orig);
|
||||
while (forbid_preds_from_cycles(*candidate_inlined_set)) {
|
||||
candidate_inlined_set = create_allowed_rule_set(orig);
|
||||
|
|
@ -458,8 +458,8 @@ namespace datalog {
|
|||
rule_ref r(rl, m_rm);
|
||||
func_decl * pred = r->get_decl();
|
||||
|
||||
// if inlining is allowed, then we are eliminating
|
||||
// this relation through inlining,
|
||||
// if inlining is allowed, then we are eliminating
|
||||
// this relation through inlining,
|
||||
// so we don't add its rules to the result
|
||||
|
||||
something_done |= !inlining_allowed(orig, pred) && transform_rule(orig, r, tgt);
|
||||
|
|
@ -472,14 +472,14 @@ namespace datalog {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return something_done;
|
||||
}
|
||||
|
||||
/**
|
||||
Check whether rule r is oriented in a particular ordering.
|
||||
This is to avoid infinite cycle of inlining in the eager inliner.
|
||||
|
||||
|
||||
Out ordering is lexicographic, comparing atoms first on stratum they are in,
|
||||
then on arity and then on ast ID of their func_decl.
|
||||
*/
|
||||
|
|
@ -488,7 +488,7 @@ namespace datalog {
|
|||
unsigned head_strat = strat.get_predicate_strat(head_pred);
|
||||
unsigned head_arity = head_pred->get_arity();
|
||||
unsigned pt_len = r->get_positive_tail_size();
|
||||
for (unsigned ti=0; ti < pt_len; ++ti) {
|
||||
for (unsigned ti=0; ti < pt_len; ++ti) {
|
||||
func_decl * pred = r->get_decl(ti);
|
||||
unsigned pred_strat = strat.get_predicate_strat(pred);
|
||||
SASSERT(pred_strat <= head_strat);
|
||||
|
|
@ -516,7 +516,7 @@ namespace datalog {
|
|||
|
||||
unsigned pt_len = r->get_positive_tail_size();
|
||||
for (unsigned ti = 0; ti < pt_len; ++ti) {
|
||||
|
||||
|
||||
func_decl * pred = r->get_decl(ti);
|
||||
if (pred == head_pred || m_preds_with_facts.contains(pred)) { continue; }
|
||||
|
||||
|
|
@ -532,7 +532,7 @@ namespace datalog {
|
|||
}
|
||||
else {
|
||||
inlining_candidate = nullptr;
|
||||
|
||||
|
||||
for (unsigned ri = 0; ri < rule_cnt; ++ri) {
|
||||
rule * pred_rule = pred_rules[ri];
|
||||
if (!m_unifier.unify_rules(*r, ti, *pred_rule)) {
|
||||
|
|
@ -540,9 +540,9 @@ namespace datalog {
|
|||
continue;
|
||||
}
|
||||
if (inlining_candidate != nullptr) {
|
||||
// We have two rules that can be inlined into the current
|
||||
// We have two rules that can be inlined into the current
|
||||
// tail predicate. In this situation we don't do inlinning
|
||||
// on this tail atom, as we don't want the overall number
|
||||
// on this tail atom, as we don't want the overall number
|
||||
// of rules to increase.
|
||||
goto process_next_tail;
|
||||
}
|
||||
|
|
@ -608,14 +608,14 @@ namespace datalog {

P(1,x) :- P(1,z), phi(x,y), psi(y,z)

whenever P(0,x) is not unifiable with the
body of the rule where it appears (P(1,z))
and P(0,x) is unifiable with at most one (?)
other rule (and it does not occur negatively).
*/
bool mk_rule_inliner::visitor::operator()(expr* e) {
m_unifiers.append(m_positions.find(e));
TRACE("dl",
tout << "unifier: " << (m_unifiers.empty()?0:m_unifiers.back());
tout << " num unifiers: " << m_unifiers.size();
tout << " num positions: " << m_positions.find(e).size() << "\n";
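The fragment above (and the num_tail_unifiers check later in inline_linear) relies on a body literal having exactly one candidate rule whose head unifies with it. A minimal standalone sketch of that counting step, with a caller-supplied unification test and invented names, could look like this:

#include <cstddef>
#include <functional>
#include <vector>

// Sketch only: returns the index of the unique element of `heads` that unifies
// with `body`, or -1 when there are zero or at least two such elements.
template <typename Atom>
int unique_unifier(Atom const& body,
                   std::vector<Atom> const& heads,
                   std::function<bool(Atom const&, Atom const&)> const& unifies) {
    int found = -1;
    for (std::size_t i = 0; i < heads.size(); ++i) {
        if (!unifies(heads[i], body))
            continue;
        if (found != -1)
            return -1;  // a second unifier: inlining here could duplicate rules
        found = static_cast<int>(i);
    }
    return found;
}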
@ -640,7 +640,7 @@ namespace datalog {
}

unsigned_vector const& mk_rule_inliner::visitor::del_position(expr* e, unsigned j) {
obj_map<expr, unsigned_vector>::obj_map_entry * et = m_positions.find_core(e);
SASSERT(et && et->get_data().m_value.contains(j));
et->get_data().m_value.erase(j);
return et->get_data().m_value;

@ -654,7 +654,7 @@ namespace datalog {
m_head_visitor.add_position(head, i);
m_head_index.insert(head);
m_pinned.push_back(r);

if (source.is_output_predicate(headd) ||
m_preds_with_facts.contains(headd)) {
can_remove.set(i, false);

@ -667,10 +667,10 @@ namespace datalog {
m_tail_visitor.add_position(tail, i);
m_tail_index.insert(tail);
}
bool can_exp =
tl_sz == 1
&& r->get_positive_tail_size() == 1
&& !m_preds_with_facts.contains(r->get_decl(0))
&& !source.is_output_predicate(r->get_decl(0));
can_expand.set(i, can_exp);
}

@ -682,14 +682,14 @@ namespace datalog {
for (unsigned j = 0; j < tl_sz; ++j) {
app* tail = r->get_tail(j);
m_tail_visitor.del_position(tail, i);
}
}
}


#define PRT(_x_) ((_x_)?"T":"F")

bool mk_rule_inliner::inline_linear(scoped_ptr<rule_set>& rules) {
bool done_something = false;
unsigned sz = rules->get_num_rules();

m_head_visitor.reset(sz);

@ -704,7 +704,7 @@ namespace datalog {
acc.push_back(rules->get_rule(i));
}

// set up unification index.
svector<bool>& can_remove = m_head_visitor.can_remove();
svector<bool>& can_expand = m_head_visitor.can_expand();

@ -729,7 +729,7 @@ namespace datalog {

svector<bool> valid;
valid.reset();
valid.resize(sz, true);

bool allow_branching = m_context.get_params().xform_inline_linear_branch();

@ -738,9 +738,9 @@ namespace datalog {
while (true) {

rule_ref r(acc[i].get(), m_rm);

TRACE("dl", r->display(m_context, tout << "processing: " << i << "\n"););

if (!valid.get(i)) {
TRACE("dl", tout << "invalid: " << i << "\n";);
break;

@ -762,9 +762,9 @@ namespace datalog {
TRACE("dl", tout << PRT(can_remove.get(j)) << " " << PRT(valid.get(j)) << " " << PRT(i != j) << "\n";);
break;
}

rule* r2 = acc[j].get();

// check that the head of r2 only unifies with this single body position.
TRACE("dl", output_predicate(m_context, r2->get_head(), tout << "unify head: "); tout << "\n";);
m_tail_visitor.reset();

@ -776,7 +776,7 @@ namespace datalog {
TRACE("dl", tout << "too many tails " << num_tail_unifiers << "\n";);
break;
}

rule_ref rl_res(m_rm);
if (!try_to_inline_rule(*r.get(), *r2, 0, rl_res)) {
TRACE("dl", r->display(m_context, tout << "inlining failed\n"); r2->display(m_context, tout); );

@ -787,12 +787,12 @@ namespace datalog {

del_rule(r, i);
add_rule(*rules, rl_res.get(), i);

r = rl_res;
acc[i] = r.get();
can_expand.set(i, can_expand.get(j));

if (num_tail_unifiers == 1) {
TRACE("dl", tout << "setting invalid: " << j << "\n";);
valid.set(j, false);

@ -815,22 +815,22 @@ namespace datalog {
res->inherit_predicates(*rules);
TRACE("dl", res->display(tout););
rules = res.detach();
}
return done_something;
}

rule_set * mk_rule_inliner::operator()(rule_set const & source) {

bool something_done = false;
ref<horn_subsume_model_converter> hsmc;

if (source.get_num_rules() == 0) {
return nullptr;
}

for (rule const* r : source)
if (has_quantifier(*r))
return nullptr;

if (m_context.get_model_converter()) {
hsmc = alloc(horn_subsume_model_converter, m);

@ -841,15 +841,15 @@ namespace datalog {

if (m_context.get_params().xform_inline_eager()) {
TRACE("dl", source.display(tout << "before eager inlining\n"););
plan_inlining(source);
something_done = transform_rules(source, *res);
VERIFY(res->close()); //this transformation doesn't break the negation stratification
// try eager inlining
if (do_eager_inlining(res)) {
something_done = true;
}
TRACE("dl", res->display(tout << "after eager inlining\n"););
}
if (something_done) {
res->inherit_predicates(source);
}
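The accessors used above (xform_inline_eager, xform_inline_linear_branch) are generated from fixedpoint parameters. As a hedged usage sketch, assuming the z3::fixedpoint wrapper from z3++.h and parameter keys inferred from those accessor names (verify the exact names with `z3 -p` for your build), the transforms can be toggled from client code like this:

#include "z3++.h"

// Sketch only: disables eager inlining and branch inlining, keeps linear inlining.
void configure_inlining(z3::context& c, z3::fixedpoint& fp) {
    z3::params p(c);
    p.set("xform.inline_eager", false);          // assumed key for xform_inline_eager()
    p.set("xform.inline_linear", true);          // assumed key for the linear inliner
    p.set("xform.inline_linear_branch", false);  // assumed key for xform_inline_linear_branch()
    fp.set(p);
}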
@ -870,6 +870,5 @@ namespace datalog {

return res.detach();
}

};
@ -18,7 +18,7 @@ Revision History:

#include "muz/transforms/dl_mk_scale.h"
#include "muz/base/dl_context.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"

namespace datalog {

@ -37,7 +37,7 @@ namespace datalog {
m_trail.push_back(new_f);
m_new2old.insert(new_f, old_f);
}

void get_units(obj_map<expr, bool>& units) override { units.reset(); }

void operator()(model_ref& md) override {

@ -74,7 +74,7 @@ namespace datalog {
old_model->register_decl(old_p, old_fi);
}
}

// register values that have not been scaled.
unsigned sz = md->get_num_constants();
for (unsigned i = 0; i < sz; ++i) {

@ -111,12 +111,12 @@ namespace datalog {
m_ctx(ctx),
a(m),
m_trail(m),
m_eqs(m) {
}

mk_scale::~mk_scale() {
}

rule_set * mk_scale::operator()(rule_set const & source) {
if (!m_ctx.scale()) {
return nullptr;

@ -135,7 +135,7 @@ namespace datalog {
}
m_mc = smc.get();

for (unsigned i = 0; i < sz; ++i) {
rule & r = *source.get_rule(i);
unsigned utsz = r.get_uninterpreted_tail_size();
unsigned tsz = r.get_tail_size();

@ -157,10 +157,10 @@ namespace datalog {
tail.push_back(a.mk_gt(m.mk_var(num_vars, a.mk_real()), a.mk_numeral(rational(0), false)));
neg.resize(tail.size(), false);
new_rule = rm.mk(new_pred, tail.size(), tail.c_ptr(), neg.c_ptr(), r.name(), true);
result->add_rule(new_rule);
if (source.is_output_predicate(r.get_decl())) {
result->set_output_predicate(new_rule->get_decl());
}
}
TRACE("dl", result->display(tout););
if (m_mc) {
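The rule built above gets an extra tail literal asserting that a fresh real variable is strictly positive; the linearize calls in the next hunk suggest (this is an inference, not stated in the diff) that interpreted constants are multiplied by that variable. A rough sketch of the resulting shape, using the public z3++.h API and made-up constants rather than the internal rule_manager:

#include "z3++.h"
#include <iostream>

int main() {
    z3::context c;
    z3::expr x     = c.real_const("x");
    z3::expr sigma = c.real_const("sigma");  // stands in for the fresh m.mk_var(num_vars, a.mk_real())

    z3::expr original = x <= 3;                           // an interpreted tail literal
    z3::expr scaled   = (x <= 3 * sigma) && (sigma > 0);  // scaled literal plus the positivity guard

    std::cout << original << "\n" << scaled << "\n";
    return 0;
}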
@ -227,7 +227,7 @@ namespace datalog {
a.is_lt(e) || a.is_gt(e)) {
expr_ref_vector args(m);
for (unsigned i = 0; i < ap->get_num_args(); ++i) {
args.push_back(linearize(sigma_idx, ap->get_arg(i)));
}
result = m.mk_app(ap->get_decl(), args.size(), args.c_ptr());
}
@ -24,7 +24,7 @@ Revision History:
#include "ast/rewriter/rewriter.h"
#include "ast/rewriter/rewriter_def.h"
#include "muz/transforms/dl_mk_subsumption_checker.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"
#include "tactic/generic_model_converter.h"


@ -39,8 +39,8 @@ namespace datalog {

bool mk_subsumption_checker::is_total_rule(const rule * r) {
if (r->get_tail_size() != 0) {
return false;
}

unsigned pt_len = r->get_positive_tail_size();

@ -113,7 +113,7 @@ namespace datalog {
}

bool mk_subsumption_checker::transform_rule(rule * r,
rule_subsumption_index& subs_index, rule_ref & res)
{
unsigned u_len = r->get_uninterpreted_tail_size();

@ -133,7 +133,7 @@ namespace datalog {
if(m_total_relations.contains(tail_atom->get_decl())
|| subs_index.is_subsumed(tail_atom)) {
if(neg) {
// the rule contains a negated total relation, which makes it
// unsatisfiable, so the rule can be removed
return false;
}

@ -143,8 +143,8 @@ namespace datalog {
}
}
if(!neg && head.get()==tail_atom) {
// the rule contains its head positively in the tail, therefore
// it will never add any new facts to the relation, so it
// can be removed
return false;
}
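As a hedged, self-contained sketch of the pruning logic in this and the previous hunk (it simplifies atoms to bare predicate names and ignores the subsumption index, so it is an approximation, not the Z3 code):

#include <set>
#include <string>
#include <vector>

struct literal {
    std::string pred;   // predicate name
    bool        negated;
};

// Returns false when the rule can be deleted outright; otherwise removes
// redundant positive literals over total relations from `tail` in place.
static bool prune_tail(std::string const& head_pred,
                       std::set<std::string> const& total_preds,
                       std::vector<literal>& tail) {
    std::vector<literal> kept;
    for (literal const& lit : tail) {
        if (total_preds.count(lit.pred)) {
            if (lit.negated)
                return false;   // negated total relation: the rule is unsatisfiable
            continue;           // positive total relation: the literal is redundant
        }
        if (!lit.negated && lit.pred == head_pred)
            return false;       // head occurs positively in the tail: the rule adds nothing
        kept.push_back(lit);
    }
    tail.swap(kept);
    return true;
}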
@ -197,9 +197,9 @@ namespace datalog {
if (m_total_relations.contains(head_pred)) {
if (!orig.is_output_predicate(head_pred) ||
total_relations_with_included_rules.contains(head_pred)) {
// We just skip definitions of total non-output relations, as
// we'll eliminate them from the problem.
// We also skip rules of total output relations for which we have
// already output the rule which implies their totality.
modified = true;
continue;

@ -286,7 +286,7 @@ namespace datalog {
obj_hashtable<app> * head_store;
if(m_ground_unconditional_rule_heads.find(pred, head_store)) {
// Some relations may receive facts from ground unconditional rules.
// We scanned for those earlier, so now we check whether they give a
// better estimate of the relation size.

unsigned gnd_rule_cnt = head_store->size();

@ -334,7 +334,7 @@ namespace datalog {

rule_set * mk_subsumption_checker::operator()(rule_set const & source) {
// TODO mc
if (!m_context.get_params ().xform_subsumption_checker())
return nullptr;

m_have_new_total_rule = false;

@ -366,6 +366,5 @@ namespace datalog {

return res;
}

};
@ -35,7 +35,7 @@ Revision History:

#include "muz/transforms/dl_mk_scale.h"
#include "muz/transforms/dl_mk_array_eq_rewrite.h"
#include "muz/transforms/dl_mk_array_instantiation.h"
#include "muz/base/fixedpoint_params.hpp"
#include "muz/base/fp_params.hpp"

namespace datalog {

@ -72,7 +72,7 @@ namespace datalog {
transf.register_plugin(alloc(datalog::mk_rule_inliner, ctx, 34970));
transf.register_plugin(alloc(datalog::mk_coi_filter, ctx, 34960));
transf.register_plugin(alloc(datalog::mk_interp_tail_simplifier, ctx, 34950));

if (ctx.get_params().datalog_subsumption()) {
transf.register_plugin(alloc(datalog::mk_subsumption_checker, ctx, 34940));
transf.register_plugin(alloc(datalog::mk_rule_inliner, ctx, 34930));