diff --git a/src/ast/ast.h b/src/ast/ast.h index ce193e8b1..b6b71c6a3 100644 --- a/src/ast/ast.h +++ b/src/ast/ast.h @@ -1744,6 +1744,14 @@ public: arity, domain); } + func_decl * mk_const_decl(const char* name, sort * s) { + return mk_func_decl(symbol(name), static_cast<unsigned>(0), nullptr, s); + } + + func_decl * mk_const_decl(std::string const& name, sort * s) { + return mk_func_decl(symbol(name.c_str()), static_cast<unsigned>(0), nullptr, s); + } + func_decl * mk_const_decl(symbol const & name, sort * s) { return mk_func_decl(name, static_cast<unsigned>(0), nullptr, s); } @@ -1810,6 +1818,14 @@ public: return mk_const(mk_const_decl(name, s)); } + app * mk_const(std::string const & name, sort * s) { + return mk_const(mk_const_decl(name, s)); + } + + app * mk_const(char const* name, sort * s) { + return mk_const(mk_const_decl(name, s)); + } + func_decl * mk_fresh_func_decl(symbol const & prefix, symbol const & suffix, unsigned arity, sort * const * domain, sort * range); @@ -2150,6 +2166,23 @@ public: return n > 0 && get_sort(p->get_arg(n - 1)) != m_proof_sort; } expr * get_fact(proof const * p) const { SASSERT(is_proof(p)); SASSERT(has_fact(p)); return p->get_arg(p->get_num_args() - 1); } + + class proof_parents { + ast_manager& m; + proof * m_proof; + public: + proof_parents(ast_manager& m, proof * p): m(m), m_proof(p) {} + proof * const * begin() const { return (proof* const*)(m_proof->begin()); } + proof * const * end() const { + unsigned n = m_proof->get_num_args(); + return (proof* const*)(begin() + (m.has_fact(m_proof) ? 
n - 1 : n)); + } + }; + + proof_parents get_parents(proof* p) { + return proof_parents(*this, p); + } + unsigned get_num_parents(proof const * p) const { SASSERT(is_proof(p)); unsigned n = p->get_num_args(); diff --git a/src/muz/spacer/spacer_context.cpp b/src/muz/spacer/spacer_context.cpp index 5165940d8..6726bb15d 100644 --- a/src/muz/spacer/spacer_context.cpp +++ b/src/muz/spacer/spacer_context.cpp @@ -1500,11 +1500,9 @@ void pred_transformer::mk_assumptions(func_decl* head, expr* fml, expr_ref_vector& result) { expr_ref tmp1(m), tmp2(m); - obj_map<expr, datalog::rule const*>::iterator it = m_tag2rule.begin(), - end = m_tag2rule.end(); - for (; it != end; ++it) { - expr* tag = it->m_key; - datalog::rule const* r = it->m_value; + for (auto& kv : m_tag2rule) { + expr* tag = kv.m_key; + datalog::rule const* r = kv.m_value; if (!r) { continue; } find_predecessors(*r, m_predicates); for (unsigned i = 0; i < m_predicates.size(); i++) { @@ -2338,7 +2336,6 @@ void context::init_rules(datalog::rule_set& rules, decl2rel& rels) } // Initialize use list dependencies - // for (auto it = rels.begin(), end = rels.end(); it != end; ++it) { for (auto &entry : rels) { func_decl* pred = entry.m_key; pred_transformer* pt = entry.m_value, *pt_user = nullptr; @@ -2464,14 +2461,13 @@ bool context::validate() get_level_property(m_inductive_lvl, refs, rs); inductive_property ex(m, mc, rs); ex.to_model(model); - decl2rel::iterator it = m_rels.begin(), end = m_rels.end(); var_subst vs(m, false); - for (; it != end; ++it) { - ptr_vector<datalog::rule> const& rules = it->m_value->rules(); - TRACE ("spacer", tout << "PT: " << it->m_value->head ()->get_name ().str () + for (auto& kv : m_rels) { + ptr_vector<datalog::rule> const& rules = kv.m_value->rules(); + TRACE ("spacer", tout << "PT: " << kv.m_value->head ()->get_name ().str () << "\n";); - for (unsigned i = 0; i < rules.size(); ++i) { - datalog::rule& r = *rules[i]; + for (auto* rp : rules) { + datalog::rule& r = *rp; TRACE ("spacer", get_datalog_context (). 
@@ -2603,11 +2599,9 @@ void context::init_lemma_generalizers() } void context::get_level_property(unsigned lvl, expr_ref_vector& res, - vector<relation_info>& rs) const -{ - decl2rel::iterator it = m_rels.begin(), end = m_rels.end(); - for (; it != end; ++it) { - pred_transformer* r = it->m_value; + vector<relation_info>& rs) const { + for (auto const& kv : m_rels) { + pred_transformer* r = kv.m_value; if (r->head() == m_query_pred) { continue; } @@ -2619,12 +2613,9 @@ void context::get_level_property(unsigned lvl, expr_ref_vector& res, } } -void context::simplify_formulas() -{ - decl2rel::iterator it = m_rels.begin(), end = m_rels.end(); - for (; it != end; ++it) { - pred_transformer* r = it->m_value; - r->simplify_formulas(); +void context::simplify_formulas() { + for (auto& kv : m_rels) { + kv.m_value->simplify_formulas(); } } @@ -3549,18 +3540,17 @@ bool context::propagate(unsigned min_prop_lvl, tout << "In full propagation\n";); bool all_propagated = true; - decl2rel::iterator it = m_rels.begin(), end = m_rels.end(); - for (; it != end; ++it) { + for (auto & kv : m_rels) { checkpoint(); - pred_transformer& r = *it->m_value; + pred_transformer& r = *kv.m_value; all_propagated = r.propagate_to_next_level(lvl) && all_propagated; } //CASSERT("spacer", check_invariant(lvl)); if (all_propagated) { - for (it = m_rels.begin(); it != end; ++it) { + for (auto& kv : m_rels) { checkpoint (); - pred_transformer& r = *it->m_value; + pred_transformer& r = *kv.m_value; r.propagate_to_infinity (lvl); } if (lvl <= max_prop_lvl) { @@ -3793,9 +3783,8 @@ void context::collect_statistics(statistics& st) const m_pool1->collect_statistics(st); m_pool2->collect_statistics(st); - decl2rel::iterator it = m_rels.begin(), end = m_rels.end(); - for (it = m_rels.begin(); it != end; ++it) { - it->m_value->collect_statistics(st); + for (auto const& kv : m_rels) { + kv.m_value->collect_statistics(st); } // -- number of times a pob for some predicate transformer has @@ -3846,9 +3835,8 @@ void context::reset_statistics() 
m_pool1->reset_statistics(); m_pool2->reset_statistics(); - decl2rel::iterator it = m_rels.begin(), end = m_rels.end(); - for (it = m_rels.begin(); it != end; ++it) { - it->m_value->reset_statistics(); + for (auto & kv : m_rels) { + kv.m_value->reset_statistics(); } m_stats.reset(); @@ -3897,9 +3885,8 @@ expr_ref context::get_constraints (unsigned level) expr_ref res(m); expr_ref_vector constraints(m); - decl2rel::iterator it = m_rels.begin(), end = m_rels.end(); - for (; it != end; ++it) { - pred_transformer& r = *it->m_value; + for (auto & kv : m_rels) { + pred_transformer& r = *kv.m_value; expr_ref c = r.get_formulas(level); if (m.is_true(c)) { continue; } diff --git a/src/muz/spacer/spacer_context.h b/src/muz/spacer/spacer_context.h index 3c6c35742..34c04d596 100644 --- a/src/muz/spacer/spacer_context.h +++ b/src/muz/spacer/spacer_context.h @@ -176,7 +176,7 @@ public: void dec_ref () { SASSERT (m_ref_count > 0); --m_ref_count; - if(m_ref_count == 0) {dealloc(this);} + if (m_ref_count == 0) {dealloc(this);} } }; @@ -242,7 +242,7 @@ class pred_transformer { } void get_frame_geq_lemmas (unsigned level, expr_ref_vector &out) const { for (auto &lemma : m_lemmas) { - if(lemma->level() >= level) { + if (lemma->level() >= level) { out.push_back(lemma->get_expr()); } } @@ -362,7 +362,7 @@ public: void find_predecessors(datalog::rule const& r, ptr_vector& predicates) const; void add_rule(datalog::rule* r) {m_rules.push_back(r);} - void add_use(pred_transformer* pt) {if(!m_use.contains(pt)) {m_use.insert(pt);}} + void add_use(pred_transformer* pt) {if (!m_use.contains(pt)) {m_use.insert(pt);}} void initialize(decl2rel const& pts); func_decl* head() const {return m_head;} @@ -528,7 +528,7 @@ public: pob (pob* parent, pred_transformer& pt, unsigned level, unsigned depth=0, bool add_to_parent=true); - ~pob() {if(m_parent) { m_parent->erase_child(*this); }} + ~pob() {if (m_parent) { m_parent->erase_child(*this); }} unsigned weakness() {return m_weakness;} void bump_weakness() 
{m_weakness++;} @@ -564,7 +564,7 @@ public: void set_post(expr *post, app_ref_vector const &binding); /// indicate that a new post should be set for the node - void new_post(expr *post) {if(post != m_post) {m_new_post = post;}} + void new_post(expr *post) {if (post != m_post) {m_new_post = post;}} /// true if the node needs to be updated outside of the priority queue bool is_dirty () {return m_new_post;} /// clean a dirty node @@ -592,14 +592,14 @@ public: */ void get_skolems(app_ref_vector& v); - void on_expand() { m_expand_watches[m_depth].start(); if(m_parent.get()){m_parent.get()->on_expand();} } - void off_expand() { m_expand_watches[m_depth].stop(); if(m_parent.get()){m_parent.get()->off_expand();} }; + void on_expand() { m_expand_watches[m_depth].start(); if (m_parent.get()){m_parent.get()->on_expand();} } + void off_expand() { m_expand_watches[m_depth].stop(); if (m_parent.get()){m_parent.get()->off_expand();} }; double get_expand_time(unsigned depth) { return m_expand_watches[depth].get_seconds();} void inc_ref () {++m_ref_count;} void dec_ref () { --m_ref_count; - if(m_ref_count == 0) {dealloc(this);} + if (m_ref_count == 0) {dealloc(this);} } class on_expand_event @@ -727,7 +727,7 @@ public: SASSERT (!m_obligations.empty () || m_root); m_max_level++; m_min_depth++; - if(m_root && m_obligations.empty()) { m_obligations.push(m_root); } + if (m_root && m_obligations.empty()) { m_obligations.push(m_root); } } pob& get_root() const {return *m_root.get ();} diff --git a/src/muz/spacer/spacer_iuc_solver.cpp b/src/muz/spacer/spacer_iuc_solver.cpp index 35932b2ba..a0bc2a627 100644 --- a/src/muz/spacer/spacer_iuc_solver.cpp +++ b/src/muz/spacer/spacer_iuc_solver.cpp @@ -99,8 +99,9 @@ void iuc_solver::pop_bg (unsigned n) { if (n == 0) { return; } - if (m_assumptions.size () > m_first_assumption) - { m_assumptions.shrink(m_first_assumption); } + if (m_assumptions.size () > m_first_assumption) { + m_assumptions.shrink(m_first_assumption); + } m_first_assumption = 
m_first_assumption > n ? m_first_assumption - n : 0; m_assumptions.shrink (m_first_assumption); } @@ -110,9 +111,8 @@ unsigned iuc_solver::get_num_bg () {return m_first_assumption;} lbool iuc_solver::check_sat (unsigned num_assumptions, expr * const *assumptions) { // -- remove any old assumptions - if (m_assumptions.size () > m_first_assumption) - { m_assumptions.shrink(m_first_assumption); } - + m_assumptions.shrink(m_first_assumption); + // -- replace theory literals in background assumptions with proxies mk_proxies (m_assumptions); // -- in case mk_proxies added new literals, they are all background @@ -121,20 +121,17 @@ lbool iuc_solver::check_sat (unsigned num_assumptions, expr * const *assumptions m_assumptions.append (num_assumptions, assumptions); m_is_proxied = mk_proxies (m_assumptions, m_first_assumption); - lbool res; - res = m_solver.check_sat (m_assumptions.size (), m_assumptions.c_ptr ()); - set_status (res); - return res; + return set_status (m_solver.check_sat (m_assumptions)); } lbool iuc_solver::check_sat_cc(const expr_ref_vector &cube, vector const & clauses) { - if (clauses.empty()) {return check_sat(cube.size(), cube.c_ptr());} - + if (clauses.empty()) + return check_sat(cube.size(), cube.c_ptr()); + // -- remove any old assumptions - if (m_assumptions.size() > m_first_assumption) - { m_assumptions.shrink(m_first_assumption); } - + m_assumptions.shrink(m_first_assumption); + // -- replace theory literals in background assumptions with proxies mk_proxies(m_assumptions); // -- in case mk_proxies added new literals, they are all background @@ -143,28 +140,24 @@ lbool iuc_solver::check_sat_cc(const expr_ref_vector &cube, m_assumptions.append(cube); m_is_proxied = mk_proxies(m_assumptions, m_first_assumption); - lbool res; - res = m_solver.check_sat_cc(m_assumptions, clauses); - set_status (res); - return res; + return set_status (m_solver.check_sat_cc(m_assumptions, clauses)); } app* iuc_solver::def_manager::mk_proxy (expr *v) { app* r; - if 
(m_expr2proxy.find(v, r)) { return r; } + if (m_expr2proxy.find(v, r)) + return r; ast_manager &m = m_parent.m; - app_ref proxy(m); - app_ref def(m); - proxy = m_parent.fresh_proxy (); - def = m.mk_or (m.mk_not (proxy), v); + app* proxy = m_parent.fresh_proxy (); + app* def = m.mk_or (m.mk_not (proxy), v); m_defs.push_back (def); m_expr2proxy.insert (v, proxy); m_proxy2def.insert (proxy, def); - m_parent.assert_expr (def.get ()); + m_parent.assert_expr (def); return proxy; } @@ -191,18 +184,16 @@ bool iuc_solver::def_manager::is_proxy_def (expr *v) bool iuc_solver::is_proxy(expr *e, app_ref &def) { - if (!is_uninterp_const(e)) { return false; } + if (!is_uninterp_const(e)) + return false; - app *a = to_app (e); + app* a = to_app (e); - for (int i = m_defs.size (); i > 0; --i) - if (m_defs[i-1].is_proxy (a, def)) - { return true; } + for (int i = m_defs.size (); i-- > 0; ) + if (m_defs[i].is_proxy (a, def)) + return true; - if (m_base_defs.is_proxy (a, def)) - { return true; } - - return false; + return m_base_defs.is_proxy (a, def); } void iuc_solver::collect_statistics (statistics &st) const @@ -233,21 +224,25 @@ void iuc_solver::undo_proxies_in_core (ptr_vector &r) { app_ref e(m); expr_fast_mark1 bg; - for (unsigned i = 0; i < m_first_assumption; ++i) - { bg.mark(m_assumptions.get(i)); } + for (unsigned i = 0; i < m_first_assumption; ++i) { + bg.mark(m_assumptions.get(i)); + } // expand proxies unsigned j = 0; - for (unsigned i = 0, sz = r.size(); i < sz; ++i) { + for (expr* rr : r) { // skip background assumptions - if (bg.is_marked(r[i])) { continue; } + if (bg.is_marked(rr)) + continue; // -- undo proxies, but only if they were introduced in check_sat - if (m_is_proxied && is_proxy(r[i], e)) { + if (m_is_proxied && is_proxy(rr, e)) { SASSERT (m.is_or (e)); - r[j] = e->get_arg (1); - } else if (i != j) { r[j] = r[i]; } - j++; + r[j++] = e->get_arg (1); + } + else { + r[j++] = rr; + } } r.shrink (j); } @@ -370,65 +365,66 @@ void 
iuc_solver::get_iuc(expr_ref_vector &core) unsat_core_learner learner(m, iuc_pf); + unsat_core_plugin* plugin; // -- register iuc plugins - if (m_iuc_arith == 0 || m_iuc_arith == 1) { - unsat_core_plugin_farkas_lemma* plugin = + switch (m_iuc_arith) { + case 0: + case 1: + plugin = alloc(unsat_core_plugin_farkas_lemma, learner, m_split_literals, (m_iuc_arith == 1) /* use constants from A */); learner.register_plugin(plugin); - } - else if (m_iuc_arith == 2) { + break; + case 2: SASSERT(false && "Broken"); - unsat_core_plugin_farkas_lemma_optimized* plugin = - alloc(unsat_core_plugin_farkas_lemma_optimized, learner, m); + plugin = alloc(unsat_core_plugin_farkas_lemma_optimized, learner, m); learner.register_plugin(plugin); - } - else if(m_iuc_arith == 3) { - unsat_core_plugin_farkas_lemma_bounded* plugin = - alloc(unsat_core_plugin_farkas_lemma_bounded, learner, m); + break; + case 3: + plugin = alloc(unsat_core_plugin_farkas_lemma_bounded, learner, m); learner.register_plugin(plugin); - } - else { + break; + default: UNREACHABLE(); + break; } - if (m_iuc == 1) { + switch (m_iuc) { + case 1: // -- iuc based on the lowest cut in the proof - unsat_core_plugin_lemma* plugin = - alloc(unsat_core_plugin_lemma, learner); + plugin = alloc(unsat_core_plugin_lemma, learner); learner.register_plugin(plugin); - } - else if (m_iuc == 2) { + break; + case 2: // -- iuc based on the smallest cut in the proof - unsat_core_plugin_min_cut* plugin = - alloc(unsat_core_plugin_min_cut, learner, m); + plugin = alloc(unsat_core_plugin_min_cut, learner, m); learner.register_plugin(plugin); - } - else { + break; + default: UNREACHABLE(); + break; } - + { scoped_watch _t_ (m_learn_core_sw); // compute interpolating unsat core learner.compute_unsat_core(core); } - + elim_proxies (core); // AG: this should be taken care of by minimizing the iuc cut simplify_bounds (core); } - + IF_VERBOSE(2, - verbose_stream () << "IUC Core:\n" - << mk_and(core) << "\n";); + verbose_stream () << "IUC Core:\n" 
<< core << "\n";); } void iuc_solver::refresh () { // only refresh in non-pushed state - SASSERT (m_defs.size () == 0); + SASSERT (m_defs.empty()); expr_ref_vector assertions (m); for (unsigned i = 0, e = m_solver.get_num_assertions(); i < e; ++i) { expr* a = m_solver.get_assertion (i); diff --git a/src/muz/spacer/spacer_iuc_solver.h b/src/muz/spacer/spacer_iuc_solver.h index dcee54612..0195ea134 100644 --- a/src/muz/spacer/spacer_iuc_solver.h +++ b/src/muz/spacer/spacer_iuc_solver.h @@ -26,11 +26,10 @@ namespace spacer { class iuc_solver : public solver { private: struct def_manager { - iuc_solver &m_parent; + iuc_solver & m_parent; + expr_ref_vector m_defs; obj_map<expr, app*> m_expr2proxy; - obj_map<app, expr*> m_proxy2def; - - expr_ref_vector m_defs; + obj_map<app, expr*> m_proxy2def; def_manager(iuc_solver &parent) : m_parent(parent), m_defs(m_parent.m) @@ -44,15 +43,15 @@ }; friend struct def_manager; - ast_manager &m; - solver &m_solver; - app_ref_vector m_proxies; - unsigned m_num_proxies; + ast_manager& m; + solver& m_solver; + app_ref_vector m_proxies; + unsigned m_num_proxies; vector<def_manager> m_defs; - def_manager m_base_defs; - expr_ref_vector m_assumptions; - unsigned m_first_assumption; - bool m_is_proxied; + def_manager m_base_defs; + expr_ref_vector m_assumptions; + unsigned m_first_assumption; + bool m_is_proxied; stopwatch m_iuc_sw; stopwatch m_hyp_reduce1_sw; @@ -95,7 +94,7 @@ public: /* iuc solver specific */ void get_unsat_core(expr_ref_vector &core) override; virtual void get_iuc(expr_ref_vector &core); - void set_split_literals(bool v) {m_split_literals = v;} + void set_split_literals(bool v) { m_split_literals = v; } bool mk_proxies(expr_ref_vector &v, unsigned from = 0); void undo_proxies(expr_ref_vector &v); @@ -103,42 +102,40 @@ void pop_bg(unsigned n); unsigned get_num_bg(); - void get_full_unsat_core(ptr_vector<expr> &core) - {m_solver.get_unsat_core(core);} + void get_full_unsat_core(ptr_vector<expr> &core) { m_solver.get_unsat_core(core); } /* solver interface */ - solver* 
translate(ast_manager &m, params_ref const &p) override { return m_solver.translate(m, p);} - void updt_params(params_ref const &p) override {m_solver.updt_params(p);} - void reset_params(params_ref const &p) override {m_solver.reset_params(p);} - const params_ref &get_params() const override {return m_solver.get_params();} - void push_params() override {m_solver.push_params();} - void pop_params() override {m_solver.pop_params();} - void collect_param_descrs(param_descrs &r) override { m_solver.collect_param_descrs(r);} - void set_produce_models(bool f) override { m_solver.set_produce_models(f);} - void assert_expr_core(expr *t) override { m_solver.assert_expr(t);} - void assert_expr_core2(expr *t, expr *a) override { NOT_IMPLEMENTED_YET();} + solver* translate(ast_manager &m, params_ref const &p) override { + return m_solver.translate(m, p); + } + void updt_params(params_ref const &p) override { m_solver.updt_params(p); } + void reset_params(params_ref const &p) override { m_solver.reset_params(p); } + const params_ref &get_params() const override { return m_solver.get_params(); } + void push_params() override { m_solver.push_params(); } + void pop_params() override { m_solver.pop_params(); } + void collect_param_descrs(param_descrs &r) override { m_solver.collect_param_descrs(r); } + void set_produce_models(bool f) override { m_solver.set_produce_models(f); } + void assert_expr_core(expr *t) override { m_solver.assert_expr(t); } + void assert_expr_core2(expr *t, expr *a) override { NOT_IMPLEMENTED_YET(); } expr_ref_vector cube(expr_ref_vector&, unsigned) override { return expr_ref_vector(m); } void push() override; void pop(unsigned n) override; - unsigned get_scope_level() const override - {return m_solver.get_scope_level();} + unsigned get_scope_level() const override { return m_solver.get_scope_level(); } lbool check_sat(unsigned num_assumptions, expr * const *assumptions) override; lbool check_sat_cc(const expr_ref_vector &cube, vector const & clauses) 
override; - void set_progress_callback(progress_callback *callback) override - {m_solver.set_progress_callback(callback);} - unsigned get_num_assertions() const override - {return m_solver.get_num_assertions();} - expr * get_assertion(unsigned idx) const override - {return m_solver.get_assertion(idx);} - unsigned get_num_assumptions() const override - {return m_solver.get_num_assumptions();} - expr * get_assumption(unsigned idx) const override - {return m_solver.get_assumption(idx);} - std::ostream &display(std::ostream &out, unsigned n, expr* const* es) const override - { return m_solver.display(out, n, es); } + void set_progress_callback(progress_callback *callback) override { + m_solver.set_progress_callback(callback); + } + unsigned get_num_assertions() const override { return m_solver.get_num_assertions(); } + expr * get_assertion(unsigned idx) const override { return m_solver.get_assertion(idx); } + unsigned get_num_assumptions() const override { return m_solver.get_num_assumptions(); } + expr * get_assumption(unsigned idx) const override { return m_solver.get_assumption(idx); } + std::ostream &display(std::ostream &out, unsigned n, expr* const* es) const override { + return m_solver.display(out, n, es); + } /* check_sat_result interface */ @@ -148,13 +145,10 @@ public: void get_unsat_core(ptr_vector &r) override; void get_model_core(model_ref &m) override {m_solver.get_model(m);} proof *get_proof() override {return m_solver.get_proof();} - std::string reason_unknown() const override - {return m_solver.reason_unknown();} - void set_reason_unknown(char const* msg) override - {m_solver.set_reason_unknown(msg);} - void get_labels(svector &r) override - {m_solver.get_labels(r);} - ast_manager &get_manager() const override {return m;} + std::string reason_unknown() const override { return m_solver.reason_unknown(); } + void set_reason_unknown(char const* msg) override { m_solver.set_reason_unknown(msg); } + void get_labels(svector &r) override { 
m_solver.get_labels(r); } + ast_manager& get_manager() const override { return m; } virtual void refresh(); @@ -162,10 +156,10 @@ public: iuc_solver &m_s; expr_ref_vector &m_v; public: - scoped_mk_proxy(iuc_solver &s, expr_ref_vector &v) : m_s(s), m_v(v) - {m_s.mk_proxies(m_v);} - ~scoped_mk_proxy() - {m_s.undo_proxies(m_v);} + scoped_mk_proxy(iuc_solver &s, expr_ref_vector &v) : m_s(s), m_v(v) { + m_s.mk_proxies(m_v); + } + ~scoped_mk_proxy() { m_s.undo_proxies(m_v); } }; class scoped_bg { @@ -173,8 +167,11 @@ public: unsigned m_bg_sz; public: scoped_bg(iuc_solver &s) : m_s(s), m_bg_sz(m_s.get_num_bg()) {} - ~scoped_bg() - {if(m_s.get_num_bg() > m_bg_sz) { m_s.pop_bg(m_s.get_num_bg() - m_bg_sz); }} + ~scoped_bg() { + if (m_s.get_num_bg() > m_bg_sz) { + m_s.pop_bg(m_s.get_num_bg() - m_bg_sz); + } + } }; }; } diff --git a/src/muz/spacer/spacer_pdr.cpp b/src/muz/spacer/spacer_pdr.cpp index e1a268c22..89647ed02 100644 --- a/src/muz/spacer/spacer_pdr.cpp +++ b/src/muz/spacer/spacer_pdr.cpp @@ -28,14 +28,14 @@ model_node::model_node(model_node* parent, class pob *pob): m_orig_level(m_pob->level()), m_depth(0), m_closed(false) { SASSERT(m_pob); - if (m_parent) m_parent->add_child(*this); + if (m_parent) m_parent->add_child(this); } -void model_node::add_child(model_node &kid) { - m_children.push_back(&kid); - SASSERT(level() == kid.level() + 1); +void model_node::add_child(model_node* kid) { + m_children.push_back(kid); + SASSERT(level() == kid->level() + 1); SASSERT(level() > 0); - kid.m_depth = m_depth + 1; + kid->m_depth = m_depth + 1; if (is_closed()) set_open(); } @@ -109,7 +109,7 @@ void model_node::insert_after(model_node* n) { void model_search::reset() { if (m_root) { erase_children(*m_root, false); - remove_node(*m_root, false); + remove_node(m_root, false); dealloc(m_root); m_root = nullptr; } @@ -117,24 +117,28 @@ void model_search::reset() { } model_node* model_search::pop_front() { - if (!m_qhead) return nullptr; model_node *res = m_qhead; - 
res->detach(m_qhead); + if (res) { + res->detach(m_qhead); + } return res; } -void model_search::add_leaf(model_node& n) { +void model_search::add_leaf(model_node* _n) { + model_node& n = *_n; SASSERT(n.children().empty()); model_nodes ns; model_nodes& nodes = cache(n).insert_if_not_there2(n.post(), ns)->get_data().m_value; if (nodes.contains(&n)) return; - nodes.push_back(&n); + nodes.push_back(_n); if (nodes.size() == 1) { SASSERT(n.is_open()); enqueue_leaf(n); } - else n.set_pre_closed(); + else { + n.set_pre_closed(); + } } void model_search::enqueue_leaf(model_node& n) { @@ -162,7 +166,7 @@ void model_search::set_root(model_node* root) { m_root = root; SASSERT(m_root); SASSERT(m_root->children().empty()); - add_leaf(*root); + add_leaf(root); } void model_search::backtrack_level(bool uses_level, model_node& n) { @@ -198,15 +202,16 @@ void model_search::erase_children(model_node& n, bool backtrack) { todo.pop_back(); nodes.push_back(m); todo.append(m->children()); - remove_node(*m, backtrack); + remove_node(m, backtrack); } std::for_each(nodes.begin(), nodes.end(), delete_proc()); } // removes node from the search tree and from the cache -void model_search::remove_node(model_node& n, bool backtrack) { +void model_search::remove_node(model_node* _n, bool backtrack) { + model_node& n = *_n; model_nodes& nodes = cache(n).find(n.post()); - nodes.erase(&n); + nodes.erase(_n); if (n.in_queue()) n.detach(m_qhead); // TBD: siblings would also fail if n is not a goal. 
if (!nodes.empty() && backtrack && @@ -241,9 +246,10 @@ lbool context::gpdr_solve_core() { } // communicate failure to datalog::context - if (m_context) { m_context->set_status(datalog::BOUNDED); } + if (m_context) { + m_context->set_status(datalog::BOUNDED); + } return l_undef; - } bool context::gpdr_check_reachability(unsigned lvl, model_search &ms) { @@ -273,9 +279,8 @@ bool context::gpdr_check_reachability(unsigned lvl, model_search &ms) { TRACE("spacer_pdr", tout << "looking at pob at level " << pob->level() << " " << mk_pp(pob->post(), m) << "\n";); - if (pob == node->pob()) {continue;} - model_node *kid = alloc(model_node, node, pob); - ms.add_leaf(*kid); + if (pob != node->pob()) + ms.add_leaf(alloc(model_node, node, pob)); } node->check_pre_closed(); break; diff --git a/src/muz/spacer/spacer_pdr.h b/src/muz/spacer/spacer_pdr.h index 56900a1e8..36abb0bc9 100644 --- a/src/muz/spacer/spacer_pdr.h +++ b/src/muz/spacer/spacer_pdr.h @@ -36,9 +36,9 @@ class model_node { bool m_closed; // whether the pob is derivable public: model_node(model_node* parent, pob* pob); - void add_child(model_node &kid); + void add_child(model_node* kid); - expr *post() const {return m_pob->post();} + expr *post() const { return m_pob->post(); } unsigned level() const { return m_pob->level(); } unsigned orig_level() const { return m_orig_level; } unsigned depth() const { return m_depth; } @@ -57,24 +57,25 @@ public: bool is_1closed() { if (is_closed()) return true; if (m_children.empty()) return false; - for (auto kid : m_children) {if (kid->is_open()) return false;} + for (auto kid : m_children) + if (kid->is_open()) return false; return true; } void check_pre_closed(); - void set_pre_closed() {m_closed = true;} + void set_pre_closed() { m_closed = true; } - void set_closed() {m_closed = true;} + void set_closed() { m_closed = true; } void set_open(); - void reset_children() {m_children.reset();} + void reset_children() { m_children.reset(); } /// queue // remove this node from the 
given queue void detach(model_node*& qhead); void insert_after(model_node* n); - model_node* next() const {return m_next;} - bool in_queue() {return m_next && m_prev;} + model_node* next() const { return m_next; } + bool in_queue() { return m_next && m_prev; } }; class model_search { @@ -85,8 +86,7 @@ class model_search { vector > m_cache; obj_map& cache(model_node const& n); void erase_children(model_node& n, bool backtrack); - void remove_node(model_node& n, bool backtrack); - void add_leaf(model_node* n); // add leaf to priority queue. + void remove_node(model_node* _n, bool backtrack); public: model_search(bool bfs): m_bfs(bfs), m_root(nullptr), m_qhead(nullptr) {} @@ -96,7 +96,7 @@ public: void reset(); model_node* pop_front(); - void add_leaf(model_node& n); // add fresh node. + void add_leaf(model_node* n); // add fresh node. model_node& get_root() const { return *m_root; } void backtrack_level(bool uses_level, model_node& n); void remove_goal(model_node& n); diff --git a/src/muz/spacer/spacer_quant_generalizer.cpp b/src/muz/spacer/spacer_quant_generalizer.cpp index 3dace0cdd..f9525b2f6 100644 --- a/src/muz/spacer/spacer_quant_generalizer.cpp +++ b/src/muz/spacer/spacer_quant_generalizer.cpp @@ -132,14 +132,13 @@ void lemma_quantifier_generalizer::find_candidates(expr *e, << " in " << mk_pp(e, m) << "\n";); extra.push_back(index); if (m_arith.is_add(index)) { - for (unsigned j = 0, asz = index->get_num_args(); j < asz; j++) { - expr *arg = index->get_arg(j); - if (!is_app(arg) || marked_args.is_marked(arg)) {continue;} - marked_args.mark(arg); - candidates.push_back (to_app(arg)); + for (expr * arg : *index) { + if (!is_app(arg) || marked_args.is_marked(arg)) {continue;} + marked_args.mark(arg); + candidates.push_back (to_app(arg)); + } } } - } std::sort(candidates.c_ptr(), candidates.c_ptr() + candidates.size(), index_lt_proc(m)); @@ -214,15 +213,15 @@ void lemma_quantifier_generalizer::cleanup(expr_ref_vector &cube, app_ref_vector bool found = false; 
expr_ref_vector kids(m); expr_ref_vector kids_bind(m); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - if (a->get_arg(i) == sk) { + for (expr* arg : *a) { + if (arg == sk) { found = true; - kids.push_back(a->get_arg(i)); + kids.push_back(arg); kids_bind.push_back(bind); } else { - kids.push_back (times_minus_one(a->get_arg(i), arith)); - kids_bind.push_back (times_minus_one(a->get_arg(i), arith)); + kids.push_back (times_minus_one(arg, arith)); + kids_bind.push_back (times_minus_one(arg, arith)); } } if (!found) continue; @@ -292,7 +291,7 @@ void lemma_quantifier_generalizer::mk_abs_cube(lemma_ref &lemma, app *term, var gnd_cube.push_back(lit); } else { - expr *e1, *e2; + expr *e1, *e2; // generalize v=num into v>=num if (m.is_eq(abs_lit, e1, e2) && (e1 == var || e2 == var)) { if (m_arith.is_numeral(e1)) { @@ -310,13 +309,13 @@ void lemma_quantifier_generalizer::mk_abs_cube(lemma_ref &lemma, app *term, var if (!lb && is_lb(var, abs_lit)) { lb = abs_lit; - } + } else if (!ub && is_ub(var, abs_lit)) { ub = abs_lit; + } } } } -} // -- returns true if e is an upper bound for var bool lemma_quantifier_generalizer::is_ub(var *var, expr *e) { @@ -348,38 +347,34 @@ bool lemma_quantifier_generalizer::is_ub(var *var, expr *e) { if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) && m_arith.is_add(e1)) { app *a = to_app(e1); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); - if (arg == var) {return true;} + for (expr* arg : *a) { + if (arg == var) return true; } } // t1 <= t2 + -1*var if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) && m_arith.is_add(e2)) { app *a = to_app(e2); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); + for (expr* arg : *a) { if (m_arith.is_times_minus_one(arg, arg) && arg == var) - {return true;} + return true; } } // t1 >= t2 + var if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) && m_arith.is_add(e2)) { app *a = to_app(e2); 
- for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); - if (arg == var) {return true;} + for (expr * arg : *a) { + if (arg == var) return true; } } // -1*var + t1 >= t2 if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) && m_arith.is_add(e1)) { app *a = to_app(e1); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); + for (expr * arg : *a) { if (m_arith.is_times_minus_one(arg, arg) && arg == var) - {return true;} + return true; } } return false; @@ -414,38 +409,34 @@ bool lemma_quantifier_generalizer::is_lb(var *var, expr *e) { if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) && m_arith.is_add(e1)) { app *a = to_app(e1); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); - if (arg == var) {return true;} + for (expr * arg : *a) { + if (arg == var) return true; } } // t1 >= t2 + -1*var if ((m_arith.is_ge(e, e1, e2) || m_arith.is_gt(e, e1, e2)) && m_arith.is_add(e2)) { app *a = to_app(e2); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); + for (expr * arg : *a) { if (m_arith.is_times_minus_one(arg, arg) && arg == var) - {return true;} + return true; } } // t1 <= t2 + var if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) && m_arith.is_add(e2)) { app *a = to_app(e2); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); - if (arg == var) {return true;} + for (expr * arg : *a) { + if (arg == var) return true; } } // -1*var + t1 <= t2 if ((m_arith.is_le(e, e1, e2) || m_arith.is_lt(e, e1, e2)) && m_arith.is_add(e1)) { app *a = to_app(e1); - for (unsigned i = 0, sz = a->get_num_args(); i < sz; ++i) { - expr *arg = a->get_arg(i); + for (expr * arg : *a) { if (m_arith.is_times_minus_one(arg, arg) && arg == var) - {return true;} + return true; } } @@ -470,22 +461,22 @@ bool lemma_quantifier_generalizer::generalize (lemma_ref &lemma, app *term) { tout << "lb = 
"; if (lb) tout << mk_pp(lb, m); else tout << "none"; tout << "\n"; - tout << "ub = "; if (ub) tout << mk_pp(ub, m); else tout << "none"; tout << "\n";); - if (!lb && !ub) {return false;} + if (!lb && !ub) + return false; // -- guess lower or upper bound if missing if (!lb) { abs_cube.push_back (m_arith.mk_ge (var, term)); lb = abs_cube.back(); - } + } if (!ub) { abs_cube.push_back (m_arith.mk_lt(var, term)); ub = abs_cube.back(); - } + } rational init; expr_ref constant(m); @@ -511,7 +502,7 @@ bool lemma_quantifier_generalizer::generalize (lemma_ref &lemma, app *term) { flatten_and(gnd, gnd_cube); TRACE("spacer_qgen", - tout << "New CUBE is: " << mk_pp(mk_and(gnd_cube),m) << "\n";); + tout << "New CUBE is: " << gnd_cube << "\n";); // check if the result is a true lemma unsigned uses_level = 0; @@ -519,8 +510,8 @@ bool lemma_quantifier_generalizer::generalize (lemma_ref &lemma, app *term) { if (pt.check_inductive(lemma->level(), gnd_cube, uses_level, lemma->weakness())) { TRACE("spacer_qgen", tout << "Quantifier Generalization Succeeded!\n" - << "New CUBE is: " << mk_pp(mk_and(gnd_cube),m) << "\n";); - SASSERT(zks.size() >= m_offset); + << "New CUBE is: " << gnd_cube << "\n";); + SASSERT(zks.size() >= static_cast(m_offset)); // lift quantified variables to top of select expr_ref ext_bind(m); @@ -541,7 +532,7 @@ bool lemma_quantifier_generalizer::generalize (lemma_ref &lemma, app *term) { } lemma->update_cube(lemma->get_pob(), gnd_cube); - lemma->set_level(uses_level); + lemma->set_level(uses_level); SASSERT(var->get_idx() < zks.size()); SASSERT(is_app(ext_bind)); @@ -570,8 +561,7 @@ bool lemma_quantifier_generalizer::find_stride(expr_ref_vector &c, expr_ref &pat if (is_var(p_index)) return false; std::vector instances; - for (unsigned i=0; i < c.size(); i++) { - expr *lit = c.get(i); + for (expr* lit : c) { if (!contains_selects(lit, m)) continue; @@ -589,16 +579,17 @@ bool lemma_quantifier_generalizer::find_stride(expr_ref_vector &c, expr_ref &pat unsigned matched 
= 0; for (unsigned p=0; p < size; p++) { expr *arg = p_index->get_arg(p); - if (is_var(arg)) - { + if (is_var(arg)) { rational val; - if (p < candidate->get_num_args() && m_arith.is_numeral(candidate->get_arg(p), val)) { + if (p < candidate->get_num_args() && + m_arith.is_numeral(candidate->get_arg(p), val) && + val.is_unsigned()) { instances.push_back(val.get_unsigned()); } } else { - for (unsigned j=0; j < candidate->get_num_args(); j++) { - if (candidate->get_arg(j) == arg) { + for (expr* cand : *candidate) { + if (cand == arg) { matched++; break; } diff --git a/src/muz/spacer/spacer_unsat_core_learner.cpp b/src/muz/spacer/spacer_unsat_core_learner.cpp index a6ab01870..5b3420800 100644 --- a/src/muz/spacer/spacer_unsat_core_learner.cpp +++ b/src/muz/spacer/spacer_unsat_core_learner.cpp @@ -17,46 +17,36 @@ Revision History: --*/ #include +#include "ast/for_each_expr.h" +#include "ast/proofs/proof_utils.h" #include "muz/spacer/spacer_unsat_core_learner.h" #include "muz/spacer/spacer_unsat_core_plugin.h" #include "muz/spacer/spacer_iuc_proof.h" -#include "ast/for_each_expr.h" - -#include "ast/proofs/proof_utils.h" #include "muz/spacer/spacer_util.h" -namespace spacer -{ +namespace spacer { -unsat_core_learner::~unsat_core_learner() -{ +unsat_core_learner::~unsat_core_learner() { std::for_each(m_plugins.begin(), m_plugins.end(), delete_proc()); } -void unsat_core_learner::register_plugin(unsat_core_plugin* plugin) -{ +void unsat_core_learner::register_plugin(unsat_core_plugin* plugin) { m_plugins.push_back(plugin); } -void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) -{ +void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) { // traverse proof proof_post_order it(m_pr.get(), m); - while (it.hasNext()) - { + while (it.hasNext()) { proof* currentNode = it.next(); - if (m.get_num_parents(currentNode) > 0) - { + if (m.get_num_parents(currentNode) > 0) { bool need_to_mark_closed = true; - for (unsigned i = 0; i < 
m.get_num_parents(currentNode); ++i) - { - SASSERT(m.is_proof(currentNode->get_arg(i))); - proof* premise = to_app(currentNode->get_arg(i)); - - need_to_mark_closed = need_to_mark_closed && (!m_pr.is_b_marked(premise) || m_closed.is_marked(premise)); + for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i) { + proof* premise = m.get_parent(currentNode, i); + need_to_mark_closed &= (!m_pr.is_b_marked(premise) || m_closed.is_marked(premise)); } // save result @@ -65,8 +55,9 @@ void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) // we have now collected all necessary information, so we can visit the node // if the node mixes A-reasoning and B-reasoning and contains non-closed premises - if (m_pr.is_a_marked(currentNode) && m_pr.is_b_marked(currentNode) && !m_closed.is_marked(currentNode)) - { + if (m_pr.is_a_marked(currentNode) && + m_pr.is_b_marked(currentNode) && + !m_closed.is_marked(currentNode)) { compute_partial_core(currentNode); // then we need to compute a partial core } } @@ -77,46 +68,38 @@ void unsat_core_learner::compute_unsat_core(expr_ref_vector& unsat_core) // TODO: remove duplicates from unsat core? 
// move all lemmas into vector - for (expr* const* it = m_unsat_core.begin(); it != m_unsat_core.end(); ++it) - { - unsat_core.push_back(*it); + for (expr* e : m_unsat_core) { + unsat_core.push_back(e); } } -void unsat_core_learner::compute_partial_core(proof* step) -{ - for (unsat_core_plugin** it=m_plugins.begin(), **end = m_plugins.end (); it != end && !m_closed.is_marked(step); ++it) - { - unsat_core_plugin* plugin = *it; +void unsat_core_learner::compute_partial_core(proof* step) { + for (unsat_core_plugin* plugin : m_plugins) { + if (m_closed.is_marked(step)) break; plugin->compute_partial_core(step); } } -void unsat_core_learner::finalize() -{ - for (unsat_core_plugin** it=m_plugins.begin(); it != m_plugins.end(); ++it) - { - unsat_core_plugin* plugin = *it; +void unsat_core_learner::finalize() { + for (unsat_core_plugin* plugin : m_plugins) { plugin->finalize(); } } -bool unsat_core_learner::is_closed(proof*p) -{ +bool unsat_core_learner::is_closed(proof* p) { return m_closed.is_marked(p); } -void unsat_core_learner::set_closed(proof* p, bool value) -{ + +void unsat_core_learner::set_closed(proof* p, bool value) { m_closed.mark(p, value); } -bool unsat_core_learner::is_b_open(proof *p) -{ - return m_pr.is_b_marked(p) && !is_closed (p); +bool unsat_core_learner::is_b_open(proof *p) { + return m_pr.is_b_marked(p) && !is_closed (p); } -void unsat_core_learner::add_lemma_to_core(expr* lemma) -{ +void unsat_core_learner::add_lemma_to_core(expr* lemma) { m_unsat_core.push_back(lemma); } + } diff --git a/src/muz/spacer/spacer_unsat_core_learner.h b/src/muz/spacer/spacer_unsat_core_learner.h index a8b5965fc..327b12eb6 100644 --- a/src/muz/spacer/spacer_unsat_core_learner.h +++ b/src/muz/spacer/spacer_unsat_core_learner.h @@ -6,7 +6,8 @@ Module Name: spacer_unsat_core_learner.h Abstract: - itp cores + + itp cores Author: Bernhard Gleiss @@ -27,8 +28,7 @@ namespace spacer { class unsat_core_plugin; class iuc_proof; - class unsat_core_learner - { + class 
unsat_core_learner { typedef obj_hashtable expr_set; public: @@ -37,7 +37,7 @@ namespace spacer { virtual ~unsat_core_learner(); ast_manager& m; - iuc_proof& m_pr; + iuc_proof& m_pr; /* * register a plugin for computation of partial unsat cores @@ -56,7 +56,6 @@ namespace spacer { * - a node is closed, iff it has already been interpolated, i.e. its contribution is * already covered by the unsat-core. */ - bool is_closed(proof* p); void set_closed(proof* p, bool value); @@ -67,14 +66,14 @@ namespace spacer { */ void add_lemma_to_core(expr* lemma); - - private: ptr_vector m_plugins; ast_mark m_closed; - // collects the lemmas of the unsat-core - // will at the end be inserted into unsat_core. + /* + * collects the lemmas of the unsat-core + * will at the end be inserted into unsat_core. + */ expr_ref_vector m_unsat_core; /* @@ -86,9 +85,7 @@ namespace spacer { * finalize computation of unsat-core */ void finalize(); - }; - } #endif diff --git a/src/muz/spacer/spacer_unsat_core_plugin.cpp b/src/muz/spacer/spacer_unsat_core_plugin.cpp index 557983628..25b12cf25 100644 --- a/src/muz/spacer/spacer_unsat_core_plugin.cpp +++ b/src/muz/spacer/spacer_unsat_core_plugin.cpp @@ -6,7 +6,7 @@ Module Name: spacer_unsat_core_plugin.cpp Abstract: - plugin for itp cores + plugin for itp cores Author: Bernhard Gleiss @@ -32,259 +32,213 @@ Revision History: #include "muz/spacer/spacer_unsat_core_learner.h" #include "muz/spacer/spacer_iuc_proof.h" -namespace spacer -{ +namespace spacer { + + unsat_core_plugin::unsat_core_plugin(unsat_core_learner& learner): + m(learner.m), m_learner(learner) {}; + -void unsat_core_plugin_lemma::compute_partial_core(proof* step) -{ - SASSERT(m_learner.m_pr.is_a_marked(step)); - SASSERT(m_learner.m_pr.is_b_marked(step)); - - for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i) - { - SASSERT(m_learner.m.is_proof(step->get_arg(i))); - proof* premise = to_app(step->get_arg(i)); - - if (m_learner.is_b_open (premise)) - { - // by IH, premises that 
are AB marked are already closed - SASSERT(!m_learner.m_pr.is_a_marked(premise)); - add_lowest_split_to_core(premise); - } - } - m_learner.set_closed(step, true); -} - -void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const -{ - SASSERT(m_learner.is_b_open(step)); - ast_manager &m = m_learner.m; - - ptr_vector todo; - todo.push_back(step); - - while (!todo.empty()) - { - proof* pf = todo.back(); - todo.pop_back(); - - // if current step hasn't been processed, - if (!m_learner.is_closed(pf)) - { - m_learner.set_closed(pf, true); - // the step is b-marked and not closed. - // by I.H. the step must be already visited - // so if it is also a-marked, it must be closed - SASSERT(m_learner.m_pr.is_b_marked(pf)); - SASSERT(!m_learner.m_pr.is_a_marked(pf)); - - // the current step needs to be interpolated: - expr* fact = m_learner.m.get_fact(pf); - // if we trust the current step and we are able to use it - if (m_learner.m_pr.is_b_pure (pf) && - (m.is_asserted(pf) || is_literal(m, fact))) - { - // just add it to the core - m_learner.add_lemma_to_core(fact); - } - // otherwise recurse on premises - else - { - for (unsigned i = 0, sz = m_learner.m.get_num_parents(pf); - i < sz; ++i) - { - SASSERT(m_learner.m.is_proof(pf->get_arg(i))); - proof* premise = m.get_parent (pf, i); - if (m_learner.is_b_open(premise)) { - todo.push_back(premise); - } - } - } - - } - } -} - - -void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step) -{ - ast_manager &m = m_learner.m; - SASSERT(m_learner.m_pr.is_a_marked(step)); - SASSERT(m_learner.m_pr.is_b_marked(step)); - // XXX this assertion should be true so there is no need to check for it - SASSERT (!m_learner.is_closed (step)); - func_decl* d = step->get_decl(); - symbol sym; - if(!m_learner.is_closed(step) && // if step is not already interpolated - step->get_decl_kind() == PR_TH_LEMMA && // and step is a Farkas lemma - d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of 
step - d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas", - d->get_parameter(1).is_symbol(sym) && sym == "farkas" && - d->get_num_parameters() >= m_learner.m.get_num_parents(step) + 2) // the following parameters are the Farkas coefficients - { - SASSERT(m_learner.m.has_fact(step)); - - ptr_vector literals; - vector coefficients; - - /* The farkas lemma represents a subproof starting from premise(-set)s A, BNP and BP(ure) and - * ending in a disjunction D. We need to compute the contribution of BP, i.e. a formula, which - * is entailed by BP and together with A and BNP entails D. - * - * Let Fark(F) be the farkas coefficient for F. We can use the fact that - * (A*Fark(A) + BNP*Fark(BNP) + BP*Fark(BP) + (neg D)*Fark(D)) => false. (E1) - * We further have that A+B => C implies (A \land B) => C. (E2) - * - * Alternative 1: - * From (E1) immediately get that BP*Fark(BP) is a solution. - * - * Alternative 2: - * We can rewrite (E2) to rewrite (E1) to - * (BP*Fark(BP)) => (neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D))) (E3) - * and since we can derive (A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) from - * A, BNP and D, we also know that it is inconsisent. Therefore - * neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) is a solution. - * - * Finally we also need the following workaround: - * 1) Although we know from theory, that the Farkas coefficients are always nonnegative, - * the Farkas coefficients provided by arith_core are sometimes negative (must be a bug) - * as workaround we take the absolute value of the provided coefficients. 
- */ - parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient - - STRACE("spacer.farkas", - verbose_stream() << "Farkas input: "<< "\n"; - for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i) - { - SASSERT(m_learner.m.is_proof(step->get_arg(i))); - proof *prem = m.get_parent (step, i); - - rational coef; - VERIFY(params[i].is_rational(coef)); - - bool b_pure = m_learner.m_pr.is_b_pure (prem); - verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n"; - } - ); - - bool can_be_closed = true; - - for(unsigned i = 0; i < m.get_num_parents(step); ++i) - { - SASSERT(m_learner.m.is_proof(step->get_arg(i))); - proof * premise = m.get_parent (step, i); - - if (m_learner.is_b_open (premise)) - { + void unsat_core_plugin_lemma::compute_partial_core(proof* step) { + SASSERT(m_learner.m_pr.is_a_marked(step)); + SASSERT(m_learner.m_pr.is_b_marked(step)); + + for (proof* premise : m.get_parents(step)) { + + if (m_learner.is_b_open (premise)) { + // by IH, premises that are AB marked are already closed SASSERT(!m_learner.m_pr.is_a_marked(premise)); + add_lowest_split_to_core(premise); + } + } + m_learner.set_closed(step, true); + } + + void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const + { + SASSERT(m_learner.is_b_open(step)); + + ptr_buffer todo; + todo.push_back(step); + + while (!todo.empty()) { + proof* pf = todo.back(); + todo.pop_back(); + + // if current step hasn't been processed, + if (!m_learner.is_closed(pf)) { + m_learner.set_closed(pf, true); + // the step is b-marked and not closed. + // by I.H. 
the step must be already visited + // so if it is also a-marked, it must be closed + SASSERT(m_learner.m_pr.is_b_marked(pf)); + SASSERT(!m_learner.m_pr.is_a_marked(pf)); + + // the current step needs to be interpolated: + expr* fact = m.get_fact(pf); + // if we trust the current step and we are able to use it + if (m_learner.m_pr.is_b_pure (pf) && + (m.is_asserted(pf) || is_literal(m, fact))) { + // just add it to the core + m_learner.add_lemma_to_core(fact); + } + // otherwise recurse on premises + else { + for (proof* premise : m.get_parents(pf)) + if (m_learner.is_b_open(premise)) + todo.push_back(premise); + } + + } + } + } - if (m_learner.m_pr.is_b_pure (step)) - { - if (!m_use_constant_from_a) - { - rational coefficient; - VERIFY(params[i].is_rational(coefficient)); - literals.push_back(to_app(m_learner.m.get_fact(premise))); - coefficients.push_back(abs(coefficient)); + + void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step) + { + SASSERT(m_learner.m_pr.is_a_marked(step)); + SASSERT(m_learner.m_pr.is_b_marked(step)); + // XXX this assertion should be true so there is no need to check for it + SASSERT (!m_learner.is_closed (step)); + func_decl* d = step->get_decl(); + symbol sym; + if (!m_learner.is_closed(step) && // if step is not already interpolated + step->get_decl_kind() == PR_TH_LEMMA && // and step is a Farkas lemma + d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step + d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas", + d->get_parameter(1).is_symbol(sym) && sym == "farkas" && + d->get_num_parameters() >= m.get_num_parents(step) + 2) { // the following parameters are the Farkas coefficients + + SASSERT(m.has_fact(step)); + + coeff_lits_t coeff_lits; + expr_ref_vector pinned(m); + + /* The farkas lemma represents a subproof starting from premise(-set)s A, BNP and BP(ure) and + * ending in a disjunction D. 
We need to compute the contribution of BP, i.e. a formula, which + * is entailed by BP and together with A and BNP entails D. + * + * Let Fark(F) be the farkas coefficient for F. We can use the fact that + * (A*Fark(A) + BNP*Fark(BNP) + BP*Fark(BP) + (neg D)*Fark(D)) => false. (E1) + * We further have that A+B => C implies (A \land B) => C. (E2) + * + * Alternative 1: + * From (E1) immediately get that BP*Fark(BP) is a solution. + * + * Alternative 2: + * We can rewrite (E2) to rewrite (E1) to + * (BP*Fark(BP)) => (neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D))) (E3) + * and since we can derive (A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) from + * A, BNP and D, we also know that it is inconsistent. Therefore + * neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) is a solution. + * + * Finally we also need the following workaround: + * 1) Although we know from theory, that the Farkas coefficients are always nonnegative, + * the Farkas coefficients provided by arith_core are sometimes negative (must be a bug) + * as workaround we take the absolute value of the provided coefficients. 
+ */ + parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient + + STRACE("spacer.farkas", + verbose_stream() << "Farkas input: "<< "\n"; + for (unsigned i = 0; i < m.get_num_parents(step); ++i) { + proof * prem = m.get_parent(step, i); + + rational coef = params[i].get_rational(); + + bool b_pure = m_learner.m_pr.is_b_pure (prem); + verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m_learner.m) << "\n"; + } + ); + + bool can_be_closed = true; + + for (unsigned i = 0; i < m.get_num_parents(step); ++i) { + proof * premise = m.get_parent(step, i); + + if (m_learner.is_b_open (premise)) { + SASSERT(!m_learner.m_pr.is_a_marked(premise)); + + if (m_learner.m_pr.is_b_pure (step)) { + if (!m_use_constant_from_a) { + rational coefficient = params[i].get_rational(); + coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise))); + } + } + else { + can_be_closed = false; + + if (m_use_constant_from_a) { + rational coefficient = params[i].get_rational(); + coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise))); + } } } - else - { - can_be_closed = false; - - if (m_use_constant_from_a) - { - rational coefficient; - VERIFY(params[i].is_rational(coefficient)); - literals.push_back(to_app(m_learner.m.get_fact(premise))); - coefficients.push_back(abs(coefficient)); + else { + if (m_use_constant_from_a) { + rational coefficient = params[i].get_rational(); + coeff_lits.push_back(std::make_pair(abs(coefficient), (app*)m.get_fact(premise))); } } } - else - { - if (m_use_constant_from_a) - { - rational coefficient; - VERIFY(params[i].is_rational(coefficient)); - literals.push_back(to_app(m_learner.m.get_fact(premise))); - coefficients.push_back(abs(coefficient)); + + if (m_use_constant_from_a) { + params += m.get_num_parents(step); // point to the first Farkas coefficient, which corresponds to a formula in the conclusion + + // the conclusion can either be a single 
formula or a disjunction of several formulas, we have to deal with both situations + if (m.get_num_parents(step) + 2 < d->get_num_parameters()) { + unsigned num_args = 1; + expr* conclusion = m.get_fact(step); + expr* const* args = &conclusion; + if (m.is_or(conclusion)) { + app* _or = to_app(conclusion); + num_args = _or->get_num_args(); + args = _or->get_args(); + } + SASSERT(m.get_num_parents(step) + 2 + num_args == d->get_num_parameters()); + + bool_rewriter brw(m_learner.m); + for (unsigned i = 0; i < num_args; ++i) { + expr* premise = args[i]; + + expr_ref negatedPremise(m_learner.m); + brw.mk_not(premise, negatedPremise); + pinned.push_back(negatedPremise); + rational coefficient = params[i].get_rational(); + coeff_lits.push_back(std::make_pair(abs(coefficient), to_app(negatedPremise))); + } } } - } - if (m_use_constant_from_a) - { - params += m_learner.m.get_num_parents(step); // point to the first Farkas coefficient, which corresponds to a formula in the conclusion - - // the conclusion can either be a single formula or a disjunction of several formulas, we have to deal with both situations - if (m_learner.m.get_num_parents(step) + 2 < d->get_num_parameters()) - { - unsigned num_args = 1; - expr* conclusion = m_learner.m.get_fact(step); - expr* const* args = &conclusion; - if (m_learner.m.is_or(conclusion)) - { - app* _or = to_app(conclusion); - num_args = _or->get_num_args(); - args = _or->get_args(); - } - SASSERT(m_learner.m.get_num_parents(step) + 2 + num_args == d->get_num_parameters()); - - bool_rewriter brw(m_learner.m); - for (unsigned i = 0; i < num_args; ++i) - { - expr* premise = args[i]; - - expr_ref negatedPremise(m_learner.m); - brw.mk_not(premise, negatedPremise); - literals.push_back(to_app(negatedPremise)); - - rational coefficient; - VERIFY(params[i].is_rational(coefficient)); - coefficients.push_back(abs(coefficient)); - } + // only if all b-premises can be used directly, add the farkas core and close the step + if (can_be_closed) { + 
m_learner.set_closed(step, true); + + expr_ref res = compute_linear_combination(coeff_lits); + + m_learner.add_lemma_to_core(res); } } + } - // only if all b-premises can be used directly, add the farkas core and close the step - if (can_be_closed) - { - m_learner.set_closed(step, true); + expr_ref unsat_core_plugin_farkas_lemma::compute_linear_combination(const coeff_lits_t& coeff_lits) + { - expr_ref res(m_learner.m); - compute_linear_combination(coefficients, literals, res); - - m_learner.add_lemma_to_core(res); + smt::farkas_util util(m); + if (m_use_constant_from_a) { + util.set_split_literals (m_split_literals); // small optimization: if flag m_split_literals is set, then preserve diff constraints + } + for (auto& p : coeff_lits) { + util.add(p.first, p.second); + } + if (m_use_constant_from_a) { + return util.get(); + } + else { + expr_ref negated_linear_combination = util.get(); + return expr_ref(mk_not(m, negated_linear_combination), m); } } -} - -void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector& coefficients, const ptr_vector& literals, expr_ref& res) -{ - SASSERT(literals.size() == coefficients.size()); - - ast_manager& m = res.get_manager(); - smt::farkas_util util(m); - if (m_use_constant_from_a) - { - util.set_split_literals (m_split_literals); // small optimization: if flag m_split_literals is set, then preserve diff constraints - } - for(unsigned i = 0; i < literals.size(); ++i) - { - util.add(coefficients[i], literals[i]); - } - if (m_use_constant_from_a) - { - res = util.get(); - } - else - { - expr_ref negated_linear_combination = util.get(); - res = mk_not(m, negated_linear_combination); - } -} void unsat_core_plugin_farkas_lemma_optimized::compute_partial_core(proof* step) { @@ -298,34 +252,29 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vectorget_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step d->get_parameter(0).is_symbol(sym) && sym == "arith" && 
// the first two parameters are "arith", "farkas", d->get_parameter(1).is_symbol(sym) && sym == "farkas" && - d->get_num_parameters() >= m_learner.m.get_num_parents(step) + 2) // the following parameters are the Farkas coefficients + d->get_num_parameters() >= m.get_num_parents(step) + 2) // the following parameters are the Farkas coefficients { - SASSERT(m_learner.m.has_fact(step)); + SASSERT(m.has_fact(step)); vector > linear_combination; // collects all summands of the linear combination parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient STRACE("spacer.farkas", - verbose_stream() << "Farkas input: "<< "\n"; - for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i) - { - SASSERT(m_learner.m.is_proof(step->get_arg(i))); - proof *prem = m.get_parent (step, i); + verbose_stream() << "Farkas input: "<< "\n"; + for (unsigned i = 0; i < m.get_num_parents(step); ++i) { + proof * prem = m.get_parent(step, i); - rational coef; - VERIFY(params[i].is_rational(coef)); - - bool b_pure = m_learner.m_pr.is_b_pure (prem); - verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n"; - } - ); + rational coef = params[i].get_rational(); + + bool b_pure = m_learner.m_pr.is_b_pure (prem); + verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m.get_fact(prem), m_learner.m) << "\n"; + } + ); bool can_be_closed = true; - for(unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i) - { - SASSERT(m_learner.m.is_proof(step->get_arg(i))); - proof * premise = m.get_parent (step, i); + for (unsigned i = 0; i < m.get_num_parents(step); ++i) { + proof * premise = m.get_parent(step, i); if (m_learner.m_pr.is_b_marked(premise) && !m_learner.is_closed(premise)) { @@ -333,10 +282,9 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector 0); + SASSERT(!linear_combination.empty()); }); // 1. 
construct ordered basis ptr_vector ordered_basis; obj_map map; unsigned counter = 0; - for (const auto& linear_combination : m_linear_combinations) - { - for (const auto& pair : linear_combination) - { - if (!map.contains(pair.first)) - { + for (const auto& linear_combination : m_linear_combinations) { + for (const auto& pair : linear_combination) { + if (!map.contains(pair.first)) { ordered_basis.push_back(pair.first); map.insert(pair.first, counter++); } @@ -396,11 +341,9 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector literals; - vector coefficients; - for (unsigned l=0; l < matrix.num_cols(); ++l) - { - if (!matrix.get(k,l).is_zero()) - { - literals.push_back(ordered_basis[l]); - coefficients.push_back(matrix.get(k,l)); + coeff_lits_t coeff_lits; + for (unsigned l = 0; l < matrix.num_cols(); ++l) { + if (!matrix.get(k,l).is_zero()) { + coeff_lits.push_back(std::make_pair(matrix.get(k, l), ordered_basis[l])); + } } - } - SASSERT(literals.size() > 0); - expr_ref linear_combination(m); - compute_linear_combination(coefficients, literals, linear_combination); + SASSERT(!coeff_lits.empty()); + expr_ref linear_combination = compute_linear_combination(coeff_lits); m_learner.add_lemma_to_core(linear_combination); } } - void unsat_core_plugin_farkas_lemma_optimized::compute_linear_combination(const vector& coefficients, const ptr_vector& literals, expr_ref& res) - { - SASSERT(literals.size() == coefficients.size()); + expr_ref unsat_core_plugin_farkas_lemma_optimized::compute_linear_combination(const coeff_lits_t& coeff_lits) + { + + smt::farkas_util util(m); + for (auto const & p : coeff_lits) { + util.add(p.first, p.second); + } + expr_ref negated_linear_combination = util.get(); + SASSERT(m.is_not(negated_linear_combination)); + return expr_ref(mk_not(m, negated_linear_combination), m); + //TODO: rewrite the get-method to return nonnegated stuff? 
+ } - ast_manager& m = res.get_manager(); - smt::farkas_util util(m); - for(unsigned i = 0; i < literals.size(); ++i) - { - util.add(coefficients[i], literals[i]); - } - expr_ref negated_linear_combination = util.get(); - SASSERT(m.is_not(negated_linear_combination)); - res = mk_not(m, negated_linear_combination); //TODO: rewrite the get-method to return nonnegated stuff? - } - - - void unsat_core_plugin_farkas_lemma_bounded::finalize() { + void unsat_core_plugin_farkas_lemma_bounded::finalize() { if (m_linear_combinations.empty()) { return; } @@ -486,13 +421,12 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector coeffs; - for (unsigned i=0; i < matrix.num_rows(); ++i) { + for (unsigned i = 0; i < matrix.num_rows(); ++i) { coeffs.push_back(expr_ref_vector(m)); } vector bounded_vectors; - for (unsigned j=0; j < matrix.num_cols(); ++j) - { + for (unsigned j = 0; j < matrix.num_cols(); ++j) { bounded_vectors.push_back(expr_ref_vector(m)); } @@ -501,37 +435,24 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector s = mk_smt_solver(m, p, symbol::null); // TODO: incremental version? + solver_ref s = mk_smt_solver(m, p, symbol::null); // TODO: incremental version? 
// add new variables w_in, - for (unsigned i=0; i < matrix.num_rows(); ++i) - { + for (unsigned i = 0; i < matrix.num_rows(); ++i) { std::string name = "w_" + std::to_string(i) + std::to_string(n); - - func_decl_ref decl(m); - decl = m.mk_func_decl(symbol(name.c_str()), 0, (sort*const*)nullptr, util.mk_int()); - coeffs[i].push_back(m.mk_const(decl)); + coeffs[i].push_back(m.mk_const(name, util.mk_int())); } // we need s_jn - for (unsigned j=0; j < matrix.num_cols(); ++j) - { + for (unsigned j = 0; j < matrix.num_cols(); ++j) { std::string name = "s_" + std::to_string(j) + std::to_string(n); - - func_decl_ref decl(m); - decl = m.mk_func_decl(symbol(name.c_str()), 0, (sort*const*)nullptr, util.mk_int()); - - expr_ref s_jn(m); - s_jn = m.mk_const(decl); - - bounded_vectors[j].push_back(s_jn); + bounded_vectors[j].push_back(m.mk_const(name, util.mk_int())); } // assert bounds for all s_jn - for (unsigned l=0; l < n; ++l) { - for (unsigned j=0; j < matrix.num_cols(); ++j) { + for (unsigned l = 0; l < n; ++l) { + for (unsigned j = 0; j < matrix.num_cols(); ++j) { expr* s_jn = bounded_vectors[j][l].get(); - expr_ref lb(util.mk_le(util.mk_int(0), s_jn), m); expr_ref ub(util.mk_le(s_jn, util.mk_int(1)), m); s->assert_expr(lb); @@ -540,47 +461,40 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vectorassert_expr(eq); } + expr_ref eq(m.mk_eq(a_ij, sum), m); + s->assert_expr(eq); } - + } + // check result - lbool res = s->check_sat(0,nullptr); + lbool res = s->check_sat(0, nullptr); // if sat extract model and add corresponding linear combinations to core if (res == lbool::l_true) { model_ref model; s->get_model(model); - for (unsigned k=0; k < n; ++k) { - ptr_vector literals; - vector coefficients; - for (unsigned j=0; j < matrix.num_cols(); ++j) { + for (unsigned k = 0; k < n; ++k) { + coeff_lits_t coeff_lits; + for (unsigned j = 0; j < matrix.num_cols(); ++j) { expr_ref evaluation(m); model.get()->eval(bounded_vectors[j][k].get(), evaluation, false); 
if (!util.is_zero(evaluation)) { - literals.push_back(ordered_basis[j]); - coefficients.push_back(rational(1)); + coeff_lits.push_back(std::make_pair(rational(1), ordered_basis[j])); } } - SASSERT(!literals.empty()); // since then previous outer loop would have found solution already - expr_ref linear_combination(m); - compute_linear_combination(coefficients, literals, linear_combination); + SASSERT(!coeff_lits.empty()); // since then previous outer loop would have found solution already + expr_ref linear_combination = compute_linear_combination(coeff_lits); m_learner.add_lemma_to_core(linear_combination); } @@ -589,7 +503,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector todo_subproof; + ptr_buffer todo_subproof; - for (unsigned i = 0, sz = m.get_num_parents(step); i < sz; ++i) - { - proof* premise = m.get_parent (step, i); - { - if (m_learner.m_pr.is_b_marked(premise)) - { - todo_subproof.push_back(premise); - } + for (proof* premise : m.get_parents(step)) { + if (m_learner.m_pr.is_b_marked(premise)) { + todo_subproof.push_back(premise); } } while (!todo_subproof.empty()) @@ -685,10 +593,7 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vectorget_arg(i))); - proof* premise = m.get_parent (current, i); + for (proof* premise : m.get_parents(current)) { todo_subproof.push_back(premise); } } @@ -790,8 +695,8 @@ void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector> coeff_lits_t; + ast_manager& m; public: - unsat_core_plugin(unsat_core_learner& learner) : m_learner(learner){}; - virtual ~unsat_core_plugin(){}; + unsat_core_plugin(unsat_core_learner& learner); + virtual ~unsat_core_plugin() {}; virtual void compute_partial_core(proof* step) = 0; virtual void finalize(){}; @@ -68,24 +70,23 @@ private: /* * compute linear combination of literals 'literals' having coefficients 'coefficients' and save result in res */ - void compute_linear_combination(const vector& coefficients, const 
ptr_vector& literals, expr_ref& res); + expr_ref compute_linear_combination(const coeff_lits_t& coeff_lits); }; class unsat_core_plugin_farkas_lemma_optimized : public unsat_core_plugin { public: - unsat_core_plugin_farkas_lemma_optimized(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner), m(m) {}; + unsat_core_plugin_farkas_lemma_optimized(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner) {}; void compute_partial_core(proof* step) override; void finalize() override; protected: vector > > m_linear_combinations; - ast_manager& m; /* * compute linear combination of literals 'literals' having coefficients 'coefficients' and save result in res */ - void compute_linear_combination(const vector& coefficients, const ptr_vector& literals, expr_ref& res); + expr_ref compute_linear_combination(const coeff_lits_t& coeff_lits); }; class unsat_core_plugin_farkas_lemma_bounded : public unsat_core_plugin_farkas_lemma_optimized { @@ -104,7 +105,6 @@ private: void compute_partial_core(proof* step) override; void finalize() override; private: - ast_manager& m; ast_mark m_visited; // saves for each node i whether the subproof with root i has already been added to the min-cut-problem obj_map m_proof_to_node_minus; // maps proof-steps to the corresponding minus-nodes (the ones which are closer to source) diff --git a/src/solver/check_sat_result.h b/src/solver/check_sat_result.h index 716c68b00..762735b8d 100644 --- a/src/solver/check_sat_result.h +++ b/src/solver/check_sat_result.h @@ -47,7 +47,7 @@ public: virtual ~check_sat_result() {} void inc_ref() { m_ref_count++; } void dec_ref() { SASSERT(m_ref_count > 0); m_ref_count--; if (m_ref_count == 0) dealloc(this); } - void set_status(lbool r) { m_status = r; } + lbool set_status(lbool r) { return m_status = r; } lbool status() const { return m_status; } virtual void collect_statistics(statistics & st) const = 0; virtual void get_unsat_core(ptr_vector & r) = 0;