diff --git a/.gitignore b/.gitignore index b7e4a0186..e189a9569 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,7 @@ bld_dbg/* bld_rel/* bld_dbg_x64/* bld_rel_x64/* +.vscode # Auto generated files. config.log config.status diff --git a/CMakeLists.txt b/CMakeLists.txt index 9877af89e..5934b7c17 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,7 +34,7 @@ endif() ################################################################################ set(Z3_VERSION_MAJOR 4) set(Z3_VERSION_MINOR 8) -set(Z3_VERSION_PATCH 4) +set(Z3_VERSION_PATCH 5) set(Z3_VERSION_TWEAK 0) set(Z3_VERSION "${Z3_VERSION_MAJOR}.${Z3_VERSION_MINOR}.${Z3_VERSION_PATCH}.${Z3_VERSION_TWEAK}") set(Z3_FULL_VERSION_STR "${Z3_VERSION}") # Note this might be modified diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 06618a8f2..ba254db4e 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,5 +1,15 @@ RELEASE NOTES +Version 4.8.4 +============= + +- Notes + - fixes bugs + - a substantial update to how the seq theory solver handles regular + expressions. Other performance improvements to the seq solver. + - Managed .NET DLLs include dotnet standard 1.4 on supported platforms. + - Windows Managed DLLs are strong signed in the released binaries. + Version 4.8.3 ============= - New features diff --git a/scripts/mk_project.py b/scripts/mk_project.py index 8cf60ab62..58d087f32 100644 --- a/scripts/mk_project.py +++ b/scripts/mk_project.py @@ -8,7 +8,7 @@ from mk_util import * def init_version(): - set_version(4, 8, 4, 0) + set_version(4, 8, 5, 0) # Z3 Project definition def init_project_def(): diff --git a/src/qe/qe_lite.cpp b/src/qe/qe_lite.cpp index 3226f7554..4585c88c1 100644 --- a/src/qe/qe_lite.cpp +++ b/src/qe/qe_lite.cpp @@ -2370,6 +2370,7 @@ void qe_lite::operator()(uint_set const& index_set, bool index_of_bound, expr_re (*m_impl)(index_set, index_of_bound, fmls); } +namespace { class qe_lite_tactic : public tactic { struct imp { @@ -2494,7 +2495,6 @@ public: (*m_imp)(in, result); } - void collect_statistics(statistics & st) const override { // m_imp->collect_statistics(st); } @@ -2503,14 +2503,14 @@ public: // m_imp->reset_statistics(); } - void cleanup() override { ast_manager & m = m_imp->m; - dealloc(m_imp); - m_imp = alloc(imp, m, m_params); + m_imp->~imp(); + m_imp = new (m_imp) imp(m, m_params); } }; +} tactic * mk_qe_lite_tactic(ast_manager & m, params_ref const & p) { return alloc(qe_lite_tactic, m, p); diff --git a/src/qe/qe_lite.h b/src/qe/qe_lite.h index 63ad8bedd..3251fa3f8 100644 --- a/src/qe/qe_lite.h +++ b/src/qe/qe_lite.h @@ -18,8 +18,7 @@ Revision History: --*/ -#ifndef QE_LITE_H_ -#define QE_LITE_H_ +#pragma once #include "ast/ast.h" #include "util/uint_set.h" @@ -67,5 +66,3 @@ tactic * mk_qe_lite_tactic(ast_manager & m, params_ref const & p = params_ref()) /* ADD_TACTIC("qe-light", "apply light-weight quantifier elimination.", "mk_qe_lite_tactic(m, p)") */ - -#endif diff --git a/src/tactic/bv/bv_size_reduction_tactic.cpp b/src/tactic/bv/bv_size_reduction_tactic.cpp index 964102825..fd1d1499b 100644 --- a/src/tactic/bv/bv_size_reduction_tactic.cpp +++ b/src/tactic/bv/bv_size_reduction_tactic.cpp @@ -27,28 +27,8 @@ Notes: #include "tactic/generic_model_converter.h" #include "ast/ast_smt2_pp.h" +namespace { class bv_size_reduction_tactic : public tactic { - struct imp; - imp * m_imp; -public: - bv_size_reduction_tactic(ast_manager & m); - - tactic * translate(ast_manager & m) override { - return alloc(bv_size_reduction_tactic, m); - } - - ~bv_size_reduction_tactic() override; - - void 
operator()(goal_ref const & g, goal_ref_buffer & result) override; - - void cleanup() override; -}; - -tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p) { - return clean(alloc(bv_size_reduction_tactic, m)); -} - -struct bv_size_reduction_tactic::imp { typedef rational numeral; typedef generic_model_converter bv_size_reduction_mc; @@ -63,12 +43,29 @@ struct bv_size_reduction_tactic::imp { scoped_ptr m_replacer; bool m_produce_models; - imp(ast_manager & _m): - m(_m), +public: + bv_size_reduction_tactic(ast_manager & m) : + m(m), m_util(m), m_replacer(mk_default_expr_replacer(m)) { } + tactic * translate(ast_manager & m) override { + return alloc(bv_size_reduction_tactic, m); + } + + void operator()(goal_ref const & g, goal_ref_buffer & result) override; + + void cleanup() override { + m_signed_lowers.reset(); + m_signed_uppers.reset(); + m_unsigned_lowers.reset(); + m_unsigned_uppers.reset(); + m_mc = nullptr; + m_fmc = nullptr; + m_replacer->reset(); + } + void update_signed_lower(app * v, numeral const & k) { // k <= v obj_map::obj_map_entry * entry = m_signed_lowers.insert_if_not_there2(v, k); @@ -178,7 +175,7 @@ struct bv_size_reduction_tactic::imp { throw tactic_exception(m.limit().get_cancel_msg()); } - void operator()(goal & g, model_converter_ref & mc) { + void run(goal & g, model_converter_ref & mc) { if (g.inconsistent()) return; TRACE("before_bv_size_reduction", g.display(tout);); @@ -373,14 +370,6 @@ struct bv_size_reduction_tactic::imp { }; -bv_size_reduction_tactic::bv_size_reduction_tactic(ast_manager & m) { - m_imp = alloc(imp, m); -} - -bv_size_reduction_tactic::~bv_size_reduction_tactic() { - dealloc(m_imp); -} - void bv_size_reduction_tactic::operator()(goal_ref const & g, goal_ref_buffer & result) { SASSERT(g->is_well_sorted()); @@ -388,17 +377,14 @@ void bv_size_reduction_tactic::operator()(goal_ref const & g, fail_if_unsat_core_generation("bv-size-reduction", g); result.reset(); model_converter_ref mc; - m_imp->operator()(*(g.get()), mc); + run(*(g.get()), mc); g->inc_depth(); g->add(mc.get()); result.push_back(g.get()); SASSERT(g->is_well_sorted()); } - - -void bv_size_reduction_tactic::cleanup() { - ast_manager & m = m_imp->m; - m_imp->~imp(); - new (m_imp) imp(m); } +tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p) { + return clean(alloc(bv_size_reduction_tactic, m)); +} diff --git a/src/tactic/bv/bv_size_reduction_tactic.h b/src/tactic/bv/bv_size_reduction_tactic.h index 4a24a1d78..1bb512f3f 100644 --- a/src/tactic/bv/bv_size_reduction_tactic.h +++ b/src/tactic/bv/bv_size_reduction_tactic.h @@ -21,8 +21,7 @@ Author: Notes: --*/ -#ifndef BV_SIZE_REDUCTION_TACTIC_H_ -#define BV_SIZE_REDUCTION_TACTIC_H_ +#pragma once #include "util/params.h" class ast_manager; @@ -32,5 +31,3 @@ tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p = par /* ADD_TACTIC("reduce-bv-size", "try to reduce bit-vector sizes using inequalities.", "mk_bv_size_reduction_tactic(m, p)") */ - -#endif diff --git a/src/tactic/bv/elim_small_bv_tactic.cpp b/src/tactic/bv/elim_small_bv_tactic.cpp index 0b5b98308..38b4638ee 100644 --- a/src/tactic/bv/elim_small_bv_tactic.cpp +++ b/src/tactic/bv/elim_small_bv_tactic.cpp @@ -28,6 +28,7 @@ Revision History: #include "tactic/bv/elim_small_bv_tactic.h" +namespace { class elim_small_bv_tactic : public tactic { struct rw_cfg : public default_rewriter_cfg { @@ -183,20 +184,6 @@ class elim_small_bv_tactic : public tactic { return true; } - bool pre_visit(expr * t) { - 
TRACE("elim_small_bv_pre", tout << "pre_visit: " << mk_ismt2_pp(t, m) << std::endl;); - if (is_quantifier(t)) { - quantifier * q = to_quantifier(t); - TRACE("elim_small_bv", tout << "pre_visit quantifier [" << q->get_id() << "]: " << mk_ismt2_pp(q->get_expr(), m) << std::endl;); - sort_ref_vector new_bindings(m); - for (unsigned i = 0; i < q->get_num_decls(); i++) - new_bindings.push_back(q->get_decl_sort(i)); - SASSERT(new_bindings.size() == q->get_num_decls()); - m_bindings.append(new_bindings); - } - return true; - } - void updt_params(params_ref const & p) { m_params = p; m_max_memory = megabytes_to_bytes(p.get_uint("max_memory", UINT_MAX)); @@ -214,69 +201,24 @@ class elim_small_bv_tactic : public tactic { } }; - struct imp { - ast_manager & m; - rw m_rw; + ast_manager & m; + rw m_rw; + params_ref m_params; - imp(ast_manager & _m, params_ref const & p) : - m(_m), - m_rw(m, p) { - } - - void updt_params(params_ref const & p) { - m_rw.cfg().updt_params(p); - } - - void operator()(goal_ref const & g, goal_ref_buffer & result) { - SASSERT(g->is_well_sorted()); - tactic_report report("elim-small-bv", *g); - bool produce_proofs = g->proofs_enabled(); - fail_if_proof_generation("elim-small-bv", g); - fail_if_unsat_core_generation("elim-small-bv", g); - m_rw.cfg().m_produce_models = g->models_enabled(); - - m_rw.m_cfg.m_goal = g.get(); - expr_ref new_curr(m); - proof_ref new_pr(m); - unsigned size = g->size(); - for (unsigned idx = 0; idx < size; idx++) { - expr * curr = g->form(idx); - m_rw(curr, new_curr, new_pr); - if (produce_proofs) { - proof * pr = g->pr(idx); - new_pr = m.mk_modus_ponens(pr, new_pr); - } - g->update(idx, new_curr, new_pr, g->dep(idx)); - } - g->add(m_rw.m_cfg.m_mc.get()); - - report_tactic_progress(":elim-small-bv-num-eliminated", m_rw.m_cfg.m_num_eliminated); - g->inc_depth(); - result.push_back(g.get()); - TRACE("elim-small-bv", g->display(tout);); - SASSERT(g->is_well_sorted()); - } - }; - - imp * m_imp; - params_ref m_params; public: elim_small_bv_tactic(ast_manager & m, params_ref const & p) : + m(m), + m_rw(m, p), m_params(p) { - m_imp = alloc(imp, m, p); } tactic * translate(ast_manager & m) override { return alloc(elim_small_bv_tactic, m, m_params); } - ~elim_small_bv_tactic() override { - dealloc(m_imp); - } - void updt_params(params_ref const & p) override { m_params = p; - m_imp->m_rw.cfg().updt_params(p); + m_rw.cfg().updt_params(p); } void collect_param_descrs(param_descrs & r) override { @@ -285,18 +227,44 @@ public: r.insert("max_bits", CPK_UINT, "(default: 4) maximum bit-vector size of quantified bit-vectors to be eliminated."); } - void operator()(goal_ref const & in, + void operator()(goal_ref const & g, goal_ref_buffer & result) override { - (*m_imp)(in, result); + SASSERT(g->is_well_sorted()); + tactic_report report("elim-small-bv", *g); + bool produce_proofs = g->proofs_enabled(); + fail_if_proof_generation("elim-small-bv", g); + fail_if_unsat_core_generation("elim-small-bv", g); + m_rw.cfg().m_produce_models = g->models_enabled(); + + m_rw.m_cfg.m_goal = g.get(); + expr_ref new_curr(m); + proof_ref new_pr(m); + unsigned size = g->size(); + for (unsigned idx = 0; idx < size; idx++) { + expr * curr = g->form(idx); + m_rw(curr, new_curr, new_pr); + if (produce_proofs) { + proof * pr = g->pr(idx); + new_pr = m.mk_modus_ponens(pr, new_pr); + } + g->update(idx, new_curr, new_pr, g->dep(idx)); + } + g->add(m_rw.m_cfg.m_mc.get()); + + report_tactic_progress(":elim-small-bv-num-eliminated", m_rw.m_cfg.m_num_eliminated); + g->inc_depth(); + 
result.push_back(g.get()); + TRACE("elim-small-bv", g->display(tout);); + SASSERT(g->is_well_sorted()); } void cleanup() override { - ast_manager & m = m_imp->m; - m_imp->~imp(); - m_imp = new (m_imp) imp(m, m_params); + m_rw.~rw(); + new (&m_rw) rw(m, m_params); } }; +} tactic * mk_elim_small_bv_tactic(ast_manager & m, params_ref const & p) { return clean(alloc(elim_small_bv_tactic, m, p)); diff --git a/src/tactic/bv/elim_small_bv_tactic.h b/src/tactic/bv/elim_small_bv_tactic.h index 675ec3de7..e4a91f70f 100644 --- a/src/tactic/bv/elim_small_bv_tactic.h +++ b/src/tactic/bv/elim_small_bv_tactic.h @@ -16,8 +16,7 @@ Author: Revision History: --*/ -#ifndef ELIM_SMALL_BV_H_ -#define ELIM_SMALL_BV_H_ +#pragma once #include "util/params.h" class ast_manager; @@ -28,5 +27,3 @@ tactic * mk_elim_small_bv_tactic(ast_manager & m, params_ref const & p = params_ /* ADD_TACTIC("elim-small-bv", "eliminate small, quantified bit-vectors by expansion.", "mk_elim_small_bv_tactic(m, p)") */ - -#endif diff --git a/src/tactic/core/elim_uncnstr_tactic.cpp b/src/tactic/core/elim_uncnstr_tactic.cpp index 577db30cd..5b77fb9a2 100644 --- a/src/tactic/core/elim_uncnstr_tactic.cpp +++ b/src/tactic/core/elim_uncnstr_tactic.cpp @@ -28,881 +28,864 @@ Notes: #include "ast/ast_smt2_pp.h" #include "ast/ast_ll_pp.h" +namespace { class elim_uncnstr_tactic : public tactic { - struct imp { - // unconstrained vars collector + // unconstrained vars collector - typedef generic_model_converter mc; + typedef generic_model_converter mc; - struct rw_cfg : public default_rewriter_cfg { - bool m_produce_proofs; - obj_hashtable & m_vars; - ref m_mc; - arith_util m_a_util; - bv_util m_bv_util; - array_util m_ar_util; - datatype_util m_dt_util; - app_ref_vector m_fresh_vars; - obj_map m_cache; - app_ref_vector m_cache_domain; - unsigned long long m_max_memory; - unsigned m_max_steps; - - rw_cfg(ast_manager & m, bool produce_proofs, obj_hashtable & vars, mc * _m, - unsigned long long max_memory, unsigned max_steps): - m_produce_proofs(produce_proofs), - m_vars(vars), - m_mc(_m), - m_a_util(m), - m_bv_util(m), - m_ar_util(m), - m_dt_util(m), - m_fresh_vars(m), - m_cache_domain(m), - m_max_memory(max_memory), - m_max_steps(max_steps) { - } - - ast_manager & m() const { return m_a_util.get_manager(); } - - bool max_steps_exceeded(unsigned num_steps) const { - cooperate("elim-uncnstr-vars"); - if (memory::get_allocation_size() > m_max_memory) - throw tactic_exception(TACTIC_MAX_MEMORY_MSG); - return num_steps > m_max_steps; - } - - bool uncnstr(expr * arg) const { - return m_vars.contains(arg); - } - - bool uncnstr(unsigned num, expr * const * args) const { - for (unsigned i = 0; i < num; i++) - if (!uncnstr(args[i])) - return false; - return true; - } - - /** - \brief Create a fresh variable for abstracting (f args[0] ... args[num-1]) - Return true if it a new variable was created, and false if the variable already existed for this - application. 
Store the variable in v - */ - bool mk_fresh_uncnstr_var_for(app * t, app * & v) { - if (m_cache.find(t, v)) { - return false; // variable already existed for this application - } - - v = m().mk_fresh_const(nullptr, m().get_sort(t)); - TRACE("elim_uncnstr_bug", tout << "eliminating:\n" << mk_ismt2_pp(t, m()) << "\n";); - TRACE("elim_uncnstr_bug_ll", tout << "eliminating:\n" << mk_bounded_pp(t, m()) << "\n";); - m_fresh_vars.push_back(v); - if (m_mc) m_mc->hide(v); - m_cache_domain.push_back(t); - m_cache.insert(t, v); - return true; - } - - bool mk_fresh_uncnstr_var_for(func_decl * f, unsigned num, expr * const * args, app * & v) { - return mk_fresh_uncnstr_var_for(m().mk_app(f, num, args), v); - } - - bool mk_fresh_uncnstr_var_for(func_decl * f, expr * arg1, expr * arg2, app * & v) { - return mk_fresh_uncnstr_var_for(m().mk_app(f, arg1, arg2), v); - } - - bool mk_fresh_uncnstr_var_for(func_decl * f, expr * arg, app * & v) { - return mk_fresh_uncnstr_var_for(m().mk_app(f, arg), v); - } - - void add_def(expr * v, expr * def) { - SASSERT(uncnstr(v)); - SASSERT(to_app(v)->get_num_args() == 0); - if (m_mc) - m_mc->add(to_app(v)->get_decl(), def); - } - - void add_defs(unsigned num, expr * const * args, expr * u, expr * identity) { - if (m_mc) { - add_def(args[0], u); - for (unsigned i = 1; i < num; i++) - add_def(args[i], identity); - } - } - - // return a term that is different from t. - bool mk_diff(expr * t, expr_ref & r) { - sort * s = m().get_sort(t); - if (m().is_bool(s)) { - r = m().mk_not(t); - return true; - } - family_id fid = s->get_family_id(); - if (fid == m_a_util.get_family_id()) { - r = m_a_util.mk_add(t, m_a_util.mk_numeral(rational(1), s)); - return true; - } - if (fid == m_bv_util.get_family_id()) { - r = m().mk_app(m_bv_util.get_family_id(), OP_BNOT, t); - return true; - } - if (fid == m_ar_util.get_family_id()) { - if (m().is_uninterp(get_array_range(s))) - return false; - unsigned arity = get_array_arity(s); - for (unsigned i = 0; i < arity; i++) - if (m().is_uninterp(get_array_domain(s, i))) - return false; - // building - // r = (store t i1 ... in d) - // where i1 ... in are arbitrary values - // and d is a term different from (select t i1 ... in) - ptr_buffer new_args; - new_args.push_back(t); - for (unsigned i = 0; i < arity; i++) - new_args.push_back(m().get_some_value(get_array_domain(s, i))); - expr_ref sel(m()); - sel = m().mk_app(fid, OP_SELECT, new_args.size(), new_args.c_ptr()); - expr_ref diff_sel(m()); - if (!mk_diff(sel, diff_sel)) - return false; - new_args.push_back(diff_sel); - r = m().mk_app(fid, OP_STORE, new_args.size(), new_args.c_ptr()); - return true; - } - if (fid == m_dt_util.get_family_id()) { - // In the current implementation, I only handle the case where - // the datatype has a recursive constructor. - ptr_vector const & constructors = *m_dt_util.get_datatype_constructors(s); - for (func_decl * constructor : constructors) { - unsigned num = constructor->get_arity(); - unsigned target = UINT_MAX; - for (unsigned i = 0; i < num; i++) { - sort * s_arg = constructor->get_domain(i); - if (s == s_arg) { - target = i; - continue; - } - if (m().is_uninterp(s_arg)) - break; - } - if (target == UINT_MAX) - continue; - // use the constructor the distinct term constructor(...,t,...) 
- ptr_buffer new_args; - for (unsigned i = 0; i < num; i++) { - if (i == target) { - new_args.push_back(t); - } - else { - new_args.push_back(m().get_some_value(constructor->get_domain(i))); - } - } - r = m().mk_app(constructor, new_args.size(), new_args.c_ptr()); - return true; - } - // TODO: handle more cases. + struct rw_cfg : public default_rewriter_cfg { + bool m_produce_proofs; + obj_hashtable & m_vars; + ref m_mc; + arith_util m_a_util; + bv_util m_bv_util; + array_util m_ar_util; + datatype_util m_dt_util; + app_ref_vector m_fresh_vars; + obj_map m_cache; + app_ref_vector m_cache_domain; + unsigned long long m_max_memory; + unsigned m_max_steps; + + rw_cfg(ast_manager & m, bool produce_proofs, obj_hashtable & vars, mc * _m, + unsigned long long max_memory, unsigned max_steps): + m_produce_proofs(produce_proofs), + m_vars(vars), + m_mc(_m), + m_a_util(m), + m_bv_util(m), + m_ar_util(m), + m_dt_util(m), + m_fresh_vars(m), + m_cache_domain(m), + m_max_memory(max_memory), + m_max_steps(max_steps) { + } + + ast_manager & m() const { return m_a_util.get_manager(); } + + bool max_steps_exceeded(unsigned num_steps) const { + cooperate("elim-uncnstr-vars"); + if (memory::get_allocation_size() > m_max_memory) + throw tactic_exception(TACTIC_MAX_MEMORY_MSG); + return num_steps > m_max_steps; + } + + bool uncnstr(expr * arg) const { + return m_vars.contains(arg); + } + + bool uncnstr(unsigned num, expr * const * args) const { + for (unsigned i = 0; i < num; i++) + if (!uncnstr(args[i])) return false; + return true; + } + + /** + \brief Create a fresh variable for abstracting (f args[0] ... args[num-1]) + Return true if it a new variable was created, and false if the variable already existed for this + application. Store the variable in v + */ + bool mk_fresh_uncnstr_var_for(app * t, app * & v) { + if (m_cache.find(t, v)) { + return false; // variable already existed for this application + } + + v = m().mk_fresh_const(nullptr, m().get_sort(t)); + TRACE("elim_uncnstr_bug", tout << "eliminating:\n" << mk_ismt2_pp(t, m()) << "\n";); + TRACE("elim_uncnstr_bug_ll", tout << "eliminating:\n" << mk_bounded_pp(t, m()) << "\n";); + m_fresh_vars.push_back(v); + if (m_mc) m_mc->hide(v); + m_cache_domain.push_back(t); + m_cache.insert(t, v); + return true; + } + + bool mk_fresh_uncnstr_var_for(func_decl * f, unsigned num, expr * const * args, app * & v) { + return mk_fresh_uncnstr_var_for(m().mk_app(f, num, args), v); + } + + bool mk_fresh_uncnstr_var_for(func_decl * f, expr * arg1, expr * arg2, app * & v) { + return mk_fresh_uncnstr_var_for(m().mk_app(f, arg1, arg2), v); + } + + bool mk_fresh_uncnstr_var_for(func_decl * f, expr * arg, app * & v) { + return mk_fresh_uncnstr_var_for(m().mk_app(f, arg), v); + } + + void add_def(expr * v, expr * def) { + SASSERT(uncnstr(v)); + SASSERT(to_app(v)->get_num_args() == 0); + if (m_mc) + m_mc->add(to_app(v)->get_decl(), def); + } + + void add_defs(unsigned num, expr * const * args, expr * u, expr * identity) { + if (m_mc) { + add_def(args[0], u); + for (unsigned i = 1; i < num; i++) + add_def(args[i], identity); + } + } + + // return a term that is different from t. 
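
A minimal sketch of the invariant that the mk_diff routine following this comment provides (the returned term can never equal its input), written against the public z3++ API rather than the internal ast_manager interface; the solver-based check is purely illustrative and is not part of this change:

#include "z3++.h"
#include <iostream>

// Checks that the "different term" chosen per sort really differs:
// t + 1 for integers, !b for Booleans, ~x for bit-vectors.
int main() {
    z3::context c;
    z3::solver s(c);

    z3::expr t = c.int_const("t");
    s.add(t == t + 1);               // arithmetic diff
    std::cout << s.check() << "\n";  // unsat

    s.reset();
    z3::expr b = c.bool_const("b");
    s.add(b == !b);                  // Boolean diff
    std::cout << s.check() << "\n";  // unsat

    s.reset();
    z3::expr x = c.bv_const("x", 8);
    s.add(x == ~x);                  // bit-vector diff
    std::cout << s.check() << "\n";  // unsat
    return 0;
}
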
+ bool mk_diff(expr * t, expr_ref & r) { + sort * s = m().get_sort(t); + if (m().is_bool(s)) { + r = m().mk_not(t); + return true; + } + family_id fid = s->get_family_id(); + if (fid == m_a_util.get_family_id()) { + r = m_a_util.mk_add(t, m_a_util.mk_numeral(rational(1), s)); + return true; + } + if (fid == m_bv_util.get_family_id()) { + r = m().mk_app(m_bv_util.get_family_id(), OP_BNOT, t); + return true; + } + if (fid == m_ar_util.get_family_id()) { + if (m().is_uninterp(get_array_range(s))) + return false; + unsigned arity = get_array_arity(s); + for (unsigned i = 0; i < arity; i++) + if (m().is_uninterp(get_array_domain(s, i))) + return false; + // building + // r = (store t i1 ... in d) + // where i1 ... in are arbitrary values + // and d is a term different from (select t i1 ... in) + ptr_buffer new_args; + new_args.push_back(t); + for (unsigned i = 0; i < arity; i++) + new_args.push_back(m().get_some_value(get_array_domain(s, i))); + expr_ref sel(m()); + sel = m().mk_app(fid, OP_SELECT, new_args.size(), new_args.c_ptr()); + expr_ref diff_sel(m()); + if (!mk_diff(sel, diff_sel)) + return false; + new_args.push_back(diff_sel); + r = m().mk_app(fid, OP_STORE, new_args.size(), new_args.c_ptr()); + return true; + } + if (fid == m_dt_util.get_family_id()) { + // In the current implementation, I only handle the case where + // the datatype has a recursive constructor. + ptr_vector const & constructors = *m_dt_util.get_datatype_constructors(s); + for (func_decl * constructor : constructors) { + unsigned num = constructor->get_arity(); + unsigned target = UINT_MAX; + for (unsigned i = 0; i < num; i++) { + sort * s_arg = constructor->get_domain(i); + if (s == s_arg) { + target = i; + continue; + } + if (m().is_uninterp(s_arg)) + break; + } + if (target == UINT_MAX) + continue; + // use the constructor the distinct term constructor(...,t,...) + ptr_buffer new_args; + for (unsigned i = 0; i < num; i++) { + if (i == target) { + new_args.push_back(t); + } + else { + new_args.push_back(m().get_some_value(constructor->get_domain(i))); + } + } + r = m().mk_app(constructor, new_args.size(), new_args.c_ptr()); + return true; } + // TODO: handle more cases. return false; } + return false; + } - app * process_eq(func_decl * f, expr * arg1, expr * arg2) { - expr * v; - expr * t; - if (uncnstr(arg1)) { - v = arg1; - t = arg2; - } - else if (uncnstr(arg2)) { - v = arg2; - t = arg1; - } - else { - return nullptr; - } - - sort * s = m().get_sort(arg1); - - // Remark: - // I currently do not support unconstrained vars that have - // uninterpreted sorts, for the following reasons: - // - Soundness - // (forall ((x S) (y S)) (= x y)) - // (not (= c1 c2)) - // - // The constants c1 and c2 have only one occurrence in - // the formula above, but they are not really unconstrained. - // The quantifier forces S to have interpretations of size 1. - // If we replace (= c1 c2) with fresh k. The formula will - // become satisfiable. - // - // - Even if the formula is quantifier free, I would still - // have to build an interpretation for the eliminated - // variables. - // - if (!m().is_fully_interp(s)) - return nullptr; - - // If the interpreted sort has only one element, - // then it is unsound to eliminate the unconstrained variable in the equality - sort_size sz = s->get_num_elements(); - - if (sz.is_finite() && sz.size() <= 1) - return nullptr; - - if (!m_mc) { - // easy case, model generation is disabled. 
- app * u; - mk_fresh_uncnstr_var_for(f, arg1, arg2, u); - return u; - } - - expr_ref d(m()); - if (mk_diff(t, d)) { - app * u; - if (!mk_fresh_uncnstr_var_for(f, arg1, arg2, u)) - return u; - add_def(v, m().mk_ite(u, t, d)); - return u; - } + app * process_eq(func_decl * f, expr * arg1, expr * arg2) { + expr * v; + expr * t; + if (uncnstr(arg1)) { + v = arg1; + t = arg2; + } + else if (uncnstr(arg2)) { + v = arg2; + t = arg1; + } + else { return nullptr; } - app * process_basic_app(func_decl * f, unsigned num, expr * const * args) { - SASSERT(f->get_family_id() == m().get_basic_family_id()); - switch (f->get_decl_kind()) { - case OP_ITE: - SASSERT(num == 3); - if (uncnstr(args[1]) && uncnstr(args[2])) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - add_def(args[1], r); - add_def(args[2], r); - return r; - } - if (uncnstr(args[0]) && uncnstr(args[1])) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - add_def(args[0], m().mk_true()); - add_def(args[1], r); - return r; - } - if (uncnstr(args[0]) && uncnstr(args[2])) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - add_def(args[0], m().mk_false()); - add_def(args[2], r); - return r; - } - return nullptr; - case OP_NOT: - SASSERT(num == 1); - if (uncnstr(args[0])) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) - add_def(args[0], m().mk_not(r)); - return r; - } - return nullptr; - case OP_AND: - if (num > 0 && uncnstr(num, args)) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) - add_defs(num, args, r, m().mk_true()); - return r; - } - return nullptr; - case OP_OR: - if (num > 0 && uncnstr(num, args)) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) - add_defs(num, args, r, m().mk_false()); - return r; - } - return nullptr; - case OP_EQ: - SASSERT(num == 2); - return process_eq(f, args[0], args[1]); - default: - return nullptr; - } + sort * s = m().get_sort(arg1); + + // Remark: + // I currently do not support unconstrained vars that have + // uninterpreted sorts, for the following reasons: + // - Soundness + // (forall ((x S) (y S)) (= x y)) + // (not (= c1 c2)) + // + // The constants c1 and c2 have only one occurrence in + // the formula above, but they are not really unconstrained. + // The quantifier forces S to have interpretations of size 1. + // If we replace (= c1 c2) with fresh k. The formula will + // become satisfiable. + // + // - Even if the formula is quantifier free, I would still + // have to build an interpretation for the eliminated + // variables. + // + if (!m().is_fully_interp(s)) + return nullptr; + + // If the interpreted sort has only one element, + // then it is unsound to eliminate the unconstrained variable in the equality + sort_size sz = s->get_num_elements(); + + if (sz.is_finite() && sz.size() <= 1) + return nullptr; + + if (!m_mc) { + // easy case, model generation is disabled. 
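
When a model converter is present, process_eq below keeps the original reconstruction: the equality (= v t) with unconstrained v is replaced by a fresh Boolean u, and v is later defined as ite(u, t, d) with d = mk_diff(t). A small editorial check of why that definition is faithful, using the public z3++ API (the integer sort and the names t, u are illustrative; this sketch is not part of the patch):

#include "z3++.h"
#include <iostream>

int main() {
    z3::context c;
    z3::expr t = c.int_const("t");
    z3::expr u = c.bool_const("u");
    z3::expr d = t + 1;                  // mk_diff for integers
    z3::expr v = z3::ite(u, t, d);       // definition recorded for the eliminated v
    z3::solver s(c);
    s.add((v == t) != u);                // look for a case where (= v t) and u disagree
    std::cout << s.check() << "\n";      // unsat: (= v t) holds exactly when u does
    return 0;
}
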
+ app * u; + mk_fresh_uncnstr_var_for(f, arg1, arg2, u); + return u; } - - app * process_le_ge(func_decl * f, expr * arg1, expr * arg2, bool le) { - expr * v; - expr * t; - if (uncnstr(arg1)) { - v = arg1; - t = arg2; - } - else if (uncnstr(arg2)) { - v = arg2; - t = arg1; - le = !le; - } - else { - return nullptr; - } + + expr_ref d(m()); + if (mk_diff(t, d)) { app * u; if (!mk_fresh_uncnstr_var_for(f, arg1, arg2, u)) return u; - if (!m_mc) - return u; - // v = ite(u, t, t + 1) if le - // v = ite(u, t, t - 1) if !le - add_def(v, m().mk_ite(u, t, m_a_util.mk_add(t, m_a_util.mk_numeral(rational(le ? 1 : -1), m().get_sort(arg1))))); + add_def(v, m().mk_ite(u, t, d)); return u; } + return nullptr; + } + + app * process_basic_app(func_decl * f, unsigned num, expr * const * args) { + SASSERT(f->get_family_id() == m().get_basic_family_id()); + switch (f->get_decl_kind()) { + case OP_ITE: + SASSERT(num == 3); + if (uncnstr(args[1]) && uncnstr(args[2])) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + add_def(args[1], r); + add_def(args[2], r); + return r; + } + if (uncnstr(args[0]) && uncnstr(args[1])) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + add_def(args[0], m().mk_true()); + add_def(args[1], r); + return r; + } + if (uncnstr(args[0]) && uncnstr(args[2])) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + add_def(args[0], m().mk_false()); + add_def(args[2], r); + return r; + } + return nullptr; + case OP_NOT: + SASSERT(num == 1); + if (uncnstr(args[0])) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) + add_def(args[0], m().mk_not(r)); + return r; + } + return nullptr; + case OP_AND: + if (num > 0 && uncnstr(num, args)) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) + add_defs(num, args, r, m().mk_true()); + return r; + } + return nullptr; + case OP_OR: + if (num > 0 && uncnstr(num, args)) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) + add_defs(num, args, r, m().mk_false()); + return r; + } + return nullptr; + case OP_EQ: + SASSERT(num == 2); + return process_eq(f, args[0], args[1]); + default: + return nullptr; + } + } - app * process_add(family_id fid, decl_kind add_k, decl_kind sub_k, unsigned num, expr * const * args) { - if (num == 0) - return nullptr; - unsigned i; - expr * v = nullptr; - for (i = 0; i < num; i++) { - expr * arg = args[i]; - if (uncnstr(arg)) { - v = arg; - break; - } - } - if (v == nullptr) - return nullptr; - app * u; - if (!mk_fresh_uncnstr_var_for(m().mk_app(fid, add_k, num, args), u)) - return u; - if (!m_mc) - return u; - ptr_buffer new_args; - for (unsigned j = 0; j < num; j++) { - if (j == i) - continue; - new_args.push_back(args[j]); - } - if (new_args.empty()) { - add_def(v, u); - } - else { - expr * rest; - if (new_args.size() == 1) - rest = new_args[0]; - else - rest = m().mk_app(fid, add_k, new_args.size(), new_args.c_ptr()); - add_def(v, m().mk_app(fid, sub_k, u, rest)); - } + app * process_le_ge(func_decl * f, expr * arg1, expr * arg2, bool le) { + expr * v; + expr * t; + if (uncnstr(arg1)) { + v = arg1; + t = arg2; + } + else if (uncnstr(arg2)) { + v = arg2; + t = arg1; + le = !le; + } + else { + return nullptr; + } + app * u; + if (!mk_fresh_uncnstr_var_for(f, arg1, arg2, u)) return u; - } - - app * process_arith_mul(func_decl * f, unsigned num, expr * const * args) { - if (num == 0) - return nullptr; - sort * s = m().get_sort(args[0]); - if 
(uncnstr(num, args)) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) - add_defs(num, args, r, m_a_util.mk_numeral(rational(1), s)); - return r; - } - // c * v case for reals - bool is_int; - rational val; - if (num == 2 && uncnstr(args[1]) && m_a_util.is_numeral(args[0], val, is_int) && !is_int) { - if (val.is_zero()) - return nullptr; - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) { - val = rational(1) / val; - add_def(args[1], m_a_util.mk_mul(m_a_util.mk_numeral(val, false), r)); - } - return r; - } + if (!m_mc) + return u; + // v = ite(u, t, t + 1) if le + // v = ite(u, t, t - 1) if !le + add_def(v, m().mk_ite(u, t, m_a_util.mk_add(t, m_a_util.mk_numeral(rational(le ? 1 : -1), m().get_sort(arg1))))); + return u; + } + + app * process_add(family_id fid, decl_kind add_k, decl_kind sub_k, unsigned num, expr * const * args) { + if (num == 0) return nullptr; - } - - app * process_arith_app(func_decl * f, unsigned num, expr * const * args) { - - SASSERT(f->get_family_id() == m_a_util.get_family_id()); - switch (f->get_decl_kind()) { - case OP_ADD: - return process_add(f->get_family_id(), OP_ADD, OP_SUB, num, args); - case OP_MUL: - return process_arith_mul(f, num, args); - case OP_LE: - SASSERT(num == 2); - return process_le_ge(f, args[0], args[1], true); - case OP_GE: - SASSERT(num == 2); - return process_le_ge(f, args[0], args[1], false); - default: - return nullptr; + unsigned i; + expr * v = nullptr; + for (i = 0; i < num; i++) { + expr * arg = args[i]; + if (uncnstr(arg)) { + v = arg; + break; } } - - app * process_bv_mul(func_decl * f, unsigned num, expr * const * args) { - if (num == 0) - return nullptr; - if (uncnstr(num, args)) { - sort * s = m().get_sort(args[0]); - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) - add_defs(num, args, r, m_bv_util.mk_numeral(rational(1), s)); - return r; - } - // c * v (c is even) case - unsigned bv_size; - rational val; - rational inv; - if (num == 2 && - uncnstr(args[1]) && - m_bv_util.is_numeral(args[0], val, bv_size) && - m_bv_util.mult_inverse(val, bv_size, inv)) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - sort * s = m().get_sort(args[1]); - if (m_mc) - add_def(args[1], m_bv_util.mk_bv_mul(m_bv_util.mk_numeral(inv, s), r)); - return r; - } + if (v == nullptr) return nullptr; + app * u; + if (!mk_fresh_uncnstr_var_for(m().mk_app(fid, add_k, num, args), u)) + return u; + if (!m_mc) + return u; + ptr_buffer new_args; + for (unsigned j = 0; j < num; j++) { + if (j == i) + continue; + new_args.push_back(args[j]); } - - app * process_extract(func_decl * f, expr * arg) { - if (!uncnstr(arg)) - return nullptr; + if (new_args.empty()) { + add_def(v, u); + } + else { + expr * rest; + if (new_args.size() == 1) + rest = new_args[0]; + else + rest = m().mk_app(fid, add_k, new_args.size(), new_args.c_ptr()); + add_def(v, m().mk_app(fid, sub_k, u, rest)); + } + return u; + } + + app * process_arith_mul(func_decl * f, unsigned num, expr * const * args) { + if (num == 0) + return nullptr; + sort * s = m().get_sort(args[0]); + if (uncnstr(num, args)) { app * r; - if (!mk_fresh_uncnstr_var_for(f, arg, r)) + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) return r; - if (!m_mc) - return r; - unsigned high = m_bv_util.get_extract_high(f); - unsigned low = m_bv_util.get_extract_low(f); - unsigned bv_size = m_bv_util.get_bv_size(m().get_sort(arg)); - if (bv_size == high - low + 1) { - add_def(arg, r); - } - else { - 
ptr_buffer args; - if (high < bv_size - 1) - args.push_back(m_bv_util.mk_numeral(rational(0), bv_size - high - 1)); - args.push_back(r); - if (low > 0) - args.push_back(m_bv_util.mk_numeral(rational(0), low)); - add_def(arg, m_bv_util.mk_concat(args.size(), args.c_ptr())); - } + if (m_mc) + add_defs(num, args, r, m_a_util.mk_numeral(rational(1), s)); return r; } - - app * process_bv_div(func_decl * f, expr * arg1, expr * arg2) { - if (uncnstr(arg1) && uncnstr(arg2)) { - sort * s = m().get_sort(arg1); - app * r; - if (!mk_fresh_uncnstr_var_for(f, arg1, arg2, r)) - return r; - if (!m_mc) - return r; - add_def(arg1, r); - add_def(arg2, m_bv_util.mk_numeral(rational(1), s)); - return r; - } - return nullptr; - } - - app * process_concat(func_decl * f, unsigned num, expr * const * args) { - if (num == 0) - return nullptr; - if (!uncnstr(num, args)) + // c * v case for reals + bool is_int; + rational val; + if (num == 2 && uncnstr(args[1]) && m_a_util.is_numeral(args[0], val, is_int) && !is_int) { + if (val.is_zero()) return nullptr; app * r; if (!mk_fresh_uncnstr_var_for(f, num, args, r)) return r; if (m_mc) { - unsigned i = num; - unsigned low = 0; - while (i > 0) { - --i; - expr * arg = args[i]; - unsigned sz = m_bv_util.get_bv_size(arg); - add_def(arg, m_bv_util.mk_extract(low + sz - 1, low, r)); - low += sz; - } + val = rational(1) / val; + add_def(args[1], m_a_util.mk_mul(m_a_util.mk_numeral(val, false), r)); } return r; } + return nullptr; + } + + app * process_arith_app(func_decl * f, unsigned num, expr * const * args) { - app * process_bv_le(func_decl * f, expr * arg1, expr * arg2, bool is_signed) { - if (m_produce_proofs) { - // The result of bv_le is not just introducing a new fresh name, - // we need a side condition. - // TODO: the correct proof step - return nullptr; - } - if (uncnstr(arg1)) { - // v <= t - expr * v = arg1; - expr * t = arg2; - // v <= t ---> (u or t == MAX) u is fresh - // add definition v = ite(u or t == MAX, t, t+1) - unsigned bv_sz = m_bv_util.get_bv_size(arg1); - rational MAX; - if (is_signed) - MAX = rational::power_of_two(bv_sz - 1) - rational(1); - else - MAX = rational::power_of_two(bv_sz) - rational(1); - app * u; - bool is_new = mk_fresh_uncnstr_var_for(f, arg1, arg2, u); - app * r = m().mk_or(u, m().mk_eq(t, m_bv_util.mk_numeral(MAX, bv_sz))); - if (m_mc && is_new) - add_def(v, m().mk_ite(r, t, m_bv_util.mk_bv_add(t, m_bv_util.mk_numeral(rational(1), bv_sz)))); - return r; - } - if (uncnstr(arg2)) { - // v >= t - expr * v = arg2; - expr * t = arg1; - // v >= t ---> (u ot t == MIN) u is fresh - // add definition v = ite(u or t == MIN, t, t-1) - unsigned bv_sz = m_bv_util.get_bv_size(arg1); - rational MIN; - if (is_signed) - MIN = -rational::power_of_two(bv_sz - 1); - else - MIN = rational(0); - app * u; - bool is_new = mk_fresh_uncnstr_var_for(f, arg1, arg2, u); - app * r = m().mk_or(u, m().mk_eq(t, m_bv_util.mk_numeral(MIN, bv_sz))); - if (m_mc && is_new) - add_def(v, m().mk_ite(r, t, m_bv_util.mk_bv_sub(t, m_bv_util.mk_numeral(rational(1), bv_sz)))); - return r; - } + SASSERT(f->get_family_id() == m_a_util.get_family_id()); + switch (f->get_decl_kind()) { + case OP_ADD: + return process_add(f->get_family_id(), OP_ADD, OP_SUB, num, args); + case OP_MUL: + return process_arith_mul(f, num, args); + case OP_LE: + SASSERT(num == 2); + return process_le_ge(f, args[0], args[1], true); + case OP_GE: + SASSERT(num == 2); + return process_le_ge(f, args[0], args[1], false); + default: return nullptr; } - - app * process_bv_app(func_decl * f, unsigned num, expr * 
const * args) { - SASSERT(f->get_family_id() == m_bv_util.get_family_id()); - switch (f->get_decl_kind()) { - case OP_BADD: - return process_add(f->get_family_id(), OP_BADD, OP_BSUB, num, args); - case OP_BMUL: - return process_bv_mul(f, num, args); - case OP_BSDIV: - case OP_BUDIV: - case OP_BSDIV_I: - case OP_BUDIV_I: - SASSERT(num == 2); - return process_bv_div(f, args[0], args[1]); - case OP_SLEQ: - SASSERT(num == 2); - return process_bv_le(f, args[0], args[1], true); - case OP_ULEQ: - SASSERT(num == 2); - return process_bv_le(f, args[0], args[1], false); - case OP_CONCAT: - return process_concat(f, num, args); - case OP_EXTRACT: - SASSERT(num == 1); - return process_extract(f, args[0]); - case OP_BNOT: - SASSERT(num == 1); - if (uncnstr(args[0])) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) - add_def(args[0], m().mk_app(f, r)); - return r; - } - return nullptr; - case OP_BOR: - if (num > 0 && uncnstr(num, args)) { - sort * s = m().get_sort(args[0]); - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) - add_defs(num, args, r, m_bv_util.mk_numeral(rational(0), s)); - return r; - } - return nullptr; - default: - return nullptr; + } + + app * process_bv_mul(func_decl * f, unsigned num, expr * const * args) { + if (num == 0) + return nullptr; + if (uncnstr(num, args)) { + sort * s = m().get_sort(args[0]); + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) + add_defs(num, args, r, m_bv_util.mk_numeral(rational(1), s)); + return r; + } + // c * v (c is even) case + unsigned bv_size; + rational val; + rational inv; + if (num == 2 && + uncnstr(args[1]) && + m_bv_util.is_numeral(args[0], val, bv_size) && + m_bv_util.mult_inverse(val, bv_size, inv)) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + sort * s = m().get_sort(args[1]); + if (m_mc) + add_def(args[1], m_bv_util.mk_bv_mul(m_bv_util.mk_numeral(inv, s), r)); + return r; + } + return nullptr; + } + + app * process_extract(func_decl * f, expr * arg) { + if (!uncnstr(arg)) + return nullptr; + app * r; + if (!mk_fresh_uncnstr_var_for(f, arg, r)) + return r; + if (!m_mc) + return r; + unsigned high = m_bv_util.get_extract_high(f); + unsigned low = m_bv_util.get_extract_low(f); + unsigned bv_size = m_bv_util.get_bv_size(m().get_sort(arg)); + if (bv_size == high - low + 1) { + add_def(arg, r); + } + else { + ptr_buffer args; + if (high < bv_size - 1) + args.push_back(m_bv_util.mk_numeral(rational(0), bv_size - high - 1)); + args.push_back(r); + if (low > 0) + args.push_back(m_bv_util.mk_numeral(rational(0), low)); + add_def(arg, m_bv_util.mk_concat(args.size(), args.c_ptr())); + } + return r; + } + + app * process_bv_div(func_decl * f, expr * arg1, expr * arg2) { + if (uncnstr(arg1) && uncnstr(arg2)) { + sort * s = m().get_sort(arg1); + app * r; + if (!mk_fresh_uncnstr_var_for(f, arg1, arg2, r)) + return r; + if (!m_mc) + return r; + add_def(arg1, r); + add_def(arg2, m_bv_util.mk_numeral(rational(1), s)); + return r; + } + return nullptr; + } + + app * process_concat(func_decl * f, unsigned num, expr * const * args) { + if (num == 0) + return nullptr; + if (!uncnstr(num, args)) + return nullptr; + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) { + unsigned i = num; + unsigned low = 0; + while (i > 0) { + --i; + expr * arg = args[i]; + unsigned sz = m_bv_util.get_bv_size(arg); + add_def(arg, m_bv_util.mk_extract(low + sz - 1, low, r)); + low += sz; } } - - app * 
process_array_app(func_decl * f, unsigned num, expr * const * args) { - SASSERT(f->get_family_id() == m_ar_util.get_family_id()); - switch (f->get_decl_kind()) { - case OP_SELECT: - if (uncnstr(args[0])) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - sort * s = m().get_sort(args[0]); - if (m_mc) - add_def(args[0], m_ar_util.mk_const_array(s, r)); - return r; - } - return nullptr; - case OP_STORE: - if (uncnstr(args[0]) && uncnstr(args[num-1])) { - app * r; - if (!mk_fresh_uncnstr_var_for(f, num, args, r)) - return r; - if (m_mc) { - add_def(args[num-1], m().mk_app(m_ar_util.get_family_id(), OP_SELECT, num-1, args)); - add_def(args[0], r); - } - return r; - } - default: - return nullptr; - } - } - - app * process_datatype_app(func_decl * f, unsigned num, expr * const * args) { - if (m_dt_util.is_recognizer(f)) { - SASSERT(num == 1); - if (uncnstr(args[0])) { - if (!m_mc) { - app * r; - mk_fresh_uncnstr_var_for(f, num, args, r); - return r; - } - // TODO: handle model generation - } - } - else if (m_dt_util.is_accessor(f)) { - SASSERT(num == 1); - if (uncnstr(args[0])) { - if (!m_mc) { - app * r; - mk_fresh_uncnstr_var_for(f, num, args, r); - return r; - } - func_decl * c = m_dt_util.get_accessor_constructor(f); - for (unsigned i = 0; i < c->get_arity(); i++) - if (!m().is_fully_interp(c->get_domain(i))) - return nullptr; - app * u; - if (!mk_fresh_uncnstr_var_for(f, num, args, u)) - return u; - ptr_vector const & accs = *m_dt_util.get_constructor_accessors(c); - ptr_buffer new_args; - for (unsigned i = 0; i < accs.size(); i++) { - if (accs[i] == f) - new_args.push_back(u); - else - new_args.push_back(m().get_some_value(c->get_domain(i))); - } - add_def(args[0], m().mk_app(c, new_args.size(), new_args.c_ptr())); - return u; - } - } - else if (m_dt_util.is_constructor(f)) { - if (uncnstr(num, args)) { - app * u; - if (!mk_fresh_uncnstr_var_for(f, num, args, u)) - return u; - if (!m_mc) - return u; - ptr_vector const & accs = *m_dt_util.get_constructor_accessors(f); - for (unsigned i = 0; i < num; i++) { - add_def(args[i], m().mk_app(accs[i], u)); - } - return u; - } - } + return r; + } + + app * process_bv_le(func_decl * f, expr * arg1, expr * arg2, bool is_signed) { + if (m_produce_proofs) { + // The result of bv_le is not just introducing a new fresh name, + // we need a side condition. + // TODO: the correct proof step return nullptr; } - - br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) { - if (num == 0) - return BR_FAILED; - family_id fid = f->get_family_id(); - if (fid == null_family_id) - return BR_FAILED; - - for (unsigned i = 0; i < num; i++) { - if (!is_ground(args[i])) - return BR_FAILED; // non-ground terms are not handled. 
- } - - app * u = nullptr; - - if (fid == m().get_basic_family_id()) - u = process_basic_app(f, num, args); - else if (fid == m_a_util.get_family_id()) - u = process_arith_app(f, num, args); - else if (fid == m_bv_util.get_family_id()) - u = process_bv_app(f, num, args); - else if (fid == m_ar_util.get_family_id()) - u = process_array_app(f, num, args); - else if (fid == m_dt_util.get_family_id()) - u = process_datatype_app(f, num, args); - - if (u == nullptr) - return BR_FAILED; - - result = u; - if (m_produce_proofs) { - expr * s = m().mk_app(f, num, args); - expr * eq = m().mk_eq(s, u); - proof * pr1 = m().mk_def_intro(eq); - result_pr = m().mk_apply_def(s, u, pr1); - } - - return BR_DONE; - } - }; - - class rw : public rewriter_tpl { - rw_cfg m_cfg; - public: - rw(ast_manager & m, bool produce_proofs, obj_hashtable & vars, mc * _m, - unsigned long long max_memory, unsigned max_steps): - rewriter_tpl(m, produce_proofs, m_cfg), - m_cfg(m, produce_proofs, vars, _m, max_memory, max_steps) { - } - }; - - ast_manager & m_manager; - ref m_mc; - obj_hashtable m_vars; - scoped_ptr m_rw; - unsigned m_num_elim_apps; - unsigned long long m_max_memory; - unsigned m_max_steps; - - imp(ast_manager & m, params_ref const & p): - m_manager(m), - m_num_elim_apps(0) { - updt_params(p); - } - - void updt_params(params_ref const & p) { - m_max_memory = megabytes_to_bytes(p.get_uint("max_memory", UINT_MAX)); - m_max_steps = p.get_uint("max_steps", UINT_MAX); - } - - ast_manager & m() { return m_manager; } - - void init_mc(bool produce_models) { - m_mc = nullptr; - if (produce_models) { - m_mc = alloc(mc, m(), "elim_uncstr"); - } - } - - void init_rw(bool produce_proofs) { - m_rw = alloc(rw, m(), produce_proofs, m_vars, m_mc.get(), m_max_memory, m_max_steps); - } - - void operator()(goal_ref const & g, goal_ref_buffer & result) { - bool produce_proofs = g->proofs_enabled(); - - TRACE("elim_uncnstr_bug", g->display(tout);); - tactic_report report("elim-uncnstr-vars", *g); - m_vars.reset(); - collect_occs p; - p(*g, m_vars); - if (m_vars.empty()) { - result.push_back(g.get()); - // did not increase depth since it didn't do anything. 
- return; - } - bool modified = true; - TRACE("elim_uncnstr", tout << "unconstrained variables...\n"; - for (expr * v : m_vars) tout << mk_ismt2_pp(v, m()) << " "; - tout << "\n";); - init_mc(g->models_enabled()); - init_rw(produce_proofs); - - expr_ref new_f(m()); - proof_ref new_pr(m()); - unsigned round = 0; - unsigned size = g->size(); - unsigned idx = 0; - while (true) { - for (; idx < size; idx++) { - expr * f = g->form(idx); - m_rw->operator()(f, new_f, new_pr); - if (f == new_f) - continue; - modified = true; - if (produce_proofs) { - proof * pr = g->pr(idx); - new_pr = m().mk_modus_ponens(pr, new_pr); - } - g->update(idx, new_f, new_pr, g->dep(idx)); - } - if (!modified) { - if (round == 0) { - } - else { - m_num_elim_apps = m_rw->cfg().m_fresh_vars.size(); - g->add(m_mc.get()); - } - TRACE("elim_uncnstr", if (m_mc) m_mc->display(tout); else tout << "no mc\n";); - m_mc = nullptr; - m_rw = nullptr; - result.push_back(g.get()); - g->inc_depth(); - return; - } - modified = false; - round ++; - size = g->size(); - m_rw->reset(); // reset cache - m_vars.reset(); - { - collect_occs p; - p(*g, m_vars); - } - if (m_vars.empty()) - idx = size; // force to finish + if (uncnstr(arg1)) { + // v <= t + expr * v = arg1; + expr * t = arg2; + // v <= t ---> (u or t == MAX) u is fresh + // add definition v = ite(u or t == MAX, t, t+1) + unsigned bv_sz = m_bv_util.get_bv_size(arg1); + rational MAX; + if (is_signed) + MAX = rational::power_of_two(bv_sz - 1) - rational(1); else - idx = 0; + MAX = rational::power_of_two(bv_sz) - rational(1); + app * u; + bool is_new = mk_fresh_uncnstr_var_for(f, arg1, arg2, u); + app * r = m().mk_or(u, m().mk_eq(t, m_bv_util.mk_numeral(MAX, bv_sz))); + if (m_mc && is_new) + add_def(v, m().mk_ite(r, t, m_bv_util.mk_bv_add(t, m_bv_util.mk_numeral(rational(1), bv_sz)))); + return r; + } + if (uncnstr(arg2)) { + // v >= t + expr * v = arg2; + expr * t = arg1; + // v >= t ---> (u ot t == MIN) u is fresh + // add definition v = ite(u or t == MIN, t, t-1) + unsigned bv_sz = m_bv_util.get_bv_size(arg1); + rational MIN; + if (is_signed) + MIN = -rational::power_of_two(bv_sz - 1); + else + MIN = rational(0); + app * u; + bool is_new = mk_fresh_uncnstr_var_for(f, arg1, arg2, u); + app * r = m().mk_or(u, m().mk_eq(t, m_bv_util.mk_numeral(MIN, bv_sz))); + if (m_mc && is_new) + add_def(v, m().mk_ite(r, t, m_bv_util.mk_bv_sub(t, m_bv_util.mk_numeral(rational(1), bv_sz)))); + return r; + } + return nullptr; + } + + app * process_bv_app(func_decl * f, unsigned num, expr * const * args) { + SASSERT(f->get_family_id() == m_bv_util.get_family_id()); + switch (f->get_decl_kind()) { + case OP_BADD: + return process_add(f->get_family_id(), OP_BADD, OP_BSUB, num, args); + case OP_BMUL: + return process_bv_mul(f, num, args); + case OP_BSDIV: + case OP_BUDIV: + case OP_BSDIV_I: + case OP_BUDIV_I: + SASSERT(num == 2); + return process_bv_div(f, args[0], args[1]); + case OP_SLEQ: + SASSERT(num == 2); + return process_bv_le(f, args[0], args[1], true); + case OP_ULEQ: + SASSERT(num == 2); + return process_bv_le(f, args[0], args[1], false); + case OP_CONCAT: + return process_concat(f, num, args); + case OP_EXTRACT: + SASSERT(num == 1); + return process_extract(f, args[0]); + case OP_BNOT: + SASSERT(num == 1); + if (uncnstr(args[0])) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) + add_def(args[0], m().mk_app(f, r)); + return r; + } + return nullptr; + case OP_BOR: + if (num > 0 && uncnstr(num, args)) { + sort * s = m().get_sort(args[0]); + app * r; + if 
(!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) + add_defs(num, args, r, m_bv_util.mk_numeral(rational(0), s)); + return r; + } + return nullptr; + default: + return nullptr; } } + + app * process_array_app(func_decl * f, unsigned num, expr * const * args) { + SASSERT(f->get_family_id() == m_ar_util.get_family_id()); + switch (f->get_decl_kind()) { + case OP_SELECT: + if (uncnstr(args[0])) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + sort * s = m().get_sort(args[0]); + if (m_mc) + add_def(args[0], m_ar_util.mk_const_array(s, r)); + return r; + } + return nullptr; + case OP_STORE: + if (uncnstr(args[0]) && uncnstr(args[num-1])) { + app * r; + if (!mk_fresh_uncnstr_var_for(f, num, args, r)) + return r; + if (m_mc) { + add_def(args[num-1], m().mk_app(m_ar_util.get_family_id(), OP_SELECT, num-1, args)); + add_def(args[0], r); + } + return r; + } + default: + return nullptr; + } + } + + app * process_datatype_app(func_decl * f, unsigned num, expr * const * args) { + if (m_dt_util.is_recognizer(f)) { + SASSERT(num == 1); + if (uncnstr(args[0])) { + if (!m_mc) { + app * r; + mk_fresh_uncnstr_var_for(f, num, args, r); + return r; + } + // TODO: handle model generation + } + } + else if (m_dt_util.is_accessor(f)) { + SASSERT(num == 1); + if (uncnstr(args[0])) { + if (!m_mc) { + app * r; + mk_fresh_uncnstr_var_for(f, num, args, r); + return r; + } + func_decl * c = m_dt_util.get_accessor_constructor(f); + for (unsigned i = 0; i < c->get_arity(); i++) + if (!m().is_fully_interp(c->get_domain(i))) + return nullptr; + app * u; + if (!mk_fresh_uncnstr_var_for(f, num, args, u)) + return u; + ptr_vector const & accs = *m_dt_util.get_constructor_accessors(c); + ptr_buffer new_args; + for (unsigned i = 0; i < accs.size(); i++) { + if (accs[i] == f) + new_args.push_back(u); + else + new_args.push_back(m().get_some_value(c->get_domain(i))); + } + add_def(args[0], m().mk_app(c, new_args.size(), new_args.c_ptr())); + return u; + } + } + else if (m_dt_util.is_constructor(f)) { + if (uncnstr(num, args)) { + app * u; + if (!mk_fresh_uncnstr_var_for(f, num, args, u)) + return u; + if (!m_mc) + return u; + ptr_vector const & accs = *m_dt_util.get_constructor_accessors(f); + for (unsigned i = 0; i < num; i++) { + add_def(args[i], m().mk_app(accs[i], u)); + } + return u; + } + } + return nullptr; + } + + br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) { + if (num == 0) + return BR_FAILED; + family_id fid = f->get_family_id(); + if (fid == null_family_id) + return BR_FAILED; + for (unsigned i = 0; i < num; i++) { + if (!is_ground(args[i])) + return BR_FAILED; // non-ground terms are not handled. 
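
The process_bv_le case above replaces an unconstrained v in (bvule v t) by the formula (or u (= t MAX)) and defines v as ite(or u (= t MAX), t, (bvadd t 1)). An editorial z3++ check that the replacement and the definition agree for every t; the 8-bit width and the constant names are arbitrary, and the snippet is not part of this change:

#include "z3++.h"
#include <iostream>

int main() {
    z3::context c;
    unsigned w = 8;
    z3::expr t = c.bv_const("t", w);
    z3::expr u = c.bool_const("u");
    z3::expr max = c.bv_val(255, w);                 // unsigned MAX for width 8
    z3::expr r = u || (t == max);                    // formula substituted for (bvule v t)
    z3::expr v = z3::ite(r, t, t + c.bv_val(1, w));  // definition of the eliminated v
    z3::solver s(c);
    s.add(z3::ule(v, t) != r);                       // any disagreement?
    std::cout << s.check() << "\n";                  // unsat
    return 0;
}
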
+ } + + app * u = nullptr; + + if (fid == m().get_basic_family_id()) + u = process_basic_app(f, num, args); + else if (fid == m_a_util.get_family_id()) + u = process_arith_app(f, num, args); + else if (fid == m_bv_util.get_family_id()) + u = process_bv_app(f, num, args); + else if (fid == m_ar_util.get_family_id()) + u = process_array_app(f, num, args); + else if (fid == m_dt_util.get_family_id()) + u = process_datatype_app(f, num, args); + + if (u == nullptr) + return BR_FAILED; + + result = u; + if (m_produce_proofs) { + expr * s = m().mk_app(f, num, args); + expr * eq = m().mk_eq(s, u); + proof * pr1 = m().mk_def_intro(eq); + result_pr = m().mk_apply_def(s, u, pr1); + } + + return BR_DONE; + } }; - imp * m_imp; + class rw : public rewriter_tpl { + rw_cfg m_cfg; + public: + rw(ast_manager & m, bool produce_proofs, obj_hashtable & vars, mc * _m, + unsigned long long max_memory, unsigned max_steps): + rewriter_tpl(m, produce_proofs, m_cfg), + m_cfg(m, produce_proofs, vars, _m, max_memory, max_steps) { + } + }; + + ast_manager & m_manager; + ref m_mc; + obj_hashtable m_vars; + scoped_ptr m_rw; + unsigned m_num_elim_apps = 0; + unsigned long long m_max_memory; + unsigned m_max_steps; + + ast_manager & m() { return m_manager; } + + void init_mc(bool produce_models) { + m_mc = nullptr; + if (produce_models) { + m_mc = alloc(mc, m(), "elim_uncstr"); + } + } + + void init_rw(bool produce_proofs) { + m_rw = alloc(rw, m(), produce_proofs, m_vars, m_mc.get(), m_max_memory, m_max_steps); + } + + void run(goal_ref const & g, goal_ref_buffer & result) { + bool produce_proofs = g->proofs_enabled(); + + TRACE("elim_uncnstr_bug", g->display(tout);); + tactic_report report("elim-uncnstr-vars", *g); + m_vars.reset(); + collect_occs p; + p(*g, m_vars); + if (m_vars.empty()) { + result.push_back(g.get()); + // did not increase depth since it didn't do anything. 
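
The run() driver re-collects unconstrained occurrences and rewrites the goal round by round until nothing changes. For orientation, a minimal use of the finished tactic through the public z3++ API; this is an editorial sketch, and the generated fresh names and the exact shape of the simplified goal will vary:

#include "z3++.h"
#include <iostream>

int main() {
    z3::context c;
    z3::goal g(c);
    z3::expr x = c.int_const("x");
    z3::expr y = c.int_const("y");
    g.add(x + y > 0);                  // x and y occur only here
    z3::tactic t(c, "elim-uncnstr");
    z3::apply_result r = t(g);         // the unconstrained application is abstracted away
    std::cout << r << "\n";
    return 0;
}
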
+ return; + } + bool modified = true; + TRACE("elim_uncnstr", tout << "unconstrained variables...\n"; + for (expr * v : m_vars) tout << mk_ismt2_pp(v, m()) << " "; + tout << "\n";); + init_mc(g->models_enabled()); + init_rw(produce_proofs); + + expr_ref new_f(m()); + proof_ref new_pr(m()); + unsigned round = 0; + unsigned size = g->size(); + unsigned idx = 0; + while (true) { + for (; idx < size; idx++) { + expr * f = g->form(idx); + m_rw->operator()(f, new_f, new_pr); + if (f == new_f) + continue; + modified = true; + if (produce_proofs) { + proof * pr = g->pr(idx); + new_pr = m().mk_modus_ponens(pr, new_pr); + } + g->update(idx, new_f, new_pr, g->dep(idx)); + } + if (!modified) { + if (round == 0) { + } + else { + m_num_elim_apps = m_rw->cfg().m_fresh_vars.size(); + g->add(m_mc.get()); + } + TRACE("elim_uncnstr", if (m_mc) m_mc->display(tout); else tout << "no mc\n";); + m_mc = nullptr; + m_rw = nullptr; + result.push_back(g.get()); + g->inc_depth(); + return; + } + modified = false; + round ++; + size = g->size(); + m_rw->reset(); // reset cache + m_vars.reset(); + { + collect_occs p; + p(*g, m_vars); + } + if (m_vars.empty()) + idx = size; // force to finish + else + idx = 0; + } + } + params_ref m_params; public: elim_uncnstr_tactic(ast_manager & m, params_ref const & p): - m_params(p) { - m_imp = alloc(imp, m, p); + m_manager(m), m_params(p) { + updt_params(p); } tactic * translate(ast_manager & m) override { return alloc(elim_uncnstr_tactic, m, m_params); } - - ~elim_uncnstr_tactic() override { - dealloc(m_imp); - } void updt_params(params_ref const & p) override { m_params = p; - m_imp->updt_params(p); + m_max_memory = megabytes_to_bytes(p.get_uint("max_memory", UINT_MAX)); + m_max_steps = p.get_uint("max_steps", UINT_MAX); } void collect_param_descrs(param_descrs & r) override { @@ -912,32 +895,26 @@ public: void operator()(goal_ref const & g, goal_ref_buffer & result) override { - (*m_imp)(g, result); - report_tactic_progress(":num-elim-apps", get_num_elim_apps()); + run(g, result); + report_tactic_progress(":num-elim-apps", m_num_elim_apps); } void cleanup() override { - unsigned num_elim_apps = get_num_elim_apps(); - ast_manager & m = m_imp->m_manager; - imp * d = alloc(imp, m, m_params); - std::swap(d, m_imp); - dealloc(d); - m_imp->m_num_elim_apps = num_elim_apps; - } - - unsigned get_num_elim_apps() const { - return m_imp->m_num_elim_apps; + m_mc = nullptr; + m_rw = nullptr; + m_vars.reset(); } void collect_statistics(statistics & st) const override { - st.update("eliminated applications", get_num_elim_apps()); + st.update("eliminated applications", m_num_elim_apps); } void reset_statistics() override { - m_imp->m_num_elim_apps = 0; + m_num_elim_apps = 0; } }; +} tactic * mk_elim_uncnstr_tactic(ast_manager & m, params_ref const & p) { return clean(alloc(elim_uncnstr_tactic, m, p)); diff --git a/src/tactic/core/elim_uncnstr_tactic.h b/src/tactic/core/elim_uncnstr_tactic.h index 19f9021ac..5824a6974 100644 --- a/src/tactic/core/elim_uncnstr_tactic.h +++ b/src/tactic/core/elim_uncnstr_tactic.h @@ -16,8 +16,7 @@ Author: Notes: --*/ -#ifndef ELIM_UNCNSTR_TACTIC_H_ -#define ELIM_UNCNSTR_TACTIC_H_ +#pragma once #include "util/params.h" @@ -29,5 +28,3 @@ tactic * mk_elim_uncnstr_tactic(ast_manager & m, params_ref const & p = params_r /* ADD_TACTIC("elim-uncnstr", "eliminate application containing unconstrained variables.", "mk_elim_uncnstr_tactic(m, p)") */ -#endif - diff --git a/src/tactic/core/propagate_values_tactic.cpp b/src/tactic/core/propagate_values_tactic.cpp index 
diff --git a/src/tactic/core/propagate_values_tactic.cpp b/src/tactic/core/propagate_values_tactic.cpp
index 41e48d864..14c715f03 100644
--- a/src/tactic/core/propagate_values_tactic.cpp
+++ b/src/tactic/core/propagate_values_tactic.cpp
@@ -25,224 +25,206 @@ Revision History:
 #include "tactic/goal_shared_occs.h"
 #include "ast/pb_decl_plugin.h"

+namespace {
 class propagate_values_tactic : public tactic {
-    struct imp {
-        ast_manager & m;
-        th_rewriter m_r;
-        scoped_ptr<expr_substitution> m_subst;
-        goal * m_goal;
-        goal_shared_occs m_occs;
-        unsigned m_idx;
-        unsigned m_max_rounds;
-        bool m_modified;
-
-        imp(ast_manager & m, params_ref const & p):
-            m(m),
-            m_r(m, p),
-            m_goal(nullptr),
-            m_occs(m, true /* track atoms */) {
-            updt_params_core(p);
-        }
+    ast_manager & m;
+    th_rewriter m_r;
+    scoped_ptr<expr_substitution> m_subst;
+    goal * m_goal;
+    goal_shared_occs m_occs;
+    unsigned m_idx;
+    unsigned m_max_rounds;
+    bool m_modified;
+    params_ref m_params;

-        ~imp() { }
+    void updt_params_core(params_ref const & p) {
+        m_max_rounds = p.get_uint("max_rounds", 4);
+    }

-        void updt_params_core(params_ref const & p) {
-            m_max_rounds = p.get_uint("max_rounds", 4);
-        }
-
-        void updt_params(params_ref const & p) {
-            m_r.updt_params(p);
-            updt_params_core(p);
-        }
-
-
-        bool is_shared(expr * t) {
-            return m_occs.is_shared(t);
-        }
-
-        bool is_shared_neg(expr * t, expr * & atom) {
-            if (!m.is_not(t, atom))
-                return false;
-            return is_shared(atom);
-        }
+    bool is_shared(expr * t) {
+        return m_occs.is_shared(t);
+    }

-        bool is_shared_eq(expr * t, expr * & lhs, expr * & value) {
-            expr* arg1, *arg2;
-            if (!m.is_eq(t, arg1, arg2))
-                return false;
-            if (m.is_value(arg1) && is_shared(arg2)) {
-                lhs = arg2;
-                value = arg1;
-                return true;
-            }
-            if (m.is_value(arg2) && is_shared(arg1)) {
-                lhs = arg1;
-                value = arg2;
-                return true;
-            }
+    bool is_shared_neg(expr * t, expr * & atom) {
+        if (!m.is_not(t, atom)) return false;
+        return is_shared(atom);
+    }
+
+    bool is_shared_eq(expr * t, expr * & lhs, expr * & value) {
+        expr* arg1, *arg2;
+        if (!m.is_eq(t, arg1, arg2))
+            return false;
+        if (m.is_value(arg1) && is_shared(arg2)) {
+            lhs = arg2;
+            value = arg1;
+            return true;
+        }
+        if (m.is_value(arg2) && is_shared(arg1)) {
+            lhs = arg1;
+            value = arg2;
+            return true;
+        }
+        return false;
+    }
+
+    void push_result(expr * new_curr, proof * new_pr) {
+        if (m_goal->proofs_enabled()) {
+            proof * pr = m_goal->pr(m_idx);
+            new_pr = m.mk_modus_ponens(pr, new_pr);
         }

-        void push_result(expr * new_curr, proof * new_pr) {
-            if (m_goal->proofs_enabled()) {
-                proof * pr = m_goal->pr(m_idx);
-                new_pr = m.mk_modus_ponens(pr, new_pr);
+        expr_dependency_ref new_d(m);
+        if (m_goal->unsat_core_enabled()) {
+            new_d = m_goal->dep(m_idx);
+            expr_dependency * used_d = m_r.get_used_dependencies();
+            if (used_d != nullptr) {
+                new_d = m.mk_join(new_d, used_d);
+                m_r.reset_used_dependencies();
             }
-
-            expr_dependency_ref new_d(m);
-            if (m_goal->unsat_core_enabled()) {
-                new_d = m_goal->dep(m_idx);
-                expr_dependency * used_d = m_r.get_used_dependencies();
-                if (used_d != nullptr) {
-                    new_d = m.mk_join(new_d, used_d);
-                    m_r.reset_used_dependencies();
+        }
+
+        m_goal->update(m_idx, new_curr, new_pr, new_d);
+
+        if (is_shared(new_curr)) {
+            m_subst->insert(new_curr, m.mk_true(), m.mk_iff_true(new_pr), new_d);
+        }
+        expr * atom;
+        if (is_shared_neg(new_curr, atom)) {
+            m_subst->insert(atom, m.mk_false(), m.mk_iff_false(new_pr), new_d);
+        }
+        expr * lhs, * value;
+        if (is_shared_eq(new_curr, lhs, value)) {
+            TRACE("shallow_context_simplifier_bug", tout << "found eq:\n" << mk_ismt2_pp(new_curr, m) << "\n";);
+            m_subst->insert(lhs, value, new_pr, new_d);
+        }
+    }
+
+    void process_current() {
+        expr * curr = m_goal->form(m_idx);
+        expr_ref new_curr(m);
+        proof_ref new_pr(m);
+
+        if (!m_subst->empty()) {
+            m_r(curr, new_curr, new_pr);
+        }
+        else {
+            new_curr = curr;
+            if (m.proofs_enabled())
+                new_pr = m.mk_reflexivity(curr);
+        }
+
+        TRACE("shallow_context_simplifier_bug", tout << mk_ismt2_pp(curr, m) << "\n---->\n" << mk_ismt2_pp(new_curr, m) << "\n";);
+        if (new_curr != curr) {
+            m_modified = true;
+            //if (has_pb(curr))
+            //    IF_VERBOSE(0, verbose_stream() << mk_ismt2_pp(curr, m) << "\n---->\n" << mk_ismt2_pp(new_curr, m) << "\n");
+        }
+        push_result(new_curr, new_pr);
+    }
+
+    bool has_pb(expr* e) {
+        pb_util pb(m);
+        if (pb.is_ge(e)) return true;
+        if (m.is_or(e)) {
+            for (expr* a : *to_app(e)) {
+                if (pb.is_ge(a)) return true;
+            }
+        }
+        return false;
+    }
+
+    void run(goal_ref const & g, goal_ref_buffer & result) {
+        SASSERT(g->is_well_sorted());
+        tactic_report report("propagate-values", *g);
+        m_goal = g.get();
+
+        bool forward = true;
+        expr_ref new_curr(m);
+        proof_ref new_pr(m);
+        unsigned size = m_goal->size();
+        m_idx = 0;
+        m_modified = false;
+        unsigned round = 0;
+
+
+        if (m_goal->inconsistent())
+            goto end;
+
+        if (m_max_rounds == 0)
+            goto end;
+
+        m_subst = alloc(expr_substitution, m, g->unsat_core_enabled(), g->proofs_enabled());
+        m_r.set_substitution(m_subst.get());
+        m_occs(*m_goal);
+
+        while (true) {
+            TRACE("propagate_values", tout << "while(true) loop\n"; m_goal->display_with_dependencies(tout););
+            if (forward) {
+                for (; m_idx < size; m_idx++) {
+                    process_current();
+                    if (m_goal->inconsistent())
+                        goto end;
                 }
-            }
-
-            m_goal->update(m_idx, new_curr, new_pr, new_d);
-
-            if (is_shared(new_curr)) {
-                m_subst->insert(new_curr, m.mk_true(), m.mk_iff_true(new_pr), new_d);
-            }
-            expr * atom;
-            if (is_shared_neg(new_curr, atom)) {
-                m_subst->insert(atom, m.mk_false(), m.mk_iff_false(new_pr), new_d);
-            }
-            expr * lhs, * value;
-            if (is_shared_eq(new_curr, lhs, value)) {
-                TRACE("shallow_context_simplifier_bug", tout << "found eq:\n" << mk_ismt2_pp(new_curr, m) << "\n";);
-                m_subst->insert(lhs, value, new_pr, new_d);
-            }
-        }
-
-        void process_current() {
-            expr * curr = m_goal->form(m_idx);
-            expr_ref new_curr(m);
-            proof_ref new_pr(m);
-
-            if (!m_subst->empty()) {
-                m_r(curr, new_curr, new_pr);
+                if (m_subst->empty() && !m_modified)
+                    goto end;
+                m_occs(*m_goal);
+                m_idx = m_goal->size();
+                forward = false;
+                m_subst->reset();
+                m_r.set_substitution(m_subst.get()); // reset, but keep substitution
             }
             else {
-                new_curr = curr;
-                if (m.proofs_enabled())
-                    new_pr = m.mk_reflexivity(curr);
-            }
-
-            TRACE("shallow_context_simplifier_bug", tout << mk_ismt2_pp(curr, m) << "\n---->\n" << mk_ismt2_pp(new_curr, m) << "\n";);
-            if (new_curr != curr) {
-                m_modified = true;
-                //if (has_pb(curr))
-                //    IF_VERBOSE(0, verbose_stream() << mk_ismt2_pp(curr, m) << "\n---->\n" << mk_ismt2_pp(new_curr, m) << "\n");
-            }
-            push_result(new_curr, new_pr);
-        }
-
-        bool has_pb(expr* e) {
-            pb_util pb(m);
-            if (pb.is_ge(e)) return true;
-            if (m.is_or(e)) {
-                for (expr* a : *to_app(e)) {
-                    if (pb.is_ge(a)) return true;
-                }
-            }
-            return false;
-        }
-
-        void operator()(goal_ref const & g,
-                        goal_ref_buffer & result) {
-            SASSERT(g->is_well_sorted());
-            tactic_report report("propagate-values", *g);
-            m_goal = g.get();
-
-            bool forward = true;
-            expr_ref new_curr(m);
-            proof_ref new_pr(m);
-            unsigned size = m_goal->size();
-            m_idx = 0;
-            m_modified = false;
-            unsigned round = 0;
-
-
-            if (m_goal->inconsistent())
-                goto end;
-
-            if (m_max_rounds == 0)
-                goto end;
-
-            m_subst = alloc(expr_substitution, m, g->unsat_core_enabled(), g->proofs_enabled());
-            m_r.set_substitution(m_subst.get());
-            m_occs(*m_goal);
-
-            while (true) {
-                TRACE("propagate_values", tout << "while(true) loop\n"; m_goal->display_with_dependencies(tout););
-                if (forward) {
-                    for (; m_idx < size; m_idx++) {
-                        process_current();
-                        if (m_goal->inconsistent())
-                            goto end;
-                    }
-                    if (m_subst->empty() && !m_modified)
+                while (m_idx > 0) {
+                    m_idx--;
+                    process_current();
+                    if (m_goal->inconsistent())
                         goto end;
-                    m_occs(*m_goal);
-                    m_idx = m_goal->size();
-                    forward = false;
-                    m_subst->reset();
-                    m_r.set_substitution(m_subst.get()); // reset, but keep substitution
                 }
-                else {
-                    while (m_idx > 0) {
-                        m_idx--;
-                        process_current();
-                        if (m_goal->inconsistent())
-                            goto end;
-                    }
-                    if (!m_modified)
-                        goto end;
-                    m_subst->reset();
-                    m_r.set_substitution(m_subst.get()); // reset, but keep substitution
-                    m_modified = false;
-                    m_occs(*m_goal);
-                    m_idx = 0;
-                    size = m_goal->size();
-                    forward = true;
-                }
-                round++;
-                if (round >= m_max_rounds)
-                    break;
-                IF_VERBOSE(100, verbose_stream() << "starting new round, goal size: " << m_goal->num_exprs() << std::endl;);
-                TRACE("propagate_values", tout << "round finished\n"; m_goal->display(tout); tout << "\n";);
+                if (!m_modified)
+                    goto end;
+                m_subst->reset();
+                m_r.set_substitution(m_subst.get()); // reset, but keep substitution
+                m_modified = false;
+                m_occs(*m_goal);
+                m_idx = 0;
+                size = m_goal->size();
+                forward = true;
             }
-            end:
-            m_goal->elim_redundancies();
-            m_goal->inc_depth();
-            result.push_back(m_goal);
-            SASSERT(m_goal->is_well_sorted());
-            TRACE("propagate_values", tout << "end\n"; m_goal->display(tout););
-            TRACE("propagate_values_core", m_goal->display_with_dependencies(tout););
-            m_goal = nullptr;
+            round++;
+            if (round >= m_max_rounds)
+                break;
+            IF_VERBOSE(100, verbose_stream() << "starting new round, goal size: " << m_goal->num_exprs() << std::endl;);
+            TRACE("propagate_values", tout << "round finished\n"; m_goal->display(tout); tout << "\n";);
         }
-    };
+    end:
+        m_goal->elim_redundancies();
+        m_goal->inc_depth();
+        result.push_back(m_goal);
+        SASSERT(m_goal->is_well_sorted());
+        TRACE("propagate_values", tout << "end\n"; m_goal->display(tout););
+        TRACE("propagate_values_core", m_goal->display_with_dependencies(tout););
+        m_goal = nullptr;
+    }

-    imp * m_imp;
-    params_ref m_params;
 public:
     propagate_values_tactic(ast_manager & m, params_ref const & p):
+        m(m),
+        m_r(m, p),
+        m_goal(nullptr),
+        m_occs(m, true /* track atoms */),
         m_params(p) {
-        m_imp = alloc(imp, m, p);
+        updt_params_core(p);
     }

     tactic * translate(ast_manager & m) override {
         return alloc(propagate_values_tactic, m, m_params);
     }
-
-    ~propagate_values_tactic() override {
-        dealloc(m_imp);
-    }

     void updt_params(params_ref const & p) override {
         m_params = p;
-        m_imp->updt_params(p);
+        m_r.updt_params(p);
+        updt_params_core(p);
     }

     void collect_param_descrs(param_descrs & r) override {
@@ -252,7 +234,7 @@ public:

     void operator()(goal_ref const & in, goal_ref_buffer & result) override {
         try {
-            (*m_imp)(in, result);
+            run(in, result);
         }
         catch (rewriter_exception & ex) {
             throw tactic_exception(ex.msg());
@@ -260,15 +242,14 @@ public:
     }

     void cleanup() override {
-        ast_manager & m = m_imp->m;
-        params_ref p = std::move(m_params);
-        m_imp->~imp();
-        new (m_imp) imp(m, p);
+        m_r.cleanup();
+        m_subst = nullptr;
+        m_occs.cleanup();
     }
 };
+}

 tactic * mk_propagate_values_tactic(ast_manager & m, params_ref const & p) {
     return alloc(propagate_values_tactic, m, p);
 }
-
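The rewritten propagate-values tactic keeps its original algorithm: it sweeps the goal forward and then backward, learning values for shared atoms and equalities through an expr_substitution that th_rewriter applies to later formulas, and it stops after max_rounds rounds or when a sweep changes nothing. A much-simplified, self-contained sketch of that control flow; the string "formulas" and naive textual substitution are only illustrative stand-ins for Z3's ASTs, proofs and dependency tracking:

#include <algorithm>
#include <cctype>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using subst_t = std::map<std::string, std::string>;

// Replace every bound name by its value (a naive textual stand-in for running
// th_rewriter with an expr_substitution installed; a real implementation would
// match whole identifiers, not substrings).
static std::string rewrite(const subst_t & s, std::string f) {
    for (const auto & [name, value] : s) {
        std::string::size_type pos = 0;
        while ((pos = f.find(name, pos)) != std::string::npos) {
            f.replace(pos, name.size(), value);
            pos += value.size();
        }
    }
    return f;
}

// If the formula is a unit equality "name = <number>", learn the binding.
static void learn(subst_t & s, const std::string & f) {
    auto eq = f.find(" = ");
    if (eq == std::string::npos) return;
    std::string rhs = f.substr(eq + 3);
    if (!rhs.empty() && std::all_of(rhs.begin(), rhs.end(),
                                    [](unsigned char c) { return std::isdigit(c); }))
        s[f.substr(0, eq)] = rhs;
}

int main() {
    std::vector<std::string> goal = {"x = 3", "y = x + 1", "z < y + x"};
    const unsigned max_rounds = 4;            // mirrors the max_rounds parameter
    bool forward = true;
    for (unsigned round = 0; round < 2 * max_rounds; ++round) {
        subst_t s;                            // bindings are re-learned each sweep
        bool modified = false;
        for (std::size_t k = 0; k < goal.size(); ++k) {
            std::size_t i = forward ? k : goal.size() - 1 - k;
            std::string nf = rewrite(s, goal[i]);
            if (nf != goal[i]) { goal[i] = nf; modified = true; }
            learn(s, goal[i]);
        }
        if (!modified && round > 0) break;    // fixed point: stop early
        forward = !forward;                   // alternate sweep direction
    }
    for (const auto & f : goal) std::cout << f << "\n";
}

Running the sketch turns "y = x + 1" into "y = 3 + 1" and "z < y + x" into "z < y + 3", which is the kind of constant propagation the real tactic performs on the goal.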
diff --git a/src/tactic/core/propagate_values_tactic.h b/src/tactic/core/propagate_values_tactic.h
index 635b0a36f..d9324ff82 100644
--- a/src/tactic/core/propagate_values_tactic.h
+++ b/src/tactic/core/propagate_values_tactic.h
@@ -17,8 +17,7 @@ Author:
 Revision History:

 --*/
-#ifndef PROPAGATE_VALUES_TACTIC_H_
-#define PROPAGATE_VALUES_TACTIC_H_
+#pragma once

 #include "util/params.h"
 class ast_manager;
@@ -29,5 +28,3 @@ tactic * mk_propagate_values_tactic(ast_manager & m, params_ref const & p = para
 /*
   ADD_TACTIC("propagate-values", "propagate constants.", "mk_propagate_values_tactic(m, p)")
 */
-
-#endif
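The header changes in this patch all have the same shape: the #ifndef/#define/#endif include guard is dropped in favor of #pragma once, which is supported by every compiler Z3 targets and removes the risk of mistyped or colliding guard macros. A tiny illustration with a hypothetical header, not one of the real Z3 headers:

// point.h -- shown with the guard already replaced by the pragma.
#pragma once

struct point {
    int x;
    int y;
};

// main.cpp -- including the header twice is still harmless, exactly as with
// an include guard; the second inclusion is skipped by the preprocessor.
#include "point.h"
#include "point.h"
#include <iostream>

int main() {
    point p{1, 2};
    std::cout << p.x + p.y << "\n";   // prints 3
}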