
Merge branch 'unstable' of https://git01.codeplex.com/z3 into fpa-api

Conflicts:
	scripts/mk_project.py
	src/api/z3.h
	src/ast/float_decl_plugin.cpp
	src/ast/float_decl_plugin.h
	src/ast/fpa/fpa2bv_converter.cpp
	src/ast/fpa/fpa2bv_rewriter.h
	src/ast/rewriter/float_rewriter.cpp
	src/ast/rewriter/float_rewriter.h

Signed-off-by: Christoph M. Wintersteiger <cwinter@microsoft.com>
Christoph M. Wintersteiger 2014-11-11 11:53:39 +00:00
commit 8d3ef92383
240 changed files with 4848 additions and 2395 deletions


@@ -604,7 +604,9 @@ bool arith_eq_solver::solve_integer_equations_gcd(
}
SASSERT(g == r0[i]);
}
SASSERT(abs(r0[i]).is_one());
if (!abs(r0[i]).is_one()) {
return false;
}
live.erase(live.begin()+live_pos);
for (j = 0; j < live.size(); ++j) {
row& r = rows[live[j]];


@@ -19,10 +19,11 @@ Revision History:
#include"smt_params.h"
#include"smt_params_helper.hpp"
#include"model_params.hpp"
#include"gparams.h"
void smt_params::updt_local_params(params_ref const & _p) {
smt_params_helper p(_p);
m_auto_config = p.auto_config();
m_auto_config = p.auto_config() && gparams::get_value("auto_config") == "true"; // auto-config is not scoped by smt in gparams.
m_random_seed = p.random_seed();
m_relevancy_lvl = p.relevancy();
m_ematching = p.ematching();
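
For context on the smt_params::updt_local_params hunk above: after this change, the module-scoped smt.auto_config setting only takes effect when the global auto_config flag (which gparams does not scope under smt.) is also true. A minimal sketch of that gating, assuming hypothetical stub types in place of smt_params_helper and gparams; illustrative only, not the Z3 API:

#include <iostream>
#include <string>

// Hypothetical stand-ins for smt_params_helper and gparams (illustration only).
struct params_stub  { bool auto_config() const { return true; } };
struct gparams_stub { static std::string get_value(const char*) { return "false"; } };

int main() {
    params_stub p;
    // Mirrors the updated assignment: the module-scoped value is honoured only
    // when the global auto_config flag is also "true".
    bool m_auto_config = p.auto_config() && gparams_stub::get_value("auto_config") == "true";
    std::cout << std::boolalpha << "effective auto_config: " << m_auto_config << "\n";
    return 0;
}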


@@ -1615,6 +1615,8 @@ namespace smt {
unsigned qhead = m_qhead;
if (!bcp())
return false;
if (m_cancel_flag)
return true;
SASSERT(!inconsistent());
propagate_relevancy(qhead);
if (inconsistent())
@@ -3945,7 +3947,7 @@ namespace smt {
m_fingerprints.display(tout);
);
failure fl = get_last_search_failure();
if (fl == TIMEOUT || fl == MEMOUT || fl == CANCELED || fl == NUM_CONFLICTS || fl == THEORY) {
if (fl == TIMEOUT || fl == MEMOUT || fl == CANCELED || fl == NUM_CONFLICTS) {
// don't generate model.
return;
}


@@ -54,7 +54,11 @@ Revision History:
// the case that each context only references a few expressions.
// Using a map instead of a vector for the literals can compress space
// consumption.
#ifdef SPARSE_MAP
#define USE_BOOL_VAR_VECTOR 0
#else
#define USE_BOOL_VAR_VECTOR 1
#endif
namespace smt {
@@ -69,9 +73,10 @@ namespace smt {
std::string last_failure_as_string() const;
void set_progress_callback(progress_callback *callback);
protected:
ast_manager & m_manager;
smt_params & m_fparams;
smt_params & m_fparams;
params_ref m_params;
setup m_setup;
volatile bool m_cancel_flag;
@@ -106,7 +111,7 @@ namespace smt {
// -----------------------------------
enode * m_true_enode;
enode * m_false_enode;
ptr_vector<enode> m_app2enode; // app -> enode
app2enode_t m_app2enode; // app -> enode
ptr_vector<enode> m_enodes;
plugin_manager<theory> m_theories; // mapping from theory_id -> theory
ptr_vector<theory> m_theory_set; // set of theories for fast traversal


@@ -24,7 +24,7 @@ namespace smt {
/**
\brief Initialize an enode in the given memory position.
*/
enode * enode::init(ast_manager & m, void * mem, ptr_vector<enode> const & app2enode, app * owner,
enode * enode::init(ast_manager & m, void * mem, app2enode_t const & app2enode, app * owner,
unsigned generation, bool suppress_args, bool merge_tf, unsigned iscope_lvl,
bool cgc_enabled, bool update_children_parent) {
SASSERT(m.is_bool(owner) || !merge_tf);
@@ -60,7 +60,7 @@ namespace smt {
return n;
}
enode * enode::mk(ast_manager & m, region & r, ptr_vector<enode> const & app2enode, app * owner,
enode * enode::mk(ast_manager & m, region & r, app2enode_t const & app2enode, app * owner,
unsigned generation, bool suppress_args, bool merge_tf, unsigned iscope_lvl,
bool cgc_enabled, bool update_children_parent) {
SASSERT(m.is_bool(owner) || !merge_tf);
@@ -69,7 +69,7 @@ namespace smt {
return init(m, mem, app2enode, owner, generation, suppress_args, merge_tf, iscope_lvl, cgc_enabled, update_children_parent);
}
enode * enode::mk_dummy(ast_manager & m, ptr_vector<enode> const & app2enode, app * owner) {
enode * enode::mk_dummy(ast_manager & m, app2enode_t const & app2enode, app * owner) {
unsigned sz = get_enode_size(owner->get_num_args());
void * mem = alloc_svect(char, sz);
return init(m, mem, app2enode, owner, 0, false, false, 0, true, false);


@@ -38,6 +38,29 @@ namespace smt {
}
};
/** \brief Use sparse maps in SMT solver.
Define this to use hash maps rather than vectors over ast
nodes. This is useful in the case there are many solvers, each
referencing few nodes from a large ast manager. There is some
unknown performance penalty for this. */
// #define SPARSE_MAP
#ifndef SPARSE_MAP
typedef ptr_vector<enode> app2enode_t; // app -> enode
#else
class app2enode_t : public u_map<enode *> {
public:
void setx(unsigned x, enode *val, enode *def){
if(val == 0)
erase(x);
else
insert(x,val);
}
};
#endif
class tmp_enode;
/**
@@ -115,7 +138,7 @@ namespace smt {
friend class tmp_enode;
static enode * init(ast_manager & m, void * mem, ptr_vector<enode> const & app2enode, app * owner,
static enode * init(ast_manager & m, void * mem, app2enode_t const & app2enode, app * owner,
unsigned generation, bool suppress_args, bool merge_tf, unsigned iscope_lvl,
bool cgc_enabled, bool update_children_parent);
public:
@@ -124,11 +147,11 @@ namespace smt {
return sizeof(enode) + num_args * sizeof(enode*);
}
static enode * mk(ast_manager & m, region & r, ptr_vector<enode> const & app2enode, app * owner,
static enode * mk(ast_manager & m, region & r, app2enode_t const & app2enode, app * owner,
unsigned generation, bool suppress_args, bool merge_tf, unsigned iscope_lvl,
bool cgc_enabled, bool update_children_parent);
static enode * mk_dummy(ast_manager & m, ptr_vector<enode> const & app2enode, app * owner);
static enode * mk_dummy(ast_manager & m, app2enode_t const & app2enode, app * owner);
static void del_dummy(enode * n) { dealloc_svect(reinterpret_cast<char*>(n)); }
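
The app2enode_t type introduced above lets the solver choose between a dense ptr_vector<enode> indexed by expression id and, when SPARSE_MAP is defined, a hash map that stores only the entries actually in use. A small self-contained sketch of the sparse variant's setx behaviour (storing a null pointer erases the entry), using std::unordered_map as a stand-in for Z3's u_map; this is an illustration under that assumption, not the patched class:

#include <cassert>
#include <unordered_map>

struct enode;  // opaque in this sketch

// Sparse app-id -> enode map: setx(null) removes the entry instead of storing a hole.
class app2enode_sketch : public std::unordered_map<unsigned, enode*> {
public:
    void setx(unsigned id, enode* val, enode* /*def*/) {
        if (val == nullptr) erase(id);
        else                (*this)[id] = val;
    }
    enode* getx(unsigned id, enode* def) const {
        auto it = find(id);
        return it == end() ? def : it->second;
    }
};

int main() {
    app2enode_sketch m;
    enode* n = reinterpret_cast<enode*>(0x1);   // dummy non-null pointer, for illustration only
    m.setx(7, n, nullptr);
    assert(m.getx(7, nullptr) == n);
    m.setx(7, nullptr, nullptr);                // storing null erases, keeping the map sparse
    assert(m.getx(7, nullptr) == nullptr);
    return 0;
}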


@@ -136,9 +136,8 @@ namespace smt {
if (cex == 0)
return false; // no model available.
unsigned num_decls = q->get_num_decls();
unsigned num_sks = sks.size();
// Remark: sks were created for the flat version of q.
SASSERT(num_sks >= num_decls);
SASSERT(sks.size() >= num_decls);
expr_ref_buffer bindings(m_manager);
bindings.resize(num_decls);
unsigned max_generation = 0;


@@ -453,9 +453,9 @@ namespace smt {
instantiated.
*/
virtual void add(quantifier * q) {
if (m_fparams->m_mbqi && mbqi_enabled(q)) {
m_model_finder->register_quantifier(q);
}
if (m_fparams->m_mbqi && mbqi_enabled(q)) {
m_model_finder->register_quantifier(q);
}
}
virtual void del(quantifier * q) {


@@ -85,6 +85,7 @@ namespace smt {
typedef typename Ext::numeral numeral;
typedef typename Ext::inf_numeral inf_numeral;
typedef vector<numeral> numeral_vector;
typedef map<rational, theory_var, obj_hash<rational>, default_eq<rational> > rational2var;
static const int dead_row_id = -1;
protected:
@@ -409,6 +410,7 @@ namespace smt {
atoms m_atoms; // set of theory atoms
ptr_vector<bound> m_asserted_bounds; // set of asserted bounds
unsigned m_asserted_qhead;
ptr_vector<atom> m_new_atoms; // new bound atoms that have yet to be internalized.
svector<theory_var> m_nl_monomials; // non linear monomials
svector<theory_var> m_nl_propagated; // non linear monomials that became linear
v_dependency_manager m_dep_manager; // for tracking bounds during non-linear reasoning
@@ -569,6 +571,22 @@ namespace smt {
void mk_clause(literal l1, literal l2, unsigned num_params, parameter * params);
void mk_clause(literal l1, literal l2, literal l3, unsigned num_params, parameter * params);
void mk_bound_axioms(atom * a);
void mk_bound_axiom(atom* a1, atom* a2);
void flush_bound_axioms();
typename atoms::iterator next_sup(atom* a1, atom_kind kind,
typename atoms::iterator it,
typename atoms::iterator end,
bool& found_compatible);
typename atoms::iterator next_inf(atom* a1, atom_kind kind,
typename atoms::iterator it,
typename atoms::iterator end,
bool& found_compatible);
typename atoms::iterator first(atom_kind kind,
typename atoms::iterator it,
typename atoms::iterator end);
struct compare_atoms {
bool operator()(atom* a1, atom* a2) const { return a1->get_k() < a2->get_k(); }
};
virtual bool default_internalizer() const { return false; }
virtual bool internalize_atom(app * n, bool gate_ctx);
virtual bool internalize_term(app * term);


@@ -348,13 +348,24 @@ namespace smt {
context & ctx = get_context();
simplifier & s = ctx.get_simplifier();
expr_ref s_ante(m), s_conseq(m);
expr* s_conseq_n, * s_ante_n;
bool negated;
proof_ref pr(m);
s(ante, s_ante, pr);
negated = m.is_not(s_ante, s_ante_n);
if (negated) s_ante = s_ante_n;
ctx.internalize(s_ante, false);
literal l_ante = ctx.get_literal(s_ante);
if (negated) l_ante.neg();
s(conseq, s_conseq, pr);
negated = m.is_not(s_conseq, s_conseq_n);
if (negated) s_conseq = s_conseq_n;
ctx.internalize(s_conseq, false);
literal l_conseq = ctx.get_literal(s_conseq);
if (negated) l_conseq.neg();
literal lits[2] = {l_ante, l_conseq};
ctx.mk_th_axiom(get_id(), 2, lits);
if (ctx.relevancy()) {
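
The hunk above now strips a top-level negation from the simplified antecedent and consequent before internalizing them, and then restores the polarity on the resulting literal. A toy sketch of that polarity bookkeeping, with hypothetical expr/literal stand-ins rather than the Z3 classes:

#include <iostream>
#include <string>

struct expr { bool is_not; std::string body; };     // toy expression: optional top-level negation

struct literal {
    std::string atom;
    bool sign;                                      // true means the literal is negated
    void neg() { sign = !sign; }
};

// Stand-in for ctx.internalize(...) followed by ctx.get_literal(...) on the positive atom.
static literal internalize(const std::string& atom) { return literal{ atom, false }; }

static literal to_literal(const expr& e) {
    bool negated = e.is_not;             // corresponds to m.is_not(s_ante, s_ante_n)
    literal l = internalize(e.body);     // internalize the stripped, positive part
    if (negated) l.neg();                // restore the original polarity on the literal
    return l;
}

int main() {
    literal l = to_literal(expr{ true, "(<= x 0)" });   // models (not (<= x 0))
    std::cout << (l.sign ? "~" : "") << l.atom << "\n";
    return 0;
}
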
@@ -800,48 +811,238 @@ namespace smt {
template<typename Ext>
void theory_arith<Ext>::mk_bound_axioms(atom * a1) {
theory_var v = a1->get_var();
literal l1(a1->get_bool_var());
atoms & occs = m_var_occs[v];
if (!get_context().is_searching()) {
//
// NB. We make an assumption that user push calls propagation
// before internal scopes are pushed. This flushes all newly
// asserted atoms into the right context.
//
m_new_atoms.push_back(a1);
return;
}
numeral const & k1(a1->get_k());
atom_kind kind1 = a1->get_atom_kind();
TRACE("mk_bound_axioms", tout << "making bound axioms for v" << v << " " << kind1 << " " << k1 << "\n";);
atoms & occs = m_var_occs[v];
typename atoms::iterator it = occs.begin();
typename atoms::iterator end = occs.end();
typename atoms::iterator lo_inf = end, lo_sup = end;
typename atoms::iterator hi_inf = end, hi_sup = end;
for (; it != end; ++it) {
atom * a2 = *it;
literal l2(a2->get_bool_var());
numeral const & k2 = a2->get_k();
atom_kind kind2 = a2->get_atom_kind();
atom * a2 = *it;
numeral const & k2(a2->get_k());
atom_kind kind2 = a2->get_atom_kind();
SASSERT(k1 != k2 || kind1 != kind2);
SASSERT(a2->get_var() == v);
parameter coeffs[3] = { parameter(symbol("farkas")), parameter(rational(1)), parameter(rational(1)) };
if (kind1 == A_LOWER) {
if (kind2 == A_LOWER) {
// x >= k1, x >= k2
if (k1 >= k2) mk_clause(~l1, l2, 3, coeffs);
else mk_clause(~l2, l1, 3, coeffs);
if (kind2 == A_LOWER) {
if (k2 < k1) {
if (lo_inf == end || k2 > (*lo_inf)->get_k()) {
lo_inf = it;
}
}
else {
// x >= k1, x <= k2
if (k1 > k2) mk_clause(~l1, ~l2, 3, coeffs);
else mk_clause(l1, l2, 3, coeffs);
else if (lo_sup == end || k2 < (*lo_sup)->get_k()) {
lo_sup = it;
}
}
else {
if (kind2 == A_LOWER) {
// x <= k1, x >= k2
if (k1 < k2) mk_clause(~l1, ~l2, 3, coeffs);
else mk_clause(l1, l2, 3, coeffs);
else if (k2 < k1) {
if (hi_inf == end || k2 > (*hi_inf)->get_k()) {
hi_inf = it;
}
}
else if (hi_sup == end || k2 < (*hi_sup)->get_k()) {
hi_sup = it;
}
}
if (lo_inf != end) mk_bound_axiom(a1, *lo_inf);
if (lo_sup != end) mk_bound_axiom(a1, *lo_sup);
if (hi_inf != end) mk_bound_axiom(a1, *hi_inf);
if (hi_sup != end) mk_bound_axiom(a1, *hi_sup);
}
template<typename Ext>
void theory_arith<Ext>::mk_bound_axiom(atom* a1, atom* a2) {
theory_var v = a1->get_var();
literal l1(a1->get_bool_var());
literal l2(a2->get_bool_var());
numeral const & k1(a1->get_k());
numeral const & k2(a2->get_k());
atom_kind kind1 = a1->get_atom_kind();
atom_kind kind2 = a2->get_atom_kind();
bool v_is_int = is_int(v);
SASSERT(v == a2->get_var());
SASSERT(k1 != k2 || kind1 != kind2);
parameter coeffs[3] = { parameter(symbol("farkas")),
parameter(rational(1)), parameter(rational(1)) };
if (kind1 == A_LOWER) {
if (kind2 == A_LOWER) {
if (k2 <= k1) {
mk_clause(~l1, l2, 3, coeffs);
}
else {
// x <= k1, x <= k2
if (k1 < k2) mk_clause(~l1, l2, 3, coeffs);
else mk_clause(~l2, l1, 3, coeffs);
mk_clause(l1, ~l2, 3, coeffs);
}
}
else if (k1 <= k2) {
// k1 <= k2, k1 <= x or x <= k2
mk_clause(l1, l2, 3, coeffs);
}
else {
// k1 > hi_inf, k1 <= x => ~(x <= hi_inf)
mk_clause(~l1, ~l2, 3, coeffs);
if (v_is_int && k1 == k2 + numeral(1)) {
// k1 <= x or x <= k1-1
mk_clause(l1, l2, 3, coeffs);
}
}
}
else if (kind2 == A_LOWER) {
if (k1 >= k2) {
// k1 >= lo_inf, k1 >= x or lo_inf <= x
mk_clause(l1, l2, 3, coeffs);
}
else {
// k1 < k2, k2 <= x => ~(x <= k1)
mk_clause(~l1, ~l2, 3, coeffs);
if (v_is_int && k1 == k2 - numeral(1)) {
// x <= k1 or k1+1 <= x
mk_clause(l1, l2, 3, coeffs);
}
}
}
else {
// kind1 == A_UPPER, kind2 == A_UPPER
if (k1 >= k2) {
// k1 >= k2, x <= k2 => x <= k1
mk_clause(l1, ~l2, 3, coeffs);
}
else {
// k1 <= hi_sup , x <= k1 => x <= hi_sup
mk_clause(~l1, l2, 3, coeffs);
}
}
}
template<typename Ext>
void theory_arith<Ext>::flush_bound_axioms() {
while (!m_new_atoms.empty()) {
ptr_vector<atom> atoms;
atoms.push_back(m_new_atoms.back());
m_new_atoms.pop_back();
theory_var v = atoms.back()->get_var();
for (unsigned i = 0; i < m_new_atoms.size(); ++i) {
if (m_new_atoms[i]->get_var() == v) {
atoms.push_back(m_new_atoms[i]);
m_new_atoms[i] = m_new_atoms.back();
m_new_atoms.pop_back();
--i;
}
}
ptr_vector<atom> occs(m_var_occs[v]);
std::sort(atoms.begin(), atoms.end(), compare_atoms());
std::sort(occs.begin(), occs.end(), compare_atoms());
typename atoms::iterator begin1 = occs.begin();
typename atoms::iterator begin2 = occs.begin();
typename atoms::iterator end = occs.end();
begin1 = first(A_LOWER, begin1, end);
begin2 = first(A_UPPER, begin2, end);
typename atoms::iterator lo_inf = begin1, lo_sup = begin1;
typename atoms::iterator hi_inf = begin2, hi_sup = begin2;
typename atoms::iterator lo_inf1 = begin1, lo_sup1 = begin1;
typename atoms::iterator hi_inf1 = begin2, hi_sup1 = begin2;
bool flo_inf, fhi_inf, flo_sup, fhi_sup;
ptr_addr_hashtable<atom> visited;
for (unsigned i = 0; i < atoms.size(); ++i) {
atom* a1 = atoms[i];
lo_inf1 = next_inf(a1, A_LOWER, lo_inf, end, flo_inf);
hi_inf1 = next_inf(a1, A_UPPER, hi_inf, end, fhi_inf);
lo_sup1 = next_sup(a1, A_LOWER, lo_sup, end, flo_sup);
hi_sup1 = next_sup(a1, A_UPPER, hi_sup, end, fhi_sup);
if (lo_inf1 != end) lo_inf = lo_inf1;
if (lo_sup1 != end) lo_sup = lo_sup1;
if (hi_inf1 != end) hi_inf = hi_inf1;
if (hi_sup1 != end) hi_sup = hi_sup1;
if (!flo_inf) lo_inf = end;
if (!fhi_inf) hi_inf = end;
if (!flo_sup) lo_sup = end;
if (!fhi_sup) hi_sup = end;
visited.insert(a1);
if (lo_inf1 != end && lo_inf != end && !visited.contains(*lo_inf)) mk_bound_axiom(a1, *lo_inf);
if (lo_sup1 != end && lo_sup != end && !visited.contains(*lo_sup)) mk_bound_axiom(a1, *lo_sup);
if (hi_inf1 != end && hi_inf != end && !visited.contains(*hi_inf)) mk_bound_axiom(a1, *hi_inf);
if (hi_sup1 != end && hi_sup != end && !visited.contains(*hi_sup)) mk_bound_axiom(a1, *hi_sup);
}
}
}
template<typename Ext>
typename theory_arith<Ext>::atoms::iterator
theory_arith<Ext>::first(
atom_kind kind,
typename atoms::iterator it,
typename atoms::iterator end) {
for (; it != end; ++it) {
atom* a = *it;
if (a->get_atom_kind() == kind) return it;
}
return end;
}
template<typename Ext>
typename theory_arith<Ext>::atoms::iterator
theory_arith<Ext>::next_inf(
atom* a1,
atom_kind kind,
typename atoms::iterator it,
typename atoms::iterator end,
bool& found_compatible) {
numeral const & k1(a1->get_k());
typename atoms::iterator result = end;
found_compatible = false;
for (; it != end; ++it) {
atom * a2 = *it;
if (a1 == a2) continue;
if (a2->get_atom_kind() != kind) continue;
numeral const & k2(a2->get_k());
found_compatible = true;
if (k2 <= k1) {
result = it;
}
else {
break;
}
}
return result;
}
template<typename Ext>
typename theory_arith<Ext>::atoms::iterator
theory_arith<Ext>::next_sup(
atom* a1,
atom_kind kind,
typename atoms::iterator it,
typename atoms::iterator end,
bool& found_compatible) {
numeral const & k1(a1->get_k());
found_compatible = false;
for (; it != end; ++it) {
atom * a2 = *it;
if (a1 == a2) continue;
if (a2->get_atom_kind() != kind) continue;
numeral const & k2(a2->get_k());
found_compatible = true;
if (k1 < k2) {
return it;
}
}
return end;
}
template<typename Ext>
bool theory_arith<Ext>::internalize_atom(app * n, bool gate_ctx) {
TRACE("arith_internalize", tout << "internalising atom:\n" << mk_pp(n, this->get_manager()) << "\n";);
@@ -879,7 +1080,7 @@ namespace smt {
bool_var bv = ctx.mk_bool_var(n);
ctx.set_var_theory(bv, get_id());
rational _k;
m_util.is_numeral(rhs, _k);
VERIFY(m_util.is_numeral(rhs, _k));
numeral k(_k);
atom * a = alloc(atom, bv, v, k, kind);
mk_bound_axioms(a);
@@ -927,6 +1128,7 @@ namespace smt {
template<typename Ext>
void theory_arith<Ext>::assign_eh(bool_var v, bool is_true) {
TRACE("arith", tout << "v" << v << " " << is_true << "\n";);
atom * a = get_bv2a(v);
if (!a) return;
SASSERT(get_context().get_assignment(a->get_bool_var()) != l_undef);
@@ -1072,8 +1274,10 @@ namespace smt {
}
}
while (m_final_check_idx != old_idx);
if (result == FC_DONE && m_found_unsupported_op)
if (result == FC_DONE && m_found_unsupported_op) {
TRACE("arith", tout << "Found unsupported operation\n";);
result = FC_GIVEUP;
}
return result;
}
@@ -1142,6 +1346,7 @@ namespace smt {
CASSERT("arith", wf_columns());
CASSERT("arith", valid_row_assignment());
flush_bound_axioms();
propagate_linear_monomials();
while (m_asserted_qhead < m_asserted_bounds.size()) {
bound * b = m_asserted_bounds[m_asserted_qhead];
@@ -2421,6 +2626,8 @@ namespace smt {
if (val == l_undef)
continue;
// TODO: check if the following line is a bottleneck
TRACE("arith", tout << "v" << a->get_bool_var() << " " << (val == l_true) << "\n";);
a->assign_eh(val == l_true, get_epsilon(a->get_var()));
if (val != l_undef && a->get_bound_kind() == b->get_bound_kind()) {
SASSERT((ctx.get_assignment(bv) == l_true) == a->is_true());
@@ -2780,7 +2987,6 @@ namespace smt {
*/
template<typename Ext>
void theory_arith<Ext>::refine_epsilon() {
typedef map<rational, theory_var, obj_hash<rational>, default_eq<rational> > rational2var;
while (true) {
rational2var mapping;
theory_var num = get_num_vars();
@@ -2788,6 +2994,8 @@ namespace smt {
for (theory_var v = 0; v < num; v++) {
if (is_int(v))
continue;
if (!get_context().is_shared(get_enode(v)))
continue;
inf_numeral const & val = get_value(v);
rational value = val.get_rational().to_rational() + m_epsilon.to_rational() * val.get_infinitesimal().to_rational();
theory_var v2;
@@ -2915,6 +3123,7 @@ namespace smt {
SASSERT(m_to_patch.empty());
m_to_check.reset();
m_in_to_check.reset();
m_new_atoms.reset();
CASSERT("arith", wf_rows());
CASSERT("arith", wf_columns());
CASSERT("arith", valid_row_assignment());


@@ -1294,8 +1294,10 @@ namespace smt {
}
tout << "max: " << max << ", min: " << min << "\n";);
if (m_params.m_arith_ignore_int)
if (m_params.m_arith_ignore_int) {
TRACE("arith", tout << "Ignore int: give up\n";);
return FC_GIVEUP;
}
if (!gcd_test())
return FC_CONTINUE;


@@ -654,6 +654,7 @@ namespace smt {
}
return get_value(v, computed_epsilon) == val;
}
/**
\brief Return true if for every monomial x_1 * ... * x_n,
@@ -2309,16 +2310,19 @@ namespace smt {
if (m_nl_monomials.empty())
return FC_DONE;
if (check_monomial_assignments())
if (check_monomial_assignments()) {
return FC_DONE;
}
if (!m_params.m_nl_arith)
if (!m_params.m_nl_arith) {
TRACE("non_linear", tout << "Non-linear is not enabled\n";);
return FC_GIVEUP;
}
TRACE("process_non_linear", display(tout););
if (m_nl_rounds > m_params.m_nl_arith_rounds) {
TRACE("non_linear", tout << "GIVE UP non linear problem...\n";);
TRACE("non_linear", tout << "GIVEUP non linear problem...\n";);
IF_VERBOSE(3, verbose_stream() << "Max. non linear arithmetic rounds. Increase threshold using NL_ARITH_ROUNDS=<limit>\n";);
return FC_GIVEUP;
}
@@ -2338,9 +2342,10 @@ namespace smt {
if (!max_min_nl_vars())
return FC_CONTINUE;
if (check_monomial_assignments())
if (check_monomial_assignments()) {
return m_liberal_final_check || !m_changed_assignment ? FC_DONE : FC_CONTINUE;
}
svector<theory_var> vars;
get_non_linear_cluster(vars);
@@ -2391,8 +2396,9 @@ namespace smt {
}
while (m_nl_strategy_idx != old_idx);
if (check_monomial_assignments())
if (check_monomial_assignments()) {
return m_liberal_final_check || !m_changed_assignment ? FC_DONE : FC_CONTINUE;
}
TRACE("non_linear", display(tout););


@@ -362,7 +362,6 @@ namespace smt {
(store A i v) <--- v is used as a value
*/
bool theory_array_base::is_shared(theory_var v) const {
context & ctx = get_context();
enode * n = get_enode(v);
enode * r = n->get_root();
bool is_array = false;
@@ -376,8 +375,10 @@ namespace smt {
enode_vector::const_iterator end = r->end_parents();
for (; it != end; ++it) {
enode * parent = *it;
#if 0
if (!ctx.is_relevant(parent))
continue;
#endif
unsigned num_args = parent->get_num_args();
if (is_store(parent)) {
SET_ARRAY(parent->get_arg(0));
@@ -399,6 +400,7 @@ namespace smt {
return false;
}
#if 0
void theory_array_base::collect_shared_vars(sbuffer<theory_var> & result) {
TRACE("array_shared", tout << "collecting shared vars...\n";);
context & ctx = get_context();
@@ -420,6 +422,31 @@ namespace smt {
}
unmark_enodes(to_unmark.size(), to_unmark.c_ptr());
}
#else
void theory_array_base::collect_shared_vars(sbuffer<theory_var> & result) {
TRACE("array_shared", tout << "collecting shared vars...\n";);
context & ctx = get_context();
ptr_buffer<enode> to_unmark;
unsigned num_vars = get_num_vars();
for (unsigned i = 0; i < num_vars; i++) {
enode * n = get_enode(i);
if (ctx.is_relevant(n)) {
enode * r = n->get_root();
if (!r->is_marked()){
if(is_array_sort(r) && ctx.is_shared(r)) {
TRACE("array_shared", tout << "new shared var: #" << r->get_owner_id() << "\n";);
theory_var r_th_var = r->get_th_var(get_id());
SASSERT(r_th_var != null_theory_var);
result.push_back(r_th_var);
}
r->set_mark();
to_unmark.push_back(r);
}
}
}
unmark_enodes(to_unmark.size(), to_unmark.c_ptr());
}
#endif
/**
\brief Create interface variables for shared array variables.


@@ -53,6 +53,7 @@ namespace smt {
unsigned bv_size = get_bv_size(n);
context & ctx = get_context();
literal_vector & bits = m_bits[v];
bits.reset();
for (unsigned i = 0; i < bv_size; i++) {
app * bit = mk_bit2bool(owner, i);
ctx.internalize(bit, true);
@@ -75,12 +76,14 @@ namespace smt {
void theory_bv::mk_bit2bool(app * n) {
context & ctx = get_context();
SASSERT(!ctx.b_internalized(n));
if (!ctx.e_internalized(n->get_arg(0))) {
expr* first_arg = n->get_arg(0);
if (!ctx.e_internalized(first_arg)) {
// This may happen if bit2bool(x) is in a conflict
// clause that is being reinitialized, and x was not reinitialized
// yet.
// So, we internalize x (i.e., n->get_arg(0))
expr * first_arg = n->get_arg(0);
// So, we internalize x (i.e., arg)
ctx.internalize(first_arg, false);
SASSERT(ctx.e_internalized(first_arg));
// In most cases, when x is internalized, its bits are created.
@@ -91,10 +94,27 @@ namespace smt {
// This will also force the creation of all bits for x.
enode * first_arg_enode = ctx.get_enode(first_arg);
get_var(first_arg_enode);
SASSERT(ctx.b_internalized(n));
// numerals are not blasted into bit2bool, so we do this directly.
if (!ctx.b_internalized(n)) {
rational val;
unsigned sz;
VERIFY(m_util.is_numeral(first_arg, val, sz));
theory_var v = first_arg_enode->get_th_var(get_id());
app* owner = first_arg_enode->get_owner();
for (unsigned i = 0; i < sz; ++i) {
ctx.internalize(mk_bit2bool(owner, i), true);
}
m_bits[v].reset();
rational bit;
for (unsigned i = 0; i < sz; ++i) {
div(val, rational::power_of_two(i), bit);
mod(bit, rational(2), bit);
m_bits[v].push_back(bit.is_zero()?false_literal:true_literal);
}
}
}
else {
enode * arg = ctx.get_enode(n->get_arg(0));
enode * arg = ctx.get_enode(first_arg);
// The argument was already internalized, but it may not have a theory variable associated with it.
// For example, for ite-terms the method apply_sort_cnstr is not invoked.
// See comment in the then-branch.
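
The numeral branch added to mk_bit2bool above fixes each bit literal directly from the constant's binary expansion: bit i is (val div 2^i) mod 2, mapped to false_literal or true_literal. A minimal standalone sketch of that expansion, assuming plain unsigned arithmetic in place of Z3's rational type (illustrative only):

#include <cstdio>
#include <vector>

// Expand an unsigned constant into sz bit values, least significant bit first,
// mirroring the div/mod loop in the numeral branch of mk_bit2bool.
static std::vector<bool> bits_of(unsigned long long val, unsigned sz) {
    std::vector<bool> bits;
    for (unsigned i = 0; i < sz; ++i) {
        unsigned long long bit = (val >> i) & 1;   // (val div 2^i) mod 2
        bits.push_back(bit != 0);                  // true_literal vs. false_literal in the solver
    }
    return bits;
}

int main() {
    for (bool b : bits_of(6, 4))                   // 6 = 0110b -> 0, 1, 1, 0 (LSB first)
        std::printf("%d ", b ? 1 : 0);
    std::printf("\n");
    return 0;
}
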
@@ -1041,6 +1061,7 @@ namespace smt {
void theory_bv::new_diseq_eh(theory_var v1, theory_var v2) {
if (is_bv(v1)) {
SASSERT(m_bits[v1].size() == m_bits[v2].size());
expand_diseq(v1, v2);
}
}
@@ -1381,6 +1402,7 @@ namespace smt {
if (v1 != null_theory_var) {
// conflict was detected ... v1 and v2 have complementary bits
SASSERT(m_bits[v1][it->m_idx] == ~(m_bits[v2][it->m_idx]));
SASSERT(m_bits[v1].size() == m_bits[v2].size());
mk_new_diseq_axiom(v1, v2, it->m_idx);
RESET_MERGET_AUX();
return false;


@@ -983,11 +983,8 @@ template<typename Ext>
void theory_diff_logic<Ext>::get_eq_antecedents(
theory_var v1, theory_var v2, unsigned timestamp, conflict_resolution & cr) {
imp_functor functor(cr);
bool r;
r = m_graph.find_shortest_zero_edge_path(v1, v2, timestamp, functor);
SASSERT(r);
r = m_graph.find_shortest_zero_edge_path(v2, v1, timestamp, functor);
SASSERT(r);
VERIFY(m_graph.find_shortest_zero_edge_path(v1, v2, timestamp, functor));
VERIFY(m_graph.find_shortest_zero_edge_path(v2, v1, timestamp, functor));
}
template<typename Ext>