Commit 19db0c5f2c by Nikolaj Bjorner, 2016-06-03 10:13:27 -07:00
37 changed files with 965 additions and 524 deletions

View file

@ -1185,7 +1185,7 @@ extern "C" {
case OP_FPA_TO_IEEE_BV: return Z3_OP_FPA_TO_IEEE_BV;
case OP_FPA_INTERNAL_MIN_I: return Z3_OP_FPA_MIN_I;
case OP_FPA_INTERNAL_MAX_I: return Z3_OP_FPA_MAX_I;
case OP_FPA_INTERNAL_RM_BVWRAP:
case OP_FPA_INTERNAL_BV2RM:
case OP_FPA_INTERNAL_BVWRAP:
case OP_FPA_INTERNAL_MIN_UNSPECIFIED:
case OP_FPA_INTERNAL_MAX_UNSPECIFIED:

View file

@ -8994,6 +8994,92 @@ def fpToFP(a1, a2=None, a3=None, ctx=None):
else:
raise Z3Exception("Unsupported combination of arguments for conversion to floating-point term.")
def fpBVToFP(v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a bit-vector term to a floating-point term.
>>> x_bv = BitVecVal(0x3F800000, 32)
>>> x_fp = fpBVToFP(x_bv, Float32())
>>> x_fp
fpToFP(1065353216)
>>> simplify(x_fp)
1
"""
_z3_assert(is_bv(v), "First argument must be a Z3 bit-vector expression.")
_z3_assert(is_fp_sort(sort), "Second argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_bv(ctx.ref(), v.ast, sort.ast), ctx)
def fpFPToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a floating-point term to a floating-point term of different precision.
>>> x_sgl = FPVal(1.0, Float32())
>>> x_dbl = fpFPToFP(RNE(), x_sgl, Float64())
>>> x_dbl
fpToFP(RNE(), 1)
>>> simplify(x_dbl)
1
>>> x_dbl.sort()
FPSort(11, 53)
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_fp(v), "Second argument must be a Z3 floating-point expression.")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_float(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpRealToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a real term to a floating-point term.
>>> x_r = RealVal(1.5)
>>> x_fp = fpRealToFP(RNE(), x_r, Float32())
>>> x_fp
fpToFP(RNE(), 3/2)
>>> simplify(x_fp)
1.5
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_real(v), "Second argument must be a Z3 real expression.")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_real(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpSignedToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from a signed bit-vector term (encoding an integer) to a floating-point term.
>>> x_signed = BitVecVal(-5, BitVecSort(32))
>>> x_fp = fpSignedToFP(RNE(), x_signed, Float32())
>>> x_fp
fpToFP(RNE(), 4294967291)
>>> simplify(x_fp)
-1.25*(2**2)
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_bv(v), "Second argument must be a Z3 bit-vector expression.")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_signed(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpUnsignedToFP(rm, v, sort, ctx=None):
"""Create a Z3 floating-point conversion expression that represents the
conversion from an unsigned bit-vector term (encoding an integer) to a floating-point term.
>>> x_signed = BitVecVal(-5, BitVecSort(32))
>>> x_fp = fpUnsignedToFP(RNE(), x_signed, Float32())
>>> x_fp
fpToFPUnsigned(RNE(), 4294967291)
>>> simplify(x_fp)
1*(2**32)
"""
_z3_assert(is_fprm(rm), "First argument must be a Z3 floating-point rounding mode expression.")
_z3_assert(is_bv(v), "Second argument must be a Z3 bit-vector expression.")
_z3_assert(is_fp_sort(sort), "Third argument must be a Z3 floating-point sort.")
ctx = _get_ctx(ctx)
return FPRef(Z3_mk_fpa_to_fp_unsigned(ctx.ref(), rm.ast, v.ast, sort.ast), ctx)
def fpToFPUnsigned(rm, x, s, ctx=None):
"""Create a Z3 floating-point conversion expression, from unsigned bit-vector to floating-point expression."""
if __debug__:

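A short usage sketch, not part of the diff, that exercises the new conversion helpers together (assumes a standard z3py installation):

# Sketch only: combines the fp*ToFP helpers introduced above.
from z3 import *

x_bv = BitVecVal(0x3F800000, 32)            # IEEE-754 bit pattern of 1.0f
f32  = fpBVToFP(x_bv, Float32())            # reinterpret the bits as Float32
f64  = fpFPToFP(RNE(), f32, Float64())      # widen to Float64, round to nearest even
r    = fpRealToFP(RNE(), RealVal("1.5"), Float32())
s    = fpSignedToFP(RNE(), BitVecVal(-5, 32), Float32())
u    = fpUnsignedToFP(RNE(), BitVecVal(-5, 32), Float32())
for t in (f32, f64, r, s, u):
    print(t, "=>", simplify(t))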
View file

@ -40,7 +40,7 @@ Notes:
The number of unused entries (m_unused) is equal to the number of entries
of the form
t -> (s, 0)
That is, it is the number of keys that were never accessed by cliend code.
That is, it is the number of keys that were never accessed by client code.
The cache maintains at most m_max_unused entries.
When the maximum number of unused entries exceeds m_max_unused, then

View file

@ -1420,7 +1420,7 @@ ast_manager::~ast_manager() {
std::cout << to_sort(a)->get_name() << "\n";
}
else {
std::cout << mk_ll_pp(a, *this, false);
std::cout << mk_ll_pp(a, *this, false) << "id: " << a->get_id() << "\n";
}
}
});
@ -1575,6 +1575,7 @@ bool ast_manager::are_equal(expr * a, expr * b) const {
return false;
}
bool ast_manager::are_distinct(expr* a, expr* b) const {
if (is_app(a) && is_app(b)) {
app* ap = to_app(a), *bp = to_app(b);
@ -2562,6 +2563,8 @@ proof * ast_manager::mk_modus_ponens(proof * p1, proof * p2) {
CTRACE("mk_modus_ponens", to_app(get_fact(p2))->get_arg(0) != get_fact(p1),
tout << mk_pp(get_fact(p1), *this) << "\n" << mk_pp(get_fact(p2), *this) << "\n";);
SASSERT(to_app(get_fact(p2))->get_arg(0) == get_fact(p1));
CTRACE("mk_modus_ponens", !is_ground(p2) && !has_quantifiers(p2), tout << "Non-ground: " << mk_pp(p2, *this) << "\n";);
CTRACE("mk_modus_ponens", !is_ground(p1) && !has_quantifiers(p1), tout << "Non-ground: " << mk_pp(p1, *this) << "\n";);
if (is_reflexivity(p2))
return p1;
expr * f = to_app(get_fact(p2))->get_arg(1);

View file

@ -838,6 +838,7 @@ inline bool is_func_decl(ast const * n) { return n->get_kind() == AST_FUNC_DECL
inline bool is_expr(ast const * n) { return !is_decl(n); }
inline bool is_app(ast const * n) { return n->get_kind() == AST_APP; }
inline bool is_var(ast const * n) { return n->get_kind() == AST_VAR; }
inline bool is_var(ast const * n, unsigned& idx) { return is_var(n) && (idx = static_cast<var const*>(n)->get_idx(), true); }
inline bool is_quantifier(ast const * n) { return n->get_kind() == AST_QUANTIFIER; }
inline bool is_forall(ast const * n) { return is_quantifier(n) && static_cast<quantifier const *>(n)->is_forall(); }
inline bool is_exists(ast const * n) { return is_quantifier(n) && static_cast<quantifier const *>(n)->is_exists(); }
@ -1571,11 +1572,12 @@ public:
void debug_ref_count() { m_debug_ref_count = true; }
void inc_ref(ast * n) {
if (n)
if (n) {
n->inc_ref();
}
}
void dec_ref(ast * n) {
void dec_ref(ast* n) {
if (n) {
n->dec_ref();
if (n->get_ref_count() == 0)

View file

@ -25,7 +25,6 @@ Notes:
#include"fpa2bv_converter.h"
#define BVULT(X,Y,R) { expr_ref bvult_eq(m), bvult_not(m); m_simp.mk_eq(X, Y, bvult_eq); m_simp.mk_not(bvult_eq, bvult_not); expr_ref t(m); t = m_bv_util.mk_ule(X,Y); m_simp.mk_and(t, bvult_not, R); }
#define BVSLT(X,Y,R) { expr_ref bvslt_eq(m), bvslt_not(m); m_simp.mk_eq(X, Y, bvslt_eq); m_simp.mk_not(bvslt_eq, bvslt_not); expr_ref t(m); t = m_bv_util.mk_sle(X,Y); m_simp.mk_and(t, bvslt_not, R); }
fpa2bv_converter::fpa2bv_converter(ast_manager & m) :
m(m),
@ -79,7 +78,7 @@ void fpa2bv_converter::mk_eq(expr * a, expr * b, expr_ref & result) {
m_simp.mk_or(both_are_nan, both_the_same, result);
}
else if (is_rm(a) && is_rm(b)) {
SASSERT(m_util.is_rm_bvwrap(b) && m_util.is_rm_bvwrap(a));
SASSERT(m_util.is_bv2rm(b) && m_util.is_bv2rm(a));
TRACE("fpa2bv", tout << "mk_eq_rm a=" << mk_ismt2_pp(a, m) << std::endl;
tout << "mk_eq_rm b=" << mk_ismt2_pp(b, m) << std::endl;);
@ -125,10 +124,12 @@ void fpa2bv_converter::mk_numeral(func_decl * f, unsigned num, expr * const * ar
SASSERT(num == 0);
SASSERT(f->get_num_parameters() == 1);
SASSERT(f->get_parameter(0).is_external());
unsigned p_id = f->get_parameter(0).get_ext_id();
mpf const & v = m_plugin->get_value(p_id);
mk_numeral(f->get_range(), v, result);
}
void fpa2bv_converter::mk_numeral(sort * s, mpf const & v, expr_ref & result) {
unsigned sbits = v.get_sbits();
unsigned ebits = v.get_ebits();
@ -137,12 +138,12 @@ void fpa2bv_converter::mk_numeral(func_decl * f, unsigned num, expr * const * ar
mpf_exp_t const & exp = m_util.fm().exp(v);
if (m_util.fm().is_nan(v))
mk_nan(f, result);
mk_nan(s, result);
else if (m_util.fm().is_inf(v)) {
if (m_util.fm().sgn(v))
mk_ninf(f, result);
mk_ninf(s, result);
else
mk_pinf(f, result);
mk_pinf(s, result);
}
else {
expr_ref bv_sgn(m), bv_sig(m), e(m), biased_exp(m);
@ -236,7 +237,7 @@ void fpa2bv_converter::mk_function_output(sort * rng, func_decl * fbv, expr * co
else if (m_util.is_rm(rng)) {
app_ref na(m);
na = m.mk_app(fbv, fbv->get_arity(), new_args);
result = m_util.mk_rm(na);
result = m_util.mk_bv2rm(na);
}
else
result = m.mk_app(fbv, fbv->get_arity(), new_args);
@ -284,7 +285,7 @@ void fpa2bv_converter::mk_function(func_decl * f, unsigned num, expr * const * a
bv_rng = m_bv_util.mk_sort(3);
func_decl * bv_f = get_bv_uf(f, bv_rng, num);
bv_app = m.mk_app(bv_f, num, args);
flt_app = m_util.mk_rm(bv_app);
flt_app = m_util.mk_bv2rm(bv_app);
new_eq = m.mk_eq(fapp, flt_app);
m_extra_assertions.push_back(new_eq);
result = flt_app;
@ -315,7 +316,7 @@ void fpa2bv_converter::mk_rm_const(func_decl * f, expr_ref & result) {
#endif
, m_bv_util.mk_sort(3));
result = m_util.mk_rm(bv3);
result = m_util.mk_bv2rm(bv3);
m_rm_const2bv.insert(f, result);
m.inc_ref(f);
m.inc_ref(result);
@ -522,7 +523,7 @@ void fpa2bv_converter::add_core(unsigned sbits, unsigned ebits,
void fpa2bv_converter::mk_add(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 3);
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
expr_ref rm(m), x(m), y(m);
rm = to_app(args[0])->get_arg(0);
@ -692,7 +693,7 @@ void fpa2bv_converter::mk_neg(sort * srt, expr_ref & x, expr_ref & result) {
void fpa2bv_converter::mk_mul(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 3);
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
expr_ref rm(m), x(m), y(m);
rm = to_app(args[0])->get_arg(0);
@ -842,7 +843,7 @@ void fpa2bv_converter::mk_mul(sort * s, expr_ref & rm, expr_ref & x, expr_ref &
void fpa2bv_converter::mk_div(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 3);
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
expr_ref rm(m), x(m), y(m);
rm = to_app(args[0])->get_arg(0);
x = args[1];
@ -1012,15 +1013,17 @@ void fpa2bv_converter::mk_rem(sort * s, expr_ref & x, expr_ref & y, expr_ref & r
mk_ninf(s, ninf);
mk_pinf(s, pinf);
expr_ref x_is_nan(m), x_is_zero(m), x_is_pos(m), x_is_inf(m);
expr_ref y_is_nan(m), y_is_zero(m), y_is_pos(m), y_is_inf(m);
expr_ref x_is_nan(m), x_is_zero(m), x_is_pos(m), x_is_neg(m), x_is_inf(m);
expr_ref y_is_nan(m), y_is_zero(m), y_is_pos(m), y_is_neg(m), y_is_inf(m);
mk_is_nan(x, x_is_nan);
mk_is_zero(x, x_is_zero);
mk_is_pos(x, x_is_pos);
mk_is_neg(x, x_is_neg);
mk_is_inf(x, x_is_inf);
mk_is_nan(y, y_is_nan);
mk_is_zero(y, y_is_zero);
mk_is_pos(y, y_is_pos);
mk_is_neg(y, y_is_neg);
mk_is_inf(y, y_is_inf);
dbg_decouple("fpa2bv_rem_x_is_nan", x_is_nan);
@ -1054,34 +1057,120 @@ void fpa2bv_converter::mk_rem(sort * s, expr_ref & x, expr_ref & y, expr_ref & r
// (x is 0) -> x
c5 = x_is_zero;
v5 = pzero;
// exp(x) < exp(y) -> x
unsigned ebits = m_util.get_ebits(s);
unsigned sbits = m_util.get_sbits(s);
expr * x_sgn, *x_sig, *x_exp;
expr * y_sgn, *y_sig, *y_exp;
split_fp(x, x_sgn, x_exp, x_sig);
split_fp(y, y_sgn, y_exp, y_sig);
BVSLT(x_exp, y_exp, c6);
expr_ref one_ebits(m), y_exp_m1(m), xe_lt_yem1(m), ye_neq_zero(m);
one_ebits = m_bv_util.mk_numeral(1, ebits);
y_exp_m1 = m_bv_util.mk_bv_sub(y_exp, one_ebits);
BVULT(x_exp, y_exp_m1, xe_lt_yem1);
ye_neq_zero = m.mk_not(m.mk_eq(y_exp, m_bv_util.mk_numeral(0, ebits)));
c6 = m.mk_and(ye_neq_zero, xe_lt_yem1);
v6 = x;
// else the actual remainder, r = x - y * n
expr_ref rne(m), nr(m), n(m), yn(m), r(m);
rne = m_bv_util.mk_numeral(BV_RM_TIES_TO_EVEN, 3);
mk_div(s, rne, x, y, nr);
mk_round_to_integral(s, rne, nr, n);
mk_mul(s, rne, y, n, yn);
mk_sub(s, rne, x, yn, r);
expr_ref a_sgn(m), a_sig(m), a_exp(m), a_lz(m);
expr_ref b_sgn(m), b_sig(m), b_exp(m), b_lz(m);
unpack(x, a_sgn, a_sig, a_exp, a_lz, true);
unpack(y, b_sgn, b_sig, b_exp, b_lz, true);
expr_ref r_is_zero(m), x_sgn_ref(x_sgn, m), x_sgn_zero(m);
mk_is_zero(r, r_is_zero);
mk_zero(s, x_sgn_ref, x_sgn_zero);
mk_ite(r_is_zero, x_sgn_zero, r, v7);
dbg_decouple("fpa2bv_rem_a_sgn", a_sgn);
dbg_decouple("fpa2bv_rem_a_sig", a_sig);
dbg_decouple("fpa2bv_rem_a_exp", a_exp);
dbg_decouple("fpa2bv_rem_a_lz", a_lz);
dbg_decouple("fpa2bv_rem_b_sgn", b_sgn);
dbg_decouple("fpa2bv_rem_b_sig", b_sig);
dbg_decouple("fpa2bv_rem_b_exp", b_exp);
dbg_decouple("fpa2bv_rem_b_lz", b_lz);
dbg_decouple("fpa2bv_rem_nr", nr);
dbg_decouple("fpa2bv_rem_n", n);
dbg_decouple("fpa2bv_rem_yn", yn);
dbg_decouple("fpa2bv_rem_r", r);
dbg_decouple("fpa2bv_rem_v7", v7);
// else the actual remainder.
// max. exponent difference is (2^ebits) - 3
const mpz & two_to_ebits = fu().fm().m_powers2(ebits);
mpz max_exp_diff;
m_mpz_manager.sub(two_to_ebits, 3, max_exp_diff);
SASSERT(m_mpz_manager.is_int64(max_exp_diff));
SASSERT(m_mpz_manager.get_uint64(max_exp_diff) <= UINT_MAX);
uint64 max_exp_diff_ui64 = m_mpz_manager.get_uint64(max_exp_diff);
SASSERT(max_exp_diff_ui64 <= UINT_MAX);
unsigned max_exp_diff_ui = (unsigned)max_exp_diff_ui64;
m_mpz_manager.del(max_exp_diff);
expr_ref a_exp_ext(m), b_exp_ext(m);
a_exp_ext = m_bv_util.mk_sign_extend(2, a_exp);
b_exp_ext = m_bv_util.mk_sign_extend(2, b_exp);
expr_ref a_lz_ext(m), b_lz_ext(m);
a_lz_ext = m_bv_util.mk_zero_extend(2, a_lz);
b_lz_ext = m_bv_util.mk_zero_extend(2, b_lz);
expr_ref exp_diff(m), neg_exp_diff(m), exp_diff_is_neg(m);
exp_diff = m_bv_util.mk_bv_sub(
m_bv_util.mk_bv_sub(a_exp_ext, a_lz_ext),
m_bv_util.mk_bv_sub(b_exp_ext, b_lz_ext));
neg_exp_diff = m_bv_util.mk_bv_neg(exp_diff);
exp_diff_is_neg = m_bv_util.mk_sle(exp_diff, m_bv_util.mk_numeral(0, ebits+2));
dbg_decouple("fpa2bv_rem_exp_diff", exp_diff);
// CMW: This creates _huge_ bit-vectors, which is potentially sub-optimal,
// but calculating this via rem = x - y * nearest(x/y) creates huge
// circuits, too. Lazy instantiation seems the way to go in the long run
// (using the lazy bit-blaster helps on simple instances).
expr_ref a_sig_ext(m), b_sig_ext(m), lshift(m), rshift(m), shifted(m), huge_rem(m);
a_sig_ext = m_bv_util.mk_concat(m_bv_util.mk_zero_extend(max_exp_diff_ui, a_sig), m_bv_util.mk_numeral(0, 3));
b_sig_ext = m_bv_util.mk_concat(m_bv_util.mk_zero_extend(max_exp_diff_ui, b_sig), m_bv_util.mk_numeral(0, 3));
lshift = m_bv_util.mk_zero_extend(max_exp_diff_ui + sbits - (ebits+2) + 3, exp_diff);
rshift = m_bv_util.mk_zero_extend(max_exp_diff_ui + sbits - (ebits+2) + 3, neg_exp_diff);
shifted = m.mk_ite(exp_diff_is_neg, m_bv_util.mk_bv_ashr(a_sig_ext, rshift),
m_bv_util.mk_bv_shl(a_sig_ext, lshift));
huge_rem = m_bv_util.mk_bv_urem(shifted, b_sig_ext);
dbg_decouple("fpa2bv_rem_huge_rem", huge_rem);
expr_ref rndd_sgn(m), rndd_exp(m), rndd_sig(m), rne_bv(m), rndd(m);
rndd_sgn = a_sgn;
rndd_exp = m_bv_util.mk_bv_sub(b_exp_ext, b_lz_ext);
rndd_sig = m_bv_util.mk_extract(sbits+3, 0, huge_rem);
rne_bv = m_bv_util.mk_numeral(BV_RM_TIES_TO_EVEN, 3);
round(s, rne_bv, rndd_sgn, rndd_sig, rndd_exp, rndd);
expr_ref y_half(m), ny_half(m), zero_e(m), two_e(m);
expr_ref y_half_is_zero(m), y_half_is_nz(m);
expr_ref r_ge_y_half(m), r_gt_ny_half(m), r_le_y_half(m), r_lt_ny_half(m);
expr_ref r_ge_zero(m), r_le_zero(m);
expr_ref rounded_sub_y(m), rounded_add_y(m);
mpf zero, two;
m_mpf_manager.set(two, ebits, sbits, 2);
m_mpf_manager.set(zero, ebits, sbits, 0);
mk_numeral(s, two, two_e);
mk_numeral(s, zero, zero_e);
mk_div(s, rne_bv, y, two_e, y_half);
mk_neg(s, y_half, ny_half);
mk_is_zero(y_half, y_half_is_zero);
y_half_is_nz = m.mk_not(y_half_is_zero);
mk_float_ge(s, rndd, y_half, r_ge_y_half);
mk_float_gt(s, rndd, ny_half, r_gt_ny_half);
mk_float_le(s, rndd, y_half, r_le_y_half);
mk_float_lt(s, rndd, ny_half, r_lt_ny_half);
mk_sub(s, rne_bv, rndd, y, rounded_sub_y);
mk_add(s, rne_bv, rndd, y, rounded_add_y);
expr_ref sub_cnd(m), add_cnd(m);
sub_cnd = m.mk_and(y_half_is_nz,
m.mk_or(m.mk_and(y_is_pos, r_ge_y_half),
m.mk_and(y_is_neg, r_le_y_half)));
add_cnd = m.mk_and(y_half_is_nz,
m.mk_or(m.mk_and(y_is_pos, r_lt_ny_half),
m.mk_and(y_is_neg, r_gt_ny_half)));
mk_ite(add_cnd, rounded_add_y, rndd, v7);
mk_ite(sub_cnd, rounded_sub_y, v7, v7);
// And finally, we tie them together.
mk_ite(c6, v6, v7, result);
mk_ite(c5, v5, result, result);
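The comment above notes that the rem = x - y * nearest(x/y) formulation would also blow up, which is why the remainder is computed directly on the widened significands. A hedged z3py sanity check of the intended IEEE semantics (not part of this commit):

# Sketch only: IEEE remainder of 5 and 3 is -1 (nearest integer to 5/3 is 2, ties to even).
from z3 import *

x, y = FPVal(5.0, Float32()), FPVal(3.0, Float32())
print(simplify(fpRem(x, y)))                        # expected: -1
# the textbook formulation from the comment, for comparison
alt = fpSub(RNE(), x, fpMul(RNE(), y, fpRoundToIntegral(RNE(), fpDiv(RNE(), x, y))))
print(simplify(alt))                                # expected: -1 as well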
@ -1102,9 +1191,15 @@ void fpa2bv_converter::mk_rem(sort * s, expr_ref & x, expr_ref & y, expr_ref & r
void fpa2bv_converter::mk_abs(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 1);
expr * sgn, * s, * e;
split_fp(args[0], sgn, e, s);
result = m_util.mk_fp(m_bv_util.mk_numeral(0, 1), e, s);
expr_ref x(m);
x = args[0];
mk_abs(f->get_range(), x, result);
}
void fpa2bv_converter::mk_abs(sort * s, expr_ref & x, expr_ref & result) {
expr * sgn, *sig, *exp;
split_fp(x, sgn, exp, sig);
result = m_util.mk_fp(m_bv_util.mk_numeral(0, 1), exp, sig);
}
void fpa2bv_converter::mk_min(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
@ -1112,10 +1207,22 @@ void fpa2bv_converter::mk_min(func_decl * f, unsigned num, expr * const * args,
x = args[0];
y = args[1];
expr_ref x_is_zero(m), y_is_zero(m), both_are_zero(m);
x_is_zero = m_util.mk_is_zero(x);
y_is_zero = m_util.mk_is_zero(y);
both_are_zero = m.mk_and(x_is_zero, y_is_zero);
expr_ref x_is_positive(m), x_is_negative(m), y_is_positive(m), y_is_negative(m), pn(m), np(m), pn_or_np(m);
x_is_positive = m_util.mk_is_positive(x);
x_is_negative = m_util.mk_is_negative(x);
y_is_positive = m_util.mk_is_positive(y);
y_is_negative = m_util.mk_is_negative(y);
pn = m.mk_and(x_is_positive, y_is_negative);
np = m.mk_and(x_is_negative, y_is_positive);
pn_or_np = m.mk_or(pn, np);
expr_ref c(m), v(m);
c = m.mk_and(m.mk_and(m_util.mk_is_zero(x), m_util.mk_is_zero(y)),
m.mk_or(m.mk_and(m_util.mk_is_positive(x), m_util.mk_is_negative(y)),
m.mk_and(m_util.mk_is_negative(x), m_util.mk_is_positive(y))));
c = m.mk_and(both_are_zero, pn_or_np);
v = m.mk_app(m_util.get_family_id(), OP_FPA_INTERNAL_MIN_UNSPECIFIED, x, y);
// Note: This requires BR_REWRITE_FULL afterwards.
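The both_are_zero / pn_or_np guard isolates the one case SMT-LIB leaves open: the minimum of +0 and -0 may be either zero. A hedged z3py illustration of that under-specification (behaviour may vary across versions):

# Sketch only: either sign is a consistent answer for min(+0, -0).
from z3 import *

pz = FPVal(0.0, Float32())
nz = fpNeg(pz)                                              # -0.0
m  = fpMin(pz, nz)
s1 = Solver(); s1.add(fpIsNegative(m)); print(s1.check())   # expected: sat
s2 = Solver(); s2.add(fpIsPositive(m)); print(s2.check())   # expected: sat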
@ -1142,8 +1249,9 @@ void fpa2bv_converter::mk_min_i(func_decl * f, unsigned num, expr * const * args
mk_is_nan(y, y_is_nan);
mk_pzero(f, pzero);
expr_ref sgn_diff(m);
sgn_diff = m.mk_not(m.mk_eq(x_sgn, y_sgn));
expr_ref sgn_eq(m), sgn_diff(m);
sgn_eq = m.mk_eq(x_sgn, y_sgn);
sgn_diff = m.mk_not(sgn_eq);
expr_ref lt(m);
mk_float_lt(f, num, args, lt);
@ -1192,10 +1300,22 @@ void fpa2bv_converter::mk_max(func_decl * f, unsigned num, expr * const * args,
x = args[0];
y = args[1];
expr_ref x_is_zero(m), y_is_zero(m), both_are_zero(m);
x_is_zero = m_util.mk_is_zero(x);
y_is_zero = m_util.mk_is_zero(y);
both_are_zero = m.mk_and(x_is_zero, y_is_zero);
expr_ref x_is_positive(m), x_is_negative(m), y_is_positive(m), y_is_negative(m), pn(m), np(m), pn_or_np(m);
x_is_positive = m_util.mk_is_positive(x);
x_is_negative = m_util.mk_is_negative(x);
y_is_positive = m_util.mk_is_positive(y);
y_is_negative = m_util.mk_is_negative(y);
pn = m.mk_and(x_is_positive, y_is_negative);
np = m.mk_and(x_is_negative, y_is_positive);
pn_or_np = m.mk_or(pn, np);
expr_ref c(m), v(m);
c = m.mk_and(m.mk_and(m_util.mk_is_zero(x), m_util.mk_is_zero(y)),
m.mk_or(m.mk_and(m_util.mk_is_positive(x), m_util.mk_is_negative(y)),
m.mk_and(m_util.mk_is_negative(x), m_util.mk_is_positive(y))));
c = m.mk_and(both_are_zero, pn_or_np);
v = m.mk_app(m_util.get_family_id(), OP_FPA_INTERNAL_MAX_UNSPECIFIED, x, y);
// Note: This requires BR_REWRITE_FULL afterwards.
@ -1239,7 +1359,7 @@ void fpa2bv_converter::mk_max_i(func_decl * f, unsigned num, expr * const * args
void fpa2bv_converter::mk_fma(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 4);
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
// fusedma means (x * y) + z
expr_ref rm(m), x(m), y(m), z(m);
@ -1557,7 +1677,7 @@ void fpa2bv_converter::mk_fma(func_decl * f, unsigned num, expr * const * args,
void fpa2bv_converter::mk_sqrt(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 2);
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
expr_ref rm(m), x(m);
rm = to_app(args[0])->get_arg(0);
@ -1706,7 +1826,7 @@ void fpa2bv_converter::mk_sqrt(func_decl * f, unsigned num, expr * const * args,
void fpa2bv_converter::mk_round_to_integral(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 2);
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
expr_ref rm(m), x(m);
rm = to_app(args[0])->get_arg(0);
@ -1859,8 +1979,12 @@ void fpa2bv_converter::mk_round_to_integral(sort * s, expr_ref & rm, expr_ref &
tie_pttrn = m_bv_util.mk_concat(one_1, m_bv_util.mk_numeral(0, sbits-1));
m_simp.mk_eq(rem, tie_pttrn, tie2);
div_last = m_bv_util.mk_extract(0, 0, div);
tie2_c = m.mk_ite(tie2, m.mk_or(m.mk_and(rm_is_rte, m.mk_eq(div_last, one_1)), rm_is_rta),
m_bv_util.mk_ule(tie_pttrn, rem));
expr_ref div_last_eq_1(m), rte_and_dl_eq_1(m), rte_and_dl_eq_1_or_rta(m), tie_pttrn_ule_rem(m);
div_last_eq_1 = m.mk_eq(div_last, one_1);
rte_and_dl_eq_1 = m.mk_and(rm_is_rte, div_last_eq_1);
rte_and_dl_eq_1_or_rta = m.mk_or(rte_and_dl_eq_1, rm_is_rta);
tie_pttrn_ule_rem = m_bv_util.mk_ule(tie_pttrn, rem);
tie2_c = m.mk_ite(tie2, rte_and_dl_eq_1_or_rta, tie_pttrn_ule_rem);
m_simp.mk_ite(tie2_c, div_p1, div, v51);
dbg_decouple("fpa2bv_r2i_v51", v51);
@ -1925,11 +2049,15 @@ void fpa2bv_converter::mk_round_to_integral(sort * s, expr_ref & rm, expr_ref &
void fpa2bv_converter::mk_float_eq(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 2);
expr_ref x(m), y(m);
x = args[0];
y = args[1];
mk_float_eq(f->get_range(), x, y, result);
}
expr * x = args[0], * y = args[1];
void fpa2bv_converter::mk_float_eq(sort * s, expr_ref & x, expr_ref & y, expr_ref & result) {
TRACE("fpa2bv_float_eq", tout << "X = " << mk_ismt2_pp(x, m) << std::endl;
tout << "Y = " << mk_ismt2_pp(y, m) << std::endl;);
tout << "Y = " << mk_ismt2_pp(y, m) << std::endl;);
expr_ref c1(m), c2(m), x_is_nan(m), y_is_nan(m), x_is_zero(m), y_is_zero(m);
mk_is_nan(x, x_is_nan);
@ -1963,9 +2091,13 @@ void fpa2bv_converter::mk_float_eq(func_decl * f, unsigned num, expr * const * a
void fpa2bv_converter::mk_float_lt(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 2);
expr_ref x(m), y(m);
x = args[0];
y = args[1];
mk_float_lt(f->get_range(), x, y, result);
}
expr * x = args[0], * y = args[1];
void fpa2bv_converter::mk_float_lt(sort * s, expr_ref & x, expr_ref & y, expr_ref & result) {
expr_ref c1(m), c2(m), x_is_nan(m), y_is_nan(m), x_is_zero(m), y_is_zero(m);
mk_is_nan(x, x_is_nan);
mk_is_nan(y, y_is_nan);
@ -2010,11 +2142,15 @@ void fpa2bv_converter::mk_float_lt(func_decl * f, unsigned num, expr * const * a
void fpa2bv_converter::mk_float_gt(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 2);
expr_ref x(m), y(m);
x = args[0];
y = args[1];
mk_float_gt(f->get_range(), x, y, result);
}
expr * x = args[0], * y = args[1];
void fpa2bv_converter::mk_float_gt(sort * s, expr_ref & x, expr_ref & y, expr_ref & result) {
expr_ref t3(m);
mk_float_le(f, num, args, t3);
mk_float_le(s, x, y, t3);
expr_ref nan_or(m), xy_zero(m), not_t3(m), r_else(m);
expr_ref x_is_nan(m), y_is_nan(m), x_is_zero(m), y_is_zero(m);
@ -2031,17 +2167,31 @@ void fpa2bv_converter::mk_float_gt(func_decl * f, unsigned num, expr * const * a
void fpa2bv_converter::mk_float_le(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 2);
expr_ref x(m), y(m);
x = args[0];
y = args[1];
mk_float_le(f->get_range(), x, y, result);
}
void fpa2bv_converter::mk_float_le(sort * s, expr_ref & x, expr_ref & y, expr_ref & result) {
expr_ref a(m), b(m);
mk_float_lt(f, num, args, a);
mk_float_eq(f, num, args, b);
mk_float_lt(s, x, y, a);
mk_float_eq(s, x, y, b);
m_simp.mk_or(a, b, result);
}
void fpa2bv_converter::mk_float_ge(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 2);
expr_ref x(m), y(m);
x = args[0];
y = args[1];
mk_float_ge(f->get_range(), x, y, result);
}
void fpa2bv_converter::mk_float_ge(sort * s, expr_ref & x, expr_ref & y, expr_ref & result) {
expr_ref a(m), b(m);
mk_float_gt(f, num, args, a);
mk_float_eq(f, num, args, b);
mk_float_gt(s, x, y, a);
mk_float_eq(s, x, y, b);
m_simp.mk_or(a, b, result);
}
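The comparison builders above encode the usual IEEE corner cases (NaN is unordered, the two zeros compare equal). A hedged z3py check of those semantics, not part of the diff:

# Sketch only: corner cases handled by mk_float_eq / mk_float_lt.
from z3 import *

nan = fpNaN(Float32())
pz  = FPVal(0.0, Float32())
nz  = fpNeg(pz)
print(simplify(fpEQ(nan, nan)))     # expected: False (NaN is unordered, even against itself)
print(simplify(fpEQ(pz, nz)))       # expected: True  (+0 and -0 compare equal)
print(simplify(fpLT(nz, pz)))       # expected: False (equal values are not less-than)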
@ -2088,19 +2238,21 @@ void fpa2bv_converter::mk_is_subnormal(func_decl * f, unsigned num, expr * const
void fpa2bv_converter::mk_is_negative(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 1);
expr_ref t1(m), t2(m);
expr_ref t1(m), t2(m), nt1(m);
mk_is_nan(args[0], t1);
mk_is_neg(args[0], t2);
result = m.mk_and(m.mk_not(t1), t2);
nt1 = m.mk_not(t1);
result = m.mk_and(nt1, t2);
TRACE("fpa2bv_is_negative", tout << "result = " << mk_ismt2_pp(result, m) << std::endl;);
}
void fpa2bv_converter::mk_is_positive(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
SASSERT(num == 1);
expr_ref t1(m), t2(m);
expr_ref t1(m), t2(m), nt1(m);
mk_is_nan(args[0], t1);
mk_is_pos(args[0], t2);
result = m.mk_and(m.mk_not(t1), t2);
nt1 = m.mk_not(t1);
result = m.mk_and(nt1, t2);
}
void fpa2bv_converter::mk_to_fp(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
@ -2166,7 +2318,7 @@ void fpa2bv_converter::mk_to_fp(func_decl * f, unsigned num, expr * const * args
void fpa2bv_converter::mk_to_fp_float(func_decl * f, sort * s, expr * rm, expr * x, expr_ref & result) {
SASSERT(m_util.is_rm_bvwrap(rm));
SASSERT(m_util.is_bv2rm(rm));
mk_to_fp_float(s, to_app(rm)->get_arg(0), x, result);
}
@ -2337,7 +2489,7 @@ void fpa2bv_converter::mk_to_fp_real(func_decl * f, sort * s, expr * rm, expr *
"x: " << mk_ismt2_pp(x, m) << std::endl;);
SASSERT(m_util.is_float(s));
SASSERT(au().is_real(x) || au().is_int(x));
SASSERT(m_util.is_rm_bvwrap(rm));
SASSERT(m_util.is_bv2rm(rm));
expr * bv_rm = to_app(rm)->get_arg(0);
unsigned ebits = m_util.get_ebits(s);
@ -2472,7 +2624,7 @@ void fpa2bv_converter::mk_to_fp_real_int(func_decl * f, unsigned num, expr * con
unsigned ebits = m_util.get_ebits(f->get_range());
unsigned sbits = m_util.get_sbits(f->get_range());
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
expr * bv_rm = to_app(args[0])->get_arg(0);
rational e;
@ -2596,11 +2748,12 @@ void fpa2bv_converter::mk_to_real(func_decl * f, unsigned num, expr * const * ar
exp2 = m.mk_ite(exp_is_neg, one_div_exp2, exp2);
dbg_decouple("fpa2bv_to_real_exp2", exp2);
expr_ref res(m), two_exp2(m), minus_res(m);
expr_ref res(m), two_exp2(m), minus_res(m), sgn_is_1(m);
two_exp2 = m_arith_util.mk_power(two, exp2);
res = m_arith_util.mk_mul(rsig, two_exp2);
minus_res = m_arith_util.mk_uminus(res);
res = m.mk_ite(m.mk_eq(sgn, bv1), minus_res, res);
sgn_is_1 = m.mk_eq(sgn, bv1);
res = m.mk_ite(sgn_is_1, minus_res, res);
dbg_decouple("fpa2bv_to_real_sig_times_exp2", res);
TRACE("fpa2bv_to_real", tout << "rsig = " << mk_ismt2_pp(rsig, m) << std::endl;
@ -2630,7 +2783,7 @@ void fpa2bv_converter::mk_to_fp_signed(func_decl * f, unsigned num, expr * const
SASSERT(num == 2);
SASSERT(m_util.is_float(f->get_range()));
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
SASSERT(m_bv_util.is_bv(args[1]));
expr_ref rm(m), x(m);
@ -2772,7 +2925,7 @@ void fpa2bv_converter::mk_to_fp_unsigned(func_decl * f, unsigned num, expr * con
SASSERT(num == 2);
SASSERT(m_util.is_float(f->get_range()));
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
SASSERT(m_bv_util.is_bv(args[1]));
expr_ref rm(m), x(m);
@ -2924,7 +3077,7 @@ void fpa2bv_converter::mk_to_bv(func_decl * f, unsigned num, expr * const * args
tout << "arg" << i << " = " << mk_ismt2_pp(args[i], m) << std::endl;);
SASSERT(num == 2);
SASSERT(m_util.is_rm_bvwrap(args[0]));
SASSERT(m_util.is_bv2rm(args[0]));
SASSERT(m_util.is_float(args[1]));
expr * rm = to_app(args[0])->get_arg(0);
@ -3007,10 +3160,15 @@ void fpa2bv_converter::mk_to_bv(func_decl * f, unsigned num, expr * const * args
expr_ref c_in_limits(m);
if (!is_signed)
c_in_limits = m_bv_util.mk_sle(bv0_e2, shift);
else
c_in_limits = m.mk_or(m_bv_util.mk_sle(m_bv_util.mk_numeral(1, ebits + 2), shift),
m.mk_and(m.mk_eq(m_bv_util.mk_numeral(0, ebits + 2), shift),
m.mk_eq(sig, m_bv_util.mk_concat(bv1, m_bv_util.mk_numeral(0, sig_sz-1)))));
else {
expr_ref one_sle_shift(m), one_eq_shift(m), p2(m), sig_is_p2(m), shift1_and_sig_p2(m);
one_sle_shift = m_bv_util.mk_sle(m_bv_util.mk_numeral(1, ebits + 2), shift);
one_eq_shift = m.mk_eq(m_bv_util.mk_numeral(0, ebits + 2), shift);
p2 = m_bv_util.mk_concat(bv1, m_bv_util.mk_numeral(0, sig_sz-1));
sig_is_p2 = m.mk_eq(sig, p2);
shift1_and_sig_p2 = m.mk_and(one_eq_shift, sig_is_p2);
c_in_limits = m.mk_or(one_sle_shift, shift1_and_sig_p2);
}
dbg_decouple("fpa2bv_to_bv_in_limits", c_in_limits);
expr_ref r_shifted_sig(m), l_shifted_sig(m);
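The signed in-limits test above admits exactly the -2^(n-1) boundary (shift of zero with a power-of-two significand). A hedged z3py illustration with an 8-bit target (outputs are expectations, not guarantees):

# Sketch only: -128.0 fits a signed 8-bit vector, 128.0 does not.
from z3 import *

rtz = RTZ()
print(simplify(fpToSBV(rtz, FPVal(-128.0, Float32()), BitVecSort(8))))  # in range: the 0x80 pattern
print(simplify(fpToSBV(rtz, FPVal(127.0, Float32()), BitVecSort(8))))   # in range: 127
# 128.0 is out of range; SMT-LIB leaves fp.to_sbv unspecified here, so the
# simplifier may keep the term symbolic or pick an arbitrary value.
print(simplify(fpToSBV(rtz, FPVal(128.0, Float32()), BitVecSort(8))))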
@ -3162,13 +3320,14 @@ expr_ref fpa2bv_converter::mk_to_ieee_bv_unspecified(unsigned ebits, unsigned sb
}
result = m.mk_const(fd);
app_ref mask(m), extra(m);
app_ref mask(m), extra(m), result_and_mask(m);
mask = m_bv_util.mk_concat(m_bv_util.mk_numeral(0, 1),
m_bv_util.mk_concat(m_bv_util.mk_numeral(-1, ebits),
m_bv_util.mk_concat(m_bv_util.mk_numeral(0, sbits - 2),
m_bv_util.mk_numeral(1, 1))));
expr * args[2] = { result, mask };
extra = m.mk_eq(m.mk_app(m_bv_util.get_fid(), OP_BAND, 2, args), mask);
result_and_mask = m.mk_app(m_bv_util.get_fid(), OP_BAND, 2, args);
extra = m.mk_eq(result_and_mask, mask);
m_extra_assertions.push_back(extra);
return result;
@ -3528,7 +3687,7 @@ void fpa2bv_converter::mk_rounding_mode(decl_kind k, expr_ref & result)
default: UNREACHABLE();
}
result = m_util.mk_rm(result);
result = m_util.mk_bv2rm(result);
}
void fpa2bv_converter::dbg_decouple(const char * prefix, expr_ref & e) {
@ -3547,15 +3706,21 @@ void fpa2bv_converter::dbg_decouple(const char * prefix, expr_ref & e) {
bv_sgn = m_bv_util.mk_extract(bv_sz-1, bv_sz-1, new_bv);
bv_exp = m_bv_util.mk_extract(bv_sz-2, bv_sz-ebits-1, new_bv);
bv_sig = m_bv_util.mk_extract(sbits-2, 0, new_bv);
m_extra_assertions.push_back(m.mk_eq(e_sgn, bv_sgn));
m_extra_assertions.push_back(m.mk_eq(e_exp, bv_exp));
m_extra_assertions.push_back(m.mk_eq(e_sig, bv_sig));
expr_ref e_sgn_eq_bv_sgn(m), e_exp_eq_bv_exp(m), e_sig_eq_bv_sig(m);
e_sgn_eq_bv_sgn = m.mk_eq(e_sgn, bv_sgn);
e_exp_eq_bv_exp = m.mk_eq(e_exp, bv_exp);
e_sig_eq_bv_sig = m.mk_eq(e_sig, bv_sig);
m_extra_assertions.push_back(e_sgn_eq_bv_sgn);
m_extra_assertions.push_back(e_exp_eq_bv_exp);
m_extra_assertions.push_back(e_sig_eq_bv_sig);
e = m_util.mk_fp(bv_sgn, bv_exp, bv_sig);
}
else {
expr_ref new_e(m);
expr_ref new_e(m), new_e_eq_e(m);
new_e = m.mk_fresh_const(prefix, m.get_sort(e));
m_extra_assertions.push_back(m.mk_eq(new_e, e));
new_e_eq_e = m.mk_eq(new_e, e);
m_extra_assertions.push_back(new_e_eq_e);
e = new_e;
}
#endif

View file

@ -79,6 +79,7 @@ public:
void mk_rounding_mode(decl_kind k, expr_ref & result);
void mk_numeral(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_numeral(sort * s, mpf const & v, expr_ref & result);
virtual void mk_const(func_decl * f, expr_ref & result);
virtual void mk_rm_const(func_decl * f, expr_ref & result);
virtual void mk_function(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
@ -100,12 +101,18 @@ public:
void mk_fma(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_sqrt(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_round_to_integral(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_abs(sort * s, expr_ref & x, expr_ref & result);
void mk_float_eq(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_float_lt(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_float_gt(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_float_le(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_float_ge(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_float_eq(sort * s, expr_ref & x, expr_ref & y, expr_ref & result);
void mk_float_lt(sort * s, expr_ref & x, expr_ref & y, expr_ref & result);
void mk_float_gt(sort *, expr_ref & x, expr_ref & y, expr_ref & result);
void mk_float_le(sort * s, expr_ref & x, expr_ref & y, expr_ref & result);
void mk_float_ge(sort * s, expr_ref & x, expr_ref & y, expr_ref & result);
void mk_is_zero(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
void mk_is_nzero(func_decl * f, unsigned num, expr * const * args, expr_ref & result);

View file

@ -156,7 +156,7 @@ br_status fpa2bv_rewriter_cfg::reduce_app(func_decl * f, unsigned num, expr * co
case OP_FPA_INTERNAL_MAX_I: m_conv.mk_max_i(f, num, args, result); return BR_DONE;
case OP_FPA_INTERNAL_BVWRAP:
case OP_FPA_INTERNAL_RM_BVWRAP:
case OP_FPA_INTERNAL_BV2RM:
case OP_FPA_INTERNAL_TO_REAL_UNSPECIFIED:
case OP_FPA_INTERNAL_TO_UBV_UNSPECIFIED:
@ -172,8 +172,10 @@ br_status fpa2bv_rewriter_cfg::reduce_app(func_decl * f, unsigned num, expr * co
else
{
SASSERT(!m_conv.is_float_family(f));
m_conv.mk_function(f, num, args, result);
return BR_DONE;
if (m_conv.fu().contains_floats(f)) {
m_conv.mk_function(f, num, args, result);
return BR_DONE;
}
}
return BR_FAILED;

View file

@ -676,7 +676,7 @@ func_decl * fpa_decl_plugin::mk_to_ieee_bv(decl_kind k, unsigned num_parameters,
return m_manager->mk_func_decl(name, 1, domain, bv_srt, func_decl_info(m_family_id, k));
}
func_decl * fpa_decl_plugin::mk_internal_rm(decl_kind k, unsigned num_parameters, parameter const * parameters,
func_decl * fpa_decl_plugin::mk_internal_bv2rm(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range) {
if (arity != 1)
m_manager->raise_exception("invalid number of arguments to internal_rm");
@ -837,8 +837,8 @@ func_decl * fpa_decl_plugin::mk_func_decl(decl_kind k, unsigned num_parameters,
case OP_FPA_INTERNAL_BVWRAP:
return mk_internal_bv_wrap(k, num_parameters, parameters, arity, domain, range);
case OP_FPA_INTERNAL_RM_BVWRAP:
return mk_internal_rm(k, num_parameters, parameters, arity, domain, range);
case OP_FPA_INTERNAL_BV2RM:
return mk_internal_bv2rm(k, num_parameters, parameters, arity, domain, range);
case OP_FPA_INTERNAL_MIN_I:
case OP_FPA_INTERNAL_MAX_I:

View file

@ -88,7 +88,7 @@ enum fpa_op_kind {
/* Internal use only */
OP_FPA_INTERNAL_BVWRAP,
OP_FPA_INTERNAL_RM_BVWRAP, // Internal conversion from (_ BitVec 3) to RoundingMode
OP_FPA_INTERNAL_BV2RM,
OP_FPA_INTERNAL_MIN_I,
OP_FPA_INTERNAL_MAX_I,
@ -164,8 +164,8 @@ class fpa_decl_plugin : public decl_plugin {
func_decl * mk_to_ieee_bv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_internal_rm(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_internal_bv2rm(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_internal_bv_wrap(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_internal_bv_unwrap(decl_kind k, unsigned num_parameters, parameter const * parameters,
@ -301,11 +301,6 @@ public:
return m().mk_app(m_fid, OP_FPA_FP, sgn, exp, sig);
}
app * mk_rm(expr * bv3) {
SASSERT(m_bv_util.is_bv(bv3) && m_bv_util.get_bv_size(bv3) == 3);
return m().mk_app(m_fid, OP_FPA_INTERNAL_RM_BVWRAP, 0, 0, 1, &bv3, mk_rm_sort());
}
app * mk_to_fp(sort * s, expr * bv_t) {
SASSERT(is_float(s) && s->get_num_parameters() == 2);
return m().mk_app(m_fid, OP_FPA_TO_FP, 2, s->get_parameters(), 1, &bv_t);
@ -373,6 +368,10 @@ public:
app * mk_to_ieee_bv(expr * arg1) { return m().mk_app(m_fid, OP_FPA_TO_IEEE_BV, arg1); }
app * mk_bv2rm(expr * bv3) {
SASSERT(m_bv_util.is_bv(bv3) && m_bv_util.get_bv_size(bv3) == 3);
return m().mk_app(m_fid, OP_FPA_INTERNAL_BV2RM, 0, 0, 1, &bv3, mk_rm_sort());
}
app * mk_internal_to_ubv_unspecified(unsigned ebits, unsigned sbits, unsigned width);
app * mk_internal_to_sbv_unspecified(unsigned ebits, unsigned sbits, unsigned width);
app * mk_internal_to_ieee_bv_unspecified(unsigned ebits, unsigned sbits);
@ -380,8 +379,8 @@ public:
bool is_bvwrap(expr * e) const { return is_app_of(e, get_family_id(), OP_FPA_INTERNAL_BVWRAP); }
bool is_bvwrap(func_decl * f) const { return f->get_family_id() == get_family_id() && f->get_decl_kind() == OP_FPA_INTERNAL_BVWRAP; }
bool is_rm_bvwrap(expr * e) const { return is_app_of(e, get_family_id(), OP_FPA_INTERNAL_RM_BVWRAP); }
bool is_rm_bvwrap(func_decl * f) const { return f->get_family_id() == get_family_id() && f->get_decl_kind() == OP_FPA_INTERNAL_RM_BVWRAP; }
bool is_bv2rm(expr * e) const { return is_app_of(e, get_family_id(), OP_FPA_INTERNAL_BV2RM); }
bool is_bv2rm(func_decl * f) const { return f->get_family_id() == get_family_id() && f->get_decl_kind() == OP_FPA_INTERNAL_BV2RM; }
bool is_min_interpreted(expr * e) { return is_app_of(e, get_family_id(), OP_FPA_INTERNAL_MIN_I); }
bool is_min_unspecified(expr * e) { return is_app_of(e, get_family_id(), OP_FPA_INTERNAL_MIN_UNSPECIFIED); }

View file

@ -100,7 +100,7 @@ br_status fpa_rewriter::mk_app_core(func_decl * f, unsigned num_args, expr * con
SASSERT(num_args == 2); st = BR_FAILED; break;
case OP_FPA_INTERNAL_BVWRAP: SASSERT(num_args == 1); st = mk_bvwrap(args[0], result); break;
case OP_FPA_INTERNAL_RM_BVWRAP:SASSERT(num_args == 1); st = mk_rm(args[0], result); break;
case OP_FPA_INTERNAL_BV2RM: SASSERT(num_args == 1); st = mk_bv2rm(args[0], result); break;
case OP_FPA_INTERNAL_TO_UBV_UNSPECIFIED:
case OP_FPA_INTERNAL_TO_SBV_UNSPECIFIED:
@ -719,7 +719,7 @@ br_status fpa_rewriter::mk_eq_core(expr * arg1, expr * arg2, expr_ref & result)
return BR_FAILED;
}
br_status fpa_rewriter::mk_rm(expr * arg, expr_ref & result) {
br_status fpa_rewriter::mk_bv2rm(expr * arg, expr_ref & result) {
bv_util bu(m());
rational bv_val;
unsigned sz = 0;

View file

@ -78,7 +78,7 @@ public:
br_status mk_to_fp(func_decl * f, unsigned num_args, expr * const * args, expr_ref & result);
br_status mk_to_fp_unsigned(func_decl * f, expr * arg1, expr * arg2, expr_ref & result);
br_status mk_rm(expr * arg, expr_ref & result);
br_status mk_bv2rm(expr * arg, expr_ref & result);
br_status mk_fp(expr * sgn, expr * exp, expr * sig, expr_ref & result);
br_status mk_to_fp_unsigned(expr * arg1, expr * arg2, expr_ref & result);
br_status mk_to_ubv(func_decl * f, expr * arg1, expr * arg2, expr_ref & result);

View file

@ -418,7 +418,7 @@ void bv_simplifier_plugin::mk_extract(unsigned high, unsigned low, expr* arg, ex
mk_extract_core(high, low, arg, result);
}
if (m_extract_cache.size() > (1 << 12)) {
m_extract_cache.reset();
flush_caches();
}
TRACE("bv_simplifier_plugin", tout << "mk_extract [" << high << ":" << low << "]\n";

View file

@ -28,6 +28,7 @@ Notes:
#include"eval_cmd.h"
#include"gparams.h"
#include"env_params.h"
#include"well_sorted.h"
class help_cmd : public cmd {
svector<symbol> m_cmds;
@ -156,11 +157,16 @@ ATOMIC_CMD(get_proof_cmd, "get-proof", "retrieve proof", {
pr = ctx.get_check_sat_result()->get_proof();
if (pr == 0)
throw cmd_exception("proof is not available");
if (ctx.well_sorted_check_enabled() && !is_well_sorted(ctx.m(), pr)) {
throw cmd_exception("proof is not well sorted");
}
// TODO: reimplement a new SMT2 pretty printer
ast_smt_pp pp(ctx.m());
cmd_is_declared isd(ctx);
pp.set_is_declared(&isd);
pp.set_logic(ctx.get_logic());
// ctx.regular_stream() << mk_pp(pr, ctx.m()) << "\n";
pp.display_smt2(ctx.regular_stream(), pr);
ctx.regular_stream() << std::endl;
});

View file

@ -1487,18 +1487,18 @@ void cmd_context::check_sat(unsigned num_assumptions, expr * const * assumptions
return;
}
display_sat_result(r);
if (r == l_true) {
validate_model();
}
validate_check_sat_result(r);
if (was_opt && r != l_false && !was_pareto) {
get_opt()->display_assignment(regular_stream());
}
if (r == l_true) {
validate_model();
if (m_params.m_dump_models) {
model_ref md;
get_check_sat_result()->get_model(md);
display_model(md);
}
if (r == l_true && m_params.m_dump_models) {
model_ref md;
get_check_sat_result()->get_model(md);
display_model(md);
}
}

View file

@ -31,6 +31,7 @@ Revision History:
#include"rewriter_def.h"
#include"cooperate.h"
#include"ast_pp.h"
#include"ast_util.h"
struct evaluator_cfg : public default_rewriter_cfg {
@ -196,7 +197,8 @@ struct evaluator_cfg : public default_rewriter_cfg {
void expand_value(expr_ref& val) {
vector<expr_ref_vector> stores;
expr_ref else_case(m());
if (m_ar.is_array(val) && extract_array_func_interp(val, stores, else_case)) {
bool args_are_unique;
if (m_ar.is_array(val) && extract_array_func_interp(val, stores, else_case, args_are_unique)) {
sort* srt = m().get_sort(val);
val = m_ar.mk_const_array(srt, else_case);
for (unsigned i = stores.size(); i > 0; ) {
@ -260,25 +262,40 @@ struct evaluator_cfg : public default_rewriter_cfg {
br_status mk_array_eq(expr* a, expr* b, expr_ref& result) {
return BR_FAILED;
// disabled until made more efficient
if (a == b) {
result = m().mk_true();
return BR_DONE;
}
vector<expr_ref_vector> stores;
// disabled until made more efficient
vector<expr_ref_vector> stores1, stores2;
bool args_are_unique1, args_are_unique2;
expr_ref else1(m()), else2(m());
if (extract_array_func_interp(a, stores, else1) &&
extract_array_func_interp(b, stores, else2)) {
if (extract_array_func_interp(a, stores1, else1, args_are_unique1) &&
extract_array_func_interp(b, stores2, else2, args_are_unique2)) {
expr_ref_vector conj(m()), args1(m()), args2(m());
conj.push_back(m().mk_eq(else1, else2));
if (m().are_equal(else1, else2)) {
// no op
}
else if (m().are_distinct(else1, else2) && !(m().get_sort(else1)->get_info()->get_num_elements().is_finite())) {
result = m().mk_false();
return BR_DONE;
}
else {
conj.push_back(m().mk_eq(else1, else2));
}
args1.push_back(a);
args2.push_back(b);
if (args_are_unique1 && args_are_unique2 && !stores1.empty()) {
return mk_array_eq(stores1, else1, stores2, else2, conj, result);
}
// TBD: this is too inefficient.
for (unsigned i = 0; i < stores.size(); ++i) {
args1.resize(1); args1.append(stores[i].size() - 1, stores[i].c_ptr());
args2.resize(1); args2.append(stores[i].size() - 1, stores[i].c_ptr());
expr* s1 = m_ar.mk_select(args1.size(), args1.c_ptr());
expr* s2 = m_ar.mk_select(args2.size(), args2.c_ptr());
stores1.append(stores2);
for (unsigned i = 0; i < stores1.size(); ++i) {
args1.resize(1); args1.append(stores1[i].size() - 1, stores1[i].c_ptr());
args2.resize(1); args2.append(stores1[i].size() - 1, stores1[i].c_ptr());
expr_ref s1(m_ar.mk_select(args1.size(), args1.c_ptr()), m());
expr_ref s2(m_ar.mk_select(args2.size(), args2.c_ptr()), m());
conj.push_back(m().mk_eq(s1, s2));
}
result = m().mk_and(conj.size(), conj.c_ptr());
@ -287,12 +304,107 @@ struct evaluator_cfg : public default_rewriter_cfg {
return BR_FAILED;
}
bool extract_array_func_interp(expr* a, vector<expr_ref_vector>& stores, expr_ref& else_case) {
SASSERT(m_ar.is_array(a));
struct args_eq {
unsigned m_arity;
args_eq(unsigned arity): m_arity(arity) {}
bool operator()(expr * const* args1, expr* const* args2) const {
for (unsigned i = 0; i < m_arity; ++i) {
if (args1[i] != args2[i]) {
return false;
}
}
return true;
}
};
struct args_hash {
unsigned m_arity;
args_hash(unsigned arity): m_arity(arity) {}
unsigned operator()(expr * const* args) const {
return get_composite_hash(args, m_arity, default_kind_hash_proc<expr*const*>(), *this);
}
unsigned operator()(expr* const* args, unsigned idx) const {
return args[idx]->hash();
}
};
typedef hashtable<expr*const*, args_hash, args_eq> args_table;
br_status mk_array_eq(vector<expr_ref_vector> const& stores1, expr* else1,
vector<expr_ref_vector> const& stores2, expr* else2,
expr_ref_vector& conj, expr_ref& result) {
unsigned arity = stores1[0].size()-1; // TBD: fix arity.
args_hash ah(arity);
args_eq ae(arity);
args_table table1(DEFAULT_HASHTABLE_INITIAL_CAPACITY, ah, ae);
args_table table2(DEFAULT_HASHTABLE_INITIAL_CAPACITY, ah, ae);
for (unsigned i = 0; i < stores1.size(); ++i) {
table1.insert(stores1[i].c_ptr());
}
for (unsigned i = stores2.size(); i > 0; ) {
--i;
if (table2.contains(stores2[i].c_ptr())) {
// first insertion takes precedence.
continue;
}
table2.insert(stores2[i].c_ptr());
expr * const* args = 0;
expr* val = stores2[i][arity];
if (table1.find(stores2[i].c_ptr(), args)) {
switch (compare(args[arity], val)) {
case l_true: table1.remove(stores2[i].c_ptr()); break;
case l_false: result = m().mk_false(); return BR_DONE;
default: conj.push_back(m().mk_eq(val, args[arity])); break;
}
}
else {
switch (compare(else1, val)) {
case l_true: break;
case l_false: result = m().mk_false(); return BR_DONE;
default: conj.push_back(m().mk_eq(else1, val)); break;
}
}
}
args_table::iterator it = table1.begin(), end = table1.end();
for (; it != end; ++it) {
switch (compare((*it)[arity], else2)) {
case l_true: break;
case l_false: result = m().mk_false(); return BR_DONE;
default: conj.push_back(m().mk_eq((*it)[arity], else2)); break;
}
}
result = mk_and(conj);
return BR_REWRITE_FULL;
}
lbool compare(expr* a, expr* b) {
if (m().are_equal(a, b)) return l_true;
if (m().are_distinct(a, b)) return l_false;
return l_undef;
}
bool args_are_values(expr_ref_vector const& store, bool& are_unique) {
bool are_values = true;
for (unsigned j = 0; are_values && j + 1 < store.size(); ++j) {
are_values = m().is_value(store[j]);
are_unique &= m().is_unique_value(store[j]);
}
SASSERT(!are_unique || are_values);
return are_values;
}
bool extract_array_func_interp(expr* a, vector<expr_ref_vector>& stores, expr_ref& else_case, bool& are_unique) {
SASSERT(m_ar.is_array(a));
bool are_values = true;
are_unique = true;
while (m_ar.is_store(a)) {
expr_ref_vector store(m());
store.append(to_app(a)->get_num_args()-1, to_app(a)->get_args()+1);
are_values &= args_are_values(store, are_unique);
stores.push_back(store);
a = to_app(a)->get_arg(0);
}
@ -302,54 +414,51 @@ struct evaluator_cfg : public default_rewriter_cfg {
return true;
}
if (m_ar.is_as_array(a)) {
func_decl* f = m_ar.get_as_array_func_decl(to_app(a));
func_interp* g = m_model.get_func_interp(f);
unsigned sz = g->num_entries();
unsigned arity = f->get_arity();
for (unsigned i = 0; i < sz; ++i) {
expr_ref_vector store(m());
func_entry const* fe = g->get_entry(i);
store.append(arity, fe->get_args());
store.push_back(fe->get_result());
for (unsigned j = 0; j < store.size(); ++j) {
if (!is_ground(store[j].get())) {
TRACE("model_evaluator", tout << "could not extract array interpretation: " << mk_pp(a, m()) << "\n" << mk_pp(store[j].get(), m()) << "\n";);
return false;
}
}
stores.push_back(store);
}
else_case = g->get_else();
if (!else_case) {
TRACE("model_evaluator", tout << "no else case " << mk_pp(a, m()) << "\n";);
return false;
}
if (!is_ground(else_case)) {
TRACE("model_evaluator", tout << "non-ground else case " << mk_pp(a, m()) << "\n" << mk_pp(else_case, m()) << "\n";);
return false;
}
bool is_value = true;
for (unsigned i = stores.size(); is_value && i > 0; ) {
--i;
if (else_case == stores[i].back()) {
for (unsigned j = i + 1; j < stores.size(); ++j) {
stores[j-1].reset();
stores[j-1].append(stores[j]);
}
stores.pop_back();
continue;
}
for (unsigned j = 0; is_value && j + 1 < stores[i].size(); ++j) {
is_value = m().is_value(stores[i][j].get());
}
}
TRACE("model_evaluator", tout << "else case: " << mk_pp(else_case, m()) << "\n";);
return true;
if (!m_ar.is_as_array(a)) {
TRACE("model_evaluator", tout << "no translation: " << mk_pp(a, m()) << "\n";);
return false;
}
TRACE("model_evaluator", tout << "no translation: " << mk_pp(a, m()) << "\n";);
return false;
func_decl* f = m_ar.get_as_array_func_decl(to_app(a));
func_interp* g = m_model.get_func_interp(f);
unsigned sz = g->num_entries();
unsigned arity = f->get_arity();
for (unsigned i = 0; i < sz; ++i) {
expr_ref_vector store(m());
func_entry const* fe = g->get_entry(i);
store.append(arity, fe->get_args());
store.push_back(fe->get_result());
for (unsigned j = 0; j < store.size(); ++j) {
if (!is_ground(store[j].get())) {
TRACE("model_evaluator", tout << "could not extract array interpretation: " << mk_pp(a, m()) << "\n" << mk_pp(store[j].get(), m()) << "\n";);
return false;
}
}
stores.push_back(store);
}
else_case = g->get_else();
if (!else_case) {
TRACE("model_evaluator", tout << "no else case " << mk_pp(a, m()) << "\n";);
return false;
}
if (!is_ground(else_case)) {
TRACE("model_evaluator", tout << "non-ground else case " << mk_pp(a, m()) << "\n" << else_case << "\n";);
return false;
}
for (unsigned i = stores.size(); are_values && i > 0; ) {
--i;
if (m().are_equal(else_case, stores[i].back())) {
for (unsigned j = i + 1; j < stores.size(); ++j) {
stores[j-1].reset();
stores[j-1].append(stores[j]);
}
stores.pop_back();
continue;
}
are_values &= args_are_values(stores[i], are_unique);
}
TRACE("model_evaluator", tout << "else case: " << mk_pp(else_case, m()) << "\n";);
return true;
}
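mk_array_eq now compares the two store lists against each other and against the respective defaults instead of expanding selects pointwise. A plain-Python sketch of that walk over concrete values (all names hypothetical; the real code builds equalities rather than booleans):

# Sketch only: equality of two array interpretations given as (stores, default),
# mirroring the table1/table2 traversal in mk_array_eq.
def arrays_equal(stores1, default1, stores2, default2):
    pending = dict(stores1)                      # keys still unmatched on the first side
    for args, val in stores2.items():
        if args in pending:
            if pending.pop(args) != val:         # both sides store this key
                return False
        elif default1 != val:                    # only the second side stores it
            return False
    for val in pending.values():                 # keys only the first side stores
        if val != default2:
            return False
    return default1 == default2                  # assumes some index is left unstored

print(arrays_equal({(1,): 10}, 0, {(1,): 10}, 0))   # True
print(arrays_equal({(1,): 10}, 0, {}, 0))           # False: they differ at index 1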

View file

@ -26,7 +26,8 @@ Revision History:
#include"dl_util.h"
#include"dl_compiler.h"
#include"ast_pp.h"
#include"ast_smt2_pp.h"
// include"ast_smt2_pp.h"
#include"ast_util.h"
namespace datalog {
@ -185,10 +186,11 @@ namespace datalog {
}
}
void compiler::make_add_unbound_column(rule* compiled_rule, unsigned col_idx, func_decl* pred, reg_idx src, const relation_sort s, reg_idx & result,
void compiler::make_add_unbound_column(rule* compiled_rule, unsigned col_idx, func_decl* pred, reg_idx src, const relation_sort& s, reg_idx & result,
bool & dealloc, instruction_block & acc) {
TRACE("dl", tout << "Adding unbound column " << mk_pp(pred, m_context.get_manager()) << "\n";);
TRACE("dl", tout << "Adding unbound column " << mk_pp(pred, m_context.get_manager())
<< " " << m_context.get_rel_context()->get_rmanager().to_nice_string(s) << "\n";);
IF_VERBOSE(3, {
expr_ref e(m_context.get_manager());
m_context.get_rule_manager().to_formula(*compiled_rule, e);
@ -328,13 +330,15 @@ namespace datalog {
continue;
}
unsigned bound_column_index;
if(acis[i].kind!=ACK_UNBOUND_VAR || !handled_unbound.find(acis[i].var_index,bound_column_index)) {
if(acis[i].kind != ACK_UNBOUND_VAR || !handled_unbound.find(acis[i].var_index,bound_column_index)) {
bound_column_index=curr_sig->size();
if(acis[i].kind==ACK_CONSTANT) {
make_add_constant_column(head_pred, curr, acis[i].domain, acis[i].constant, curr, dealloc, acc);
}
else {
SASSERT(acis[i].kind==ACK_UNBOUND_VAR);
TRACE("dl", tout << head_pred->get_name() << " index: " << i
<< " " << m_context.get_rel_context()->get_rmanager().to_nice_string(acis[i].domain) << "\n";);
make_add_unbound_column(compiled_rule, i, head_pred, curr, acis[i].domain, curr, dealloc, acc);
handled_unbound.insert(acis[i].var_index,bound_column_index);
}
@ -598,14 +602,15 @@ namespace datalog {
// add unbounded columns for interpreted filter
unsigned ut_len = r->get_uninterpreted_tail_size();
unsigned ft_len = r->get_tail_size(); // full tail
ptr_vector<expr> tail;
expr_ref_vector tail(m);
for (unsigned tail_index = ut_len; tail_index < ft_len; ++tail_index) {
tail.push_back(r->get_tail(tail_index));
}
expr_ref_vector binding(m);
if (!tail.empty()) {
app_ref filter_cond(tail.size() == 1 ? to_app(tail.back()) : m.mk_and(tail.size(), tail.c_ptr()), m);
expr_ref filter_cond = mk_and(tail);
m_free_vars.reset();
m_free_vars(filter_cond);
// create binding
binding.resize(m_free_vars.size()+1);
@ -620,6 +625,7 @@ namespace datalog {
} else {
// we have an unbound variable, so we add an unbound column for it
relation_sort unbound_sort = m_free_vars[v];
TRACE("dl", tout << "unbound: " << v << "\n" << filter_cond << " " << mk_pp(unbound_sort, m) << "\n";);
make_add_unbound_column(r, 0, head_pred, filtered_res, unbound_sort, filtered_res, dealloc, acc);
src_col = single_res_expr.size();

View file

@ -180,7 +180,7 @@ namespace datalog {
void make_add_constant_column(func_decl* pred, reg_idx src, const relation_sort s, const relation_element val,
reg_idx & result, bool & dealloc, instruction_block & acc);
void make_add_unbound_column(rule* compiled_rule, unsigned col_idx, func_decl* pred, reg_idx src, const relation_sort s, reg_idx & result,
void make_add_unbound_column(rule* compiled_rule, unsigned col_idx, func_decl* pred, reg_idx src, const relation_sort& s, reg_idx & result,
bool & dealloc, instruction_block & acc);
void make_full_relation(func_decl* pred, const relation_signature & sig, reg_idx & result,
instruction_block & acc);

View file

@ -1052,7 +1052,8 @@ namespace datalog {
}
virtual void display_head_impl(execution_context const& ctx, std::ostream & out) const {
out << "mk_total into " << m_tgt << " sort:"
<< ctx.get_rel_context().get_rmanager().to_nice_string(m_sig);
<< ctx.get_rel_context().get_rmanager().to_nice_string(m_sig)
<< " " << m_pred->get_name();
}
virtual void make_annotations(execution_context & ctx) {
std::string s;

View file

@ -102,7 +102,7 @@ namespace datalog {
in the pair, and so it should be removed.
*/
bool remove_rule(rule * r, unsigned original_length) {
TRUSTME( remove_from_vector(m_rules, r) );
VERIFY( remove_from_vector(m_rules, r) );
if (original_length>2) {
SASSERT(m_consumers>0);
m_consumers--;
@ -114,22 +114,23 @@ namespace datalog {
pair_info & operator=(const pair_info &); //to avoid the implicit one
};
typedef std::pair<app*, app*> app_pair;
typedef map<app_pair, pair_info *,
pair_hash<obj_ptr_hash<app>, obj_ptr_hash<app> >, default_eq<app_pair> > cost_map;
typedef pair_hash<obj_ptr_hash<app>, obj_ptr_hash<app> > app_pair_hash;
typedef map<app_pair, pair_info *, app_pair_hash, default_eq<app_pair> > cost_map;
typedef map<rule *, ptr_vector<app>, ptr_hash<rule>, ptr_eq<rule> > rule_pred_map;
typedef ptr_hashtable<rule, rule_hash_proc, default_eq<rule *> > rule_hashtable;
context & m_context;
ast_manager & m;
rule_manager & rm;
var_subst & m_var_subst;
rule_set & m_rs_aux_copy; //reference to a rule_set that will allow to ask for stratum levels
cost_map m_costs;
ptr_vector<app> m_interpreted;
rule_pred_map m_rules_content;
rule_ref_vector m_introduced_rules;
ptr_hashtable<rule, ptr_hash<rule>, ptr_eq<rule> > m_modified_rules;
context & m_context;
ast_manager & m;
rule_manager & rm;
var_subst & m_var_subst;
rule_set & m_rs_aux_copy; //reference to a rule_set that will allow to ask for stratum levels
cost_map m_costs;
ptr_vector<app> m_interpreted;
rule_pred_map m_rules_content;
rule_ref_vector m_introduced_rules;
bool m_modified_rules;
ast_ref_vector m_pinned;
mutable ptr_vector<sort> m_vars;
@ -140,6 +141,7 @@ namespace datalog {
m_var_subst(ctx.get_var_subst()),
m_rs_aux_copy(rs_aux_copy),
m_introduced_rules(ctx.get_rule_manager()),
m_modified_rules(false),
m_pinned(ctx.get_manager())
{
}
@ -248,20 +250,13 @@ namespace datalog {
m_var_subst(t2, norm_subst.size(), norm_subst.c_ptr(), t2n_ref);
app * t1n = to_app(t1n_ref);
app * t2n = to_app(t2n_ref);
if (t1n>t2n) {
if (t1n->get_id() > t2n->get_id()) {
std::swap(t1n, t2n);
}
m_pinned.push_back(t1n);
m_pinned.push_back(t2n);
/*
IF_VERBOSE(0,
print_renaming(norm_subst, verbose_stream());
display_predicate(m_context, t1, verbose_stream());
display_predicate(m_context, t2, verbose_stream());
display_predicate(m_context, t1n, verbose_stream());
display_predicate(m_context, t2n, verbose_stream()););
*/
return app_pair(t1n, t2n);
}
@ -392,7 +387,7 @@ namespace datalog {
func_decl* parent_head = one_parent->get_decl();
const char * one_parent_name = parent_head->get_name().bare_str();
std::string parent_name;
if (inf.m_rules.size()>1) {
if (inf.m_rules.size() > 1) {
parent_name = one_parent_name + std::string("_and_") + to_string(inf.m_rules.size()-1);
}
else {
@ -417,15 +412,16 @@ namespace datalog {
//here we copy the inf.m_rules vector because inf.m_rules will get changed
//in the iteration. Also we use hashtable instead of vector because we do
//not want to process one rule twice.
typedef ptr_hashtable<rule, ptr_hash<rule>, default_eq<rule *> > rule_hashtable;
rule_hashtable relevant_rules;
insert_into_set(relevant_rules, inf.m_rules);
rule_hashtable::iterator rit = relevant_rules.begin();
rule_hashtable::iterator rend = relevant_rules.end();
for(; rit!=rend; ++rit) {
apply_binary_rule(*rit, pair_key, head);
}
rule_hashtable processed_rules;
rule_vector rules(inf.m_rules);
for (unsigned i = 0; i < rules.size(); ++i) {
rule* r = rules[i];
if (!processed_rules.contains(r)) {
apply_binary_rule(r, pair_key, head);
processed_rules.insert(r);
}
}
// SASSERT(!m_costs.contains(pair_key));
}
@ -553,7 +549,7 @@ namespace datalog {
}
SASSERT(!removed_tails.empty());
SASSERT(!added_tails.empty());
m_modified_rules.insert(r);
m_modified_rules = true;
replace_edges(r, removed_tails, added_tails, rule_content);
}
@ -705,7 +701,7 @@ namespace datalog {
join_pair(selected);
}
if (m_modified_rules.empty()) {
if (!m_modified_rules) {
return 0;
}
rule_set * result = alloc(rule_set, m_context);

View file

@ -488,7 +488,9 @@ namespace datalog {
}
std::string relation_manager::to_nice_string(const relation_sort & s) const {
return std::string(s->get_name().bare_str());
std::ostringstream strm;
strm << mk_pp(s, get_context().get_manager());
return strm.str();
}
std::string relation_manager::to_nice_string(const relation_signature & s) const {

View file

@ -97,7 +97,7 @@ namespace datalog {
TRACE("dl", tout << mk_pp(body, m) << "\n";);
// 2. replace bound variables by constants.
expr_ref_vector consts(m), bound(m), free(m);
expr_ref_vector consts(m), bound(m), _free(m);
svector<symbol> names;
ptr_vector<sort> bound_sorts;
for (unsigned i = 0; i < sorts.size(); ++i) {
@ -110,7 +110,7 @@ namespace datalog {
bound_sorts.push_back(s);
}
else {
free.push_back(consts.back());
_free.push_back(consts.back());
}
}
rep(body);
@ -123,8 +123,8 @@ namespace datalog {
TRACE("dl", tout << mk_pp(body, m) << "\n";);
// 4. replace remaining constants by variables.
for (unsigned i = 0; i < free.size(); ++i) {
rep.insert(free[i].get(), m.mk_var(i, m.get_sort(free[i].get())));
for (unsigned i = 0; i < _free.size(); ++i) {
rep.insert(_free[i].get(), m.mk_var(i, m.get_sort(_free[i].get())));
}
rep(body);
g->set_else(body);

View file

@ -43,16 +43,16 @@ namespace datalog {
bool mk_unbound_compressor::is_unbound_argument(rule * r, unsigned head_index) {
app * head = r->get_head();
expr * head_arg = head->get_arg(head_index);
if (!is_var(head_arg)) {
return false;
}
unsigned var_idx = to_var(head_arg)->get_idx();
return rm.collect_tail_vars(r).contains(var_idx);
unsigned var_idx;
return
is_var(head_arg, var_idx) &&
rm.collect_tail_vars(r).contains(var_idx);
}
void mk_unbound_compressor::add_task(func_decl * pred, unsigned arg_index) {
c_info ci = c_info(pred, arg_index);
SASSERT(pred->get_arity() > 0);
c_info ci(pred, arg_index);
if (m_map.contains(ci)) {
return; //this task was already added
}
@ -74,9 +74,11 @@ namespace datalog {
func_decl * cpred = m_context.mk_fresh_head_predicate(parent_name, symbol(name_suffix.str().c_str()),
arity, domain.c_ptr(), pred);
m_pinned.push_back(cpred);
m_pinned.push_back(pred);
m_todo.push_back(ci);
m_map.insert(ci, cpred);
TRACE("dl", tout << "inserting: " << pred->get_name() << " " << arg_index << " for " << cpred->get_name() << "\n";);
}
void mk_unbound_compressor::detect_tasks(rule_set const& source, unsigned rule_index) {
@ -95,28 +97,26 @@ namespace datalog {
rm.get_counter().reset();
rm.get_counter().count_vars(head, 1);
for (unsigned i=0; i<n; i++) {
for (unsigned i = 0; i < n; i++) {
expr * arg = head->get_arg(i);
if (!is_var(arg)) {
continue;
}
unsigned var_idx = to_var(arg)->get_idx();
if (!tail_vars.contains(var_idx)) {
//unbound
unsigned occurence_cnt = rm.get_counter().get(var_idx);
SASSERT(occurence_cnt>0);
if (occurence_cnt == 1) {
TRACE("dl", r->display(m_context, tout << "Compress: "););
add_task(head_pred, i);
return; //we compress out the unbound arguments one by one
}
unsigned var_idx;
if (is_var(arg, var_idx) &&
!tail_vars.contains(var_idx) &&
(1 == rm.get_counter().get(var_idx))) {
TRACE("dl", r->display(m_context, tout << "Compress: "););
add_task(head_pred, i);
break;
//we compress out the unbound arguments one by one
}
}
}
void mk_unbound_compressor::try_compress(rule_set const& source, unsigned rule_index) {
start:
//
// l_undef if nothing to compress.
// l_true if compressed.
// l_false if removed.
//
lbool mk_unbound_compressor::try_compress(rule_set const& source, unsigned rule_index) {
rule * r = m_rules.get(rule_index);
var_idx_set& tail_vars = rm.collect_tail_vars(r);
@ -130,30 +130,25 @@ namespace datalog {
unsigned arg_index;
for (arg_index = 0; arg_index < head_arity; arg_index++) {
expr * arg = head->get_arg(arg_index);
if (!is_var(arg)) {
continue;
}
unsigned var_idx = to_var(arg)->get_idx();
if (!tail_vars.contains(var_idx)) {
//unbound
unsigned occurence_cnt = rm.get_counter().get(var_idx);
SASSERT(occurence_cnt>0);
if ( occurence_cnt==1 && m_in_progress.contains(c_info(head_pred, arg_index)) ) {
//we have found what to compress
break;
}
unsigned var_idx;
if (is_var(arg, var_idx) &&
!tail_vars.contains(var_idx) &&
(rm.get_counter().get(var_idx) == 1) &&
m_in_progress.contains(c_info(head_pred, arg_index))) {
break;
}
}
if (arg_index == head_arity) {
//we didn't find anything to compress
return;
return l_undef;
}
SASSERT(arg_index<head_arity);
c_info ci(head_pred, arg_index);
func_decl * cpred;
TRUSTME( m_map.find(ci, cpred) );
SASSERT(arg_index < head_arity);
SASSERT(m_in_progress.contains(ci));
func_decl * cpred = m_map.find(ci);
ptr_vector<expr> cargs;
for (unsigned i=0; i<head_arity; i++) {
for (unsigned i=0; i < head_arity; i++) {
if (i != arg_index) {
cargs.push_back(head->get_arg(i));
}
@ -161,41 +156,48 @@ namespace datalog {
app_ref chead(m.mk_app(cpred, head_arity-1, cargs.c_ptr()), m);
m_modified = true;
if (r->get_tail_size()==0 && m_context.get_rule_manager().is_fact(chead)) {
m_non_empty_rels.insert(cpred);
m_context.add_fact(chead);
                //remove the rule that became a fact by moving the last rule into its place
m_head_occurrence_ctr.dec(m_rules.get(rule_index)->get_decl());
m_rules.set(rule_index, m_rules.get(m_rules.size()-1));
m_rules.shrink(m_rules.size()-1);
//since we moved the last rule to rule_index, we have to try to compress it as well
if (rule_index<m_rules.size()) {
goto start;
unsigned new_size = m_rules.size() - 1;
rule* last_rule = m_rules.get(new_size);
TRACE("dl", tout << "remove\n"; r->display(m_context, tout);
tout << "shift\n"; last_rule->display(m_context, tout););
if (rule_index < new_size) {
m_rules.set(rule_index, last_rule);
}
m_rules.shrink(new_size);
return l_false;
}
else {
rule_ref new_rule(m_context.get_rule_manager().mk(r, chead), m_context.get_rule_manager());
new_rule->set_accounting_parent_object(m_context, r);
m_head_occurrence_ctr.dec(m_rules.get(rule_index)->get_decl());
TRACE("dl", tout << "remove\n"; r->display(m_context, tout);
tout << "set\n"; new_rule->display(m_context, tout););
m_rules.set(rule_index, new_rule);
m_head_occurrence_ctr.inc(m_rules.get(rule_index)->get_decl());
detect_tasks(source, rule_index);
return l_true;
}
m_modified = true;
}
void mk_unbound_compressor::mk_decompression_rule(rule * r, unsigned tail_index, unsigned arg_index,
rule_ref& res)
{
rule_ref mk_unbound_compressor::mk_decompression_rule(rule * r, unsigned tail_index, unsigned arg_index) {
rule_ref res(m_context.get_rule_manager());
app * orig_dtail = r->get_tail(tail_index); //dtail ~ decompressed tail
c_info ci(orig_dtail->get_decl(), arg_index);
func_decl * dtail_pred;
TRUSTME( m_map.find(ci, dtail_pred) );
TRACE("dl", tout << "retrieving: " << ci.first->get_name() << " " << ci.second << "\n";);
func_decl * dtail_pred = m_map.find(ci);
ptr_vector<expr> dtail_args;
unsigned orig_dtail_arity = orig_dtail->get_num_args();
for (unsigned i=0;i<orig_dtail_arity;i++) {
for (unsigned i = 0; i < orig_dtail_arity; i++) {
if (i != arg_index) {
dtail_args.push_back(orig_dtail->get_arg(i));
}
@ -206,9 +208,9 @@ namespace datalog {
svector<bool> tails_negated;
app_ref_vector tails(m);
unsigned tail_len = r->get_tail_size();
for (unsigned i=0; i<tail_len; i++) {
for (unsigned i = 0; i < tail_len; i++) {
tails_negated.push_back(r->is_neg_tail(i));
if (i==tail_index && !r->is_neg_tail(i)) {
if (i == tail_index && !r->is_neg_tail(i)) {
tails.push_back(dtail);
}
else {
@ -226,97 +228,108 @@ namespace datalog {
res = m_context.get_rule_manager().mk( r->get_head(), tails.size(), tails.c_ptr(), tails_negated.c_ptr());
res->set_accounting_parent_object(m_context, r);
m_context.get_rule_manager().fix_unbound_vars(res, true);
return res;
}
    //TODO: avoid duplicate rules
    //If two predicates are compressed in a rule, applying decompression
    //to the results can cause a rule to be added multiple times:
//P:- R(x,y), S(x,y)
//is decompressed into rules
//P:- R1(x), S(x,y)
//P:- R(x,y), S1(x)
//and each of these rules is again decompressed giving the same rule
//P:- R1(x), S1(x)
//P:- R1(x), S1(x)
void mk_unbound_compressor::add_decompression_rule(rule_set const& source, rule * r, unsigned tail_index, unsigned arg_index) {
rule_ref new_rule(m_context.get_rule_manager());
mk_decompression_rule(r, tail_index, arg_index, new_rule);
rule_ref new_rule = mk_decompression_rule(r, tail_index, arg_index);
unsigned new_rule_index = m_rules.size();
m_rules.push_back(new_rule);
TRACE("dl", r->display(m_context, tout); new_rule->display(m_context, tout); );
m_context.get_rule_manager().mk_rule_rewrite_proof(*r, *new_rule.get());
m_head_occurrence_ctr.inc(new_rule->get_decl());
detect_tasks(source, new_rule_index);
m_modified = true;
        //TODO: avoid duplicate rules
        //If two predicates are compressed in a rule, applying decompression
        //to the results can cause a rule to be added multiple times:
//P:- R(x,y), S(x,y)
//is decompressed into rules
//P:- R1(x), S(x,y)
//P:- R(x,y), S1(x)
//and each of these rules is again decompressed giving the same rule
//P:- R1(x), S1(x)
//P:- R1(x), S1(x)
}
void mk_unbound_compressor::replace_by_decompression_rule(rule_set const& source, unsigned rule_index, unsigned tail_index, unsigned arg_index)
{
void mk_unbound_compressor::replace_by_decompression_rule(rule_set const& source, unsigned rule_index, unsigned tail_index, unsigned arg_index) {
rule * r = m_rules.get(rule_index);
rule_ref new_rule(m_context.get_rule_manager());
mk_decompression_rule(r, tail_index, arg_index, new_rule);
rule_ref new_rule = mk_decompression_rule(r, tail_index, arg_index);
TRACE("dl", tout << "remove\n"; r->display(m_context, tout); tout << "set\n"; new_rule->display(m_context, tout););
m_rules.set(rule_index, new_rule);
//we don't update the m_head_occurrence_ctr because the head predicate doesn't change
// we don't update the m_head_occurrence_ctr because the head predicate doesn't change
detect_tasks(source, rule_index);
m_modified = true;
}
void mk_unbound_compressor::add_in_progress_indices(unsigned_vector& arg_indices, app* p) {
arg_indices.reset();
for (unsigned i = 0; i < p->get_num_args(); ++i) {
if (m_in_progress.contains(c_info(p->get_decl(), i))) {
SASSERT(m_map.contains(c_info(p->get_decl(), i)));
arg_indices.push_back(i);
}
}
}
bool mk_unbound_compressor::decompress_rule(rule_set const& source, rule* r, unsigned_vector const& arg_indices, unsigned rule_index, unsigned tail_index) {
app * t = r->get_tail(tail_index);
func_decl * t_pred = t->get_decl();
bool is_negated_predicate = r->is_neg_tail(tail_index);
bool replace_original_rule = false;
for (unsigned i = 0; i < arg_indices.size(); ++i) {
unsigned arg_index = arg_indices[i];
SASSERT(m_in_progress.contains(c_info(t_pred, arg_index)));
SASSERT(m_map.contains(c_info(t_pred, arg_index)));
bool can_remove_orig_rule =
arg_indices.empty() &&
!m_non_empty_rels.contains(t_pred) &&
m_head_occurrence_ctr.get(t_pred) == 0;
if (can_remove_orig_rule || is_negated_predicate) {
replace_original_rule = true;
replace_by_decompression_rule(source, rule_index, tail_index, arg_index);
// NB. arg_indices becomes stale after original rule is replaced.
if (is_negated_predicate && !can_remove_orig_rule) {
break;
}
}
else {
add_decompression_rule(source, r, tail_index, arg_index);
}
}
return replace_original_rule;
}
void mk_unbound_compressor::add_decompression_rules(rule_set const& source, unsigned rule_index) {
unsigned_vector compressed_tail_pred_arg_indexes;
//this value is updated inside the loop if replace_by_decompression_rule is called
unsigned_vector arg_indices;
// this value is updated inside the loop if replace_by_decompression_rule is called
rule_ref r(m_rules.get(rule_index), m_context.get_rule_manager());
unsigned utail_len = r->get_uninterpreted_tail_size();
unsigned tail_index=0;
while (tail_index<utail_len) {
unsigned tail_index = 0;
while (tail_index < utail_len) {
app * t = r->get_tail(tail_index);
func_decl * t_pred = t->get_decl();
unsigned t_arity = t_pred->get_arity();
bool is_negated_predicate = r->is_neg_tail(tail_index);
compressed_tail_pred_arg_indexes.reset();
for (unsigned arg_index=0; arg_index<t_arity; arg_index++) {
c_info ci(t_pred, arg_index);
if (m_in_progress.contains(ci)) {
compressed_tail_pred_arg_indexes.push_back(arg_index);
}
}
bool orig_rule_replaced = false;
while (!compressed_tail_pred_arg_indexes.empty()) {
unsigned arg_index = compressed_tail_pred_arg_indexes.back();
compressed_tail_pred_arg_indexes.pop_back();
bool can_remove_orig_rule =
compressed_tail_pred_arg_indexes.empty() &&
!m_non_empty_rels.contains(t_pred) &&
m_head_occurrence_ctr.get(t_pred)==0;
add_in_progress_indices(arg_indices, t);
if (can_remove_orig_rule || is_negated_predicate) {
replace_by_decompression_rule(source, rule_index, tail_index, arg_index);
orig_rule_replaced = true;
}
else {
add_decompression_rule(source, r, tail_index, arg_index);
}
}
if (orig_rule_replaced) {
//update r with the new rule
bool replace_original_rule = decompress_rule(source, r, arg_indices, rule_index, tail_index);
if (replace_original_rule) {
// update r with the new rule
rule * new_rule = m_rules.get(rule_index);
SASSERT(new_rule->get_uninterpreted_tail_size() >= utail_len);
//here we check that the rule replacement didn't affect other uninterpreted literals
                //in the tail (aside from variable renaming)
SASSERT(tail_index==0 ||
new_rule->get_tail(tail_index-1)->get_decl()==r->get_tail(tail_index-1)->get_decl());
// here we check that the rule replacement didn't affect other uninterpreted literals
                // in the tail (aside from variable renaming)
SASSERT(tail_index==0 || new_rule->get_decl(tail_index-1) == r->get_decl(tail_index-1));
r = new_rule;
@ -334,19 +347,20 @@ namespace datalog {
// TODO mc
m_modified = false;
SASSERT(m_rules.empty());
rel_context_base* rel = m_context.get_rel_context();
if (rel) {
rel->collect_non_empty_predicates(m_non_empty_rels);
}
unsigned init_rule_cnt = source.get_num_rules();
SASSERT(m_rules.empty());
for (unsigned i=0; i<init_rule_cnt; i++) {
for (unsigned i = 0; i < init_rule_cnt; i++) {
rule * r = source.get_rule(i);
m_rules.push_back(r);
m_head_occurrence_ctr.inc(r->get_decl());
}
for (unsigned i=0; i<init_rule_cnt; i++) {
for (unsigned i = 0; i < init_rule_cnt; i++) {
detect_tasks(source, i);
}
@ -356,13 +370,16 @@ namespace datalog {
m_in_progress.insert(m_todo.back());
m_todo.pop_back();
}
unsigned rule_index = 0;
while (rule_index<m_rules.size()) {
try_compress(source, rule_index); //m_rules.size() can change here
if (rule_index<m_rules.size()) {
add_decompression_rules(source, rule_index); //m_rules.size() can change here
for (unsigned rule_index = 0; rule_index < m_rules.size(); ) {
switch (try_compress(source, rule_index)) {
case l_true:
case l_undef:
add_decompression_rules(source, rule_index); //m_rules.size() can be increased here
++rule_index;
break;
case l_false:
break;
}
rule_index++;
}
}
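
    The rewritten driver loop above only advances rule_index when try_compress did not remove the rule; on l_false the last rule has been moved into the freed slot, so the same index is examined again. A small standalone sketch of this swap-remove traversal (names and element type are made up):

        #include <cstddef>
        #include <vector>

        // Visit every element; removal moves the last element into the freed
        // slot, so the index is only advanced when nothing was removed.
        static void process_all(std::vector<int>& v, bool (*try_remove)(int)) {
            for (std::size_t i = 0; i < v.size(); ) {
                if (try_remove(v[i])) {
                    v[i] = v.back();    // the moved-in element still needs a look
                    v.pop_back();
                }
                else {
                    ++i;
                }
            }
        }
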

View file

@ -74,11 +74,14 @@ namespace datalog {
void detect_tasks(rule_set const& source, unsigned rule_index);
void add_task(func_decl * pred, unsigned arg_index);
void try_compress(rule_set const& source, unsigned rule_index);
lbool try_compress(rule_set const& source, unsigned rule_index);
void add_decompression_rules(rule_set const& source, unsigned rule_index);
void mk_decompression_rule(rule * r, unsigned tail_index, unsigned arg_index, rule_ref& res);
rule_ref mk_decompression_rule(rule * r, unsigned tail_index, unsigned arg_index);
void add_decompression_rule(rule_set const& source, rule * r, unsigned tail_index, unsigned arg_index);
void replace_by_decompression_rule(rule_set const& source, unsigned rule_index, unsigned tail_index, unsigned arg_index);
void add_in_progress_indices(unsigned_vector& arg_indices, app* p);
        bool decompress_rule(rule_set const& source, rule* r, unsigned_vector const& compressed_tail_pred_arg_indexes, unsigned rule_index, unsigned tail_index);
void reset();
public:
mk_unbound_compressor(context & ctx);

View file

@ -2939,7 +2939,7 @@ namespace smt {
if (!m_asserted_formulas.inconsistent()) {
unsigned sz = m_asserted_formulas.get_num_formulas();
unsigned qhead = m_asserted_formulas.get_qhead();
while (qhead < sz) {
while (qhead < sz && !m_manager.canceled()) {
expr * f = m_asserted_formulas.get_formula(qhead);
proof * pr = m_asserted_formulas.get_formula_proof(qhead);
internalize_assertion(f, pr, 0);

View file

@ -656,10 +656,10 @@ namespace smt {
/**
\brief Enable the flag m_merge_tf in the given enode. When the
flag m_merge_tf is enabled, the enode n will be merged with the
true_enode (false_enode) whenever the boolean variable v is
true_enode (false_enode) whenever the Boolean variable v is
assigned to true (false).
If is_new_var is true, then trail is not created for the flag uodate.
If is_new_var is true, then trail is not created for the flag update.
*/
void context::set_merge_tf(enode * n, bool_var v, bool is_new_var) {
SASSERT(bool_var2enode(v) == n);
@ -674,8 +674,8 @@ namespace smt {
}
/**
\brief Trail object to disable the m_enode flag of a boolean
variable. The flag m_enode is true for a boolean variable v,
\brief Trail object to disable the m_enode flag of a Boolean
variable. The flag m_enode is true for a Boolean variable v,
if there is an enode n associated with it.
*/
class set_enode_flag_trail : public trail<context> {

View file

@ -35,6 +35,7 @@ namespace smt {
virtual void undo(theory_fpa & th) {
expr * val = m_map.find(key);
m_map.remove(key);
m.dec_ref(key);
m.dec_ref(val);
key = 0;
}
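
    The added m.dec_ref(key) mirrors the reference taken on the key when a conversion-cache entry is created (see the inc_ref calls on e and res where the entry is inserted further below). A rough sketch of the pattern with stand-in types, not Z3's actual classes:

        #include <map>

        // Stand-ins for ast_manager and obj_map, only to show the discipline:
        // the undo action must release the references taken on *both* the key
        // and the cached value, otherwise backtracking leaks them.
        struct ref_manager {
            std::map<const void*, int> refs;
            void inc_ref(const void* p) { ++refs[p]; }
            void dec_ref(const void* p) { --refs[p]; }
        };

        struct cache_insert_undo {
            ref_manager&                         m;
            std::map<const void*, const void*>&  cache;
            const void*                          key;

            void undo() {
                const void* val = cache[key];
                cache.erase(key);
                m.dec_ref(key);   // balances the inc_ref on the key at insertion
                m.dec_ref(val);   // balances the inc_ref on the value at insertion
            }
        };
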
@ -75,7 +76,7 @@ namespace smt {
SASSERT(is_rm(f->get_range()));
expr_ref bv(m);
bv = m_th.wrap(m.mk_const(f));
result = m_util.mk_rm(bv);
result = m_util.mk_bv2rm(bv);
m_rm_const2bv.insert(f, result);
m.inc_ref(f);
m.inc_ref(result);
@ -88,7 +89,6 @@ namespace smt {
}
expr_ref theory_fpa::fpa2bv_converter_wrapped::mk_min_max_unspecified(func_decl * f, expr * x, expr * y) {
expr_ref a(m), wrapped(m), wu(m), wu_eq(m);
a = m.mk_app(f, x, y);
wrapped = m_th.wrap(a);
@ -96,8 +96,9 @@ namespace smt {
wu_eq = m.mk_eq(wu, a);
m_extra_assertions.push_back(wu_eq);
unsigned bv_sz = m_bv_util.get_bv_size(wrapped);
expr_ref sc(m);
m_th.m_converter.mk_is_zero(wu, sc);
sc = m.mk_eq(m_bv_util.mk_extract(bv_sz-2, 0, wrapped), m_bv_util.mk_numeral(0, bv_sz-1));
m_extra_assertions.push_back(sc);
return wu;
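
    The new side condition constrains every bit below the sign bit of the wrapped result to zero, i.e. the otherwise unspecified min/max result may only be a positive or negative zero. An illustration for the 32-bit IEEE single-precision layout (the concrete width is assumed here, not taken from the patch):

        #include <cstdint>

        // 1 sign + 8 exponent + 23 significand bits: clearing every bit below
        // the sign bit leaves exactly +0.0f or -0.0f, which is what the
        // extract/equality side condition expresses.
        static bool is_pos_or_neg_zero(uint32_t bits) {
            return (bits & 0x7FFFFFFFu) == 0;   // bits[30:0] == 0, sign bit free
        }
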
@ -125,17 +126,19 @@ namespace smt {
if (m_is_initialized) {
ast_manager & m = get_manager();
dec_ref_map_values(m, m_conversions);
dec_ref_map_values(m, m_wraps);
}
else {
SASSERT(m_conversions.empty());
SASSERT(m_wraps.empty());
dec_ref_map_key_values(m, m_conversions);
dec_ref_collection_values(m, m_is_added_to_model);
m_converter.reset();
m_rw.reset();
m_th_rw.reset();
m_is_initialized = false;
}
m_is_initialized = false;
}
SASSERT(m_trail_stack.get_num_scopes() == 0);
SASSERT(m_conversions.empty());
SASSERT(m_is_added_to_model.empty());
}
void theory_fpa::init(context * ctx) {
smt::theory::init(ctx);
m_is_initialized = true;
@ -258,28 +261,21 @@ namespace smt {
m_th_rw((expr_ref&)res);
}
else {
sort * e_srt = m.get_sort(e);
func_decl * w;
sort * es = m.get_sort(e);
if (!m_wraps.find(e_srt, w)) {
SASSERT(!m_wraps.contains(e_srt));
sort * bv_srt;
if (m_converter.is_rm(e_srt))
bv_srt = m_bv_util.mk_sort(3);
else {
SASSERT(m_converter.is_float(e_srt));
unsigned ebits = m_fpa_util.get_ebits(e_srt);
unsigned sbits = m_fpa_util.get_sbits(e_srt);
bv_srt = m_bv_util.mk_sort(ebits + sbits);
}
w = m.mk_func_decl(get_family_id(), OP_FPA_INTERNAL_BVWRAP, 0, 0, 1, &e_srt, bv_srt);
m_wraps.insert(e_srt, w);
m.inc_ref(w);
sort_ref bv_srt(m);
if (m_converter.is_rm(es))
bv_srt = m_bv_util.mk_sort(3);
else {
SASSERT(m_converter.is_float(es));
unsigned ebits = m_fpa_util.get_ebits(es);
unsigned sbits = m_fpa_util.get_sbits(es);
bv_srt = m_bv_util.mk_sort(ebits + sbits);
}
res = m.mk_app(w, e);
func_decl_ref wrap_fd(m);
wrap_fd = m.mk_func_decl(get_family_id(), OP_FPA_INTERNAL_BVWRAP, 0, 0, 1, &es, bv_srt);
res = m.mk_app(wrap_fd, e);
}
return res;
@ -337,18 +333,11 @@ namespace smt {
TRACE("t_fpa_detail", tout << "term: " << mk_ismt2_pp(e, get_manager()) << std::endl;
tout << "converted term: " << mk_ismt2_pp(e_conv, get_manager()) << std::endl;);
if (is_app(e_conv) && to_app(e_conv)->get_family_id() != get_family_id()) {
UNREACHABLE();
if (!m_fpa_util.is_float(e_conv) && !m_fpa_util.is_rm(e_conv))
m_th_rw(e_conv, res);
else
res = unwrap(wrap(e_conv), m.get_sort(e));
}
else if (m_fpa_util.is_rm(e)) {
SASSERT(m_fpa_util.is_rm_bvwrap(e_conv));
if (m_fpa_util.is_rm(e)) {
SASSERT(m_fpa_util.is_bv2rm(e_conv));
expr_ref bv_rm(m);
m_th_rw(to_app(e_conv)->get_arg(0), bv_rm);
res = m_fpa_util.mk_rm(bv_rm);
res = m_fpa_util.mk_bv2rm(bv_rm);
}
else if (m_fpa_util.is_float(e)) {
SASSERT(m_fpa_util.is_fp(e_conv));
@ -361,20 +350,14 @@ namespace smt {
}
else
UNREACHABLE();
SASSERT(res.get() != 0);
return res;
}
expr_ref theory_fpa::convert_conversion_term(expr * e) {
SASSERT(to_app(e)->get_family_id() == get_family_id());
/* This is for the conversion functions fp.to_* */
ast_manager & m = get_manager();
expr_ref res(m);
proof_ref pr(m);
SASSERT(m_arith_util.is_real(e) || m_bv_util.is_bv(e));
expr_ref res(get_manager());
m_rw(e, res);
m_th_rw(res, res);
return res;
@ -384,11 +367,11 @@ namespace smt {
{
ast_manager & m = get_manager();
expr_ref res(m);
expr * ccnv;
TRACE("t_fpa", tout << "converting " << mk_ismt2_pp(e, m) << std::endl;);
if (m_conversions.contains(e)) {
res = m_conversions.find(e);
if (m_conversions.find(e, ccnv)) {
res = ccnv;
TRACE("t_fpa_detail", tout << "cached:" << std::endl;
tout << mk_ismt2_pp(e, m) << std::endl << " -> " << std::endl <<
mk_ismt2_pp(res, m) << std::endl;);
@ -400,16 +383,15 @@ namespace smt {
res = convert_atom(e);
else if (m_fpa_util.is_float(e) || m_fpa_util.is_rm(e))
res = convert_term(e);
else if (m_arith_util.is_real(e) || m_bv_util.is_bv(e))
else
res = convert_conversion_term(e);
else
UNREACHABLE();
TRACE("t_fpa_detail", tout << "converted; caching:" << std::endl;
tout << mk_ismt2_pp(e, m) << std::endl << " -> " << std::endl <<
mk_ismt2_pp(res, m) << std::endl;);
m_conversions.insert(e, res);
m.inc_ref(e);
m.inc_ref(res);
m_trail_stack.push(fpa2bv_conversion_trail_elem(m, m_conversions, e));
}
@ -434,7 +416,6 @@ namespace smt {
res = m.mk_and(res, t);
}
m_converter.m_extra_assertions.reset();
m_th_rw(res);
CTRACE("t_fpa", !m.is_true(res), tout << "side condition: " << mk_ismt2_pp(res, m) << std::endl;);
@ -476,10 +457,11 @@ namespace smt {
ctx.set_var_theory(l.var(), get_id());
expr_ref bv_atom(convert_atom(atom));
expr_ref bv_atom_w_side_c(m);
expr_ref bv_atom_w_side_c(m), atom_eq(m);
bv_atom_w_side_c = m.mk_and(bv_atom, mk_side_conditions());
m_th_rw(bv_atom_w_side_c);
assert_cnstr(m.mk_eq(atom, bv_atom_w_side_c));
atom_eq = m.mk_eq(atom, bv_atom_w_side_c);
assert_cnstr(atom_eq);
return true;
}
@ -512,7 +494,6 @@ namespace smt {
case OP_FPA_TO_SBV:
case OP_FPA_TO_REAL:
case OP_FPA_TO_IEEE_BV: {
expr_ref conv(m);
conv = convert(term);
assert_cnstr(m.mk_eq(term, conv));
@ -543,7 +524,7 @@ namespace smt {
if (m_fpa_util.is_rm(s)) {
            // For every RM term, we need to make sure that its
// associated bit-vector is within the valid range.
if (!m_fpa_util.is_rm_bvwrap(owner)) {
if (!m_fpa_util.is_bv2rm(owner)) {
expr_ref valid(m), limit(m);
limit = m_bv_util.mk_numeral(4, 3);
valid = m_bv_util.mk_ule(wrap(owner), limit);
@ -590,7 +571,10 @@ namespace smt {
c = m.mk_eq(xc, yc);
m_th_rw(c);
assert_cnstr(m.mk_iff(m.mk_eq(xe, ye), c));
expr_ref xe_eq_ye(m), c_eq_iff(m);
xe_eq_ye = m.mk_eq(xe, ye);
c_eq_iff = m.mk_iff(xe_eq_ye, c);
assert_cnstr(c_eq_iff);
assert_cnstr(mk_side_conditions());
return;
@ -625,11 +609,19 @@ namespace smt {
m_converter.mk_eq(xc, yc, c);
c = m.mk_not(c);
}
else
c = m.mk_not(m.mk_eq(xc, yc));
else {
expr_ref xc_eq_yc(m);
xc_eq_yc = m.mk_eq(xc, yc);
c = m.mk_not(xc_eq_yc);
}
m_th_rw(c);
assert_cnstr(m.mk_iff(m.mk_not(m.mk_eq(xe, ye)), c));
expr_ref xe_eq_ye(m), not_xe_eq_ye(m), c_eq_iff(m);
xe_eq_ye = m.mk_eq(xe, ye);
not_xe_eq_ye = m.mk_not(xe_eq_ye);
c_eq_iff = m.mk_iff(not_xe_eq_ye, c);
assert_cnstr(c_eq_iff);
assert_cnstr(mk_side_conditions());
return;
@ -647,7 +639,6 @@ namespace smt {
void theory_fpa::pop_scope_eh(unsigned num_scopes) {
m_trail_stack.pop_scope(num_scopes);
TRACE("t_fpa", tout << "pop " << num_scopes << "; now " << m_trail_stack.get_num_scopes() << "\n";);
// unsigned num_old_vars = get_old_num_vars(num_scopes);
theory::pop_scope_eh(num_scopes);
}
@ -680,19 +671,23 @@ namespace smt {
mpf_rounding_mode rm;
scoped_mpf val(mpfm);
if (m_fpa_util.is_rm_numeral(n, rm)) {
c = m.mk_eq(wrapped, m_bv_util.mk_numeral(rm, 3));
expr_ref rm_num(m);
rm_num = m_bv_util.mk_numeral(rm, 3);
c = m.mk_eq(wrapped, rm_num);
assert_cnstr(c);
}
else if (m_fpa_util.is_numeral(n, val)) {
expr_ref bv_val_e(m);
expr_ref bv_val_e(m), cc_args(m);
bv_val_e = convert(n);
SASSERT(is_app(bv_val_e));
SASSERT(to_app(bv_val_e)->get_num_args() == 3);
app_ref bv_val_a(to_app(bv_val_e.get()), m);
app_ref bv_val_a(m);
bv_val_a = to_app(bv_val_e.get());
expr * args[] = { bv_val_a->get_arg(0), bv_val_a->get_arg(1), bv_val_a->get_arg(2) };
c = m.mk_eq(wrapped, m_bv_util.mk_concat(3, args));
c = m.mk_and(c, mk_side_conditions());
cc_args = m_bv_util.mk_concat(3, args);
c = m.mk_eq(wrapped, cc_args);
assert_cnstr(c);
assert_cnstr(mk_side_conditions());
}
else {
expr_ref wu(m);
@ -715,12 +710,12 @@ namespace smt {
pop_scope_eh(m_trail_stack.get_num_scopes());
m_converter.reset();
m_rw.reset();
m_th_rw.reset();
m_trail_stack.pop_scope(m_trail_stack.get_num_scopes());
m_th_rw.reset();
m_trail_stack.pop_scope(m_trail_stack.get_num_scopes());
if (m_factory) dealloc(m_factory); m_factory = 0;
ast_manager & m = get_manager();
dec_ref_map_values(m, m_conversions);
dec_ref_map_values(m, m_wraps);
dec_ref_map_key_values(m, m_conversions);
dec_ref_collection_values(m, m_is_added_to_model);
theory::reset_eh();
}
@ -783,7 +778,7 @@ namespace smt {
mk_ismt2_pp(a2, m) << " eq. cls. #" << get_enode(a2)->get_root()->get_owner()->get_id() << std::endl;);
res = vp;
}
else if (m_fpa_util.is_rm_bvwrap(owner)) {
else if (m_fpa_util.is_bv2rm(owner)) {
SASSERT(to_app(owner)->get_num_args() == 1);
app_ref a0(m);
a0 = to_app(owner->get_arg(0));
@ -882,6 +877,7 @@ namespace smt {
m_fpa_util.is_to_real_unspecified(f);
if (include && !m_is_added_to_model.contains(f)) {
m_is_added_to_model.insert(f);
get_manager().inc_ref(f);
return true;
}
return false;

View file

@ -144,8 +144,6 @@ namespace smt {
fpa_util & m_fpa_util;
bv_util & m_bv_util;
arith_util & m_arith_util;
obj_map<sort, func_decl*> m_wraps;
obj_map<sort, func_decl*> m_unwraps;
obj_map<expr, expr*> m_conversions;
bool m_is_initialized;
obj_hashtable<func_decl> m_is_added_to_model;

View file

@ -242,6 +242,7 @@ void theory_seq::init(context* ctx) {
}
final_check_status theory_seq::final_check_eh() {
m_new_propagation = false;
TRACE("seq", display(tout << "level: " << get_context().get_scope_level() << "\n"););
if (simplify_and_solve_eqs()) {
++m_stats.m_solve_eqs;
@ -1398,7 +1399,9 @@ bool theory_seq::is_var(expr* a) {
!m_util.str.is_concat(a) &&
!m_util.str.is_empty(a) &&
!m_util.str.is_string(a) &&
!m_util.str.is_unit(a);
!m_util.str.is_unit(a) &&
!m_util.str.is_itos(a) &&
!m.is_ite(a);
}
@ -1434,7 +1437,7 @@ bool theory_seq::solve_eqs(unsigned i) {
change = true;
}
}
return change || ctx.inconsistent();
return change || m_new_propagation || ctx.inconsistent();
}
bool theory_seq::solve_eq(expr_ref_vector const& l, expr_ref_vector const& r, dependency* deps) {
@ -1829,8 +1832,6 @@ bool theory_seq::solve_ne(unsigned idx) {
continue;
}
else {
if (!updated) {
for (unsigned j = 0; j < i; ++j) {
new_ls.push_back(n.ls(j));
@ -2118,7 +2119,6 @@ bool theory_seq::explain_empty(expr_ref_vector& es, dependency*& dep) {
bool theory_seq::simplify_and_solve_eqs() {
context & ctx = get_context();
m_new_propagation = false;
m_new_solution = true;
while (m_new_solution && !ctx.inconsistent()) {
m_new_solution = false;
@ -2208,6 +2208,7 @@ bool theory_seq::check_int_string() {
}
bool theory_seq::add_itos_axiom(expr* e) {
context& ctx = get_context();
rational val;
expr* n;
VERIFY(m_util.str.is_itos(e, n));
@ -2216,7 +2217,23 @@ bool theory_seq::add_itos_axiom(expr* e) {
m_itos_axioms.insert(val);
app_ref e1(m_util.str.mk_string(symbol(val.to_string().c_str())), m);
expr_ref n1(arith_util(m).mk_numeral(val, true), m);
#if 1
// itos(n) = "25" <=> n = 25
literal eq1 = mk_eq(n1, n , false);
literal eq2 = mk_eq(e, e1, false);
add_axiom(~eq1, eq2);
add_axiom(~eq2, eq1);
ctx.force_phase(eq1);
ctx.force_phase(eq2);
#else
// "25" = itos(25)
// stoi(itos(n)) = n
app_ref e2(m_util.str.mk_stoi(e), m);
add_axiom(mk_eq(e2, n, false));
add_axiom(mk_eq(m_util.str.mk_itos(n1), e1, false));
#endif
m_trail_stack.push(insert_map<theory_seq, rational_set, rational>(m_itos_axioms, val));
m_trail_stack.push(push_replay(alloc(replay_axiom, m, e)));
return true;
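
    The #if 1 branch above encodes the biconditional itos(n) = "25" <=> n = 25 as two implications asserted as separate clauses, with force_phase nudging the solver to decide both equalities. A generic sketch of that clause split, using a DIMACS-style literal encoding (purely illustrative, not Z3's literal type):

        #include <vector>

        // Positive integers denote literals, negated integers their complements.
        static void add_iff(std::vector<std::vector<int> >& clauses, int a, int b) {
            clauses.push_back({ -a,  b });   // a -> b
            clauses.push_back({ -b,  a });   // b -> a
        }
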
@ -2433,9 +2450,10 @@ public:
}
else {
zstring zs;
VERIFY(th.m_util.str.is_string(m_strings[k++], zs));
for (unsigned l = 0; l < zs.length(); ++l) {
sbuffer.push_back(zs[l]);
if (th.m_util.str.is_string(m_strings[k++], zs)) {
for (unsigned l = 0; l < zs.length(); ++l) {
sbuffer.push_back(zs[l]);
}
}
}
}
@ -2625,6 +2643,7 @@ expr_ref theory_seq::expand(expr* e0, dependency*& eqs) {
else if (m_util.str.is_itos(e, e1)) {
rational val;
if (get_value(e1, val)) {
TRACE("seq", tout << mk_pp(e, m) << " -> " << val << "\n";);
expr_ref num(m), res(m);
num = m_autil.mk_numeral(val, true);
if (!ctx.e_internalized(num)) {
@ -2638,6 +2657,7 @@ expr_ref theory_seq::expand(expr* e0, dependency*& eqs) {
deps = m_dm.mk_join(deps, m_dm.mk_leaf(assumption(n1, n2)));
}
else {
TRACE("seq", tout << "add axiom\n";);
add_axiom(~mk_eq(num, e1, false), mk_eq(e, res, false));
add_axiom(mk_eq(num, e1, false), ~mk_eq(e, res, false));
result = e;
@ -2883,7 +2903,7 @@ void theory_seq::add_length_axiom(expr* n) {
add_axiom(mk_eq(len, n, false));
}
else if (m_util.str.is_itos(x)) {
add_axiom(mk_literal(m_autil.mk_ge(n, m_autil.mk_int(1))));
add_itos_length_axiom(n);
}
else {
add_axiom(mk_literal(m_autil.mk_ge(n, m_autil.mk_int(0))));
@ -2891,7 +2911,68 @@ void theory_seq::add_length_axiom(expr* n) {
if (!ctx.at_base_level()) {
m_trail_stack.push(push_replay(alloc(replay_axiom, m, n)));
}
}
void theory_seq::add_itos_length_axiom(expr* len) {
expr* x, *n;
VERIFY(m_util.str.is_length(len, x));
VERIFY(m_util.str.is_itos(x, n));
unsigned num_char1 = 1, num_char2 = 1;
rational len1, len2;
rational ten(10);
if (get_value(n, len1)) {
bool neg = len1.is_neg();
if (neg) len1.neg();
num_char1 = neg?2:1;
// 0 <= x < 10
// 10 <= x < 100
// 100 <= x < 1000
rational upper(10);
while (len1 > upper) {
++num_char1;
upper *= ten;
}
SASSERT(len1 <= upper);
}
if (get_value(len, len2) && len2.is_unsigned()) {
num_char2 = len2.get_unsigned();
}
unsigned num_char = std::max(num_char1, num_char2);
literal len_le(mk_literal(m_autil.mk_le(len, m_autil.mk_int(num_char))));
literal len_ge(mk_literal(m_autil.mk_ge(len, m_autil.mk_int(num_char))));
if (num_char == 1) {
add_axiom(len_ge);
literal n_ge_0(mk_literal(m_autil.mk_ge(n, m_autil.mk_int(0))));
literal n_ge_10(mk_literal(m_autil.mk_ge(n, m_autil.mk_int(10))));
add_axiom(~n_ge_0, n_ge_10, len_le);
add_axiom(~len_le, n_ge_0);
add_axiom(~len_le, ~n_ge_10);
return;
}
rational hi(1);
for (unsigned i = 2; i < num_char; ++i) {
hi *= ten;
}
// n <= -hi or n >= hi*10 <=> len >= num_chars
// -10*hi < n < 100*hi <=> len <= num_chars
literal n_le_hi = mk_literal(m_autil.mk_le(n, m_autil.mk_numeral(-hi, true)));
literal n_ge_10hi = mk_literal(m_autil.mk_ge(n, m_autil.mk_numeral(ten*hi, true)));
literal n_le_m10hi = mk_literal(m_autil.mk_le(n, m_autil.mk_numeral(-ten*hi, true)));
literal n_ge_100hi = mk_literal(m_autil.mk_ge(n, m_autil.mk_numeral(ten*ten*hi, true)));
add_axiom(~n_le_hi, len_ge);
add_axiom(~n_ge_10hi, len_ge);
add_axiom(n_le_hi, n_ge_10hi, ~len_ge);
add_axiom(n_le_m10hi, n_ge_100hi, len_le);
add_axiom(~n_le_m10hi, ~len_le);
add_axiom(~n_ge_100hi, ~len_le);
add_axiom(mk_literal(m_autil.mk_ge(len, m_autil.mk_int(1))));
}
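
    The literals above tie len(itos(n)) to decimal magnitude bounds on n: len >= k exactly when n >= 10^(k-1) or n <= -10^(k-2), for k >= 2. As a plain reference for the intended character count (the helper name is made up):

        #include <string>

        // Number of characters in the decimal rendering of n, sign included;
        // this is the value the len-literals above pin down.
        static unsigned itos_num_chars(long long n) {
            return static_cast<unsigned>(std::to_string(n).size());   // "7" -> 1, "-25" -> 3
        }
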
@ -2992,8 +3073,17 @@ bool theory_seq::get_value(expr* e, rational& val) const {
context& ctx = get_context();
theory_mi_arith* tha = get_th_arith(ctx, m_autil.get_family_id(), e);
expr_ref _val(m);
if (!tha || !tha->get_value(ctx.get_enode(e), _val)) return false;
return m_autil.is_numeral(_val, val) && val.is_int();
if (!tha) return false;
enode* next = ctx.get_enode(e), *n;
do {
n = next;
if (tha->get_value(n, _val) && m_autil.is_numeral(_val, val) && val.is_int()) {
return true;
}
next = n->get_next();
}
while (next != n);
return false;
}
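
    The revised get_value iterates over the equivalence class of e via get_next() (the enodes of a class are linked in a circular list), accepting the first member that evaluates to an integer numeral. A minimal sketch of such a class traversal with a hypothetical node type; it stops when the walk returns to its starting node:

        // Enodes of one equivalence class form a circular list; starting
        // anywhere and stopping on wrap-around visits each member once.
        struct eq_node {
            eq_node* next;
            bool     has_int_value;
        };

        static bool class_has_int_value(eq_node* start) {
            eq_node* n = start;
            do {
                if (n->has_int_value)
                    return true;    // some member of the class carries a numeral
                n = n->next;
            } while (n != start);
            return false;
        }
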
bool theory_seq::lower_bound(expr* _e, rational& lo) const {

View file

@ -494,6 +494,7 @@ namespace smt {
void add_at_axiom(expr* n);
void add_in_re_axiom(expr* n);
bool add_itos_axiom(expr* n);
void add_itos_length_axiom(expr* n);
literal mk_literal(expr* n);
literal mk_eq_empty(expr* n, bool phase = true);
literal mk_seq_eq(expr* a, expr* b);

View file

@ -21,6 +21,10 @@ Notes:
#include"model_v2_pp.h"
#include"pb2bv_model_converter.h"
pb2bv_model_converter::pb2bv_model_converter(ast_manager & _m) : m(_m) {
}
pb2bv_model_converter::pb2bv_model_converter(ast_manager & _m, obj_map<func_decl, expr*> const & c2bit, bound_manager const & bm):
m(_m) {
obj_map<func_decl, expr*>::iterator it = c2bit.begin();
@ -98,5 +102,16 @@ void pb2bv_model_converter::display(std::ostream & out) {
}
model_converter * pb2bv_model_converter::translate(ast_translation & translator) {
NOT_IMPLEMENTED_YET();
ast_manager & to = translator.to();
pb2bv_model_converter * res = alloc(pb2bv_model_converter, to);
svector<func_decl_pair>::iterator it = m_c2bit.begin();
svector<func_decl_pair>::iterator end = m_c2bit.end();
for (; it != end; it++) {
func_decl * f1 = translator(it->first);
func_decl * f2 = translator(it->second);
res->m_c2bit.push_back(func_decl_pair(f1, f2));
to.inc_ref(f1);
to.inc_ref(f2);
}
return res;
}

View file

@ -28,6 +28,7 @@ class pb2bv_model_converter : public model_converter {
ast_manager & m;
svector<func_decl_pair> m_c2bit;
public:
pb2bv_model_converter(ast_manager & _m);
pb2bv_model_converter(ast_manager & _m, obj_map<func_decl, expr*> const & c2bit, bound_manager const & bm);
virtual ~pb2bv_model_converter();
virtual void operator()(model_ref & md);

View file

@ -395,7 +395,7 @@ void fpa2bv_model_converter::convert(model * bv_mdl, model * float_mdl) {
func_decl * var = it->m_key;
SASSERT(m_fpa_util.is_rm(var->get_range()));
expr * val = it->m_value;
SASSERT(m_fpa_util.is_rm_bvwrap(val));
SASSERT(m_fpa_util.is_bv2rm(val));
expr * bvval = to_app(val)->get_arg(0);
expr_ref fv(m);
fv = convert_bv2rm(bv_mdl, bvval);

View file

@ -25,4 +25,8 @@ class tactic;
tactic * mk_default_tactic(ast_manager & m, params_ref const & p = params_ref());
/*
ADD_TACTIC("default", "default strategy used when no logic is specified.", "mk_default_tactic(m, p)")
*/
#endif

View file

@ -21,4 +21,8 @@ Notes:
tactic * mk_nra_tactic(ast_manager & m, params_ref const & p = params_ref());
/*
ADD_TACTIC("nra", "builtin strategy for solving NRA problems.", "mk_nra_tactic(m, p)")
*/
#endif

View file

@ -239,35 +239,6 @@ void hwf_manager::mul(mpf_rounding_mode rm, hwf const & x, hwf const & y, hwf &
#else
o.value = x.value * y.value;
#endif
#if 0
// On the x86 FPU (x87), we use custom assembly routines because
// the code generated for x*y and x/y suffers from the double
// rounding on underflow problem. The scaling trick is described
// in Roger Golliver: `Efficiently producing default orthogonal IEEE
// double results using extended IEEE hardware', see
// http://www.open-std.org/JTC1/SC22/JSG/docs/m3/docs/jsgn326.pdf
    // CMW: This is not really needed if we use only the SSE2 FPU,
// it shouldn't hurt the performance too much though.
static const int const1 = -DBL_SCALE;
static const int const2 = +DBL_SCALE;
double xv = x.value;
double yv = y.value;
double & ov = o.value;
__asm {
fild const1;
fld xv;
fscale;
fstp st(1);
fmul yv;
fild const2;
fxch st(1);
fscale;
fstp ov;
}
#endif
}
void hwf_manager::div(mpf_rounding_mode rm, hwf const & x, hwf const & y, hwf & o) {
@ -277,28 +248,6 @@ void hwf_manager::div(mpf_rounding_mode rm, hwf const & x, hwf const & y, hwf &
#else
o.value = x.value / y.value;
#endif
#if 0
// see mul(...)
static const int const1 = -DBL_SCALE;
static const int const2 = +DBL_SCALE;
double xv = x.value;
double yv = y.value;
double & ov = o.value;
__asm {
fild const1;
fld xv;
fscale;
fstp st(1);
fdiv yv;
fild const2;
fxch st(1);
fscale;
fstp ov;
}
#endif
}
#ifdef _M_IA64
@ -390,25 +339,6 @@ void hwf_manager::rem(hwf const & x, hwf const & y, hwf & o) {
#else
o.value = remainder(x.value, y.value);
#endif
#if 0
    // Here is an x87 alternative if the above causes problems; this may also be faster.
double xv = x.value;
double yv = y.value;
double & ov = o.value;
// This is from: http://webster.cs.ucr.edu/AoA/DOS/ch14/CH14-4.html#HEADING4-173
__asm {
fld yv
fld xv
L: fprem1
fstsw ax // Get condition bits in AX.
test ah, 100b // See if C2 is set.
jnz L // Repeat if not done yet.
fstp ov // Store remainder away.
fstp st(0) // Pop old y value.
}
#endif
}
void hwf_manager::maximum(hwf const & x, hwf const & y, hwf & o) {

View file

@ -1304,7 +1304,7 @@ void mpf_manager::partial_remainder(mpf & x, mpf const & y, mpf_exp_t const & ex
// 3. Compute Y*Q / Y*QQ*2^{D-N}
bool YQ_sgn = y.sign ^ Q_sgn;
bool YQ_sgn = x.sign;
scoped_mpz YQ_sig(m_mpz_manager);
mpf_exp_t YQ_exp = Q_exp + y.exponent;
m_mpz_manager.mul(y.significand, Q_sig, YQ_sig);
@ -1360,9 +1360,7 @@ void mpf_manager::partial_remainder(mpf & x, mpf const & y, mpf_exp_t const & ex
bool neg = m_mpz_manager.is_neg(X_YQ_sig);
if (neg) m_mpz_manager.neg(X_YQ_sig);
bool X_YQ_sgn = ((!x.sign && !YQ_sgn && neg) ||
(x.sign && YQ_sgn && !neg) ||
(x.sign && !YQ_sgn));
bool X_YQ_sgn = x.sign ^ neg;
// 5. Rounding
if (m_mpz_manager.is_zero(X_YQ_sig))