diff --git a/src/ast/fpa/fpa2bv_converter.cpp b/src/ast/fpa/fpa2bv_converter.cpp
index 3c25d447f..42c79eaf9 100644
--- a/src/ast/fpa/fpa2bv_converter.cpp
+++ b/src/ast/fpa/fpa2bv_converter.cpp
@@ -1122,12 +1122,13 @@ void fpa2bv_converter::mk_rem(sort * s, expr_ref & x, expr_ref & y, expr_ref & r
     a_lz_ext = m_bv_util.mk_zero_extend(2, a_lz);
     b_lz_ext = m_bv_util.mk_zero_extend(2, b_lz);
 
-    expr_ref exp_diff(m), neg_exp_diff(m), exp_diff_is_neg(m), exp_diff_ge_zero(m);
+    expr_ref exp_diff(m), neg_exp_diff(m), exp_diff_is_neg(m), exp_diff_ge_zero(m), exp_diff_is_zero(m);
     exp_diff = m_bv_util.mk_bv_sub(
                     m_bv_util.mk_bv_sub(a_exp_ext, a_lz_ext),
                     m_bv_util.mk_bv_sub(b_exp_ext, b_lz_ext));
     neg_exp_diff = m_bv_util.mk_bv_neg(exp_diff);
     exp_diff_is_neg = m_bv_util.mk_sle(exp_diff, m_bv_util.mk_numeral(0, ebits+2));
+    exp_diff_is_zero = m.mk_eq(exp_diff, m_bv_util.mk_numeral(0, ebits+2));
     exp_diff_ge_zero = m_bv_util.mk_sle(m_bv_util.mk_numeral(0, m_bv_util.get_bv_size(exp_diff)), exp_diff);
 
     dbg_decouple("fpa2bv_rem_exp_diff", exp_diff);
@@ -1135,7 +1136,7 @@ void fpa2bv_converter::mk_rem(sort * s, expr_ref & x, expr_ref & y, expr_ref & r
     // but calculating this via rem = x - y * nearest(x/y) creates huge
     // circuits, too. Lazy instantiation seems the way to go in the long run
     // (using the lazy bit-blaster helps on simple instances).
-    expr_ref a_sig_ext(m), b_sig_ext(m), lshift(m), rshift(m), shifted(m), huge_rem(m);
+    expr_ref a_sig_ext(m), b_sig_ext(m), lshift(m), rshift(m), shifted(m), huge_rem(m), huge_div(m), huge_div_is_even(m);
     a_sig_ext = m_bv_util.mk_concat(m_bv_util.mk_zero_extend(max_exp_diff_ui, a_sig), m_bv_util.mk_numeral(0, 3));
     b_sig_ext = m_bv_util.mk_concat(m_bv_util.mk_zero_extend(max_exp_diff_ui, b_sig), m_bv_util.mk_numeral(0, 3));
     lshift = m_bv_util.mk_zero_extend(max_exp_diff_ui + sbits - (ebits+2) + 3, exp_diff);
@@ -1143,57 +1144,47 @@ void fpa2bv_converter::mk_rem(sort * s, expr_ref & x, expr_ref & y, expr_ref & r
     shifted = m.mk_ite(exp_diff_is_neg, m_bv_util.mk_bv_ashr(a_sig_ext, rshift),
                                         m_bv_util.mk_bv_shl(a_sig_ext, lshift));
     huge_rem = m_bv_util.mk_bv_urem(shifted, b_sig_ext);
+    huge_div = m.mk_app(m_bv_util.get_fid(), OP_BUDIV_I, shifted, b_sig_ext);
+    huge_div_is_even = m.mk_eq(m_bv_util.mk_extract(0, 0, huge_div), m_bv_util.mk_numeral(0, 1));
+    dbg_decouple("fpa2bv_rem_exp_diff_is_neg", exp_diff_is_neg);
+    dbg_decouple("fpa2bv_rem_lshift", lshift);
+    dbg_decouple("fpa2bv_rem_rshift", rshift);
     dbg_decouple("fpa2bv_rem_huge_rem", huge_rem);
+    dbg_decouple("fpa2bv_rem_huge_div", huge_div);
 
-    expr_ref rndd_sgn(m), rndd_exp(m), rndd_sig(m), rne_bv(m), rndd(m);
+    expr_ref rndd_sgn(m), rndd_exp(m), rndd_sig(m), rne_bv(m), rndd_sig_lz(m);
     rndd_sgn = a_sgn;
     rndd_exp = m_bv_util.mk_bv_sub(b_exp_ext, b_lz_ext);
     rndd_sig = m_bv_util.mk_extract(sbits+3, 0, huge_rem);
     rne_bv = m_bv_util.mk_numeral(BV_RM_TIES_TO_EVEN, 3);
-    round(s, rne_bv, rndd_sgn, rndd_sig, rndd_exp, rndd);
+    mk_leading_zeros(rndd_sig, ebits+2, rndd_sig_lz);
+    dbg_decouple("fpa2bv_rem_rndd_sig", rndd_sig);
+    dbg_decouple("fpa2bv_rem_rndd_sig_lz", rndd_sig_lz);
 
-    expr_ref y_half(m), ny_half(m), zero_e(m), two_e(m);
-    expr_ref y_half_is_zero(m), y_half_is_nz(m);
-    expr_ref r_ge_y_half(m), r_gt_ny_half(m), r_le_y_half(m), r_lt_ny_half(m);
-    expr_ref r_ge_zero(m), r_le_zero(m);
-    expr_ref rounded_sub_y(m), rounded_add_y(m);
-    mpf zero, two;
-    m_mpf_manager.set(two, ebits, sbits, 2);
-    m_mpf_manager.set(zero, ebits, sbits, 0);
-    mk_numeral(s, two, two_e);
-    mk_numeral(s, zero, zero_e);
-    mk_div(s, rne_bv, y, two_e, y_half);
-    mk_neg(s, y_half, ny_half);
-    mk_is_zero(y_half, y_half_is_zero);
-    y_half_is_nz = m.mk_not(y_half_is_zero);
+    SASSERT(m_bv_util.get_bv_size(rndd_exp) == ebits+2);
+    SASSERT(m_bv_util.get_bv_size(y_exp_m1) == ebits);
 
-    mk_float_ge(s, rndd, y_half, r_ge_y_half);
-    mk_float_gt(s, rndd, ny_half, r_gt_ny_half);
-    mk_float_le(s, rndd, y_half, r_le_y_half);
-    mk_float_lt(s, rndd, ny_half, r_lt_ny_half);
+    expr_ref rndd_exp_eq_y_exp_m1(m), y_sig_le_rndd_sig(m);
+    rndd_exp_eq_y_exp_m1 = m.mk_eq(rndd_sig_lz, m_bv_util.mk_numeral(2, ebits+2));
+    dbg_decouple("fpa2bv_rem_rndd_exp_eq_y_exp_m1", rndd_exp_eq_y_exp_m1);
 
-    mk_sub(s, rne_bv, rndd, y, rounded_sub_y);
-    mk_add(s, rne_bv, rndd, y, rounded_add_y);
+    expr_ref y_sig_ext(m), y_sig_eq_rndd_sig(m);
+    y_sig_ext = m_bv_util.mk_concat(m_bv_util.mk_zero_extend(2, b_sig), m_bv_util.mk_numeral(0, 2));
+    y_sig_le_rndd_sig = m_bv_util.mk_sle(y_sig_ext, rndd_sig);
+    y_sig_eq_rndd_sig = m.mk_eq(y_sig_ext, rndd_sig);
+    dbg_decouple("fpa2bv_rem_y_sig_ext", y_sig_ext);
+    dbg_decouple("fpa2bv_rem_y_sig_le_rndd_sig", y_sig_le_rndd_sig);
+    dbg_decouple("fpa2bv_rem_y_sig_eq_rndd_sig", y_sig_eq_rndd_sig);
 
-    expr_ref sub_cnd(m), add_cnd(m);
-    sub_cnd = m.mk_and(exp_diff_ge_zero,
-                m.mk_and(y_half_is_nz,
-                    m.mk_or(m.mk_and(y_is_pos, r_ge_y_half),
-                            m.mk_and(y_is_neg, r_le_y_half))));
-    add_cnd = m.mk_and(exp_diff_ge_zero,
-                m.mk_and(y_half_is_nz,
-                    m.mk_or(m.mk_and(y_is_pos, r_lt_ny_half),
-                            m.mk_and(y_is_neg, r_gt_ny_half))));
-
-    dbg_decouple("fpa2bv_rem_y_half", y_half);
+    expr_ref sub_cnd(m);
+    sub_cnd = m.mk_or(m.mk_and(rndd_exp_eq_y_exp_m1, y_sig_le_rndd_sig, m.mk_not(y_sig_eq_rndd_sig)),
+                      m.mk_and(rndd_exp_eq_y_exp_m1, y_sig_eq_rndd_sig, m.mk_not(huge_div_is_even)));
     dbg_decouple("fpa2bv_rem_sub_cnd", sub_cnd);
-    dbg_decouple("fpa2bv_rem_add_cnd", add_cnd);
-    dbg_decouple("fpa2bv_rem_rounded_add_y", rounded_add_y);
-    dbg_decouple("fpa2bv_rem_rounded_sub_y", rounded_sub_y);
-    dbg_decouple("fpa2bv_rem_rndd", rndd);
 
-    mk_ite(add_cnd, rounded_add_y, rndd, v7);
-    mk_ite(sub_cnd, rounded_sub_y, v7, v7);
+    expr_ref rndd(m), rounded_sub_y(m), rounded_add_y(m);
+    round(s, rne_bv, rndd_sgn, rndd_sig, rndd_exp, rndd);
+    mk_sub(s, rne_bv, rndd, y, rounded_sub_y);
+    mk_ite(sub_cnd, rounded_sub_y, rndd, v7);
 
     // And finally, we tie them together.
     mk_ite(c6, v6, v7, result);
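
For readers unfamiliar with the construction: the new sub_cnd encodes the final
correction step of IEEE-754 remainder, rem = x - y * n with n = x/y rounded to
nearest, ties to even. After aligning the significands, huge_rem is the
truncated fixed-point remainder and the low bit of huge_div gives the parity of
the truncated quotient; one extra subtraction of y is needed exactly when the
remainder exceeds half of y, or equals half of y while the quotient is odd. The
bit-vector encoding splits the magnitude test into the leading-zero test
rndd_exp_eq_y_exp_m1 plus the significand comparisons y_sig_le_rndd_sig /
y_sig_eq_rndd_sig. The following standalone C++ sketch (illustrative only, not
part of the patch; remainder_rne is a hypothetical name) collapses that into a
single comparison and shows the same rule on plain integers:

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>

    // IEEE-style remainder of a/b: a - b * n, with n = a/b rounded to nearest,
    // ties to even. Assumes b != 0 and |a|, |b| fit comfortably in int64_t.
    int64_t remainder_rne(int64_t a, int64_t b) {
        uint64_t ua = std::abs(a), ub = std::abs(b);
        uint64_t q = ua / ub;   // corresponds to huge_div
        uint64_t r = ua % ub;   // corresponds to huge_rem
        // sub_cnd: r > b/2, or r == b/2 with an odd truncated quotient
        // (the huge_div_is_even test in the patch).
        bool tie = (2 * r == ub);
        bool sub = (2 * r > ub) || (tie && (q & 1));
        int64_t rr = sub ? (int64_t)r - (int64_t)ub : (int64_t)r;
        return a < 0 ? -rr : rr;  // result takes the sign of a (rndd_sgn = a_sgn)
    }

    int main() {
        // remainder(5, 2) = 1 (2.5 ties to even 2); remainder(7, 2) = -1 (3.5 -> 4)
        std::cout << remainder_rne(5, 2) << ' ' << remainder_rne(7, 2) << '\n';
    }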