From f6f16c1e9269a65994e728601821a78f22ca318a Mon Sep 17 00:00:00 2001 From: Aleksandar Zeljic Date: Thu, 28 May 2015 14:31:34 +0200 Subject: [PATCH] Added smallFloats files. --- src/sat/sat_solver.h | 2 +- src/tactic/fpa/const_intro_rewriter.h | 150 ++ src/tactic/fpa/fpa2bv_approx_tactic.cpp | 1257 +++++++++++++++ src/tactic/fpa/fpa2bv_approx_tactic.h | 33 + src/tactic/fpa/fpa2bv_converter_prec.cpp | 1783 ++++++++++++++++++++++ src/tactic/fpa/fpa2bv_converter_prec.h | 162 ++ src/tactic/fpa/fpa2bv_rewriter_prec.h | 229 +++ 7 files changed, 3615 insertions(+), 1 deletion(-) create mode 100644 src/tactic/fpa/const_intro_rewriter.h create mode 100644 src/tactic/fpa/fpa2bv_approx_tactic.cpp create mode 100644 src/tactic/fpa/fpa2bv_approx_tactic.h create mode 100644 src/tactic/fpa/fpa2bv_converter_prec.cpp create mode 100644 src/tactic/fpa/fpa2bv_converter_prec.h create mode 100644 src/tactic/fpa/fpa2bv_rewriter_prec.h diff --git a/src/sat/sat_solver.h b/src/sat/sat_solver.h index 357b79f49..e3628823f 100644 --- a/src/sat/sat_solver.h +++ b/src/sat/sat_solver.h @@ -210,6 +210,7 @@ namespace sat { public: bool inconsistent() const { return m_inconsistent; } unsigned num_vars() const { return m_level.size(); } + unsigned num_clauses() const; bool is_external(bool_var v) const { return m_external[v] != 0; } bool was_eliminated(bool_var v) const { return m_eliminated[v] != 0; } unsigned scope_lvl() const { return m_scope_lvl; } @@ -447,7 +448,6 @@ namespace sat { protected: void display_binary(std::ostream & out) const; void display_units(std::ostream & out) const; - unsigned num_clauses() const; bool is_unit(clause const & c) const; bool is_empty(clause const & c) const; bool check_missed_propagation(clause_vector const & cs) const; diff --git a/src/tactic/fpa/const_intro_rewriter.h b/src/tactic/fpa/const_intro_rewriter.h new file mode 100644 index 000000000..5c4675fbb --- /dev/null +++ b/src/tactic/fpa/const_intro_rewriter.h @@ -0,0 +1,150 @@ +/*++ +Copyright (c) 2012 Microsoft Corporation + +Module Name: + + const_intro_rewriter.h + +Abstract: + + Rewriter for converting FPA to BV + +Author: + + Christoph (cwinter) 2012-02-09 + +Notes: + +--*/ + +#ifndef _CONST_INTRO_REWRITER_H_ +#define _CONST_INTRO_REWRITER_H_ + +#include"cooperate.h" +#include"bv_decl_plugin.h" +#include"tactic_exception.h" +#include"fpa2bv_converter_prec.h" + +struct const_intro_rewriter_cfg : public default_rewriter_cfg { + ast_manager & m_manager; + + expr * m_exp; + func_decl_ref_vector m_introduced_consts; + obj_map m_const2term_map; + + unsigned long long m_max_memory; + unsigned m_max_steps; + + fpa_util m_float_util; + + ast_manager & m() const { return m_manager; } + + const_intro_rewriter_cfg(ast_manager & m, params_ref const & p): + m_manager(m), + m_introduced_consts(m), + m_float_util(m) { + updt_params(p); + // We need to make sure that the mananger has the BV plugin loaded. 
+ symbol s_bv("bv"); + if (!m_manager.has_plugin(s_bv)) + m_manager.register_plugin(s_bv, alloc(bv_decl_plugin)); + } + + ~const_intro_rewriter_cfg() { + for (obj_map::iterator it = m_const2term_map.begin(); + it != m_const2term_map.end(); + it++) + { + m().dec_ref(it->m_key); + m().dec_ref(it->m_value); + } + } + + void cleanup_buffers() { + } + + void updt_params(params_ref const & p) { + m_max_memory = megabytes_to_bytes(p.get_uint("max_memory", UINT_MAX)); + m_max_steps = p.get_uint("max_steps", UINT_MAX); + } + + bool max_steps_exceeded(unsigned num_steps) const { + cooperate("fpa2bv"); + if (memory::get_allocation_size() > m_max_memory) + throw tactic_exception(TACTIC_MAX_MEMORY_MSG); + return num_steps > m_max_steps; + } + + br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) { + TRACE("fpa2bv_rw", tout << "APP: " << f->get_name() << std::endl; ); + + if (num == 0 && f->get_family_id() == null_family_id && m_float_util.is_float(f->get_range())) { + app * f_cnst = m_manager.mk_const(f); + if (!m_introduced_consts.contains(f)) + m_introduced_consts.push_back(f); + result = f_cnst; + return BR_DONE; + } + + if (f->get_family_id() == m_float_util.get_family_id()) { + switch (f->get_decl_kind()) { + case OP_FPA_ADD: + case OP_FPA_SUB: + case OP_FPA_NEG: + case OP_FPA_MUL: + case OP_FPA_DIV: + case OP_FPA_REM: + case OP_FPA_ABS: + case OP_FPA_MIN: + case OP_FPA_MAX: + case OP_FPA_FMA: + case OP_FPA_SQRT: + case OP_FPA_TO_FP: + case OP_FPA_ROUND_TO_INTEGRAL: + { + app * f_app = m_manager.mk_app(f, num, args); + result = m_manager.mk_fresh_const(NULL, f->get_range()); + func_decl * fd = to_app(result)->get_decl(); + m_introduced_consts.push_back(fd); + m_const2term_map.insert_if_not_there(fd, f_app); + m().inc_ref(fd); + m().inc_ref(f_app); + return BR_DONE; + } + default: + return BR_FAILED; + } + } + + return BR_FAILED; + } + + bool reduce_quantifier(quantifier * old_q, + expr * new_body, + expr * const * new_patterns, + expr * const * new_no_patterns, + expr_ref & result, + proof_ref & result_pr) { + return false; + } + + bool reduce_var(var * t, expr_ref & result, proof_ref & result_pr) { + return false; + } + + bool pre_visit(expr * t){ + return true; + } +}; + +template class rewriter_tpl; + +struct const_intro_rewriter : public rewriter_tpl { + const_intro_rewriter_cfg m_cfg; + const_intro_rewriter(ast_manager & m, params_ref const & p): + rewriter_tpl(m, m.proofs_enabled(), m_cfg), + m_cfg(m, p) { + } +}; + +#endif diff --git a/src/tactic/fpa/fpa2bv_approx_tactic.cpp b/src/tactic/fpa/fpa2bv_approx_tactic.cpp new file mode 100644 index 000000000..8a86ec0e8 --- /dev/null +++ b/src/tactic/fpa/fpa2bv_approx_tactic.cpp @@ -0,0 +1,1257 @@ +/*++ + Copyright (c) 2012 Microsoft Corporation + + Module Name: + + fpa2bv_approx_tactic.cpp + + Abstract: + + Tactic that converts floating points to bit-vectors lazily + + Author: + + Aleksander Zeljic 2012-11-15 + + Notes: + + --*/ +#include"tactical.h" +#include"cooperate.h" +#include"ref_util.h" + +#include"th_rewriter.h" +#include"bit_blaster_rewriter.h" +#include"bit_blaster_model_converter.h" +#include"model_v2_pp.h" +#include"goal2sat.h" +#include"sat_solver.h" +#include"fpa_decl_plugin.h" +#include"fpa2bv_converter_prec.h" +#include"fpa2bv_rewriter_prec.h" +#include"fpa2bv_approx_tactic.h" +#include"const_intro_rewriter.h" +#include"ctx_simplify_tactic.h" +#include"filter_model_converter.h" +#include +#include +#include + +#include + +#define K_MIN 10 +#define K_PERCENTAGE 0.3 
+#define PREC_INCREMENT 20 +#define ERR_OP 0 // + +struct pair +{ + expr * exp; + double quotient;// mpf * +}; + +bool isinfinite(double x) { +#ifdef _WIN32 + int c = _fpclass(x); + return c == _FPCLASS_PINF || c == _FPCLASS_NINF; +#else + return fpclassify(x) == FP_INFINITE; +#endif +} + +class fpa2bv_approx_tactic: public tactic { + struct imp { + ast_manager & m; + goal2sat m_goal2sat; + sat2goal m_sat2goal; + params_ref m_params; + unsigned m_num_steps; + bool m_proofs_enabled; + bool m_produce_models; + bool m_produce_unsat_cores; + bool m_cancel; + + fpa_approximation_mode m_mode; + ast_manager * m_temp_manager; + model_ref m_fpa_model; + fpa_util m_float_util; + + imp(ast_manager & _m, params_ref const & p, fpa_approximation_mode mode) : + m(_m), + m_params(p), + m_proofs_enabled(false), + m_produce_models(false), + m_produce_unsat_cores(false), + m_cancel(false), + m_mode(mode), + m_temp_manager(0), + m_float_util(_m) { + } + + void updt_params(params_ref const & p) { + m_params = p; + } + + void set_cancel(bool f) { + //If f is true stop everything + m_cancel = f; + } + + void init_precision_mapping(func_decl_ref_vector const & cnsts, + obj_map & map, + obj_map & const2term_map) { + for (unsigned i = 0; i < cnsts.size(); i++) + { + if (const2term_map.contains(cnsts[i]) || m_mode == FPAA_SMALL_FLOATS) + map.insert_if_not_there(cnsts.get(i), 0); + else + map.insert_if_not_there(cnsts.get(i), MAX_PRECISION); + } + } + + bool proof_guided_refinement( + goal_ref const & g, + func_decl_ref_vector const & cnsts, + obj_map & cnst2prec_map, + obj_map & new_map) + { + // We have no model. Let's just increase precision of everything. + bool res = false; + for (unsigned i = 0; i < cnsts.size(); i++) + { + unsigned old = cnst2prec_map.find(cnsts.get(i)); + unsigned n = old + PREC_INCREMENT; + if (old >= MAX_PRECISION) n = MAX_PRECISION; + else { if (n > MAX_PRECISION) n = MAX_PRECISION; res = true; } + new_map.insert(cnsts.get(i), n); + } + return res; + } + + void boolean_comparison_of_models(goal_ref g, model_ref const & mdl, model_ref const & full_mdl, obj_map & cnst2term_map, obj_map& count) + { + std::queue to_traverse; + app * cur; + int cur_cnt; + + expr_ref mdl_eval(m), full_eval(m); + + for (unsigned i=0; i < g->size(); i++){ + mdl->eval(g->form(i),mdl_eval,true); + full_mdl->eval(g->form(i),full_eval,true); + + //Push only if the full model evaluates to false, or if the models differ? 
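+            // As implemented, an assertion is queued for traversal whenever the
+            // patched full model does not satisfy it.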
+ if (!m.is_true(full_eval)) // m.is_true(full_eval) != m.is_true(mdl_eval) + to_traverse.push(g->form(i)); + } + + while (to_traverse.size() > 0) { + cur = to_app(to_traverse.front()); + mpf_rounding_mode rm; +#ifdef Z3DEBUG + std::cout<<"Analyze - traversing: "<get_decl())) + count.insert(cur,1); + + if(cnst2term_map.contains(cur->get_decl())) + to_traverse.push(cnst2term_map.find(cur->get_decl())); + + for(unsigned i=0;iget_num_args();i++) { + if(m_float_util.is_rm(cur->get_arg(i)) || m_float_util.is_numeral(cur->get_arg(i))) + continue; + to_traverse.push(cur->get_arg(i)); + } + } + else { //Comparing boolean values from the model and the expanded model + mdl->eval(cur,mdl_eval,true); + full_mdl->eval(cur,full_eval,true); + + + if (m.is_true(full_eval) != m.is_true(mdl_eval)) { + //queue arguments + for(unsigned i=0; i < cur->get_num_args(); i++) + to_traverse.push(cur->get_arg(i)); + } + } + to_traverse.pop(); + } +#ifdef Z3DEBUG + std::cout<<"Expression count"<::iterator it = count.begin(); + it!= count.end(); + it++) { + std::cout<m_key,m)<<":"<m_value<size();i++) { + eq = to_app(g->form(i)); + + if (eq->get_family_id() == m.get_basic_family_id() && + eq->get_decl_kind() == OP_EQ){ + //eq is in fact an equality + app * lhs = to_app(eq->get_arg(0)); + app * rhs = to_app(eq->get_arg(1)); + expr * lhs_e,*rhs_e,*exp, *exp_e; + app *other = NULL; + + + if(lhs->get_num_args()==0 && + rhs ->get_num_args()==0){ + //over constants + lhs_e = full_mdl->get_const_interp(lhs->get_decl()); + rhs_e = full_mdl->get_const_interp(rhs->get_decl()); + + // != would work as well, to make sure they are not both NULL, + //and could simplify later checks + if(lhs_e != rhs_e) { //SASSERT(lhs_e || rhs_e); + //and one is registered in the full model while the other is not + if(!lhs_e){// && rhs_e){ + other = lhs; + exp_e = rhs_e; + exp = rhs; + } + else { // if(!rhs_e && lhs_e){ + other = rhs; + exp_e = lhs_e; + exp = lhs; + } + full_mdl->register_decl(other->get_decl(),exp_e); + +#ifdef Z3DEBUG + std::cout< & cnst2term_map, + obj_map & precise_op, + obj_map & actual_value, + obj_map & err_est, + mpf_rounding_mode & rm, + bool & precise_children, + bool & seen_all_children, + bool & children_have_finite_err, + mpf * arg_val, + mpf * est_arg_val + //expr_ref * arg_e + ){ + + expr_ref arg_e[] = { expr_ref(m), expr_ref(m), expr_ref(m), expr_ref(m) }; + unsigned i=0; + //Set rounding mode + if (rhs->get_num_args() > 0 && m_float_util.is_rm(rhs->get_arg(0))) + i = 1; + //Collect argument values + for (; i < rhs->get_num_args(); i++) { + expr * arg = rhs->get_arg(i); + + if (is_app(arg) && to_app(arg)->get_num_args() == 0) { + if (precise_op.contains(arg)) { + precise_children &= precise_op.find(arg); + } + else if (!cnst2term_map.contains(to_app(arg)->get_decl())) { + /* that's okay */ + } + else { +#ifdef Z3DEBUG + std::cout << "Not seen all children of " << mk_ismt2_pp(rhs, m) << + " (spec. 
" << mk_ismt2_pp(arg, m) << ")" << std::endl; +#endif + precise_children = false; + seen_all_children = false; + break; + } + } + + // Value from small model + mdl->eval(arg, arg_e[i],true); + m_float_util.is_numeral(arg_e[i], arg_val[i]); + + if( children_have_finite_err && + err_est.contains(arg) && + isinfinite(err_est.find(arg))) + children_have_finite_err=false; + + if (actual_value.contains(arg)) + mpf_mngr.set(est_arg_val[i], *actual_value.find(arg)); + else if (seen_all_children && is_app(arg) && to_app(arg)->get_num_args()==0) { + //We have seen all children so if it is a constant and not in actual_value then + //it is an input variable and its est_val is the same as actual value + mpf * tmp = alloc(mpf); + mpf_mngr.set(*tmp, arg_val[i]); + actual_value.insert(arg, tmp); + mpf_mngr.set(est_arg_val[i], *tmp); + } + else + std::cout << "Estimated value missing: " << mk_ismt2_pp(arg,m) << std::endl; + } + + } + + + void full_semantics_eval( + app * rhs, + mpf_manager & mpf_mngr, + mpf_rounding_mode & rm, + mpf * arg_val, + mpf * est_arg_val, + mpf & rhs_value, + mpf & est_rhs_value){ + + switch (rhs->get_decl()->get_decl_kind()) { + case OP_FPA_ADD: + mpf_mngr.add(rm, arg_val[1], arg_val[2], rhs_value); + mpf_mngr.add(rm, est_arg_val[1], est_arg_val[2], est_rhs_value); + break; + case OP_FPA_SUB: + mpf_mngr.sub(rm, arg_val[1], arg_val[2], rhs_value); + mpf_mngr.sub(rm, est_arg_val[1], est_arg_val[2], est_rhs_value); + break; + case OP_FPA_NEG: + mpf_mngr.neg(arg_val[0], rhs_value); + mpf_mngr.neg(est_arg_val[0], est_rhs_value);//Does it even make sense to look at this? + break; + case OP_FPA_MUL: + mpf_mngr.mul(rm, arg_val[1], arg_val[2], rhs_value); + mpf_mngr.mul(rm, est_arg_val[1], est_arg_val[2], est_rhs_value); + break; + case OP_FPA_DIV: + mpf_mngr.div(rm, arg_val[1], arg_val[2], rhs_value); + mpf_mngr.div(rm, est_arg_val[1], est_arg_val[2], est_rhs_value); + break; + case OP_FPA_REM: + mpf_mngr.rem(arg_val[0], arg_val[1], rhs_value); + mpf_mngr.rem(est_arg_val[0], est_arg_val[1], est_rhs_value); + break; + case OP_FPA_FMA: + mpf_mngr.fused_mul_add(rm, arg_val[1], arg_val[2], arg_val[3], rhs_value); + mpf_mngr.fused_mul_add(rm, est_arg_val[1], est_arg_val[2], est_arg_val[3], est_rhs_value); + break; + case OP_FPA_SQRT: + mpf_mngr.sqrt(rm, arg_val[1], rhs_value); + mpf_mngr.sqrt(rm, est_arg_val[1], est_rhs_value); + break; + case OP_FPA_TO_FP: + { + unsigned ebits = rhs->get_decl()->get_parameter(0).get_int(); + unsigned sbits = rhs->get_decl()->get_parameter(1).get_int(); + mpf_mngr.set(rhs_value, ebits, sbits, rm, arg_val[1]); + mpf_mngr.set(est_rhs_value, ebits, sbits, rm, est_arg_val[1]); + break; + } + default: + NOT_IMPLEMENTED_YET(); + break; + } + + } + + void evaluate_constant( + app * rhs, + model_ref const & mdl, + mpf_manager & mpf_mngr, + obj_map & actual_value, + mpf & rhs_value, + mpf & est_rhs_value){ + + expr_ref exp(m); + mdl->eval(rhs, exp, true); + m_float_util.is_numeral(exp, rhs_value); //OLD:is_value + + if (actual_value.contains(rhs)) + mpf_mngr.set(est_rhs_value, *actual_value.find(rhs)); + else { + mpf * tmp = alloc(mpf); + mpf_mngr.set(*tmp, rhs_value); + actual_value.insert(rhs, tmp); + mpf_mngr.set(est_rhs_value, rhs_value); + } + } + + void calculate_error( + expr_ref & lhs, + mpf_manager & mpf_mngr, + obj_map & precise_op, + obj_map & err_est, + mpf & lhs_value, + mpf & est_rhs_value, + bool children_have_finite_err){ + mpf err, rel_err; + if (!mpf_mngr.eq(lhs_value, est_rhs_value) && + !(mpf_mngr.is_nan(lhs_value) && 
mpf_mngr.is_nan(est_rhs_value))) { +#ifdef Z3DEBUG + std::cout << "Increasing precision of " << mk_ismt2_pp(lhs, m) << + " because " << mk_ismt2_pp(lhs, m) << " != " << + mpf_mngr.to_string(est_rhs_value) << std::endl; +#endif + //TODO: smarter adjustment to be implemented + precise_op.insert(lhs, false); + if (mpf_mngr.is_regular(lhs_value) && mpf_mngr.is_regular(est_rhs_value)) { + mpf_mngr.sub(MPF_ROUND_TOWARD_ZERO, est_rhs_value, lhs_value, err); + mpf_mngr.div(MPF_ROUND_TOWARD_ZERO, err, lhs_value, rel_err); + mpf_mngr.abs(rel_err); + } + else// One of the two is a special value; in this case the relative error is +INF + mpf_mngr.mk_pinf(11, 53, rel_err); + + if(children_have_finite_err) + err_est.insert(lhs, mpf_mngr.to_double(rel_err)); + +#ifdef Z3DEBUG + std::cout << "Error estimate: "<size(); j++) { + mdl->eval(g->form(j), res, true); + if (!m.is_true(res)) { + std::cout << "Failed: " << mk_ismt2_pp(g->form(j), m) << std::endl; + std::cout << "Evaluates to: " << mk_ismt2_pp(res, m) << std::endl; + is_model=false; + } + } + return is_model; + } + void evaluate_and_patch( + func_decl_ref_vector const & cnsts, + model_ref const & mdl, + model_ref & full_mdl, + goal_ref const & g, + obj_map & cnst2term_map, + obj_map & err_est) { + + mpf_manager & mpf_mngr = m_float_util.fm(); + expr_ref lhs(m), lhs_eval(m); + app * rhs; + mpf arg_val[4]; //First argument can be rounding mode + mpf est_arg_val[4]; + mpf lhs_value, rhs_value, est_rhs_value; + mpf_rounding_mode rm; + + mpf err, rel_err; + + + + obj_map precise_op; + obj_map actual_value; + while (precise_op.size() != cnst2term_map.size()) + for(unsigned i=0;ieval(lhs, lhs_eval, true); + + if (m_float_util.is_numeral(lhs_eval, lhs_value)) {//OLD:is_value + bool precise_children = true; + bool seen_all_children = true; + bool children_have_finite_err = true; + + obtain_values(rhs, mdl, full_mdl,mpf_mngr,cnst2term_map,precise_op,actual_value, + err_est, rm, precise_children, seen_all_children, children_have_finite_err, arg_val, est_arg_val ); + + + if (seen_all_children) {//If some arguments are not evaluated yet, skip + if (rhs->get_num_args() == 0) + evaluate_constant(rhs,mdl,mpf_mngr,actual_value, rhs_value, est_rhs_value); + else + full_semantics_eval(rhs,mpf_mngr,rm,arg_val,est_arg_val, rhs_value, est_rhs_value); + + full_mdl->register_decl((to_app(lhs))->get_decl(), m_float_util.mk_value(est_rhs_value)); +#ifdef Z3DEBUG + std::cout << "Assigning " << mk_ismt2_pp(lhs, m) << + " value " << mpf_mngr.to_string(est_rhs_value) << std::endl + << "Values of " << mk_ismt2_pp(lhs, m) << std::endl + << "Precise children: " << ((precise_children) ? "True" : "False") << std::endl + << "Lhs: " << mk_ismt2_pp(lhs_eval, m) << std::endl + << "Model: " << mpf_mngr.to_string(rhs_value) << std::endl + << "Estimate: " << mpf_mngr.to_string(est_rhs_value) << std::endl; +#endif + + calculate_error(lhs,mpf_mngr,precise_op,err_est,lhs_value,est_rhs_value,children_have_finite_err); + + if (!actual_value.contains(lhs)) { + mpf * tmp = alloc(mpf); + mpf_mngr.set(*tmp, est_rhs_value); + actual_value.insert(lhs, tmp); + } + + if (!precise_children && !precise_op.contains(lhs)) { + std::cout << mk_ismt2_pp(lhs, m) << " is imprecise because some children are imprecise." 
<< std::endl; + precise_op.insert(lhs, false); + } + } + } + } + + for (obj_map::iterator it = actual_value.begin(); + it != actual_value.end(); + it++) + mpf_mngr.del(*it->m_value); + + mpf_mngr.del(err); + mpf_mngr.del(rel_err); + mpf_mngr.del(lhs_value); + mpf_mngr.del(rhs_value); + mpf_mngr.del(est_rhs_value); + + for (unsigned i = 0; i < 4; i++) { + mpf_mngr.del(arg_val[i]); + mpf_mngr.del(est_arg_val[i]); + } + } + + bool precise_model_reconstruction( + model_ref const & mdl, + model_ref & full_mdl, + goal_ref const & g, + obj_map & err_est,//mpf* + func_decl_ref_vector const & cnsts, + obj_map & cnst2term_map) { +#ifdef Z3DEBUG + std::cout << "Attempting to patch small-float model" << std::endl; +#endif + expr_ref res(m); + bool is_model=true; + + //Evaluation of the model using full fpa semantics and construction of the full model + evaluate_and_patch(cnsts, mdl, full_mdl, g, cnst2term_map, err_est); + +#ifdef Z3DEBUG + std::cout<::iterator it = err_est.begin(); + it!= err_est.end(); it++) { + std::cout<m_key,m)<<":"<m_value<get_num_constants(); j++) { + if (!cnst2term_map.contains(mdl->get_constant(j)) + && !full_mdl->get_const_interp(mdl->get_constant(j))) { + mdl->eval(mdl->get_constant(j), res); + full_mdl->register_decl(mdl->get_constant(j), res); + } + } + + //Evaluate the full model + is_model = evaluate_model(g,full_mdl); + + return is_model; + } + + void calculate_relative_error( + obj_map & err_est, + obj_map & expr_count, + obj_map & err_ratio_map) { + unsigned num_args=0; + expr_ref exp(m); + double out_err,cur,err_ratio, avg_err; + + //AZ: Currently ignoring the expr_count, since it was blocking consideration of some expressions + for (obj_map::iterator it = err_est.begin(); + it != err_est.end(); + it++) { + // if any ancestor node has an error current node will be in expr_count. + /*if (!expr_count.contains(it->m_key)) + continue;*/ + + exp = it->m_key; + out_err = it->m_value; + num_args = to_app(exp)->get_num_args(); + + // Calculate average error of input params + avg_err = 0.0; + if (num_args > 0) { + for (unsigned i=0; iget_arg(i); + if (err_est.contains(arg)) { + cur = err_est.find(arg); + avg_err = avg_err + cur; + } + } + avg_err = avg_err/num_args; + } + // Relative error when input error exists, otherwise just output error + err_ratio = fabs((avg_err != (double) 0)? 
out_err / avg_err : out_err); + + if(expr_count.contains(exp)) { + if(ERR_OP) + err_ratio *= 1 + expr_count.find(exp); + else + err_ratio += expr_count.find(exp); + } + err_ratio_map.insert(exp, err_ratio); + } + + TRACE("fpa2bv_approx", + tout << "ERROR RATIO MAP: " << std::endl; + for (obj_map::iterator it = err_ratio_map.begin();// mpf* + it != err_ratio_map.end(); + it++) + tout << mk_ismt2_pp(it->m_key, m) << ": " <m_value<< std::endl; ); + + +#ifdef Z3DEBUG + std::cout<<"Error ratio:"<::iterator it = err_ratio_map.begin();//mpf* + it != err_ratio_map.end(); + it++) + std::cout<< mk_ismt2_pp(it->m_key, m) << ": " << it->m_value<< std::endl; + std::cout.flush(); +#endif + + } + + + void rank_terms(obj_map & err_ratio_map, std::list & ranked_terms) + { + unsigned kth = (unsigned)(err_ratio_map.size()*K_PERCENTAGE); + if (kth<10) kth=K_MIN; + SASSERT(!err_ratio_map.empty()); + + //Insertion sort the error ratios, keeping only the k highest elements + obj_map::iterator it = err_ratio_map.begin(); + struct pair * p = new struct pair(); + p->exp=it->m_key; + p->quotient=it->m_value; + ranked_terms.push_front(p); + + for (it++; it != err_ratio_map.end(); it++) { + if (ranked_terms.size()m_value >= ranked_terms.back()->quotient) { + std::list::iterator pos = ranked_terms.begin(); + while (pos!=ranked_terms.end() && it->m_value <= ranked_terms.back()->quotient) + pos++; + struct pair * p = new struct pair(); + p->exp=it->m_key; + p->quotient=it->m_value; + ranked_terms.insert(pos, p); + if (ranked_terms.size() > kth) { + delete ranked_terms.back(); + ranked_terms.pop_back(); + } + } + } + } + + void increase_precision( + std::list & ranked_terms, + func_decl_ref_vector const & cnsts, + obj_map & cnst2prec_map, + obj_map & cnst2term_map, + obj_map & new_map){ + + //Refine chosen terms and find the any input 'variables' which are + //its immediate arguments and refine them as well +#ifdef Z3DEBUG + std::cout<<"Increasing precision:"<::iterator itp = ranked_terms.begin(); + itp != ranked_terms.end(); + itp++) { + app * cur = to_app((*itp)->exp); + func_decl * f = cur->get_decl(); + unsigned new_prec = PREC_INCREMENT, old_prec; + bool in_new_map; + + if (cnst2prec_map.contains(f)) + new_prec += cnst2prec_map.find(f); + + new_prec= (new_prec > MAX_PRECISION) ? 
MAX_PRECISION : new_prec; + new_map.insert(f, new_prec); + +#ifdef Z3DEBUG + std::cout << f->get_name() << ":" << new_prec << std::endl; + std::cout << mk_ismt2_pp(cur, m) << ":" << new_prec << std::endl; +#endif + + if(cnst2term_map.contains(f)) + cur = cnst2term_map.find(f); + // Refine constants that are direct arguments of this term + for(unsigned i=0; iget_num_args();i++){ + func_decl * arg_decl = to_app(cur->get_arg(i))->get_decl(); + if (!cnst2term_map.contains(arg_decl) && //Not a constant introduced by flattening + !m_float_util.is_rm(cur->get_arg(i)) && //OLD:is_rm(...,rm) + !m_float_util.is_numeral(cur->get_arg(i))) { //OLD:is_value + //It is an input 'variable' + if ( (in_new_map = new_map.contains(arg_decl))) + old_prec=new_map.find(arg_decl); + else if (cnst2prec_map.contains(arg_decl)) + old_prec = cnst2prec_map.find(arg_decl); + else + old_prec=0; + + if (old_prec < new_prec) { + if (in_new_map) + new_map.remove(arg_decl); + SASSERT(new_prec <= MAX_PRECISION); + new_map.insert(arg_decl, new_prec); + std::cout << " " << arg_decl->get_name() << ":" << new_prec << std::endl; +#ifdef Z3DEBUG + std::cout<<" "<get_arg(i),m)<<":"< & cnst2prec_map, + obj_map & cnst2term_map, + obj_map & err_est, + obj_map & new_map) { + + obj_map err_ratio_map; + obj_map expr_count; + std::list ranked_terms; + + boolean_comparison_of_models(g, mdl, full_mdl, cnst2term_map, expr_count); + calculate_relative_error(err_est, expr_count, err_ratio_map); + rank_terms (err_ratio_map,ranked_terms); + increase_precision(ranked_terms,cnsts,cnst2prec_map,cnst2term_map,new_map); + } + + void simplify(goal_ref mg) { + ast_manager &m = mg->m(); // CMW: <--- We use the manager of the goal, so this works for any manager. + expr_ref new_curr(m); + proof_ref new_pr(m); + + th_rewriter simplifier(m, m_params); + + // CMW: we need to eliminate AND expressions. + params_ref elim_and(m_params); + elim_and.set_bool("elim_and", true); + // elim_and.set_uint("max_depth", 1); // CMW: This number can have a big impact on performance, either way. + simplifier.updt_params(elim_and); + + SASSERT(mg->is_well_sorted()); + TRACE("before_simplifier", mg->display(tout);); + m_num_steps = 0; + if (mg->inconsistent()) + return; + for (unsigned idx = 0; idx < mg->size(); idx++) { + if (mg->inconsistent()) + break; + expr * curr = mg->form(idx); + simplifier(curr, new_curr, new_pr); + m_num_steps += simplifier.get_num_steps(); + if (mg->proofs_enabled()) { + proof * pr = mg->pr(idx); + new_pr = m.mk_modus_ponens(pr, new_pr); + } + mg->update(idx, new_curr, new_pr, mg->dep(idx)); + } + TRACE("after_simplifier_bug", mg->display(tout);); + mg->elim_redundancies(); + TRACE("after_simplifier", mg->display(tout);); + TRACE("after_simplifier_detail", mg->display_with_dependencies(tout);); + SASSERT(mg->is_well_sorted()); + } + + bool fully_encoded(obj_map const & precision_map) + { + for (obj_map::iterator it = precision_map.begin(); + it != precision_map.end(); + it++) + if (it->m_value < MAX_PRECISION) return false; + return true; + } + + void bitblast(goal_ref const & g, + fpa2bv_converter_prec & fpa2bv, + bit_blaster_rewriter & bv2bool, + obj_map & const2prec_map, + sat::solver & solver, + atom2bool_var & map) + { + // CMW: This is all done using the temporary manager! 
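+            // Pipeline: rewrite FPA terms to bit-vectors at the precision assigned to
+            // each constant (const2prec_map), assert the bit-fixing equalities collected
+            // in fpa2bv.m_extra_assertions, simplify, bit-blast to propositional logic,
+            // and hand the clauses to the SAT solver via goal2sat.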
+ expr_ref new_curr(*m_temp_manager); + proof_ref new_pr(*m_temp_manager); + std::cout.flush(); + + SASSERT(g->is_well_sorted()); + + //fpa2bv + fpa2bv_rewriter_prec fpa2bv_rw(*m_temp_manager, fpa2bv, m_params); + fpa2bv_rw.m_cfg.set_mappings(&const2prec_map); + m_num_steps = 0; + unsigned size = g->size(); + for (unsigned idx = 0; idx < size; idx++) { + if (g->inconsistent()) + break; + expr * curr = g->form(idx); +#ifdef Z3DEBUG + std::cout<pr(idx); + new_pr = m_temp_manager->mk_modus_ponens(pr, new_pr); + } + g->update(idx, new_curr, new_pr, g->dep(idx)); + + SASSERT(g->is_well_sorted()); + } + + //Adding the equalities that fix bits + for(unsigned i=0;iassert_expr(fpa2bv.m_extra_assertions.get(i)); + + SASSERT(g->is_well_sorted()); + + simplify(g); + + //Bitblasting + TRACE("before_bit_blaster", g->display(tout);); + m_num_steps = 0; + size = g->size(); + for (unsigned idx = 0; idx < size; idx++) { + if (g->inconsistent()) + break; + expr * curr = g->form(idx); + bv2bool(curr, new_curr, new_pr); + m_num_steps += bv2bool.get_num_steps(); + if (m_proofs_enabled) { + proof * pr = g->pr(idx); + new_pr = m_temp_manager->mk_modus_ponens(pr, new_pr); + } + g->update(idx, new_curr, new_pr, g->dep(idx)); + } + + g->inc_depth(); + + simplify(g); + + TRACE("before_sat_solver", g->display(tout);); + g->elim_redundancies(); + + m_goal2sat(*g, m_params, solver, map); + + TRACE("sat_solver_unknown", tout << "interpreted_atoms: " << map.interpreted_atoms() << "\n"; + atom2bool_var::iterator it = map.begin(); + atom2bool_var::iterator end = map.end(); + for (; it != end; ++it) { + if (!is_uninterp_const(it->m_key)) + tout << mk_ismt2_pp(it->m_key, *m_temp_manager) << "\n"; + }); + + CASSERT("sat_solver", solver.check_invariant()); + IF_VERBOSE(TACTIC_VERBOSITY_LVL, solver.display_status(verbose_stream());); + TRACE("sat_dimacs", solver.display_dimacs(tout);); + } + + model_ref get_fpa_model(goal_ref const & g, + fpa2bv_converter_prec & fpa2bv, + bit_blaster_rewriter & bv2bool, + sat::solver & solver, + atom2bool_var & map) { + // CMW: This is all done using the temporary manager, until at the very end we translate the model back to this->m. 
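+            // Read the propositional assignment off the SAT model, then undo the
+            // bit-blasting (bit_blaster_model_converter) and the fpa2bv encoding
+            // (fpa2bv_prec_model_converter) to recover floating-point values.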
+ model_ref md = alloc(model, *m_temp_manager); + sat::model const & ll_m = solver.get_model(); + TRACE("sat_tactic", for (unsigned i = 0; i < ll_m.size(); i++) + tout << i << ":" << ll_m[i] << " "; tout << "\n";); + atom2bool_var::iterator it = map.begin(); + atom2bool_var::iterator end = map.end(); + for (; it != end; ++it) { + expr * n = it->m_key; + sat::bool_var v = it->m_value; + if (is_app(n) && to_app(n)->get_decl()->get_arity() != 0) + continue; + TRACE("sat_tactic", tout << "extracting value of " << mk_ismt2_pp(n, *m_temp_manager) << "\nvar: " << v << "\n";); + switch (sat::value_at(v, ll_m)) { + case l_true: md->register_decl(to_app(n)->get_decl(), m_temp_manager->mk_true()); break; + case l_false: md->register_decl(to_app(n)->get_decl(), m_temp_manager->mk_false()); break; + default: + break; + } + } + + TRACE("sat_tactic", model_v2_pp(tout, *md);); + model_converter_ref bb_mc = mk_bit_blaster_model_converter(*m_temp_manager, bv2bool.const2bits()); + model_converter_ref bv_mc = mk_fpa2bv_prec_model_converter(*m_temp_manager, fpa2bv.const2bv(), fpa2bv.rm_const2bv()); + bb_mc->operator()(md, 0); + bv_mc->operator()(md, 0); + +#ifdef Z3DEBUG + std::cout << "Model: " << std::endl; + for (unsigned i = 0 ; i < md->get_num_constants(); i++) { + func_decl * d = md->get_constant(i); + std::cout << d->get_name() << " = " << mk_ismt2_pp(md->get_const_interp(d), *m_temp_manager) << std::endl; + } +#endif + // md is in terms of the temporary manager. + ast_translation translator(*m_temp_manager, this->m); + return md->translate(translator); + } + + void encode_fpa_terms( goal_ref const & g, + obj_map & const2term_map) + { + for (obj_map::iterator it = const2term_map.begin(); + it!=const2term_map.end(); + it++) { + expr_ref q(m); +#ifdef Z3DEBUG + std::cout << "Adding " << it->m_key->get_name() << " = " << mk_ismt2_pp(it->m_value, m) << std::endl; +#endif + q = m.mk_eq(m.mk_const(it->m_key), it->m_value); + g->assert_expr(q); + } + } + + lbool approximate_model_construction(goal_ref & g, obj_map & const2prec_map) { + lbool r = l_undef; + // CMW: The following will introduce lots of stuff that we don't need (e.g., symbols) + // To save memory, we use a separate, new manager that we can throw away afterwards. + m_temp_manager = alloc(ast_manager, PGM_DISABLED); + { + ast_translation translator(m, *m_temp_manager); + goal_ref ng = g->translate(translator); + obj_map const2prec_map_tm; + + for (obj_map::iterator it = const2prec_map.begin(); + it!=const2prec_map.end(); + it++) + const2prec_map_tm.insert(translator(it->m_key), it->m_value); + + sat::solver sat_solver(m_params, 0); + atom2bool_var atom_map(*m_temp_manager); + { tactic_report report_i("fpa2bv_approx_before_bitblaster", *ng); } + fpa2bv_converter_prec fpa2bv(*m_temp_manager, m_mode); + bit_blaster_rewriter bv2bool(*m_temp_manager, m_params); + bitblast(ng, fpa2bv, bv2bool, const2prec_map_tm, sat_solver, atom_map); + { tactic_report report_i("fpa2bv_approx_after_bitblaster", *ng); } + + std::cout << "Iteration variables: " << sat_solver.num_vars() << std::endl; + std::cout << "Iteration clauses: " << sat_solver.num_clauses() << std::endl; + r = sat_solver.check(); + + if (r == l_true) + { + // we need to get the model and translate it back to m. + m_fpa_model = get_fpa_model(ng, fpa2bv, bv2bool, sat_solver, atom_map).get(); + } + else + m_fpa_model = 0; + + // CMW: translator, etc, gets destroyed here, so all references + // to temporary expressions are gone. 
+ } + + dealloc(m_temp_manager); + m_temp_manager = 0; + + return r; + } + + void lift( goal_ref const & g, func_decl_ref_vector & constants, obj_map * const2term_map ) + { + expr_ref new_new_curr(m); + expr_ref new_curr(m); + proof_ref new_pr(m); + + simplify(g); + + //Renaming subexpressions using new constants + const_intro_rewriter const_rewriter(m, m_params); + for (unsigned idx = 0; idx < g->size(); idx++) { + if (g->inconsistent()) + break; + expr * curr = g->form(idx); + const_rewriter(curr, new_curr, new_pr); //Introduces constants that replace subexpressions + m_num_steps += const_rewriter.get_num_steps(); + if (m_proofs_enabled) { + proof * pr = g->pr(idx); + new_pr = m.mk_modus_ponens(pr, new_pr); + } + g->update(idx, new_curr, new_pr, g->dep(idx)); + } + + + constants.set(const_rewriter.m_cfg.m_introduced_consts); + const2term_map->swap(const_rewriter.m_cfg.m_const2term_map); + + // Note: Ideally, we would directly encode them. For now we're lazy and just add equalities + // and we rely on fpa2bv_converter_prec to `magically' recognize the equalities we added. + { tactic_report report_i("fpa2bv_approx_before_fpa_terms", *(g.get())); } + encode_fpa_terms(g, *const2term_map); + SASSERT(g.get()->is_well_sorted()); + + + } + + void verify_precise_model( goal_ref const & g, + model_ref & full_mdl, + func_decl_ref_vector & constants, + obj_map & const2term_map, + model_converter_ref & mc, + goal_ref_buffer & result ){ + expr_ref res(m); + + for (unsigned j = 0; j < g->size(); j++) { + full_mdl->eval(g->form(j), res, true); + if (!m.is_true(res)) { + std::cout << "Failed: " << mk_ismt2_pp(g->form(j), m) << std::endl; + std::cout << "Evaluates to: " << mk_ismt2_pp(res, m) << std::endl; + } + SASSERT(m.is_true(res)); + } + + std::cout << "Full model: " << std::endl; + for (unsigned i = 0 ; i < full_mdl->get_num_decls(); i++) + { + func_decl * d = full_mdl->get_decl(i); + if(constants.contains(d)) + std::cout << d->get_name() << " = " << mk_ismt2_pp(full_mdl->get_const_interp(d), m) << std::endl; + } + std::cout.flush(); + + result.back()->reset(); + + // Filter all the constants we introduced earlier from the model. 
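+            // The resulting model converter hides the introduced constants and takes the
+            // remaining interpretations from m_fpa_model.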
+ filter_model_converter * fmc = alloc(filter_model_converter, m); + for (obj_map::iterator it = const2term_map.begin(); + it != const2term_map.end(); + it++) + fmc->insert(it->m_key); + mc = concat(fmc, model2model_converter(m_fpa_model.get())); + } + + void setup_options(goal_ref const & g){ + SASSERT(g->is_well_sorted()); + fail_if_proof_generation("fpa2bv_approx", g); + fail_if_unsat_core_generation("fpa2bv_approx", g); + m_proofs_enabled = g->proofs_enabled(); + m_produce_models = g->models_enabled(); + m_produce_unsat_cores = g->unsat_core_enabled(); + m_num_steps = 0; + } + + void print_constants(func_decl_ref_vector & constants, obj_map & const2prec_map){ +#ifdef Z3DEBUG + for(unsigned i=0;iget_name()<<":"< const2prec_map; + obj_map next_const2prec_map; + func_decl_ref_vector constants(m); + obj_map const2term_map; + lbool r = l_true; + unsigned iteration_cnt = 0; + stopwatch sw; + + tactic_report report("fpa2bv_approx", *g); + TRACE("fpa2bv_approx", tout << "BEFORE: " << std::endl; g->display(tout);); + result.reset(); + result.push_back(g.get()); + + SASSERT(g->is_well_sorted()); + if (g->inconsistent()) + return; + + lift(g, constants, &const2term_map); + + init_precision_mapping(constants, const2prec_map, const2term_map); + + std::cout << "Simplified goal:" << std::endl; + g->display(std::cout); + + while (!solved && !m_cancel) + { + std::cout << "=============== Starting iteration " << ++iteration_cnt << std::endl; + + sw.reset(); + sw.start(); + + // Copy the goal + goal_ref mg(alloc(goal, g->m(),g->proofs_enabled(),g->models_enabled(),g->unsat_core_enabled())); + mg->copy_from(*g.get()); + tactic_report report_i("fpa2bv_approx_i", *mg); + + print_constants(constants, const2prec_map); + + TRACE("fpa2bv_approx_goal_i", mg->display(tout); ); + + r = approximate_model_construction(mg, const2prec_map); + + std::cout << "Approximation is " << (r==l_true?"SAT":r==l_false?"UNSAT":"UNKNOWN") << std::endl; + + if (r == l_true) { + model_ref full_mdl = alloc(model, m); + obj_map err_est; + + solved = precise_model_reconstruction(m_fpa_model, full_mdl, mg, err_est, constants, const2term_map); + + std::cout<<"Patching of the model "<<((solved)?"succeeded":"failed")< This is unsat. + solved = true; + result.back()->reset(); + result.back()->assert_expr(m.mk_false()); + } + } else { + // CMW: When the sat solver comes back with `unknown', what shall we do? + // AZ: Blindly refine? 
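+                    // For now we give up: setting m_cancel terminates the refinement loop.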
+ m_cancel = true; + } + + const2prec_map.swap(next_const2prec_map); + next_const2prec_map.reset(); + std::cout << "Iteration time: " << sw.get_current_seconds() << std::endl; + } + + std::cout << "=============== Terminating " << std::endl; + dec_ref_map_key_values(m, const2term_map); + std::cout << "Iteration count: " << iteration_cnt << std::endl; + } + }; + + imp * m_imp; + params_ref m_params; + +public: + fpa2bv_approx_tactic(ast_manager & m, params_ref const & p) : + m_params(p){ + m_imp = alloc(imp, m, p, FPAA_DEFAULT_MODE); + } + + virtual tactic * translate(ast_manager & m) { + return alloc(fpa2bv_approx_tactic, m, m_params); + } + + virtual ~fpa2bv_approx_tactic() { + dealloc(m_imp); + } + + virtual void updt_params(params_ref const & p) { + m_params = p; + m_imp->updt_params(p); + } + + virtual void collect_param_descrs(param_descrs & r) { + } + + virtual void operator()(goal_ref const & in, goal_ref_buffer & result, + model_converter_ref & mc, proof_converter_ref & pc, + expr_dependency_ref & core) { + (*m_imp)(in, result, mc, pc, core); + } + + virtual void cleanup() { + ast_manager & m = m_imp->m; + imp * d = m_imp; +#pragma omp critical (tactic_cancel) + { + d = m_imp; + } + dealloc(d); + d = alloc(imp, m, m_params, FPAA_DEFAULT_MODE); +#pragma omp critical (tactic_cancel) + { + m_imp = d; + } + } + +protected: + virtual void set_cancel(bool f) { + if (m_imp) + m_imp->set_cancel(f); +} +}; + +tactic * mk_fpa2bv_approx_tactic(ast_manager & m, params_ref const & p) { + return and_then(clean(alloc(fpa2bv_approx_tactic, m, p)), mk_fail_if_undecided_tactic()); +} + + diff --git a/src/tactic/fpa/fpa2bv_approx_tactic.h b/src/tactic/fpa/fpa2bv_approx_tactic.h new file mode 100644 index 000000000..8838ca99d --- /dev/null +++ b/src/tactic/fpa/fpa2bv_approx_tactic.h @@ -0,0 +1,33 @@ +/*++ +Copyright (c) 2012 Microsoft Corporation + +Module Name: + + fpa2bv_lazy_tactic.h + +Abstract: + + Tactic that converts floating points to bit-vectors lazily + +Author: + + Aleksander Zeljic 2012-11-15 + +Notes: + +--*/ +#ifndef _FPA2BV_APPROX_TACTIC_ +#define _FPA2BV_APPROX_TACTIC_ + +#include"params.h" +class ast_manager; +class tactic; + + + +tactic * mk_fpa2bv_approx_tactic(ast_manager & m, params_ref const & p = params_ref()); +/* + ADD_TACTIC("fpa2bv_approx", "An iterative approximation based bit-blasting decision procedure for FPA.", "mk_fpa2bv_approx_tactic(m, p)") +*/ + +#endif diff --git a/src/tactic/fpa/fpa2bv_converter_prec.cpp b/src/tactic/fpa/fpa2bv_converter_prec.cpp new file mode 100644 index 000000000..ff627f7e4 --- /dev/null +++ b/src/tactic/fpa/fpa2bv_converter_prec.cpp @@ -0,0 +1,1783 @@ +/*++ +Copyright (c) 2012 Microsoft Corporation + +Module Name: + + fpa2bv_converter_prec.cpp + +Abstract: + + Conversion routines for Floating Point -> Bit-Vector + +Author: + + Christoph (cwinter) 2012-02-09 + +Notes: + +--*/ +#include"ast_smt2_pp.h" +#include"well_sorted.h" +#include + +#include"fpa2bv_converter_prec.h" + +#define BVULT(X,Y,R) { expr_ref bvult_eq(m), bvult_not(m); m_simp.mk_eq(X, Y, bvult_eq); m_simp.mk_not(bvult_eq, bvult_not); expr_ref t(m); t = m_bv_util.mk_ule(X,Y); m_simp.mk_and(t, bvult_not, R); } +#define BVSLT(X,Y,R) { expr_ref bvslt_eq(m), bvslt_not(m); m_simp.mk_eq(X, Y, bvslt_eq); m_simp.mk_not(bvslt_eq, bvslt_not); expr_ref t(m); t = m_bv_util.mk_sle(X,Y); m_simp.mk_and(t, bvslt_not, R); } + +#define MIN_EBITS 3 +#define MIN_SBITS 3 + +fpa2bv_converter_prec::fpa2bv_converter_prec(ast_manager & m, fpa_approximation_mode mode) : + fpa2bv_converter(m), + m_mode(mode) 
{ +} + +void fpa2bv_converter_prec::fix_bits(unsigned prec, expr_ref rounded, unsigned sbits, unsigned ebits)//expr_ref& fixed, +{ //AZ: TODO: revise! minimal number of legal bits is 3!!!! Remove magic numbers + unsigned szeroes=((sbits-2)*(MAX_PRECISION - prec +0.0)/MAX_PRECISION);//3 bits are minimum for the significand + unsigned ezeroes=((ebits-2)*(MAX_PRECISION - prec+0.0)/MAX_PRECISION);//2 bits are minimum for the exponent + + expr_ref fix_sig(m), fix_exp(m); + expr * sgn, *sig, *expn; + split_fp( rounded.get(), sgn,sig,expn); + if(ezeroes>0) { + fix_exp=m.mk_eq(m_bv_util.mk_extract(ebits-2, ebits - ezeroes -1, sig), m_bv_util.mk_numeral(0,ezeroes)); + m_extra_assertions.push_back(fix_exp); + SASSERT(is_well_sorted(m, fix_exp)); + } + + if(szeroes>0) { + fix_sig=m.mk_eq(m_bv_util.mk_extract(sbits-2, sbits - szeroes -1, sig), m_bv_util.mk_numeral(0,szeroes)); + m_extra_assertions.push_back(fix_sig); + SASSERT(is_well_sorted(m, fix_sig)); + } +} + +void fpa2bv_converter_prec::mk_const(func_decl * f, unsigned prec, expr_ref & result) { + switch (m_mode) { + case FPAA_SMALL_FLOATS: + { + if (m_const2bv.contains(f)) + result = m_const2bv.find(f); + else { + if (prec == MAX_PRECISION) + fpa2bv_converter::mk_const(f, result); + else { + unsigned ebits = fu().get_ebits(f->get_range()); + unsigned sbits = fu().get_sbits(f->get_range()); + double rel = prec/(double)MAX_PRECISION; + unsigned new_ebits = (unsigned) (rel * (double)ebits); + unsigned new_sbits = (unsigned) (rel * (double)sbits); + if (new_ebits < MIN_EBITS) new_ebits = MIN_EBITS; + if (new_sbits < MIN_SBITS) new_sbits = MIN_SBITS; + sort_ref ns(m), fp_srt(m); + ns = fu().mk_float_sort(new_ebits, new_sbits); + app_ref small_const(m); + small_const = m.mk_fresh_const("small_const", ns); + + fp_srt = fu().mk_float_sort(ebits, sbits); + symbol name("asFloat"); + sort_ref rm_sort(m); + rm_sort = fu().mk_rm_sort(); + //sort * domain[2] = { rm_sort, ns }; + //parameter parameters[2] = { parameter(ebits), parameter(sbits) }; + + fpa2bv_converter::mk_const(small_const->get_decl(), result); + m_const2bv.insert(f, result.get()); + m.inc_ref(f); + m.inc_ref(result.get()); +#ifdef Z3DEBUG + std::cout << f->get_name() << " := " << small_const->get_decl()->get_name() << + " [" << new_sbits<<","<get_name() << " := " << mk_ismt2_pp(result, m) << std::endl;); + } + } + break; + } + case FPAA_PRECISE: + case FPAA_FIXBITS: + fpa2bv_converter::mk_const(f, result); break; + default: + UNREACHABLE(); + break; + } +} +void fpa2bv_converter_prec::mk_small_op(func_decl * f, unsigned new_ebits, unsigned new_sbits, unsigned num, expr * const * args, func_decl_ref & small_op, expr_ref_vector & cast_args) +{ + if (new_ebits < MIN_EBITS) new_ebits = MIN_EBITS; + if (new_sbits < MIN_SBITS) new_sbits = MIN_SBITS; + sort_ref new_srt(m), rm_srt(m); + new_srt = fu().mk_float_sort(new_ebits, new_sbits); + rm_srt = fu().mk_rm_sort(); + + sort_ref_vector new_domain(m); + cast_args.reset(); + + for (unsigned i=0; i < num; i++) // Recreate the domain by replacing the full fpa sort with the smaller one. 
+ { + sort * d_i = f->get_domain(i); + expr * a_i = args[i]; + + if (fu().is_rm(d_i)) + { + new_domain.push_back(rm_srt); + cast_args.push_back(a_i); + } + else if (fu().is_float(f->get_domain(i))) + { + sort * a_i_srt = to_app(a_i)->get_decl()->get_range(); + new_domain.push_back(new_srt); + + // Cast the corresponding argument to the right fpa size + if (is_app(a_i)) + { + if (a_i_srt == new_srt) + cast_args.push_back(a_i); + else { + app_ref rtz(m); + rtz = fu().mk_round_toward_zero(); + expr_ref rm(m); + fpa2bv_converter::mk_rounding_mode(rtz->get_decl(), rm); + + sort * d[2] = { rm_srt, a_i_srt }; + symbol name("asFloat"); + func_decl_ref fd(m); + fd = m.mk_func_decl(name, 2, d, new_srt, func_decl_info(fu().get_family_id(), OP_FPA_TO_FP, new_srt->get_num_parameters(), new_srt->get_parameters())); + + expr_ref t(m); + expr * args[2] = { rm, a_i }; + fpa2bv_converter::mk_to_fp(fd, 2, args, t); + cast_args.push_back(t); + SASSERT(is_well_sorted(m, t)); + } + } + else + NOT_IMPLEMENTED_YET(); + } + else + // just keep everything else + cast_args.push_back(a_i); + } + + parameter parameters[2] = { parameter(new_ebits), parameter(new_sbits) }; + + // May I reuse parts of the existing declaration? CMW: Sure. + + small_op = m.mk_func_decl(f->get_name(), num, new_domain.c_ptr(), new_srt, + func_decl_info(fu().get_family_id(), f->get_decl_kind(), 2, parameters)); + + //std::cout<get_name()<<"["<get_range()); + unsigned sbits = fu().get_sbits(f->get_range()); + double rel = prec/(double)MAX_PRECISION; + unsigned new_ebits = (unsigned) (rel * (double)ebits);// (unsigned) (MIN_EBITS + rel * (double)(ebits-MIN_EBITS)) + unsigned new_sbits = (unsigned) (rel * (double)sbits);// (unsigned) (MIN_SBITS + rel * (double)(sbits-MIN_SBITS)) + mk_small_op(f,new_ebits,new_sbits,num,args, small_op, cast_args); +} + +void fpa2bv_converter_prec::mk_cast_small_to_big(func_decl * f, expr * arg, expr_ref & result) { + unsigned ebits = fu().get_ebits(f->get_range()); + unsigned sbits = fu().get_sbits(f->get_range()); + + app_ref rtz(m); + rtz = fu().mk_round_toward_zero(); + expr_ref rm(m); + fpa2bv_converter::mk_rounding_mode(rtz->get_decl(), rm); + sort_ref rm_srt(m); + rm_srt = fu().mk_rm_sort(); + sort * d[2] = { rm_srt, to_app(arg)->get_decl()->get_range() }; + parameter parameters[2] = { parameter(ebits), parameter(sbits) }; + symbol name("asFloat"); + func_decl_ref cast_up(m); + cast_up = m.mk_func_decl(name, 2, d, f->get_range(), func_decl_info(fu().get_family_id(), OP_FPA_TO_FP, f->get_range()->get_num_parameters(), parameters)); + expr * args[2] = { rm, arg }; + fpa2bv_converter::mk_to_fp(cast_up, 2, args, result); +} + +void fpa2bv_converter_prec::mk_cast_small_to_big(unsigned sbits, unsigned ebits, expr * arg, expr_ref & result) { + app_ref rtz(m); + rtz = fu().mk_round_toward_zero(); + expr_ref rm(m); + fpa2bv_converter::mk_rounding_mode(rtz->get_decl(), rm); + sort_ref rm_srt(m); + rm_srt = fu().mk_rm_sort(); + sort * d[2] = { rm_srt, to_app(arg)->get_decl()->get_range() }; + parameter parameters[2] = { parameter(ebits), parameter(sbits) }; + symbol name("asFloat"); + func_decl_ref cast_up(m); + sort_ref ns(m); + ns = fu().mk_float_sort(ebits, sbits); + cast_up = m.mk_func_decl(name, 2, d, ns, func_decl_info(fu().get_family_id(), OP_FPA_TO_FP, 2, parameters)); + expr * args[2] = { rm, arg }; + fpa2bv_converter::mk_to_fp(cast_up, 2, args, result); +} + + +void fpa2bv_converter_prec::match_sorts(expr * a, expr * b, expr_ref & n_a, expr_ref & n_b) +{ + //Check if the sorts of lhs and rhs match, otherwise 
cast them to appropriate size? + SASSERT(is_app_of(a, m_plugin->get_family_id(), OP_FPA_TO_FP)); + SASSERT(is_app_of(b, m_plugin->get_family_id(), OP_FPA_TO_FP)); + func_decl * a_decl = to_app(a)->get_decl(); + func_decl * b_decl = to_app(b)->get_decl(); + + unsigned a_ebits = fu().get_ebits(a_decl->get_range()); + unsigned a_sbits = fu().get_sbits(a_decl->get_range()); + unsigned b_ebits = fu().get_ebits(b_decl->get_range()); + unsigned b_sbits = fu().get_sbits(b_decl->get_range()); + unsigned n_ebits, n_sbits; + + //if( (a_ebits == b_ebits) && (a_sbits == b_sbits)) + //{//No need for adjustment + n_a = a; + n_b = b; + //} + //else + //{ + if ((a_ebits <= b_ebits) && (a_sbits<= b_sbits)) + {//sort of b is wider than sort of a, we cast a to the sort of b. + mk_cast_small_to_big(b_sbits,b_ebits,a,n_a); + n_b = b; + } + else if ((a_ebits >= b_ebits) && (a_sbits >= b_sbits)) + { + n_a = a; + mk_cast_small_to_big(a_sbits,a_ebits,b,n_b); + } + else + { + n_ebits = (a_ebits < b_ebits)? b_ebits:a_ebits; + n_sbits = (a_sbits < b_sbits)? b_sbits:a_sbits; + mk_cast_small_to_big(n_sbits,n_ebits,a,n_a); + mk_cast_small_to_big(n_sbits,n_ebits,b,n_b); + } + //} +} +void fpa2bv_converter_prec::mk_eq(expr * a, expr * b, expr_ref & result) { + // This is structural equality, not floating point equality. + expr_ref na(m),nb(m); + match_sorts(a,b,na,nb); + fpa2bv_converter::mk_eq(na,nb,result); +} + +void fpa2bv_converter_prec::mk_ite(expr * c, expr * t, expr * f, expr_ref & result) { + expr_ref nt(m),nf(m); + match_sorts(t,f,nt,nf); + fpa2bv_converter::mk_ite(c,nt,nf,result); +} + + + +void fpa2bv_converter_prec::mk_add(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + // AZ: Switch can be moved just before the call to the fix_bits method, everything else should be the same + switch (m_mode) { + case FPAA_PRECISE: + fpa2bv_converter::mk_add(f,num,args,result); + break; + case FPAA_SMALL_FLOATS: + { + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + mk_small_op(f, prec, num, args, small_fd, small_args); + //expr_ref small_op(m); + fpa2bv_converter::mk_add(small_fd, num, small_args.c_ptr(), result); + //mk_cast_small_to_big(f, small_op, result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_add", tout << "small_fd: " << mk_ismt2_pp(small_fd, m) << std::endl << + "result = " << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + + case FPAA_FIXBITS: { + if (MAX_PRECISION == prec) + fpa2bv_converter::mk_add(f,num,args,result); + else{ + //Alternative encoding + /*func_decl * nf = & func_decl(f->get_name(), + f->get_arity(), + f->get_domain(), + f->get_range(), + f->get_info());*/ + + + SASSERT(num == 3); + + expr_ref rm(m), x(m), y(m); + rm = args[0]; + x = args[1]; + y = args[2]; + + expr_ref nan(m), nzero(m), pzero(m); + mk_nan(f, nan); + mk_nzero(f, nzero); + mk_pzero(f, pzero); + + expr_ref x_is_nan(m), x_is_zero(m), x_is_pos(m), x_is_neg(m), x_is_inf(m); + expr_ref y_is_nan(m), y_is_zero(m), y_is_pos(m), y_is_neg(m), y_is_inf(m); + mk_is_nan(x, x_is_nan); + mk_is_zero(x, x_is_zero); + mk_is_pos(x, x_is_pos); + mk_is_neg(x, x_is_neg); + mk_is_inf(x, x_is_inf); + mk_is_nan(y, y_is_nan); + mk_is_zero(y, y_is_zero); + mk_is_pos(y, y_is_pos); + mk_is_neg(y, y_is_neg); + mk_is_inf(y, y_is_inf); + + dbg_decouple("fpa2bv_add_x_is_nan", x_is_nan); + dbg_decouple("fpa2bv_add_x_is_zero", x_is_zero); + dbg_decouple("fpa2bv_add_x_is_pos", x_is_pos); + dbg_decouple("fpa2bv_add_x_is_neg", x_is_neg); + dbg_decouple("fpa2bv_add_x_is_inf", x_is_inf); 
+ dbg_decouple("fpa2bv_add_y_is_nan", y_is_nan); + dbg_decouple("fpa2bv_add_y_is_zero", y_is_zero); + dbg_decouple("fpa2bv_add_y_is_pos", y_is_pos); + dbg_decouple("fpa2bv_add_y_is_neg", y_is_neg); + dbg_decouple("fpa2bv_add_y_is_inf", y_is_inf); + + expr_ref c1(m), c2(m), c3(m), c4(m), c5(m), c6(m); + expr_ref v1(m), v2(m), v3(m), v4(m), v5(m), v6(m), v7(m); + + m_simp.mk_or(x_is_nan, y_is_nan, c1); + v1 = nan; + + mk_is_inf(x, c2); + expr_ref nx(m), ny(m), nx_xor_ny(m), inf_xor(m); + mk_is_neg(x, nx); + mk_is_neg(y, ny); + m_simp.mk_xor(nx, ny, nx_xor_ny); + m_simp.mk_and(y_is_inf, nx_xor_ny, inf_xor); + mk_ite(inf_xor, nan, x, v2); + + mk_is_inf(y, c3); + expr_ref xy_is_neg(m), v3_and(m); + m_simp.mk_xor(x_is_neg, y_is_neg, xy_is_neg); + m_simp.mk_and(x_is_inf, xy_is_neg, v3_and); + mk_ite(v3_and, nan, y, v3); + + expr_ref rm_is_to_neg(m), signs_and(m), signs_xor(m), v4_and(m), rm_and_xor(m), neg_cond(m); + m_simp.mk_and(x_is_zero, y_is_zero, c4); + m_simp.mk_and(x_is_neg, y_is_neg, signs_and); + m_simp.mk_xor(x_is_neg, y_is_neg, signs_xor); + mk_is_rm(rm, BV_RM_TO_NEGATIVE, rm_is_to_neg); + m_simp.mk_and(rm_is_to_neg, signs_xor, rm_and_xor); + m_simp.mk_or(signs_and, rm_and_xor, neg_cond); + mk_ite(neg_cond, nzero, pzero, v4); + m_simp.mk_and(x_is_neg, y_is_neg, v4_and); + mk_ite(v4_and, x, v4, v4); + + c5 = x_is_zero; + v5 = y; + + c6 = y_is_zero; + v6 = x; + + // Actual addition. + unsigned ebits = m_util.get_ebits(f->get_range()); + unsigned sbits = m_util.get_sbits(f->get_range()); + + expr_ref a_sgn(m), a_sig(m), a_exp(m), a_lz(m), b_sgn(m), b_sig(m), b_exp(m), b_lz(m); + unpack(x, a_sgn, a_sig, a_exp, a_lz, false); + unpack(y, b_sgn, b_sig, b_exp, b_lz, false); + + dbg_decouple("fpa2bv_add_unpack_a_sgn", a_sgn); + dbg_decouple("fpa2bv_add_unpack_a_sig", a_sig); + dbg_decouple("fpa2bv_add_unpack_a_exp", a_exp); + dbg_decouple("fpa2bv_add_unpack_b_sgn", b_sgn); + dbg_decouple("fpa2bv_add_unpack_b_sig", b_sig); + dbg_decouple("fpa2bv_add_unpack_b_exp", b_exp); + + expr_ref swap_cond(m); + swap_cond = m_bv_util.mk_sle(a_exp, b_exp); + + expr_ref c_sgn(m), c_sig(m), c_exp(m), d_sgn(m), d_sig(m), d_exp(m); + m_simp.mk_ite(swap_cond, b_sgn, a_sgn, c_sgn); + m_simp.mk_ite(swap_cond, b_sig, a_sig, c_sig); // has sbits + m_simp.mk_ite(swap_cond, b_exp, a_exp, c_exp); // has ebits + m_simp.mk_ite(swap_cond, a_sgn, b_sgn, d_sgn); + m_simp.mk_ite(swap_cond, a_sig, b_sig, d_sig); // has sbits + m_simp.mk_ite(swap_cond, a_exp, b_exp, d_exp); // has ebits + + expr_ref res_sgn(m), res_sig(m), res_exp(m); + add_core(sbits, ebits, rm, + c_sgn, c_sig, c_exp, d_sgn, d_sig, d_exp, + res_sgn, res_sig, res_exp); + + //Add the fixed zeroes here...? 
+ expr_ref sbits_zero(m), ebits_zero(m); + // std::cout<<"res_sgn: "<1) + // //Exponent = 1 bit for bias, fixed bits, actual exponent bits + // res_exp=m_bv_util.mk_concat(m_bv_util.mk_extract(ebits+1, ebits-1, res_exp), + // m_bv_util.mk_concat(m_bv_util.mk_numeral(0,ezeroes), + // m_bv_util.mk_extract(ebits - 2 - ezeroes, 0 ,res_exp))); + // if(sones>1) + // res_sig=m_bv_util.mk_concat(m_bv_util.mk_extract(sbits+3,sones+4,res_sig), + // m_bv_util.mk_concat(m_bv_util.mk_numeral(0,sones), + // m_bv_util.mk_extract(3,0,res_sig))); + // + // std::cout<<"res_sgn': "<get_range(), rm, res_sgn, res_sig, res_exp, rounded); + + + mk_ite(is_zero_sig, zero_case, rounded, v7);*/ + + + + expr_ref rounded(m);//, fixed(m); + round(f->get_range(), rm, res_sgn, res_sig, res_exp, rounded); + + fix_bits(prec, rounded, sbits,ebits); + mk_ite(is_zero_sig, zero_case, rounded, v7); + + // signed sones=((sbits-3)*(MAX_PRECISION - prec +0.0)/MAX_PRECISION);//3 bits are minimum for the significand + // unsigned ezeroes=((ebits-2)*(MAX_PRECISION - prec+0.0)/MAX_PRECISION);//2 bits are minimum for the exponent + // expr_ref fix_sig(m), fix_exp(m), fix_sgn(m), rnd_sig(m), rnd_exp(m), rnd_sgn(m), rnd_lz(m); + // expr * sgn, *sig, *expn; + // split( rounded.get(), sgn,sig,expn); + // + // + // if(ezeroes>1) + // //Exponent = 1 bit for bias, fixed bits, actual exponent bits + // fix_exp=m_bv_util.mk_concat(m_bv_util.mk_extract(ebits-1, ebits-1, expn), + // m_bv_util.mk_concat(m_bv_util.mk_numeral(0,ezeroes), + // m_bv_util.mk_extract(ebits - 2 - ezeroes, 0 , expn))); + // if(sones>1) + // fix_sig=m_bv_util.mk_concat(m_bv_util.mk_extract(sbits-2,sones,sig), + // m_bv_util.mk_numeral(0,sones)); + // + // mk_triple(sgn, fix_sig, fix_exp, fixed); + // SASSERT(is_well_sorted(m, fixed)); + //mk_ite(is_zero_sig, zero_case, rounded, v7); + + + + + mk_ite(c6, v6, v7, result); + mk_ite(c5, v5, result, result); + mk_ite(c4, v4, result, result); + mk_ite(c3, v3, result, result); + mk_ite(c2, v2, result, result); + mk_ite(c1, v1, result, result); + + SASSERT(is_well_sorted(m, result)); + + TRACE("fpa2bv_add", tout << "ADD = " << mk_ismt2_pp(result, m) << std::endl; ); + } + break; + } + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_sub(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + SASSERT(num == 3); + expr_ref t(m); + fpa2bv_converter::mk_neg(f, 1, &args[2], t); + expr * nargs[3] = { args[0], args[1], t }; + + switch (m_mode) { + case FPAA_PRECISE: + fpa2bv_converter::mk_add(f, 3, nargs, result); + break; + case FPAA_FIXBITS: // Call the add with prec + case FPAA_SMALL_FLOATS: + fpa2bv_converter_prec::mk_add(f, prec, 3, nargs, result); + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_uminus(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_PRECISE: + case FPAA_SMALL_FLOATS: + case FPAA_FIXBITS: // for now, encode fully. 
+ fpa2bv_converter::mk_neg(f,num,args,result); + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_mul(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_PRECISE: + fpa2bv_converter::mk_mul(f,num,args,result); + break; + case FPAA_SMALL_FLOATS: + { + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + mk_small_op(f, prec, num, args, small_fd, small_args); + expr_ref small_op(m); + fpa2bv_converter::mk_mul(small_fd, num, small_args.c_ptr(), small_op); + mk_cast_small_to_big(f, small_op, result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_mul", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + case FPAA_FIXBITS: // for now, encode fully. + { SASSERT(num == 3); + + expr_ref rm(m), x(m), y(m); + rm = args[0]; + x = args[1]; + y = args[2]; + + expr_ref nan(m), nzero(m), pzero(m), ninf(m), pinf(m); + mk_nan(f, nan); + mk_nzero(f, nzero); + mk_pzero(f, pzero); + mk_ninf(f, ninf); + mk_pinf(f, pinf); + + expr_ref x_is_nan(m), x_is_zero(m), x_is_pos(m), x_is_inf(m); + expr_ref y_is_nan(m), y_is_zero(m), y_is_pos(m), y_is_inf(m); + mk_is_nan(x, x_is_nan); + mk_is_zero(x, x_is_zero); + mk_is_pos(x, x_is_pos); + mk_is_inf(x, x_is_inf); + mk_is_nan(y, y_is_nan); + mk_is_zero(y, y_is_zero); + mk_is_pos(y, y_is_pos); + mk_is_inf(y, y_is_inf); + + dbg_decouple("fpa2bv_mul_x_is_nan", x_is_nan); + dbg_decouple("fpa2bv_mul_x_is_zero", x_is_zero); + dbg_decouple("fpa2bv_mul_x_is_pos", x_is_pos); + dbg_decouple("fpa2bv_mul_x_is_inf", x_is_inf); + dbg_decouple("fpa2bv_mul_y_is_nan", y_is_nan); + dbg_decouple("fpa2bv_mul_y_is_zero", y_is_zero); + dbg_decouple("fpa2bv_mul_y_is_pos", y_is_pos); + dbg_decouple("fpa2bv_mul_y_is_inf", y_is_inf); + + expr_ref c1(m), c2(m), c3(m), c4(m), c5(m), c6(m); + expr_ref v1(m), v2(m), v3(m), v4(m), v5(m), v6(m), v7(m); + + // (x is NaN) || (y is NaN) -> NaN + m_simp.mk_or(x_is_nan, y_is_nan, c1); + v1 = nan; + + // (x is +oo) -> if (y is 0) then NaN else inf with y's sign. + mk_is_pinf(x, c2); + expr_ref y_sgn_inf(m); + mk_ite(y_is_pos, pinf, ninf, y_sgn_inf); + mk_ite(y_is_zero, nan, y_sgn_inf, v2); + + // (y is +oo) -> if (x is 0) then NaN else inf with x's sign. + mk_is_pinf(y, c3); + expr_ref x_sgn_inf(m); + mk_ite(x_is_pos, pinf, ninf, x_sgn_inf); + mk_ite(x_is_zero, nan, x_sgn_inf, v3); + + // (x is -oo) -> if (y is 0) then NaN else inf with -y's sign. + mk_is_ninf(x, c4); + expr_ref neg_y_sgn_inf(m); + mk_ite(y_is_pos, ninf, pinf, neg_y_sgn_inf); + mk_ite(y_is_zero, nan, neg_y_sgn_inf, v4); + + // (y is -oo) -> if (x is 0) then NaN else inf with -x's sign. + mk_is_ninf(y, c5); + expr_ref neg_x_sgn_inf(m); + mk_ite(x_is_pos, ninf, pinf, neg_x_sgn_inf); + mk_ite(x_is_zero, nan, neg_x_sgn_inf, v5); + + // (x is 0) || (y is 0) -> x but with sign = x.sign ^ y.sign + m_simp.mk_or(x_is_zero, y_is_zero, c6); + expr_ref sign_xor(m); + m_simp.mk_xor(x_is_pos, y_is_pos, sign_xor); + mk_ite(sign_xor, nzero, pzero, v6); + + // else comes the actual multiplication. 
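+                // Unpack both operands, xor the signs, add the leading-zero-corrected
+                // exponents, multiply the extended significands, keep the upper half plus
+                // round/sticky bits, round, and then fix_bits constrains the result to the
+                // current precision.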
+ unsigned ebits = m_util.get_ebits(f->get_range()); + unsigned sbits = m_util.get_sbits(f->get_range()); + SASSERT(ebits <= sbits); + + expr_ref a_sgn(m), a_sig(m), a_exp(m), a_lz(m), b_sgn(m), b_sig(m), b_exp(m), b_lz(m); + unpack(x, a_sgn, a_sig, a_exp, a_lz, true); + unpack(y, b_sgn, b_sig, b_exp, b_lz, true); + + dbg_decouple("fpa2bv_mul_a_sig", a_sig); + dbg_decouple("fpa2bv_mul_a_exp", a_exp); + dbg_decouple("fpa2bv_mul_b_sig", b_sig); + dbg_decouple("fpa2bv_mul_b_exp", b_exp); + + expr_ref a_lz_ext(m), b_lz_ext(m); + a_lz_ext = m_bv_util.mk_zero_extend(2, a_lz); + b_lz_ext = m_bv_util.mk_zero_extend(2, b_lz); + + dbg_decouple("fpa2bv_mul_lz_a", a_lz); + dbg_decouple("fpa2bv_mul_lz_b", b_lz); + + expr_ref a_sig_ext(m), b_sig_ext(m); + a_sig_ext = m_bv_util.mk_zero_extend(sbits, a_sig); + b_sig_ext = m_bv_util.mk_zero_extend(sbits, b_sig); + + expr_ref a_exp_ext(m), b_exp_ext(m); + a_exp_ext = m_bv_util.mk_sign_extend(2, a_exp); + b_exp_ext = m_bv_util.mk_sign_extend(2, b_exp); + + expr_ref res_sgn(m), res_sig(m), res_exp(m); + expr * signs[2] = { a_sgn, b_sgn }; + res_sgn = m_bv_util.mk_bv_xor(2, signs); + + dbg_decouple("fpa2bv_mul_res_sgn", res_sgn); + + res_exp = m_bv_util.mk_bv_add( + m_bv_util.mk_bv_sub(a_exp_ext, a_lz_ext), + m_bv_util.mk_bv_sub(b_exp_ext, b_lz_ext)); + + expr_ref product(m); + product = m_bv_util.mk_bv_mul(a_sig_ext, b_sig_ext); + + dbg_decouple("fpa2bv_mul_product", product); + + SASSERT(m_bv_util.get_bv_size(product) == 2*sbits); + + expr_ref h_p(m), l_p(m), rbits(m); + h_p = m_bv_util.mk_extract(2*sbits-1, sbits, product); + l_p = m_bv_util.mk_extract(sbits-1, 0, product); + + if (sbits >= 4) { + expr_ref sticky(m); + sticky = m.mk_app(m_bv_util.get_fid(), OP_BREDOR, m_bv_util.mk_extract(sbits-4, 0, product)); + rbits = m_bv_util.mk_concat(m_bv_util.mk_extract(sbits-1, sbits-3, product), sticky); + } + else + rbits = m_bv_util.mk_concat(l_p, m_bv_util.mk_numeral(0, 4 - sbits)); + + SASSERT(m_bv_util.get_bv_size(rbits) == 4); + res_sig = m_bv_util.mk_concat(h_p, rbits); + + //expr_ref rounded(m); + round(f->get_range(), rm, res_sgn, res_sig, res_exp, v7); + + //AZ:the only difference to the original + fix_bits(prec, v7, sbits, ebits); + + // And finally, we tie them together. + mk_ite(c6, v6, v7, result); + mk_ite(c5, v5, result, result); + mk_ite(c4, v4, result, result); + mk_ite(c3, v3, result, result); + mk_ite(c2, v2, result, result); + mk_ite(c1, v1, result, result); + + SASSERT(is_well_sorted(m, result)); + + TRACE("fpa2bv_mul", tout << "MUL = " << mk_ismt2_pp(result, m) << std::endl; ); + } + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_div(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_SMALL_FLOATS: + { + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + mk_small_op(f, prec, num, args, small_fd, small_args); + expr_ref small_op(m); + fpa2bv_converter::mk_div(small_fd, num, small_args.c_ptr(), small_op); + mk_cast_small_to_big(f, small_op, result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_div", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + case FPAA_PRECISE: + case FPAA_FIXBITS: // for now, encode fully. 
+ fpa2bv_converter::mk_div(f,num,args,result); + break; + + { + SASSERT(num == 3); + + expr_ref rm(m), x(m), y(m); + rm = args[0]; + x = args[1]; + y = args[2]; + + expr_ref nan(m), nzero(m), pzero(m), ninf(m), pinf(m); + mk_nan(f, nan); + mk_nzero(f, nzero); + mk_pzero(f, pzero); + mk_ninf(f, ninf); + mk_pinf(f, pinf); + + expr_ref x_is_nan(m), x_is_zero(m), x_is_pos(m), x_is_inf(m); + expr_ref y_is_nan(m), y_is_zero(m), y_is_pos(m), y_is_inf(m); + mk_is_nan(x, x_is_nan); + mk_is_zero(x, x_is_zero); + mk_is_pos(x, x_is_pos); + mk_is_inf(x, x_is_inf); + mk_is_nan(y, y_is_nan); + mk_is_zero(y, y_is_zero); + mk_is_pos(y, y_is_pos); + mk_is_inf(y, y_is_inf); + + dbg_decouple("fpa2bv_div_x_is_nan", x_is_nan); + dbg_decouple("fpa2bv_div_x_is_zero", x_is_zero); + dbg_decouple("fpa2bv_div_x_is_pos", x_is_pos); + dbg_decouple("fpa2bv_div_x_is_inf", x_is_inf); + dbg_decouple("fpa2bv_div_y_is_nan", y_is_nan); + dbg_decouple("fpa2bv_div_y_is_zero", y_is_zero); + dbg_decouple("fpa2bv_div_y_is_pos", y_is_pos); + dbg_decouple("fpa2bv_div_y_is_inf", y_is_inf); + + expr_ref c1(m), c2(m), c3(m), c4(m), c5(m), c6(m), c7(m); + expr_ref v1(m), v2(m), v3(m), v4(m), v5(m), v6(m), v7(m), v8(m); + + // (x is NaN) || (y is NaN) -> NaN + m_simp.mk_or(x_is_nan, y_is_nan, c1); + v1 = nan; + + // (x is +oo) -> if (y is oo) then NaN else inf with y's sign. + mk_is_pinf(x, c2); + expr_ref y_sgn_inf(m); + mk_ite(y_is_pos, pinf, ninf, y_sgn_inf); + mk_ite(y_is_inf, nan, y_sgn_inf, v2); + + // (y is +oo) -> if (x is oo) then NaN else 0 with sign x.sgn ^ y.sgn + mk_is_pinf(y, c3); + expr_ref xy_zero(m), signs_xor(m); + m_simp.mk_xor(x_is_pos, y_is_pos, signs_xor); + mk_ite(signs_xor, nzero, pzero, xy_zero); + mk_ite(x_is_inf, nan, xy_zero, v3); + + // (x is -oo) -> if (y is oo) then NaN else inf with -y's sign. + mk_is_ninf(x, c4); + expr_ref neg_y_sgn_inf(m); + mk_ite(y_is_pos, ninf, pinf, neg_y_sgn_inf); + mk_ite(y_is_inf, nan, neg_y_sgn_inf, v4); + + // (y is -oo) -> if (x is oo) then NaN else 0 with sign x.sgn ^ y.sgn + mk_is_ninf(y, c5); + mk_ite(x_is_inf, nan, xy_zero, v5); + + // (y is 0) -> if (x is 0) then NaN else inf with xor sign. + c6 = y_is_zero; + expr_ref sgn_inf(m); + mk_ite(signs_xor, ninf, pinf, sgn_inf); + mk_ite(x_is_zero, nan, sgn_inf, v6); + + // (x is 0) -> result is zero with sgn = x.sgn^y.sgn + // This is a special case to avoid problems with the unpacking of zero. + c7 = x_is_zero; + mk_ite(signs_xor, nzero, pzero, v7); + + // else comes the actual division. 
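+ // Note: this block is currently unreachable (both cases above break after
+ // the exact encoding); it is presumably the precision-limited division kept
+ // for later use. The dividend significand is padded with sbits+2 extra zero
+ // bits so the unsigned division below carries enough fraction bits, the
+ // low-order quotient bits are OR-ed into a sticky bit before rounding, and
+ // fix_bits then restricts the rounded result to 'prec'.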
+ unsigned ebits = m_util.get_ebits(f->get_range()); + unsigned sbits = m_util.get_sbits(f->get_range()); + SASSERT(ebits <= sbits); + + expr_ref a_sgn(m), a_sig(m), a_exp(m), a_lz(m), b_sgn(m), b_sig(m), b_exp(m), b_lz(m); + unpack(x, a_sgn, a_sig, a_exp, a_lz, true); + unpack(y, b_sgn, b_sig, b_exp, b_lz, true); + + unsigned extra_bits = sbits+2; + expr_ref a_sig_ext(m), b_sig_ext(m); + a_sig_ext = m_bv_util.mk_concat(a_sig, m_bv_util.mk_numeral(0, sbits + extra_bits)); + b_sig_ext = m_bv_util.mk_zero_extend(sbits + extra_bits, b_sig); + + expr_ref a_exp_ext(m), b_exp_ext(m); + a_exp_ext = m_bv_util.mk_sign_extend(2, a_exp); + b_exp_ext = m_bv_util.mk_sign_extend(2, b_exp); + + expr_ref res_sgn(m), res_sig(m), res_exp(m); + expr * signs[2] = { a_sgn, b_sgn }; + res_sgn = m_bv_util.mk_bv_xor(2, signs); + + expr_ref a_lz_ext(m), b_lz_ext(m); + a_lz_ext = m_bv_util.mk_zero_extend(2, a_lz); + b_lz_ext = m_bv_util.mk_zero_extend(2, b_lz); + + res_exp = m_bv_util.mk_bv_sub( + m_bv_util.mk_bv_sub(a_exp_ext, a_lz_ext), + m_bv_util.mk_bv_sub(b_exp_ext, b_lz_ext)); + + expr_ref quotient(m); + quotient = m.mk_app(m_bv_util.get_fid(), OP_BUDIV, a_sig_ext, b_sig_ext); + + dbg_decouple("fpa2bv_div_quotient", quotient); + + SASSERT(m_bv_util.get_bv_size(quotient) == (sbits + sbits + extra_bits)); + + expr_ref sticky(m); + sticky = m.mk_app(m_bv_util.get_fid(), OP_BREDOR, m_bv_util.mk_extract(extra_bits-2, 0, quotient)); + res_sig = m_bv_util.mk_concat(m_bv_util.mk_extract(extra_bits+sbits+1, extra_bits-1, quotient), sticky); + + SASSERT(m_bv_util.get_bv_size(res_sig) == (sbits + 4)); + + //expr_ref rounded(m);//AZ + round(f->get_range(), rm, res_sgn, res_sig, res_exp, v8); + + fix_bits(prec, v8,sbits,ebits);//AZ + + // And finally, we tie them together. + mk_ite(c7, v7, v8, result); + mk_ite(c6, v6, result, result); + mk_ite(c5, v5, result, result); + mk_ite(c4, v4, result, result); + mk_ite(c3, v3, result, result); + mk_ite(c2, v2, result, result); + mk_ite(c1, v1, result, result); + + SASSERT(is_well_sorted(m, result)); + + TRACE("fpa2bv_div", tout << "DIV = " << mk_ismt2_pp(result, m) << std::endl; ); + } + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_remainder(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_SMALL_FLOATS: + { + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + mk_small_op(f, prec, num, args, small_fd, small_args); + expr_ref small_op(m); + fpa2bv_converter::mk_rem(small_fd, num, small_args.c_ptr(), small_op); + mk_cast_small_to_big(f, small_op, result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_remainder", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + case FPAA_PRECISE: + fpa2bv_converter::mk_rem(f,num,args,result); + break; + case FPAA_FIXBITS: // for now, encode fully. + { + SASSERT(num == 2); + + // Remainder is always exact, so there is no rounding mode. 
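+ // (IEEE 754 remainder is x - y*n with n the integer nearest to x/y; the
+ // result is always exactly representable in the operands' format, hence no
+ // rounding-mode operand.)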
+ expr_ref x(m), y(m); + x = args[0]; + y = args[1]; + + expr_ref nan(m), nzero(m), pzero(m), ninf(m), pinf(m); + mk_nan(f, nan); + mk_nzero(f, nzero); + mk_pzero(f, pzero); + mk_ninf(f, ninf); + mk_pinf(f, pinf); + + expr_ref x_is_nan(m), x_is_zero(m), x_is_pos(m), x_is_inf(m); + expr_ref y_is_nan(m), y_is_zero(m), y_is_pos(m), y_is_inf(m); + mk_is_nan(x, x_is_nan); + mk_is_zero(x, x_is_zero); + mk_is_pos(x, x_is_pos); + mk_is_inf(x, x_is_inf); + mk_is_nan(y, y_is_nan); + mk_is_zero(y, y_is_zero); + mk_is_pos(y, y_is_pos); + mk_is_inf(y, y_is_inf); + + dbg_decouple("fpa2bv_rem_x_is_nan", x_is_nan); + dbg_decouple("fpa2bv_rem_x_is_zero", x_is_zero); + dbg_decouple("fpa2bv_rem_x_is_pos", x_is_pos); + dbg_decouple("fpa2bv_rem_x_is_inf", x_is_inf); + dbg_decouple("fpa2bv_rem_y_is_nan", y_is_nan); + dbg_decouple("fpa2bv_rem_y_is_zero", y_is_zero); + dbg_decouple("fpa2bv_rem_y_is_pos", y_is_pos); + dbg_decouple("fpa2bv_rem_y_is_inf", y_is_inf); + + expr_ref c1(m), c2(m), c3(m), c4(m), c5(m), c6(m); + expr_ref v1(m), v2(m), v3(m), v4(m), v5(m), v6(m), v7(m); + + // (x is NaN) || (y is NaN) -> NaN + m_simp.mk_or(x_is_nan, y_is_nan, c1); + v1 = nan; + + // (x is +-oo) -> NaN + c2 = x_is_inf; + v2 = nan; + + // (y is +-oo) -> x + c3 = y_is_inf; + v3 = x; + + // (x is 0) -> x + c4 = x_is_zero; + v4 = pzero; + + // (y is 0) -> NaN. + c5 = y_is_zero; + v5 = nan; + + // else the actual remainder. + unsigned ebits = m_util.get_ebits(f->get_range()); + unsigned sbits = m_util.get_sbits(f->get_range()); + + expr_ref a_sgn(m), a_sig(m), a_exp(m), a_lz(m); + expr_ref b_sgn(m), b_sig(m), b_exp(m), b_lz(m); + unpack(x, a_sgn, a_sig, a_exp, a_lz, true); + unpack(y, b_sgn, b_sig, b_exp, b_lz, true); + + BVSLT(a_exp, b_exp, c6); + v6 = x; + + // max. exponent difference is (2^ebits) - 3 + const mpz & two_to_ebits = fu().fm().m_powers2(ebits); + mpz max_exp_diff; + m_mpz_manager.sub(two_to_ebits, 3, max_exp_diff); + SASSERT(m_mpz_manager.is_int64(max_exp_diff)); + SASSERT(m_mpz_manager.get_uint64(max_exp_diff) <= UINT_MAX); + + unsigned int max_exp_diff_ui = (unsigned int)m_mpz_manager.get_uint64(max_exp_diff); + m_mpz_manager.del(max_exp_diff); + + expr_ref exp_diff(m); + exp_diff = m_bv_util.mk_bv_sub(a_exp, b_exp); + dbg_decouple("fpa2bv_rem_exp_diff", exp_diff); + + // CMW: This creates _huge_ bit-vectors, which is potentially sub-optimal, + // but calculating this via rem = x - y * nearest(x/y) creates huge circuits. + expr_ref huge_sig(m), shifted_sig(m), huge_rem(m); + huge_sig = m_bv_util.mk_zero_extend(max_exp_diff_ui, a_sig); + shifted_sig = m_bv_util.mk_bv_shl(huge_sig, m_bv_util.mk_zero_extend(max_exp_diff_ui + sbits - ebits, exp_diff)); + huge_rem = m_bv_util.mk_bv_urem(shifted_sig, m_bv_util.mk_zero_extend(max_exp_diff_ui, b_sig)); + dbg_decouple("fpa2bv_rem_huge_rem", huge_rem); + + expr_ref res_sgn(m), res_sig(m), res_exp(m); + res_sgn = a_sgn; + res_sig = m_bv_util.mk_concat(m_bv_util.mk_extract(sbits, 0, huge_rem), + m_bv_util.mk_numeral(0, 3)); + res_exp = m_bv_util.mk_sign_extend(2, b_exp); + + expr_ref rm(m); + rm = m_bv_util.mk_numeral(BV_RM_TIES_TO_EVEN, 3); + + //expr_ref rounded(m); + round(f->get_range(), rm, res_sgn, res_sig, res_exp, v7); + + fix_bits(prec, v7,sbits, ebits); + + // And finally, we tie them together. 
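+ // The special cases are applied in reverse order, so the earlier conditions
+ // take priority: the final mk_ite on c1 (NaN operands) overrides everything
+ // computed below it.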
+ mk_ite(c6, v6, v7, result); + mk_ite(c5, v5, result, result); + mk_ite(c4, v4, result, result); + mk_ite(c3, v3, result, result); + mk_ite(c2, v2, result, result); + mk_ite(c1, v1, result, result); + + SASSERT(is_well_sorted(m, result)); + + TRACE("fpa2bv_rem", tout << "REM = " << mk_ismt2_pp(result, m) << std::endl; ); + } + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_abs(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + //This doesn't need approximation in it's current form + switch (m_mode) { + case FPAA_PRECISE: + case FPAA_SMALL_FLOATS: + case FPAA_FIXBITS: // for now, encode fully. + fpa2bv_converter::mk_abs(f,num,args,result); + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_min(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_PRECISE: + case FPAA_SMALL_FLOATS: + case FPAA_FIXBITS: // for now, encode fully. + fpa2bv_converter::mk_min(f,num,args,result); + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_max(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_PRECISE: + case FPAA_SMALL_FLOATS: + case FPAA_FIXBITS: // for now, encode fully. + fpa2bv_converter::mk_max(f,num,args,result); + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_fusedma(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_SMALL_FLOATS: + { + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + mk_small_op(f, prec, num, args, small_fd, small_args); + expr_ref small_op(m); + fpa2bv_converter::mk_fma(small_fd, num, small_args.c_ptr(), small_op); + mk_cast_small_to_big(f, small_op, result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_fma", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + case FPAA_PRECISE: + fpa2bv_converter::mk_fma(f,num,args,result); + break; + case FPAA_FIXBITS: + { + SASSERT(num == 4); + + // fusedma means (x * y) + z + expr_ref rm(m), x(m), y(m), z(m); + rm = args[0]; + x = args[1]; + y = args[2]; + z = args[3]; + + expr_ref nan(m), nzero(m), pzero(m), ninf(m), pinf(m); + mk_nan(f, nan); + mk_nzero(f, nzero); + mk_pzero(f, pzero); + mk_ninf(f, ninf); + mk_pinf(f, pinf); + + expr_ref x_is_nan(m), x_is_zero(m), x_is_pos(m), x_is_neg(m), x_is_inf(m); + expr_ref y_is_nan(m), y_is_zero(m), y_is_pos(m), y_is_neg(m), y_is_inf(m); + expr_ref z_is_nan(m), z_is_zero(m), z_is_pos(m), z_is_neg(m), z_is_inf(m); + mk_is_nan(x, x_is_nan); + mk_is_zero(x, x_is_zero); + mk_is_pos(x, x_is_pos); + mk_is_neg(x, x_is_neg); + mk_is_inf(x, x_is_inf); + mk_is_nan(y, y_is_nan); + mk_is_zero(y, y_is_zero); + mk_is_pos(y, y_is_pos); + mk_is_neg(y, y_is_neg); + mk_is_inf(y, y_is_inf); + mk_is_nan(z, z_is_nan); + mk_is_zero(z, z_is_zero); + mk_is_pos(z, z_is_pos); + mk_is_neg(z, z_is_neg); + mk_is_inf(z, z_is_inf); + + expr_ref rm_is_to_neg(m); + mk_is_rm(rm, BV_RM_TO_NEGATIVE, rm_is_to_neg); + + dbg_decouple("fpa2bv_fma_x_is_nan", x_is_nan); + dbg_decouple("fpa2bv_fma_x_is_zero", x_is_zero); + dbg_decouple("fpa2bv_fma_x_is_pos", x_is_pos); + dbg_decouple("fpa2bv_fma_x_is_inf", x_is_inf); + dbg_decouple("fpa2bv_fma_y_is_nan", y_is_nan); + dbg_decouple("fpa2bv_fma_y_is_zero", y_is_zero); + dbg_decouple("fpa2bv_fma_y_is_pos", y_is_pos); + dbg_decouple("fpa2bv_fma_y_is_inf", y_is_inf); + 
dbg_decouple("fpa2bv_fma_z_is_nan", z_is_nan); + dbg_decouple("fpa2bv_fma_z_is_zero", z_is_zero); + dbg_decouple("fpa2bv_fma_z_is_pos", z_is_pos); + dbg_decouple("fpa2bv_fma_z_is_inf", z_is_inf); + + expr_ref c1(m), c2(m), c3(m), c4(m), c5(m), c6(m), c7(m); + expr_ref v1(m), v2(m), v3(m), v4(m), v5(m), v6(m), v7(m), v8(m); + + expr_ref inf_xor(m), inf_cond(m); + m_simp.mk_xor(x_is_neg, y_is_neg, inf_xor); + m_simp.mk_xor(inf_xor, z_is_neg, inf_xor); + m_simp.mk_and(z_is_inf, inf_xor, inf_cond); + + // (x is NaN) || (y is NaN) || (z is Nan) -> NaN + m_simp.mk_or(x_is_nan, y_is_nan, z_is_nan, c1); + v1 = nan; + + // (x is +oo) -> if (y is 0) then NaN else inf with y's sign. + mk_is_pinf(x, c2); + expr_ref y_sgn_inf(m), inf_or(m); + mk_ite(y_is_pos, pinf, ninf, y_sgn_inf); + m_simp.mk_or(y_is_zero, inf_cond, inf_or); + mk_ite(inf_or, nan, y_sgn_inf, v2); + + // (y is +oo) -> if (x is 0) then NaN else inf with x's sign. + mk_is_pinf(y, c3); + expr_ref x_sgn_inf(m); + mk_ite(x_is_pos, pinf, ninf, x_sgn_inf); + m_simp.mk_or(x_is_zero, inf_cond, inf_or); + mk_ite(inf_or, nan, x_sgn_inf, v3); + + // (x is -oo) -> if (y is 0) then NaN else inf with -y's sign. + mk_is_ninf(x, c4); + expr_ref neg_y_sgn_inf(m); + mk_ite(y_is_pos, ninf, pinf, neg_y_sgn_inf); + m_simp.mk_or(y_is_zero, inf_cond, inf_or); + mk_ite(inf_or, nan, neg_y_sgn_inf, v4); + + // (y is -oo) -> if (x is 0) then NaN else inf with -x's sign. + mk_is_ninf(y, c5); + expr_ref neg_x_sgn_inf(m); + mk_ite(x_is_pos, ninf, pinf, neg_x_sgn_inf); + m_simp.mk_or(x_is_zero, inf_cond, inf_or); + mk_ite(inf_or, nan, neg_x_sgn_inf, v5); + + // z is +-INF -> z. + mk_is_inf(z, c6); + v6 = z; + + // (x is 0) || (y is 0) -> z + m_simp.mk_or(x_is_zero, y_is_zero, c7); + expr_ref ite_c(m); + m_simp.mk_and(z_is_zero, m.mk_not(rm_is_to_neg), ite_c); + mk_ite(ite_c, pzero, z, v7); + + + // else comes the fused multiplication. 
+ unsigned ebits = m_util.get_ebits(f->get_range()); + unsigned sbits = m_util.get_sbits(f->get_range()); + SASSERT(ebits <= sbits); + + expr_ref a_sgn(m), a_sig(m), a_exp(m), a_lz(m); + expr_ref b_sgn(m), b_sig(m), b_exp(m), b_lz(m); + expr_ref c_sgn(m), c_sig(m), c_exp(m), c_lz(m); + unpack(x, a_sgn, a_sig, a_exp, a_lz, true); + unpack(y, b_sgn, b_sig, b_exp, b_lz, true); + unpack(z, c_sgn, c_sig, c_exp, c_lz, true); + + expr_ref a_lz_ext(m), b_lz_ext(m), c_lz_ext(m); + a_lz_ext = m_bv_util.mk_zero_extend(2, a_lz); + b_lz_ext = m_bv_util.mk_zero_extend(2, b_lz); + c_lz_ext = m_bv_util.mk_zero_extend(2, c_lz); + + expr_ref a_sig_ext(m), b_sig_ext(m); + a_sig_ext = m_bv_util.mk_zero_extend(sbits, a_sig); + b_sig_ext = m_bv_util.mk_zero_extend(sbits, b_sig); + + expr_ref a_exp_ext(m), b_exp_ext(m), c_exp_ext(m); + a_exp_ext = m_bv_util.mk_sign_extend(2, a_exp); + b_exp_ext = m_bv_util.mk_sign_extend(2, b_exp); + c_exp_ext = m_bv_util.mk_sign_extend(2, c_exp); + + dbg_decouple("fpa2bv_fma_a_sig", a_sig_ext); + dbg_decouple("fpa2bv_fma_b_sig", b_sig_ext); + dbg_decouple("fpa2bv_fma_c_sig", c_sig); + dbg_decouple("fpa2bv_fma_a_exp", a_exp_ext); + dbg_decouple("fpa2bv_fma_b_exp", b_exp_ext); + dbg_decouple("fpa2bv_fma_c_exp", c_exp_ext); + dbg_decouple("fpa2bv_fma_a_lz", a_lz_ext); + dbg_decouple("fpa2bv_fma_b_lz", b_lz_ext); + dbg_decouple("fpa2bv_fma_c_lz", c_lz_ext); + + expr_ref mul_sgn(m), mul_sig(m), mul_exp(m); + expr * signs[2] = { a_sgn, b_sgn }; + + mul_sgn = m_bv_util.mk_bv_xor(2, signs); + dbg_decouple("fpa2bv_fma_mul_sgn", mul_sgn); + + mul_exp = m_bv_util.mk_bv_add(m_bv_util.mk_bv_sub(a_exp_ext, a_lz_ext), + m_bv_util.mk_bv_sub(b_exp_ext, b_lz_ext)); + dbg_decouple("fpa2bv_fma_mul_exp", mul_exp); + + mul_sig = m_bv_util.mk_bv_mul(a_sig_ext, b_sig_ext); + dbg_decouple("fpa2bv_fma_mul_sig", mul_sig); + + SASSERT(m_bv_util.get_bv_size(mul_sig) == 2*sbits); + SASSERT(m_bv_util.get_bv_size(mul_exp) == ebits + 2); + + // The product has the form [-1][0].[2*sbits - 2]. 
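+ // i.e. two integer bits followed by 2*sbits - 2 fraction bits, so the product
+ // of two significands in [1, 2) fits without further normalization; c is
+ // padded below to the same 2*sbits width before the exponents are compared.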
+ + // Extend c + c_sig = m_bv_util.mk_zero_extend(1, m_bv_util.mk_concat(c_sig, m_bv_util.mk_numeral(0, sbits-1))); + c_exp_ext = m_bv_util.mk_bv_sub(c_exp_ext, c_lz_ext); + + SASSERT(m_bv_util.get_bv_size(mul_sig) == 2 * sbits); + SASSERT(m_bv_util.get_bv_size(c_sig) == 2 * sbits); + + expr_ref swap_cond(m); + swap_cond = m_bv_util.mk_sle(mul_exp, c_exp_ext); + SASSERT(is_well_sorted(m, swap_cond)); + dbg_decouple("fpa2bv_fma_swap_cond", swap_cond); + + expr_ref e_sgn(m), e_sig(m), e_exp(m), f_sgn(m), f_sig(m), f_exp(m); + m_simp.mk_ite(swap_cond, c_sgn, mul_sgn, e_sgn); + m_simp.mk_ite(swap_cond, c_sig, mul_sig, e_sig); // has 2 * sbits + m_simp.mk_ite(swap_cond, c_exp_ext, mul_exp, e_exp); // has ebits + 2 + m_simp.mk_ite(swap_cond, mul_sgn, c_sgn, f_sgn); + m_simp.mk_ite(swap_cond, mul_sig, c_sig, f_sig); // has 2 * sbits + m_simp.mk_ite(swap_cond, mul_exp, c_exp_ext, f_exp); // has ebits + 2 + + SASSERT(is_well_sorted(m, e_sgn)); + SASSERT(is_well_sorted(m, e_sig)); + SASSERT(is_well_sorted(m, e_exp)); + SASSERT(is_well_sorted(m, f_sgn)); + SASSERT(is_well_sorted(m, f_sig)); + SASSERT(is_well_sorted(m, f_exp)); + + SASSERT(m_bv_util.get_bv_size(e_sig) == 2 * sbits); + SASSERT(m_bv_util.get_bv_size(f_sig) == 2 * sbits); + SASSERT(m_bv_util.get_bv_size(e_exp) == ebits + 2); + SASSERT(m_bv_util.get_bv_size(f_exp) == ebits + 2); + + expr_ref res_sgn(m), res_sig(m), res_exp(m); + + expr_ref exp_delta(m); + exp_delta = m_bv_util.mk_bv_sub(e_exp, f_exp); + dbg_decouple("fpa2bv_fma_add_exp_delta", exp_delta); + + // cap the delta + expr_ref cap(m), cap_le_delta(m); + cap = m_bv_util.mk_numeral(2*sbits, ebits+2); + cap_le_delta = m_bv_util.mk_ule(cap, exp_delta); + m_simp.mk_ite(cap_le_delta, cap, exp_delta, exp_delta); + SASSERT(m_bv_util.get_bv_size(exp_delta) == ebits+2); + SASSERT(is_well_sorted(m, exp_delta)); + dbg_decouple("fpa2bv_fma_add_exp_delta_capped", exp_delta); + + // Alignment shift with sticky bit computation. + expr_ref shifted_big(m), shifted_f_sig(m), sticky_raw(m); + shifted_big = m_bv_util.mk_bv_lshr( + m_bv_util.mk_concat(f_sig, m_bv_util.mk_numeral(0, sbits)), + m_bv_util.mk_zero_extend((3*sbits)-(ebits+2), exp_delta)); + shifted_f_sig = m_bv_util.mk_extract(3*sbits-1, sbits, shifted_big); + sticky_raw = m_bv_util.mk_extract(sbits-1, 0, shifted_big); + SASSERT(m_bv_util.get_bv_size(shifted_f_sig) == 2 * sbits); + SASSERT(is_well_sorted(m, shifted_f_sig)); + + expr_ref sticky(m); + sticky = m_bv_util.mk_zero_extend(2*sbits-1, m.mk_app(m_bv_util.get_fid(), OP_BREDOR, sticky_raw.get())); + SASSERT(is_well_sorted(m, sticky)); + dbg_decouple("fpa2bv_fma_f_sig_sticky_raw", sticky_raw); + dbg_decouple("fpa2bv_fma_f_sig_sticky", sticky); + + expr * or_args[2] = { shifted_f_sig, sticky }; + shifted_f_sig = m_bv_util.mk_bv_or(2, or_args); + SASSERT(is_well_sorted(m, shifted_f_sig)); + + // Significant addition. + // Two extra bits for catching the overflow. 
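+ // Both addends are widened to 2*sbits + 2 bits; equal signs add the
+ // magnitudes, opposite signs subtract them, and the top bit of the possibly
+ // negative sum is used below to recover the result sign and |sum|.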
+ e_sig = m_bv_util.mk_zero_extend(2, e_sig); + shifted_f_sig = m_bv_util.mk_zero_extend(2, shifted_f_sig); + + expr_ref eq_sgn(m); + m_simp.mk_eq(e_sgn, f_sgn, eq_sgn); + + SASSERT(m_bv_util.get_bv_size(e_sig) == 2*sbits + 2); + SASSERT(m_bv_util.get_bv_size(shifted_f_sig) == 2*sbits + 2); + + dbg_decouple("fpa2bv_fma_add_e_sig", e_sig); + dbg_decouple("fpa2bv_fma_add_shifted_f_sig", shifted_f_sig); + + expr_ref sum(m); + m_simp.mk_ite(eq_sgn, + m_bv_util.mk_bv_add(e_sig, shifted_f_sig), + m_bv_util.mk_bv_sub(e_sig, shifted_f_sig), + sum); + + SASSERT(is_well_sorted(m, sum)); + + dbg_decouple("fpa2bv_fma_add_sum", sum); + + expr_ref sign_bv(m), n_sum(m); + sign_bv = m_bv_util.mk_extract(2*sbits+1, 2*sbits+1, sum); + n_sum = m_bv_util.mk_bv_neg(sum); + + expr_ref res_sig_eq(m), sig_abs(m), one_1(m); + one_1 = m_bv_util.mk_numeral(1, 1); + m_simp.mk_eq(sign_bv, one_1, res_sig_eq); + m_simp.mk_ite(res_sig_eq, n_sum, sum, sig_abs); + dbg_decouple("fpa2bv_fma_add_sign_bv", sign_bv); + dbg_decouple("fpa2bv_fma_add_n_sum", n_sum); + dbg_decouple("fpa2bv_fma_add_sig_abs", sig_abs); + + res_exp = e_exp; + + // Result could overflow into 4.xxx ... + + family_id bvfid = m_bv_util.get_fid(); + expr_ref res_sgn_c1(m), res_sgn_c2(m), res_sgn_c3(m); + expr_ref not_e_sgn(m), not_f_sgn(m), not_sign_bv(m); + not_e_sgn = m_bv_util.mk_bv_not(e_sgn); + not_f_sgn = m_bv_util.mk_bv_not(f_sgn); + not_sign_bv = m_bv_util.mk_bv_not(sign_bv); + res_sgn_c1 = m.mk_app(bvfid, OP_BAND, not_e_sgn, e_sgn, sign_bv); + res_sgn_c2 = m.mk_app(bvfid, OP_BAND, e_sgn, not_f_sgn, not_sign_bv); + res_sgn_c3 = m.mk_app(bvfid, OP_BAND, e_sgn, f_sgn); + expr * res_sgn_or_args[3] = { res_sgn_c1, res_sgn_c2, res_sgn_c3 }; + res_sgn = m_bv_util.mk_bv_or(3, res_sgn_or_args); + + sticky_raw = m_bv_util.mk_extract(sbits-5, 0, sig_abs); + sticky = m_bv_util.mk_zero_extend(sbits+3, m.mk_app(bvfid, OP_BREDOR, sticky_raw.get())); + dbg_decouple("fpa2bv_fma_add_sum_sticky", sticky); + + expr * res_or_args[2] = { m_bv_util.mk_extract(2*sbits-1, sbits-4, sig_abs), sticky }; + res_sig = m_bv_util.mk_bv_or(2, res_or_args); + SASSERT(m_bv_util.get_bv_size(res_sig) == sbits+4); + + expr_ref is_zero_sig(m), nil_sbits4(m); + nil_sbits4 = m_bv_util.mk_numeral(0, sbits+4); + m_simp.mk_eq(res_sig, nil_sbits4, is_zero_sig); + SASSERT(is_well_sorted(m, is_zero_sig)); + + dbg_decouple("fpa2bv_fma_is_zero_sig", is_zero_sig); + + expr_ref zero_case(m); + mk_ite(rm_is_to_neg, nzero, pzero, zero_case); + + + //AZ: Should the zero case be constrained in some manner? + expr_ref rounded(m);//, fixed(m); + round(f->get_range(), rm, res_sgn, res_sig, res_exp, rounded); + + fix_bits(prec,rounded,sbits, ebits); + + mk_ite(is_zero_sig, zero_case, rounded, v8); + + // And finally, we tie them together. 
+ mk_ite(c7, v7, v8, result); + mk_ite(c6, v6, result, result); + mk_ite(c5, v5, result, result); + mk_ite(c4, v4, result, result); + mk_ite(c3, v3, result, result); + mk_ite(c2, v2, result, result); + mk_ite(c1, v1, result, result); + + SASSERT(is_well_sorted(m, result)); + + TRACE("fpa2bv_fma_", tout << "FMA = " << mk_ismt2_pp(result, m) << std::endl; ); + } + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_sqrt(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_SMALL_FLOATS: + { + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + mk_small_op(f, prec, num, args, small_fd, small_args); + expr_ref small_op(m); + fpa2bv_converter::mk_sqrt(small_fd, num, small_args.c_ptr(), small_op); + mk_cast_small_to_big(f, small_op, result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_sqrt", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + case FPAA_PRECISE: + case FPAA_FIXBITS: // for now, encode fully. + fpa2bv_converter::mk_sqrt(f,num,args,result); + break; + default: UNREACHABLE(); break; + } +} + +void fpa2bv_converter_prec::mk_round_to_integral(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) { + switch (m_mode) { + case FPAA_SMALL_FLOATS: + { + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + mk_small_op(f, prec, num, args, small_fd, small_args); + expr_ref small_op(m); + fpa2bv_converter::mk_round_to_integral(small_fd, num, small_args.c_ptr(), small_op); + mk_cast_small_to_big(f, small_op, result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_r2i", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + case FPAA_PRECISE: + case FPAA_FIXBITS: // for now, encode fully. 
+ fpa2bv_converter::mk_round_to_integral(f,num,args,result);
+ break;
+ default: UNREACHABLE(); break;
+ }
+}
+
+void fpa2bv_converter_prec::establish_sort(unsigned num, expr * const * args, unsigned & ebits, unsigned & sbits)
+{
+ unsigned ne, ns;
+ //hardcoding for now
+ ebits = 3;
+ sbits = 3;
+ for(unsigned i=0;i<num;i++)
+ {
+ sort * s = to_app(args[i])->get_decl()->get_range();
+ if(fu().is_float(to_app(args[i])->get_decl()->get_range()))
+ {
+ ne = fu().get_ebits(s);
+ ns = fu().get_sbits(s);
+ ebits = (ne < ebits)?ebits:ne;
+ sbits = (ns < sbits)?sbits:ns;
+ }
+ }
+}
+
+void fpa2bv_converter_prec::mk_float_eq(func_decl * f, unsigned num, expr * const * args, expr_ref & result)
+{
+ switch (m_mode) {
+ case FPAA_PRECISE:
+ case FPAA_FIXBITS:
+ fpa2bv_converter::mk_float_eq(f,num,args,result);
+ break;
+ case FPAA_SMALL_FLOATS:
+ {
+ unsigned sbits, ebits;
+
+ func_decl_ref small_fd(m);
+ expr_ref_vector small_args(m);
+ establish_sort(num, args, ebits, sbits);
+ mk_small_op(f, ebits, sbits, num, args, small_fd, small_args);
+ fpa2bv_converter::mk_float_eq(small_fd, num, small_args.c_ptr(), result);
+ SASSERT(is_well_sorted(m, result));
+ TRACE("fpa2bv_small_float_add", tout << mk_ismt2_pp(result, m) << std::endl; );
+ break;
+ }
+ }
+}
+
+void fpa2bv_converter_prec::mk_float_lt(func_decl * f, unsigned num, expr * const * args, expr_ref & result)
+{
+ switch (m_mode){
+ case FPAA_PRECISE:
+ case FPAA_FIXBITS:
+ fpa2bv_converter::mk_float_lt(f,num,args,result);
+ break;
+ case FPAA_SMALL_FLOATS: {
+ unsigned sbits, ebits;
+
+ func_decl_ref small_fd(m);
+ expr_ref_vector small_args(m);
+ establish_sort(num, args, ebits, sbits);
+ mk_small_op(f, ebits, sbits, num, args, small_fd, small_args);
+ fpa2bv_converter::mk_float_lt(small_fd, num, small_args.c_ptr(), result);
+ SASSERT(is_well_sorted(m, result));
+ TRACE("fpa2bv_small_float_add", tout << mk_ismt2_pp(result, m) << std::endl; );
+ break;
+ }
+ }
+}
+void fpa2bv_converter_prec::mk_float_gt(func_decl * f, unsigned num, expr * const * args, expr_ref & result)
+{
+ switch (m_mode){
+ case FPAA_PRECISE:
+ case FPAA_FIXBITS:
+ fpa2bv_converter::mk_float_gt(f,num,args,result);
+ break;
+ case FPAA_SMALL_FLOATS: {
+ unsigned sbits, ebits;
+
+ func_decl_ref small_fd(m);
+ expr_ref_vector small_args(m);
+ establish_sort(num, args, ebits, sbits);
+ mk_small_op(f, ebits, sbits, num, args, small_fd, small_args);
+ fpa2bv_converter::mk_float_gt(small_fd, num, small_args.c_ptr(), result);
+ SASSERT(is_well_sorted(m, result));
+ TRACE("fpa2bv_small_float_add", tout << mk_ismt2_pp(result, m) << std::endl; );
+ break;
+ }
+ }
+}
+void fpa2bv_converter_prec::mk_float_le(func_decl * f, unsigned num, expr * const * args, expr_ref & result)
+{
+ switch (m_mode){
+ case FPAA_PRECISE:
+ case FPAA_FIXBITS:
+ fpa2bv_converter::mk_float_le(f,num,args,result);
+ break;
+ case FPAA_SMALL_FLOATS: {
+ unsigned sbits, ebits;
+
+ func_decl_ref small_fd(m);
+ expr_ref_vector small_args(m);
+ establish_sort(num, args, ebits, sbits);
+ mk_small_op(f, ebits, sbits, num, args, small_fd, small_args);
+ fpa2bv_converter::mk_float_le(small_fd, num, small_args.c_ptr(), result);
+ SASSERT(is_well_sorted(m, result));
+ TRACE("fpa2bv_small_float_add", tout << mk_ismt2_pp(result, m) << std::endl; );
+ break;
+ }
+ }
+}
+void fpa2bv_converter_prec::mk_float_ge(func_decl * f, unsigned num, expr * const * args, expr_ref & result)
+{
+ switch (m_mode){
+ case FPAA_PRECISE:
+ case FPAA_FIXBITS:
+ fpa2bv_converter::mk_float_ge(f,num,args,result);
+ break;
+ case FPAA_SMALL_FLOATS: {
+ unsigned sbits, ebits;
+
+ func_decl_ref small_fd(m);
+ expr_ref_vector small_args(m);
+ establish_sort(num,
args, ebits, sbits); + mk_small_op(f, ebits, sbits, num, args, small_fd, small_args); + fpa2bv_converter::mk_float_ge(small_fd, num, small_args.c_ptr(), result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_add", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + } +} +/* + +void fpa2bv_converter_prec::mk_is_zero(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result) +{ + switch (m_mode){ + case FPAA_PRECISE: + case FPAA_FIXBITS: + fpa2bv_converter::mk_float_eq(f,num,args,result); + break; + case FPAA_SMALL_FLOATS: { + unsigned sbits, ebits; + + func_decl_ref small_fd(m); + expr_ref_vector small_args(m); + establish_sort(num, args, ebits, sbits); + mk_small_op(f, ebits, sbits, num, args, small_fd, small_args); + fpa2bv_converter::mk_is_zero(small_fd, num, small_args.c_ptr(), result); + SASSERT(is_well_sorted(m, result)); + TRACE("fpa2bv_small_float_add", tout << mk_ismt2_pp(result, m) << std::endl; ); + break; + } + } +} +void fpa2bv_converter_prec::mk_is_nzero(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); +void fpa2bv_converter_prec::mk_is_pzero(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); +void fpa2bv_converter_prec::mk_is_sign_minus(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); +void fpa2bv_converter_prec::mk_is_nan(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); +void fpa2bv_converter_prec::mk_is_inf(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); +void fpa2bv_converter_prec::mk_is_normal(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); +void fpa2bv_converter_prec::mk_is_subnormal(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); +*/ + + + + +void fpa2bv_prec_model_converter::display(std::ostream & out) { + out << "(fpa2bv-model-converter"; + for (obj_map::iterator it = m_const2bv.begin(); + it != m_const2bv.end(); + it++) { + const symbol & n = it->m_key->get_name(); + out << "\n (" << n << " "; + unsigned indent = n.size() + 4; + out << mk_ismt2_pp(it->m_value, m, indent) << ")"; + } + for (obj_map::iterator it = m_rm_const2bv.begin(); + it != m_rm_const2bv.end(); + it++) { + const symbol & n = it->m_key->get_name(); + out << "\n (" << n << " "; + unsigned indent = n.size() + 4; + out << mk_ismt2_pp(it->m_value, m, indent) << ")"; + } + out << ")" << std::endl; +} + +model_converter * fpa2bv_prec_model_converter::translate(ast_translation & translator) { + fpa2bv_prec_model_converter * res = alloc(fpa2bv_prec_model_converter, translator.to()); + for (obj_map::iterator it = m_const2bv.begin(); + it != m_const2bv.end(); + it++) + { + func_decl * k = translator(it->m_key); + expr * v = translator(it->m_value); + res->m_const2bv.insert(k, v); + translator.to().inc_ref(k); + translator.to().inc_ref(v); + } + for (obj_map::iterator it = m_rm_const2bv.begin(); + it != m_rm_const2bv.end(); + it++) + { + func_decl * k = translator(it->m_key); + expr * v = translator(it->m_value); + res->m_rm_const2bv.insert(k, v); + translator.to().inc_ref(k); + translator.to().inc_ref(v); + } + return res; +} + + + +void fpa2bv_prec_model_converter::convert(model * bv_mdl, model * float_mdl) { + fpa_util fu(m); + bv_util bu(m); + mpf fp_val,sfp_val; + unsynch_mpz_manager & mpzm = fu.fm().mpz_manager(); + unsynch_mpq_manager & mpqm = fu.fm().mpq_manager(); + + TRACE("fpa2bv_mc", tout << "BV 
Model: " << std::endl; + for (unsigned i = 0 ; i < bv_mdl->get_num_constants(); i++) + tout << bv_mdl->get_constant(i)->get_name() << " --> " << + mk_ismt2_pp(bv_mdl->get_const_interp(bv_mdl->get_constant(i)), m) << std::endl; + ); + + obj_hashtable seen; + + for (obj_map::iterator it = m_const2bv.begin(); + it != m_const2bv.end(); + it++) + { + func_decl * var = it->m_key; + app * a = to_app(it->m_value); + SASSERT(fu.is_float(var->get_range())); + SASSERT(var->get_range()->get_num_parameters() == 2); + + unsigned ebits = fu.get_ebits(var->get_range()); + unsigned sbits = fu.get_sbits(var->get_range()); + unsigned sfpa_ebits = fu.get_ebits(a->get_decl()->get_range()); + unsigned sfpa_sbits = fu.get_sbits(a->get_decl()->get_range()); + + expr_ref sgn(m), sig(m), exp(m); + if (a->get_decl_kind() == OP_FPA_TO_FP) + { + bv_mdl->eval(a->get_arg(0), sgn, true); + bv_mdl->eval(a->get_arg(1), sig, true); + bv_mdl->eval(a->get_arg(2), exp, true); + } + else + { + sgn = bv_mdl->get_const_interp(to_app(a->get_arg(0))->get_decl()); + sig = bv_mdl->get_const_interp(to_app(a->get_arg(1))->get_decl()); + exp = bv_mdl->get_const_interp(to_app(a->get_arg(2))->get_decl()); + } + + seen.insert(to_app(a->get_arg(0))->get_decl()); + seen.insert(to_app(a->get_arg(1))->get_decl()); + seen.insert(to_app(a->get_arg(2))->get_decl()); + + if (!sgn && !sig && !exp) + continue; + + unsigned sgn_sz = bu.get_bv_size(m.get_sort(a->get_arg(0))); + unsigned sig_sz = bu.get_bv_size(m.get_sort(a->get_arg(1))) - 1; + unsigned exp_sz = bu.get_bv_size(m.get_sort(a->get_arg(2))); + + rational sgn_q(0), sig_q(0), exp_q(0); + + if (sgn) bu.is_numeral(sgn, sgn_q, sgn_sz); + if (sig) bu.is_numeral(sig, sig_q, sig_sz); + if (exp) bu.is_numeral(exp, exp_q, exp_sz); + + // un-bias exponent + rational exp_unbiased_q; + mpz sig_z; mpf_exp_t exp_z; + + exp_unbiased_q = exp_q - fu.fm().m_powers2.m1(sfpa_ebits-1); + + mpzm.set(sig_z, sig_q.to_mpq().numerator()); + exp_z = mpzm.get_int64(exp_unbiased_q.to_mpq().numerator()); + + TRACE("fpa2bv_mc", tout << var->get_name() << " == [" << sgn_q.to_string() << " " << + mpzm.to_string(sig_z) << " " << exp_z << "(" << exp_q.to_string() << ")]" << std::endl; ); + + + fu.fm().set(sfp_val, sfpa_ebits, sfpa_sbits, !mpqm.is_zero(sgn_q.to_mpq()), sig_z, exp_z); + fu.fm().set(fp_val, ebits, sbits,MPF_ROUND_TOWARD_ZERO,sfp_val); + //fu.fm().set(fp_val, ebits, sbits, !mpqm.is_zero(sgn_q.to_mpq()), sig_z, exp_z); + /*std::cout<< mk_ismt2_pp(a,m) <<":" << fu.mk_value(fp_val)<register_decl(var, fu.mk_value(fp_val)); + + mpzm.del(sig_z); + fu.fm().del(fp_val); + fu.fm().del(sfp_val); + } + + for (obj_map::iterator it = m_rm_const2bv.begin(); + it != m_rm_const2bv.end(); + it++) + { + func_decl * var = it->m_key; + app * a = to_app(it->m_value); + SASSERT(fu.is_rm(var->get_range())); + rational val(0); + unsigned sz = 0; + if (a && bu.is_numeral(a, val, sz)) { + TRACE("fpa2bv_mc", tout << var->get_name() << " == " << val.to_string() << std::endl; ); + SASSERT(val.is_uint64()); + switch (val.get_uint64()) + { + case BV_RM_TIES_TO_AWAY: float_mdl->register_decl(var, fu.mk_round_nearest_ties_to_away()); break; + case BV_RM_TIES_TO_EVEN: float_mdl->register_decl(var, fu.mk_round_nearest_ties_to_even()); break; + case BV_RM_TO_NEGATIVE: float_mdl->register_decl(var, fu.mk_round_toward_negative()); break; + case BV_RM_TO_POSITIVE: float_mdl->register_decl(var, fu.mk_round_toward_positive()); break; + case BV_RM_TO_ZERO: + default: float_mdl->register_decl(var, fu.mk_round_toward_zero()); + } + seen.insert(var); + } + 
} + + fu.fm().del(fp_val); + + // Keep all the non-float constants. + unsigned sz = bv_mdl->get_num_constants(); + for (unsigned i = 0; i < sz; i++) + { + func_decl * c = bv_mdl->get_constant(i); + if (!seen.contains(c)) + float_mdl->register_decl(c, bv_mdl->get_const_interp(c)); + } + + // And keep everything else + sz = bv_mdl->get_num_functions(); + for (unsigned i = 0; i < sz; i++) + { + func_decl * c = bv_mdl->get_function(i); + float_mdl->register_decl(c, bv_mdl->get_const_interp(c)); + } + + sz = bv_mdl->get_num_uninterpreted_sorts(); + for (unsigned i = 0; i < sz; i++) + { + sort * s = bv_mdl->get_uninterpreted_sort(i); + ptr_vector u = bv_mdl->get_universe(s); + float_mdl->register_usort(s, u.size(), u.c_ptr()); + } +} + +model_converter * mk_fpa2bv_prec_model_converter(ast_manager & m, + obj_map const & const2bv, + obj_map const & rm_const2bv) { + return alloc(fpa2bv_prec_model_converter, m, const2bv, rm_const2bv); +} + diff --git a/src/tactic/fpa/fpa2bv_converter_prec.h b/src/tactic/fpa/fpa2bv_converter_prec.h new file mode 100644 index 000000000..330cd271f --- /dev/null +++ b/src/tactic/fpa/fpa2bv_converter_prec.h @@ -0,0 +1,162 @@ +/*++ +Copyright (c) 2012 Microsoft Corporation + +Module Name: + + fpa2bv_converter_prec.h + +Abstract: + + Conversion routines for Floating Point -> Bit-Vector + +Author: + + Christoph (cwinter) 2012-02-09 + +Notes: + +--*/ +#ifndef _FPA2BV_CONVERTER_PREC +#define _FPA2BV_CONVERTER_PREC + +#include"ast.h" +#include"obj_hashtable.h" +#include"ref_util.h" +#include"fpa_decl_plugin.h" +#include"bv_decl_plugin.h" +#include"model_converter.h" +#include"basic_simplifier_plugin.h" +#include"fpa2bv_converter.h" + +#define MAX_PRECISION 100 +#define MIN_PRECISION 0 + +class fpa2bv_prec_model_converter; + +enum fpa_approximation_mode { + FPAA_PRECISE, // Always use precise encoding + FPAA_FIXBITS, // Approximate by fixing some bits of the encoding + FPAA_SMALL_FLOATS // Approximate by using smaller floats +}; + +#define FPAA_DEFAULT_MODE FPAA_SMALL_FLOATS + +class fpa2bv_converter_prec : public fpa2bv_converter { + fpa_approximation_mode m_mode; + + void fix_bits(unsigned prec, expr_ref rounded, unsigned sbits, unsigned ebits);//expr_ref& fixed, + + void mk_small_op(func_decl * f, unsigned prec, unsigned num, expr * const * args, func_decl_ref & small_op, expr_ref_vector & cast_args); + void mk_small_op(func_decl * f, unsigned new_ebits, unsigned new_sbits, unsigned num, expr * const * args, func_decl_ref & small_op, expr_ref_vector & cast_args); + void mk_cast_small_to_big(func_decl * big_fd, expr * arg, expr_ref & result); + void mk_cast_small_to_big(unsigned sbits, unsigned ebits, expr * arg, expr_ref & result); + void match_sorts(expr * a, expr * b, expr_ref & n_a, expr_ref & n_b); + void establish_sort(unsigned num, expr * const * args, unsigned & ebits, unsigned & sbits); +public: + fpa2bv_converter_prec(ast_manager & m, fpa_approximation_mode mode); + + void mk_const(func_decl * f, unsigned prec, expr_ref & result); + + void mk_eq(expr * a, expr * b, expr_ref & result); + void mk_ite(expr * c, expr * t, expr * f, expr_ref & result); + + void mk_add(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_sub(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_uminus(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_mul(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_div(func_decl * 
f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_remainder(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_abs(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_min(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_max(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_fusedma(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_sqrt(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_round_to_integral(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + + void mk_float_eq(func_decl * f, unsigned num, expr * const * args, expr_ref & result); + void mk_float_lt(func_decl * f, unsigned num, expr * const * args, expr_ref & result); + void mk_float_gt(func_decl * f, unsigned num, expr * const * args, expr_ref & result); + void mk_float_le(func_decl * f, unsigned num, expr * const * args, expr_ref & result); + void mk_float_ge(func_decl * f, unsigned num, expr * const * args, expr_ref & result); + + /* + void mk_is_zero(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_is_nzero(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_is_pzero(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_is_sign_minus(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_is_nan(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_is_inf(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_is_normal(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + void mk_is_subnormal(func_decl * f, unsigned prec, unsigned num, expr * const * args, expr_ref & result); + */ + + + + void reset() { + dec_ref_map_key_values(m, m_const2bv); + dec_ref_map_key_values(m, m_rm_const2bv); + } +}; + + +class fpa2bv_prec_model_converter : public model_converter { + ast_manager & m; + obj_map m_const2bv; + obj_map m_rm_const2bv; + +public: + fpa2bv_prec_model_converter(ast_manager & m, obj_map const & const2bv, + obj_map const & rm_const2bv) : + m(m) { + // Just create a copy? 
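+ // The maps are copied entry by entry; each key and value is ref-counted
+ // here so that the dec_ref_map_key_values calls in the destructor below
+ // balance out.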
+ for (obj_map::iterator it = const2bv.begin(); + it != const2bv.end(); + it++) + { + m_const2bv.insert(it->m_key, it->m_value); + m.inc_ref(it->m_key); + m.inc_ref(it->m_value); + } + for (obj_map::iterator it = rm_const2bv.begin(); + it != rm_const2bv.end(); + it++) + { + m_rm_const2bv.insert(it->m_key, it->m_value); + m.inc_ref(it->m_key); + m.inc_ref(it->m_value); + } + } + + virtual ~fpa2bv_prec_model_converter() { + dec_ref_map_key_values(m, m_const2bv); + dec_ref_map_key_values(m, m_rm_const2bv); + } + + virtual void operator()(model_ref & md, unsigned goal_idx) { + SASSERT(goal_idx == 0); + model * new_model = alloc(model, m); + obj_hashtable bits; + convert(md.get(), new_model); + md = new_model; + } + + virtual void operator()(model_ref & md) { + operator()(md, 0); + } + + void display(std::ostream & out); + + virtual model_converter * translate(ast_translation & translator); + +protected: + fpa2bv_prec_model_converter(ast_manager & m) : m(m) { } + + void convert(model * bv_mdl, model * float_mdl); +}; + + +model_converter * mk_fpa2bv_prec_model_converter(ast_manager & m, + obj_map const & const2bv, + obj_map const & rm_const2bv); + +#endif diff --git a/src/tactic/fpa/fpa2bv_rewriter_prec.h b/src/tactic/fpa/fpa2bv_rewriter_prec.h new file mode 100644 index 000000000..d82255d39 --- /dev/null +++ b/src/tactic/fpa/fpa2bv_rewriter_prec.h @@ -0,0 +1,229 @@ +/*++ +Copyright (c) 2012 Microsoft Corporation + +Module Name: + + fpa2bv_rewriter.h + +Abstract: + + Rewriter for converting FPA to BV + +Author: + + Christoph (cwinter) 2012-02-09 + +Notes: + +--*/ + +#ifndef _FPA2BV_REWRITER_H_ +#define _FPA2BV_REWRITER_H_ + +#include"cooperate.h" +#include"rewriter_def.h" +#include"bv_decl_plugin.h" +#include"fpa2bv_converter_prec.h" +#include"tactic_exception.h" +#include + +struct fpa2bv_rewriter_prec_cfg : public default_rewriter_cfg { + ast_manager & m_manager; + expr_ref_vector m_out; + fpa2bv_converter_prec & m_conv; + obj_map * cnst2prec_map; + + unsigned precision; + unsigned long long m_max_memory; + unsigned m_max_steps; + + ast_manager & m() const { return m_manager; } + + fpa2bv_rewriter_prec_cfg(ast_manager & m, fpa2bv_converter_prec & c, params_ref const & p): + m_manager(m), + m_out(m), + m_conv(c) { + updt_params(p); + // We need to make sure that the mananger has the BV plugin loaded. 
+ symbol s_bv("bv"); + if (!m_manager.has_plugin(s_bv)) + m_manager.register_plugin(s_bv, alloc(bv_decl_plugin)); + } + + ~fpa2bv_rewriter_prec_cfg() { + } + + void cleanup_buffers() { + m_out.finalize(); + } + + unsigned get_precision(func_decl * f){ + if(cnst2prec_map->contains(f)) + return cnst2prec_map->find(f); + else return precision; + } + void set_precision(unsigned p) { precision=p; } + void set_mappings(obj_map * o2p) + { + this->cnst2prec_map=o2p; + } + + void updt_params(params_ref const & p) { + m_max_memory = megabytes_to_bytes(p.get_uint("max_memory", UINT_MAX)); + m_max_steps = p.get_uint("max_steps", UINT_MAX); + } + + bool max_steps_exceeded(unsigned num_steps) const { + cooperate("fpa2bv"); + if (memory::get_allocation_size() > m_max_memory) + throw tactic_exception(TACTIC_MAX_MEMORY_MSG); + return num_steps > m_max_steps; + } + + br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) { + TRACE("fpa2bv_rw", tout << "APP: " << f->get_name() << std::endl; ); + + if (num == 0 && f->get_family_id() == null_family_id && m_conv.is_float(f->get_range())) { + m_conv.mk_const(f, get_precision(f), result); + return BR_DONE; + } + + if (num == 0 && f->get_family_id() == null_family_id && m_conv.is_rm(f->get_range())) { + m_conv.mk_rm_const(f, result); + return BR_DONE; + } + + if (m().is_eq(f)) { + SASSERT(num == 2); + if (m_conv.is_float(args[0])) { + m_conv.mk_eq(args[0], args[1], result); + return BR_DONE; + } + return BR_FAILED; + } + + if (m().is_ite(f)) { + SASSERT(num == 3); + if (m_conv.is_float(args[1])) { + m_conv.mk_ite(args[0], args[1], args[2], result); + return BR_DONE; + } + return BR_FAILED; + } + + expr_ref newAssertion(m_manager); + + if (m_conv.is_float_family(f)) + { + switch (f->get_decl_kind()) + { + case OP_FPA_RM_NEAREST_TIES_TO_AWAY: + case OP_FPA_RM_NEAREST_TIES_TO_EVEN: + case OP_FPA_RM_TOWARD_NEGATIVE: + case OP_FPA_RM_TOWARD_POSITIVE: + case OP_FPA_RM_TOWARD_ZERO: m_conv.mk_rounding_mode(f, result); return BR_DONE; + case OP_FPA_NUM: m_conv.mk_numeral(f, num, args, result); return BR_DONE; + case OP_FPA_PLUS_INF: m_conv.mk_pinf(f, result); return BR_DONE; + case OP_FPA_MINUS_INF: m_conv.mk_ninf(f, result); return BR_DONE; + case OP_FPA_NAN: m_conv.mk_nan(f, result); return BR_DONE; + //AZ: Added precision, if precision is MAX_PRECISION uses the regular implementation of the methods + case OP_FPA_ADD: + m_conv.mk_add(f,get_precision(f), num, args, result);return BR_DONE; + case OP_FPA_SUB: + m_conv.mk_sub(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_NEG: + m_conv.mk_uminus(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_MUL: + m_conv.mk_mul(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_DIV: + m_conv.mk_div(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_REM: + m_conv.mk_remainder(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_FMA: + m_conv.mk_fusedma(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_SQRT: + m_conv.mk_sqrt(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_ABS: m_conv.mk_abs(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_MIN: m_conv.mk_min(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_MAX: m_conv.mk_max(f, get_precision(f), num, args, result); return BR_DONE; + case OP_FPA_ROUND_TO_INTEGRAL: m_conv.mk_round_to_integral(f, get_precision(f), num, args, result); return BR_DONE; 
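+ // Predicates and conversions below take no precision argument. The
+ // comparisons (fp.eq, lt, gt, le, ge) cast their arguments to a common
+ // small sort via establish_sort/mk_small_op in FPAA_SMALL_FLOATS mode;
+ // the remaining tests appear to be dispatched to the base encoding as-is.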
+ case OP_FPA_EQ: m_conv.mk_float_eq(f, num, args, result); return BR_DONE; + case OP_FPA_LT: m_conv.mk_float_lt(f, num, args, result); return BR_DONE; + case OP_FPA_GT: m_conv.mk_float_gt(f, num, args, result); return BR_DONE; + case OP_FPA_LE: m_conv.mk_float_le(f, num, args, result); return BR_DONE; + case OP_FPA_GE: m_conv.mk_float_ge(f, num, args, result); return BR_DONE; + case OP_FPA_IS_ZERO: m_conv.mk_is_zero(f, num, args, result); return BR_DONE; +//case OP_FPA_IS_NZERO: m_conv.mk_is_nzero(f, num, args, result); return BR_DONE; +//case OP_FPA_IS_PZERO: m_conv.mk_is_pzero(f, num, args, result); return BR_DONE; + case OP_FPA_IS_NAN: m_conv.mk_is_nan(f, num, args, result); return BR_DONE; + case OP_FPA_IS_INF: m_conv.mk_is_inf(f, num, args, result); return BR_DONE; + case OP_FPA_IS_NORMAL: m_conv.mk_is_normal(f, num, args, result); return BR_DONE; + case OP_FPA_IS_SUBNORMAL: m_conv.mk_is_subnormal(f, num, args, result); return BR_DONE; + case OP_FPA_IS_NEGATIVE: m_conv.mk_is_negative(f, num, args, result); return BR_DONE; + case OP_FPA_TO_FP: m_conv.mk_to_fp(f, num, args, result); return BR_DONE; + case OP_FPA_TO_IEEE_BV: m_conv.mk_to_ieee_bv(f, num, args, result); return BR_DONE; + default: + TRACE("fpa2bv", tout << "unsupported operator: " << f->get_name() << "\n"; + for (unsigned i = 0; i < num; i++) tout << mk_ismt2_pp(args[i], m()) << std::endl;); + throw tactic_exception("NYI"); + } + } + return BR_FAILED; + } + + bool pre_visit(expr * t) + { + TRACE("pre_visit_prec", tout << mk_ismt2_pp(t, m()) << std::endl;); + + if(t->get_kind() == AST_APP && is_app_of(t, to_app(t)->get_family_id(), OP_EQ)) { + //Equation over non-boolean expressions, it should be of form constantI = subexprI + app * a = to_app(t); + + if (a->get_num_args() == 2) { + expr * a0 = a->get_arg(0); + expr * a1 = a->get_arg(1); + func_decl * cnst = 0; + + if (a0->get_kind() == AST_APP && cnst2prec_map->contains(to_app(a0)->get_decl())) + cnst = to_app(a0)->get_decl(); + else if (a1->get_kind() == AST_APP && cnst2prec_map->contains(to_app(a1)->get_decl())) + cnst = to_app(a1)->get_decl(); + + if (cnst == 0) { + // For all equalities that were in the original problem, we don't + // have any precision tracking, so those simply get 100% precision. + set_precision(100); + } + else + set_precision(cnst2prec_map->find(cnst)); + TRACE("pre_visit_prec", tout << "Precision = " << get_precision(NULL) << std::endl;); + } + } + return true; + } + + bool reduce_quantifier(quantifier * old_q, + expr * new_body, + expr * const * new_patterns, + expr * const * new_no_patterns, + expr_ref & result, + proof_ref & result_pr) { + return false; + } + + bool reduce_var(var * t, expr_ref & result, proof_ref & result_pr) { + return false; + } +}; + +template class rewriter_tpl; + +struct fpa2bv_rewriter_prec : public rewriter_tpl { + fpa2bv_rewriter_prec_cfg m_cfg; + fpa2bv_rewriter_prec(ast_manager & m, fpa2bv_converter_prec & c, params_ref const & p): + rewriter_tpl(m, m.proofs_enabled(), m_cfg), + m_cfg(m, c, p) { + } +}; + +#endif
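For orientation, the sketch below shows one way the precision-aware rewriter defined above could be driven over a goal. It is a minimal illustration, not part of the patch: the helper name rewrite_with_precisions, the choice of FPAA_FIXBITS, and the contents of prec_map are assumptions; the actual driver is the tactic in fpa2bv_approx_tactic.cpp.

// Illustrative sketch (not part of the patch): driving fpa2bv_rewriter_prec
// over a goal with a per-constant precision map.
#include"tactical.h"
#include"fpa2bv_converter_prec.h"
#include"fpa2bv_rewriter_prec.h"

static void rewrite_with_precisions(ast_manager & m, goal & g,
                                    obj_map<func_decl, unsigned> & prec_map,
                                    params_ref const & p) {
    fpa2bv_converter_prec conv(m, FPAA_FIXBITS);   // or FPAA_SMALL_FLOATS
    fpa2bv_rewriter_prec  rw(m, conv, p);
    rw.m_cfg.set_mappings(&prec_map);              // per-constant precisions (assumed filled elsewhere)
    rw.m_cfg.set_precision(MAX_PRECISION);         // default for terms without an entry

    expr_ref  r(m);
    proof_ref pr(m);
    for (unsigned i = 0; i < g.size(); i++) {
        rw(g.form(i), r, pr);                      // FPA -> BV, respecting per-constant precision
        g.update(i, r, pr, g.dep(i));
    }
}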