3
0
Fork 0
mirror of https://github.com/Z3Prover/z3 synced 2026-04-02 09:58:59 +00:00

Remove old tactic class, use simplifier-backed mk_bv_size_reduction_tactic

Agent-Logs-Url: https://github.com/Z3Prover/z3/sessions/1ec3e868-14c9-4204-8d88-62f6fc31b2f2

Co-authored-by: NikolajBjorner <3085284+NikolajBjorner@users.noreply.github.com>
This commit is contained in:
copilot-swe-agent[bot] 2026-03-30 03:43:03 +00:00
parent 468ef62136
commit c1112d430f
2 changed files with 2 additions and 418 deletions

View file

@@ -21,417 +21,4 @@ Author:
Notes:
--*/
#include "tactic/tactical.h"
#include "ast/bv_decl_plugin.h"
#include "ast/rewriter/expr_replacer.h"
#include "ast/converters/generic_model_converter.h"
#include "ast/ast_smt2_pp.h"
namespace {
// Legacy tactic: shrinks the bit-width of uninterpreted bit-vector constants
// using numeric bounds asserted in the goal (signed "v <= k" / "k <= v"
// literals), replacing each bounded constant with a sign-extension or
// zero/ones-prefixed concatenation of a fresh, smaller constant.
// NOTE(review): per the commit this file belongs to, this class is superseded
// by the simplifier-backed bv_size_reduction_simplifier.
class bv_size_reduction_tactic : public tactic {
typedef rational numeral;
typedef generic_model_converter bv_size_reduction_mc;
ast_manager & m;
bv_util m_util;
// Tightest bounds collected so far, keyed by the uninterpreted constant.
// Signed maps are filled by collect_bounds; the unsigned maps are only
// written from code that is currently compiled out (#if 0 below).
obj_map<app, numeral> m_signed_lowers;
obj_map<app, numeral> m_signed_uppers;
obj_map<app, numeral> m_unsigned_lowers;
obj_map<app, numeral> m_unsigned_uppers;
// m_mc re-defines eliminated constants in terms of the fresh, narrower ones;
// m_fmc hides the fresh constants from the reconstructed model.
ref<bv_size_reduction_mc> m_mc;
generic_model_converter_ref m_fmc;
scoped_ptr<expr_replacer> m_replacer;
bool m_produce_models;
public:
bv_size_reduction_tactic(ast_manager & m) :
m(m),
m_util(m),
m_replacer(mk_default_expr_replacer(m, false)) {
}
// Clone this tactic for a different ast_manager (parallel solving).
tactic * translate(ast_manager & m) override {
return alloc(bv_size_reduction_tactic, m);
}
char const* name() const override { return "bv_size"; }
void operator()(goal_ref const & g, goal_ref_buffer & result) override;
// Drop all per-goal state so the tactic can be reused on another goal.
void cleanup() override {
m_signed_lowers.reset();
m_signed_uppers.reset();
m_unsigned_lowers.reset();
m_unsigned_uppers.reset();
m_mc = nullptr;
m_fmc = nullptr;
m_replacer->reset();
}
// Record the signed lower bound k <= v, keeping the largest bound seen.
void update_signed_lower(app * v, numeral const & k) {
// k <= v
auto& value = m_signed_lowers.insert_if_not_there(v, k);
if (value < k) {
// improve bound
value = k;
}
}
// Record the signed upper bound v <= k, keeping the smallest bound seen.
void update_signed_upper(app * v, numeral const & k) {
// v <= k
auto& value = m_signed_uppers.insert_if_not_there(v, k);
if (k < value) {
// improve bound
value = k;
}
}
// Record the unsigned lower bound k <= v (k must be positive).
void update_unsigned_lower(app * v, numeral const & k) {
SASSERT(k > numeral(0));
// k <= v
auto& value = m_unsigned_lowers.insert_if_not_there(v, k);
if (value < k) {
// improve bound
value = k;
}
}
// Record the unsigned upper bound v <= k (k must be positive).
void update_unsigned_upper(app * v, numeral const & k) {
SASSERT(k > numeral(0));
// v <= k
auto& value = m_unsigned_uppers.insert_if_not_there(v, k);
if (k < value) {
// improve bound
value = k;
}
}
// Scan the goal for (possibly negated) bvsle literals relating an
// uninterpreted constant to a numeral, and record them as signed bounds.
// Handling of bvule and of "extract(v) == 0" bit-masks is compiled out.
void collect_bounds(goal const & g) {
unsigned sz = g.size();
numeral val;
unsigned bv_sz;
expr * f, * lhs, * rhs;
#if 0
// Disabled: recognize "extract[hi:lo](v) == 0" (upper bits zero) as an
// unsigned upper bound on v.
auto match_bitmask = [&](expr* lhs, expr* rhs) {
unsigned lo, hi;
expr* arg;
if (!m_util.is_numeral(rhs, val, bv_sz))
return false;
if (!val.is_zero())
return false;
if (!m_util.is_extract(lhs, lo, hi, arg))
return false;
if (lo == 0)
return false;
if (hi + 1 != m_util.get_bv_size(arg))
return false;
if (!is_uninterp_const(arg))
return false;
// NOTE(review): for bits [hi:lo] all zero the bound should arguably be
// 2^lo - 1, not 2^(lo-1) - 1 — verify before re-enabling this code.
val = rational::power_of_two(lo - 1) -1 ;
update_unsigned_upper(to_app(arg), val);
return true;
};
#endif
for (unsigned i = 0; i < sz; ++i) {
bool negated = false;
f = g.form(i);
// Strip a single top-level negation; a negated bound flips direction
// and shifts the constant by one.
if (m.is_not(f)) {
negated = true;
f = to_app(f)->get_arg(0);
}
if (m_util.is_bv_sle(f, lhs, rhs)) {
bv_sz = m_util.get_bv_size(lhs);
if (is_uninterp_const(lhs) && m_util.is_numeral(rhs, val, bv_sz)) {
TRACE(bv_size_reduction, tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// v <= k
val = m_util.norm(val, bv_sz, true);
if (negated) {
// not (v <= k) is k+1 <= v; if k+1 overflows the signed
// range the literal is infeasible and no bound is recorded.
val += numeral(1);
if (m_util.norm(val, bv_sz, true) != val) {
// bound is infeasible.
}
else {
update_signed_lower(to_app(lhs), val);
}
}
else update_signed_upper(to_app(lhs), val);
}
else if (is_uninterp_const(rhs) && m_util.is_numeral(lhs, val, bv_sz)) {
TRACE(bv_size_reduction, tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// k <= v
val = m_util.norm(val, bv_sz, true);
if (negated) {
// not (k <= v) is v <= k-1, with the same underflow guard.
val -= numeral(1);
if (m_util.norm(val, bv_sz, true) != val) {
// bound is infeasible.
}
else {
update_signed_upper(to_app(rhs), val);
}
}
else update_signed_lower(to_app(rhs), val);
}
}
#if 0
// Disabled: unsigned bounds from bvule and bit-mask equalities.
else if (m_util.is_bv_ule(f, lhs, rhs)) {
if (is_uninterp_const(lhs) && m_util.is_numeral(rhs, val, bv_sz)) {
TRACE(bv_size_reduction, tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// v <= k
if (negated)
update_unsigned_lower(to_app(lhs), val+numeral(1));
else
update_unsigned_upper(to_app(lhs), val);
}
else if (is_uninterp_const(rhs) && m_util.is_numeral(lhs, val, bv_sz)) {
TRACE(bv_size_reduction, tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// k <= v
if (negated)
update_unsigned_upper(to_app(rhs), val-numeral(1));
else
update_unsigned_lower(to_app(rhs), val);
}
}
else if (m.is_eq(f, lhs, rhs)) {
if (match_bitmask(lhs, rhs))
continue;
if (match_bitmask(rhs, lhs))
continue;
}
#endif
}
}
// Cancellation check.
// NOTE(review): defined but not called anywhere in the visible code.
void checkpoint() {
if (!m.inc())
throw tactic_exception(m.limit().get_cancel_msg());
}
// Main pass: collect bounds, build a substitution mapping each sufficiently
// bounded constant to a narrower fresh constant (suitably extended), apply
// it to every formula in the goal, and produce model converters.
void run(goal & g, model_converter_ref & mc) {
if (g.inconsistent())
return;
TRACE(before_bv_size_reduction, g.display(tout););
m_produce_models = g.models_enabled();
mc = nullptr;
m_mc = nullptr;
unsigned num_reduced = 0;
{
tactic_report report("reduce-bv-size", g);
collect_bounds(g);
// create substitution
expr_substitution subst(m);
// Register k |-> new_def: extend the substitution, record the
// definition for model reconstruction, and hide the fresh constant.
auto insert_def = [&](app* k, expr* new_def, app* new_const) {
if (!new_def)
return;
subst.insert(k, new_def);
if (m_produce_models) {
if (!m_mc)
m_mc = alloc(bv_size_reduction_mc, m, "bv_size_reduction");
m_mc->add(k, new_def);
if (!m_fmc && new_const)
m_fmc = alloc(generic_model_converter, m, "bv_size_reduction");
if (new_const)
m_fmc->hide(new_const);
}
num_reduced++;
};
// Only constants with BOTH a signed lower and upper bound can be
// reduced in this pass.
if (!(m_signed_lowers.empty() || m_signed_uppers.empty())) {
TRACE(bv_size_reduction,
tout << "m_signed_lowers: " << std::endl;
for (auto const& [k, v] : m_signed_lowers)
tout << mk_ismt2_pp(k, m) << " >= " << v.to_string() << std::endl;
tout << "m_signed_uppers: " << std::endl;
for (auto const& [k, v] : m_signed_uppers)
tout << mk_ismt2_pp(k, m) << " <= " << v.to_string() << std::endl;
);
for (auto& [k, val] : m_signed_lowers) {
unsigned bv_sz = m_util.get_bv_size(k);
numeral l = m_util.norm(val, bv_sz, true);
obj_map<app, numeral>::obj_map_entry * entry = m_signed_uppers.find_core(k);
if (entry != nullptr) {
numeral u = m_util.norm(entry->get_data().m_value, bv_sz, true);
TRACE(bv_size_reduction, tout << l << " <= " << k->get_decl()->get_name() << " <= " << u << "\n";);
expr * new_def = nullptr;
app * new_const = nullptr;
if (l > u) {
// Contradictory bounds: the goal is unsatisfiable.
g.assert_expr(m.mk_false());
return;
}
else if (l == u) {
// Bounds pin the value: substitute the constant itself.
new_def = m_util.mk_numeral(l, k->get_sort());
}
else {
// l < u
if (l.is_neg()) {
unsigned l_nb = (-l).get_num_bits();
unsigned v_nb = m_util.get_bv_size(k);
if (u.is_neg())
{
// l <= v <= u <= 0
// Entirely negative range: the dropped high bits are
// all ones, so prefix the fresh constant with -1.
unsigned i_nb = l_nb;
TRACE(bv_size_reduction, tout << " l <= " << k->get_decl()->get_name() << " <= u <= 0 " << " --> " << i_nb << " bits\n";);
if (i_nb < v_nb) {
new_const = m.mk_fresh_const(nullptr, m_util.mk_sort(i_nb));
new_def = m_util.mk_concat(m_util.mk_numeral(numeral(-1), v_nb - i_nb), new_const);
}
}
else {
// l <= v <= 0 <= u
// Range straddles zero: sign-extend a fresh constant
// wide enough for both magnitudes plus the sign bit.
unsigned u_nb = u.get_num_bits();
unsigned i_nb = ((l_nb > u_nb) ? l_nb : u_nb) + 1;
TRACE(bv_size_reduction, tout << " l <= " << k->get_decl()->get_name() << " <= 0 <= u " << " --> " << i_nb << " bits\n";);
if (i_nb < v_nb) {
new_const = m.mk_fresh_const(nullptr, m_util.mk_sort(i_nb));
new_def = m_util.mk_sign_extend(v_nb - i_nb, new_const);
}
}
}
else {
// 0 <= l <= v <= u
// Non-negative range: zero-prefix a fresh constant sized
// by the upper bound.
unsigned u_nb = u.get_num_bits();
unsigned v_nb = m_util.get_bv_size(k);
TRACE(bv_size_reduction, tout << l << " <= " << k->get_decl()->get_name() << " <= " << u << " --> " << u_nb << " bits\n";);
if (u_nb < v_nb) {
new_const = m.mk_fresh_const(nullptr, m_util.mk_sort(u_nb));
new_def = m_util.mk_concat(m_util.mk_numeral(numeral(0), v_nb - u_nb), new_const);
}
}
}
insert_def(k, new_def, new_const);
}
}
}
// Unsigned upper bounds of the form v <= 2^shift - 1 allow a zero-prefix
// reduction to `shift` bits.
// NOTE(review): with the unsigned-bound collection compiled out above,
// m_unsigned_uppers is empty in this configuration, so this loop is
// currently dead. Also note norm(..., true) applies SIGNED normalization
// to an unsigned bound here — verify if this code is revived.
for (auto const& [k, v] : m_unsigned_uppers) {
unsigned shift;
unsigned bv_sz = m_util.get_bv_size(k);
numeral u = m_util.norm(v, bv_sz, true) + 1;
if (u.is_power_of_two(shift) && shift < bv_sz) {
app* new_const = m.mk_fresh_const(nullptr, m_util.mk_sort(shift));
expr* new_def = m_util.mk_concat(m_util.mk_numeral(numeral(0), bv_sz - shift), new_const);
insert_def(k, new_def, new_const);
}
}
#if 0
// Disabled: combine unsigned bounds with any better signed bounds.
if (!(m_unsigned_lowers.empty() && m_unsigned_uppers.empty())) {
TRACE(bv_size_reduction,
tout << "m_unsigned_lowers: " << std::endl;
for (auto& [key, value] : m_unsigned_lowers)
tout << mk_ismt2_pp(key, m) << " >= " << value.to_string() << std::endl;
tout << "m_unsigned_uppers: " << std::endl;
for (auto& [key, value] : m_unsigned_uppers)
tout << mk_ismt2_pp(key, m) << " <= " << value.to_string() << std::endl;
);
obj_map<app, numeral>::iterator it = m_unsigned_uppers.begin();
obj_map<app, numeral>::iterator end = m_unsigned_uppers.end();
for (; it != end; ++it) {
app * v = it->m_key;
unsigned bv_sz = m_util.get_bv_size(v);
numeral u = m_util.norm(it->m_value, bv_sz, false);
obj_map<app, numeral>::obj_map_entry * entry = m_signed_lowers.find_core(v);
numeral l = (entry != 0) ? m_util.norm(entry->get_data().m_value, bv_sz, false) : numeral(0);
obj_map<app, numeral>::obj_map_entry * lse = m_signed_lowers.find_core(v);
obj_map<app, numeral>::obj_map_entry * use = m_signed_uppers.find_core(v);
if ((lse != 0 && lse->get_data().m_value > l) &&
(use != 0 && use->get_data().m_value < u))
continue; // Skip, we had better signed bounds.
if (lse != 0 && lse->get_data().m_value > l) l = lse->get_data().m_value;
if (use != 0 && use->get_data().m_value < u) u = use->get_data().m_value;
TRACE(bv_size_reduction, tout << l << " <= " << v->get_decl()->get_name() << " <= " << u << "\n";);
expr * new_def = 0;
app * new_const = 0;
if (l > u) {
g.assert_expr(m.mk_false());
return;
}
else if (l == u) {
new_def = m_util.mk_numeral(l, v->get_sort());
}
else {
// 0 <= l <= v <= u
unsigned u_nb = u.get_num_bits();
unsigned v_nb = m_util.get_bv_size(v);
if (u_nb < v_nb) {
// NOTE(review): latent bug in this disabled code: new_const
// is used (still null) before it is assigned on the next
// line — fix the ordering before re-enabling.
new_def = m_util.mk_concat(m_util.mk_numeral(numeral(0), v_nb - u_nb), new_const);
new_const = m.mk_fresh_const(0, m_util.mk_sort(u_nb));
}
}
if (new_def) {
subst.insert(v, new_def);
if (m_produce_models) {
if (!m_mc)
m_mc = alloc(bv_size_reduction_mc, m);
m_mc->insert(v->get_decl(), new_def);
if (!m_fmc && new_const)
m_fmc = alloc(generic_model_converter, m, "bv_size_reduction");
if (new_const)
m_fmc->hide(new_const);
}
num_reduced++;
TRACE(bv_size_reduction, tout << "New definition = " << mk_ismt2_pp(new_def, m) << "\n";);
}
}
}
#endif
if (subst.empty())
return;
// Apply the substitution to every formula in the goal.
m_replacer->set_substitution(&subst);
unsigned sz = g.size();
expr * f;
expr_ref new_f(m);
for (unsigned i = 0; i < sz; ++i) {
if (g.inconsistent())
return;
f = g.form(i);
(*m_replacer)(f, new_f);
g.update(i, new_f);
}
// Chain the hiding converter in front of the definition converter.
mc = m_mc.get();
if (m_fmc) {
mc = concat(m_fmc.get(), mc.get());
}
m_mc = nullptr;
m_fmc = nullptr;
}
report_tactic_progress(":bv-reduced", num_reduced);
TRACE(after_bv_size_reduction, g.display(tout); if (m_mc) m_mc->display(tout););
}
};
// Tactic entry point: the goal is transformed in place and returned as the
// single subgoal. Proof and unsat-core generation are not supported.
void bv_size_reduction_tactic::operator()(goal_ref const & g,
goal_ref_buffer & result) {
fail_if_proof_generation("bv-size-reduction", g);
fail_if_unsat_core_generation("bv-size-reduction", g);
TRACE(goal, g->display(tout););
result.reset();
model_converter_ref mc;
run(*(g.get()), mc);
g->inc_depth();
// Attach the model converter produced by run() (may be null).
g->add(mc.get());
result.push_back(g.get());
}
}
// Factory for the bit-vector size-reduction tactic, wrapped in clean() for
// exception-safe goal handling. The tactic takes no options, so the params
// argument `p` is intentionally unused.
tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p) {
    tactic * t = alloc(bv_size_reduction_tactic, m);
    return clean(t);
}
#include "tactic/bv/bv_size_reduction_tactic.h"

View file

@@ -58,9 +58,7 @@ where `k` is a fresh bit-vector constant of size 3.
class ast_manager;
class tactic;
tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p = params_ref());
inline tactic * mk_bv_size_reduction2_tactic(ast_manager & m, params_ref const & p = params_ref()) {
inline tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p = params_ref()) {
return alloc(dependent_expr_state_tactic, m, p,
[](auto& m, auto& p, auto& s) -> dependent_expr_simplifier* {
return alloc(bv_size_reduction_simplifier, m, p, s);
@@ -68,6 +66,5 @@ inline tactic * mk_bv_size_reduction2_tactic(ast_manager & m, params_ref const &
}
/*
ADD_TACTIC("reduce-bv-size", "try to reduce bit-vector sizes using inequalities.", "mk_bv_size_reduction_tactic(m, p)")
ADD_TACTIC("reduce-bv-size2", "try to reduce bit-vector sizes using inequalities.", "mk_bv_size_reduction2_tactic(m, p)")
ADD_SIMPLIFIER("reduce-bv-size", "try to reduce bit-vector sizes using inequalities.", "alloc(bv_size_reduction_simplifier, m, p, s)")
*/