
Use a consistent naming convention for tactic subfolders

Signed-off-by: Leonardo de Moura <leonardo@microsoft.com>
Leonardo de Moura 2012-10-24 15:11:44 -07:00
parent 67f5ed46c1
commit 0990a2e045
140 changed files with 14 additions and 12 deletions

@@ -0,0 +1,165 @@ bit_blaster_tactic.cpp
/*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
bit_blaster_tactic.cpp
Abstract:
Apply bit-blasting to a given goal
Author:
Leonardo (leonardo) 2011-10-25
Notes:
--*/
#include"tactical.h"
#include"bit_blaster_model_converter.h"
#include"bit_blaster_rewriter.h"
#include"ast_smt2_pp.h"
#include"model_pp.h"
class bit_blaster_tactic : public tactic {
struct imp {
bit_blaster_rewriter m_rewriter;
unsigned m_num_steps;
bool m_blast_quant;
imp(ast_manager & m, params_ref const & p):
m_rewriter(m, p) {
updt_params(p);
}
void updt_params_core(params_ref const & p) {
m_blast_quant = p.get_bool(":blast-quant", false);
}
void updt_params(params_ref const & p) {
m_rewriter.updt_params(p);
updt_params_core(p);
}
ast_manager & m() const { return m_rewriter.m(); }
void set_cancel(bool f) {
m_rewriter.set_cancel(f);
}
void operator()(goal_ref const & g,
goal_ref_buffer & result,
model_converter_ref & mc,
proof_converter_ref & pc,
expr_dependency_ref & core) {
mc = 0; pc = 0; core = 0;
bool proofs_enabled = g->proofs_enabled();
if (proofs_enabled && m_blast_quant)
throw tactic_exception("quantified variable blasting does not support proof generation");
tactic_report report("bit-blaster", *g);
TRACE("before_bit_blaster", g->display(tout););
m_num_steps = 0;
expr_ref new_curr(m());
proof_ref new_pr(m());
unsigned size = g->size();
for (unsigned idx = 0; idx < size; idx++) {
if (g->inconsistent())
break;
expr * curr = g->form(idx);
m_rewriter(curr, new_curr, new_pr);
m_num_steps += m_rewriter.get_num_steps();
if (proofs_enabled) {
proof * pr = g->pr(idx);
new_pr = m().mk_modus_ponens(pr, new_pr);
}
g->update(idx, new_curr, new_pr, g->dep(idx));
}
if (g->models_enabled())
mc = mk_bit_blaster_model_converter(m(), m_rewriter.const2bits());
else
mc = 0;
g->inc_depth();
result.push_back(g.get());
TRACE("after_bit_blaster", g->display(tout); if (mc) mc->display(tout); tout << "\n";);
m_rewriter.cleanup();
}
unsigned get_num_steps() const { return m_num_steps; }
};
imp * m_imp;
params_ref m_params;
public:
bit_blaster_tactic(ast_manager & m, params_ref const & p):
m_params(p){
m_imp = alloc(imp, m, p);
}
virtual tactic * translate(ast_manager & m) {
return alloc(bit_blaster_tactic, m, m_params);
}
virtual ~bit_blaster_tactic() {
dealloc(m_imp);
}
virtual void updt_params(params_ref const & p) {
m_params = p;
m_imp->updt_params(p);
}
virtual void collect_param_descrs(param_descrs & r) {
insert_max_memory(r);
insert_max_steps(r);
r.insert(":blast-mul", CPK_BOOL, "(default: true) bit-blast multipliers (and dividers, remainders).");
r.insert(":blast-add", CPK_BOOL, "(default: true) bit-blast adders.");
r.insert(":blast-quant", CPK_BOOL, "(default: false) bit-blast quantified variables.");
r.insert(":blast-full", CPK_BOOL, "(default: false) bit-blast any term with bit-vector sort, this option will make E-matching ineffective in any pattern containing bit-vector terms.");
}
virtual void operator()(goal_ref const & g,
goal_ref_buffer & result,
model_converter_ref & mc,
proof_converter_ref & pc,
expr_dependency_ref & core) {
(*m_imp)(g, result, mc, pc, core);
}
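// cleanup() swaps m_imp out inside the tactic_cancel critical section so
// that a concurrent set_cancel() never sees a dangling pointer; the old
// imp is deallocated only after m_imp has been cleared, and a fresh imp
// is then installed with the saved manager and parameters.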
virtual void cleanup() {
ast_manager & m = m_imp->m();
imp * d = m_imp;
#pragma omp critical (tactic_cancel)
{
m_imp = 0;
}
dealloc(d);
d = alloc(imp, m, m_params);
#pragma omp critical (tactic_cancel)
{
m_imp = d;
}
}
unsigned get_num_steps() const {
return m_imp->get_num_steps();
}
protected:
virtual void set_cancel(bool f) {
if (m_imp)
m_imp->set_cancel(f);
}
};
tactic * mk_bit_blaster_tactic(ast_manager & m, params_ref const & p) {
return clean(alloc(bit_blaster_tactic, m, p));
}
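A minimal usage sketch for the tactic above (illustrative, not part of the diff). It assumes an ast_manager m and a formula f built elsewhere; the call shape follows the operator() signature defined in this file, and goal's single-manager constructor is assumed from the tactic framework.

tactic * t = mk_bit_blaster_tactic(m);
goal_ref g = alloc(goal, m);           // models enabled by default (assumption)
g->assert_expr(f);                     // f: the formula to bit-blast
goal_ref_buffer result;
model_converter_ref mc;
proof_converter_ref pc;
expr_dependency_ref core(m);
(*t)(g, result, mc, pc, core);         // result holds the bit-blasted subgoal
t->cleanup();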

@@ -0,0 +1,28 @@ bit_blaster_tactic.h
/*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
bit_blaster_tactic.h
Abstract:
Apply bit-blasting to a given goal.
Author:
Leonardo (leonardo) 2011-10-25
Notes:
--*/
#ifndef _BIT_BLASTER_TACTIC_H_
#define _BIT_BLASTER_TACTIC_H_
#include"params.h"
class ast_manager;
class tactic;
tactic * mk_bit_blaster_tactic(ast_manager & m, params_ref const & p = params_ref());
#endif

@@ -0,0 +1,366 @@ bv_size_reduction_tactic.cpp
/*++
Copyright (c) 2012 Microsoft Corporation
Module Name:
bv_size_reduction_tactic.cpp
Abstract:
Reduce the number of bits used to encode constants by using signed bounds.
Example: suppose x is a bit-vector of size 8, and we have
signed bounds for x such that:
-2 <= x <= 2
Then, x can be replaced by ((sign-extend 5) k)
where k is a fresh bit-vector constant of size 3.
Author:
Leonardo (leonardo) 2012-02-19
Notes:
--*/
#include"tactical.h"
#include"bv_decl_plugin.h"
#include"expr_replacer.h"
#include"extension_model_converter.h"
#include"ast_smt2_pp.h"
class bv_size_reduction_tactic : public tactic {
struct imp;
imp * m_imp;
public:
bv_size_reduction_tactic(ast_manager & m);
virtual tactic * translate(ast_manager & m) {
return alloc(bv_size_reduction_tactic, m);
}
virtual ~bv_size_reduction_tactic();
virtual void operator()(goal_ref const & g, goal_ref_buffer & result, model_converter_ref & mc, proof_converter_ref & pc, expr_dependency_ref & core);
virtual void cleanup();
virtual void set_cancel(bool f);
};
tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p) {
return clean(alloc(bv_size_reduction_tactic, m));
}
struct bv_size_reduction_tactic::imp {
typedef rational numeral;
typedef extension_model_converter bv_size_reduction_mc;
ast_manager & m;
bv_util m_util;
obj_map<app, numeral> m_signed_lowers;
obj_map<app, numeral> m_signed_uppers;
obj_map<app, numeral> m_unsigned_lowers;
obj_map<app, numeral> m_unsigned_uppers;
ref<bv_size_reduction_mc> m_mc;
scoped_ptr<expr_replacer> m_replacer;
bool m_produce_models;
volatile bool m_cancel;
imp(ast_manager & _m):
m(_m),
m_util(m),
m_replacer(mk_default_expr_replacer(m)),
m_cancel(false) {
}
void update_signed_lower(app * v, numeral const & k) {
// k <= v
obj_map<app, numeral>::obj_map_entry * entry = m_signed_lowers.insert_if_not_there2(v, k);
if (entry->get_data().m_value < k) {
// improve bound
entry->get_data().m_value = k;
}
}
void update_signed_upper(app * v, numeral const & k) {
// v <= k
obj_map<app, numeral>::obj_map_entry * entry = m_signed_uppers.insert_if_not_there2(v, k);
if (k < entry->get_data().m_value) {
// improve bound
entry->get_data().m_value = k;
}
}
void update_unsigned_lower(app * v, numeral const & k) {
SASSERT(k > numeral(0));
// k <= v
obj_map<app, numeral>::obj_map_entry * entry = m_unsigned_lowers.insert_if_not_there2(v, k);
if (entry->get_data().m_value < k) {
// improve bound
entry->get_data().m_value = k;
}
}
void update_unsigned_upper(app * v, numeral const & k) {
SASSERT(k > numeral(0));
// v <= k
obj_map<app, numeral>::obj_map_entry * entry = m_unsigned_uppers.insert_if_not_there2(v, k);
if (k < entry->get_data().m_value) {
// improve bound
entry->get_data().m_value = k;
}
}
void collect_bounds(goal const & g) {
unsigned sz = g.size();
numeral val;
unsigned bv_sz;
expr * f, * lhs, * rhs;
for (unsigned i = 0; i < sz; i++) {
bool negated = false;
f = g.form(i);
if (m.is_not(f)) {
negated = true;
f = to_app(f)->get_arg(0);
}
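// For the negated cases below: not (a <=s b) is b + 1 <=s a, so each
// negation tightens the opposite signed bound by one.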
if (m_util.is_bv_sle(f, lhs, rhs)) {
if (is_uninterp_const(lhs) && m_util.is_numeral(rhs, val, bv_sz)) {
TRACE("bv_size_reduction", tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// v <= k
if (negated) update_signed_lower(to_app(lhs), val+numeral(1));
else update_signed_upper(to_app(lhs), val);
}
else if (is_uninterp_const(rhs) && m_util.is_numeral(lhs, val, bv_sz)) {
TRACE("bv_size_reduction", tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// k <= v
if (negated) update_signed_upper(to_app(rhs), val-numeral(1));
else update_signed_lower(to_app(rhs), val);
}
}
#if 0
else if (m_util.is_bv_ule(f, lhs, rhs)) {
if (is_uninterp_const(lhs) && m_util.is_numeral(rhs, val, bv_sz)) {
TRACE("bv_size_reduction", tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// v <= k
if (negated) update_unsigned_lower(to_app(lhs), val+numeral(1));
else update_unsigned_upper(to_app(lhs), val);
}
else if (is_uninterp_const(rhs) && m_util.is_numeral(lhs, val, bv_sz)) {
TRACE("bv_size_reduction", tout << (negated?"not ":"") << mk_ismt2_pp(f, m) << std::endl; );
// k <= v
if (negated) update_unsigned_upper(to_app(rhs), val-numeral(1));
else update_unsigned_lower(to_app(rhs), val);
}
}
#endif
}
}
void checkpoint() {
if (m_cancel)
throw tactic_exception(TACTIC_CANCELED_MSG);
}
void operator()(goal & g, model_converter_ref & mc) {
if (g.inconsistent())
return;
TRACE("before_bv_size_reduction", g.display(tout););
m_produce_models = g.models_enabled();
mc = 0;
m_mc = 0;
unsigned num_reduced = 0;
{
tactic_report report("bv-size-reduction", g);
collect_bounds(g);
// create substitution
expr_substitution subst(m);
if (!(m_signed_lowers.empty() || m_signed_uppers.empty())) {
TRACE("bv_size_reduction",
tout << "m_signed_lowers: " << std::endl;
for (obj_map<app, numeral>::iterator it = m_signed_lowers.begin(); it != m_signed_lowers.end(); it++)
tout << mk_ismt2_pp(it->m_key, m) << " >= " << it->m_value.to_string() << std::endl;
tout << "m_signed_uppers: " << std::endl;
for (obj_map<app, numeral>::iterator it = m_signed_uppers.begin(); it != m_signed_uppers.end(); it++)
tout << mk_ismt2_pp(it->m_key, m) << " <= " << it->m_value.to_string() << std::endl;
);
obj_map<app, numeral>::iterator it = m_signed_lowers.begin();
obj_map<app, numeral>::iterator end = m_signed_lowers.end();
for (; it != end; ++it) {
app * v = it->m_key;
unsigned bv_sz = m_util.get_bv_size(v);
numeral l = m_util.norm(it->m_value, bv_sz, true);
obj_map<app, numeral>::obj_map_entry * entry = m_signed_uppers.find_core(v);
if (entry != 0) {
numeral u = m_util.norm(entry->get_data().m_value, bv_sz, true);
TRACE("bv_size_reduction", tout << l << " <= " << v->get_decl()->get_name() << " <= " << u << "\n";);
expr * new_def = 0;
if (l > u) {
g.assert_expr(m.mk_false());
return;
}
else if (l == u) {
new_def = m_util.mk_numeral(l, m.get_sort(v));
}
else {
// l < u
if (l.is_neg()) {
unsigned i_nb = (u - l).get_num_bits();
unsigned v_nb = m_util.get_bv_size(v);
if (i_nb < v_nb)
new_def = m_util.mk_sign_extend(v_nb - i_nb, m.mk_fresh_const(0, m_util.mk_sort(i_nb)));
}
else {
// 0 <= l <= v <= u
unsigned u_nb = u.get_num_bits();
unsigned v_nb = m_util.get_bv_size(v);
if (u_nb < v_nb)
new_def = m_util.mk_concat(m_util.mk_numeral(numeral(0), v_nb - u_nb), m.mk_fresh_const(0, m_util.mk_sort(u_nb)));
}
}
if (new_def) {
subst.insert(v, new_def);
if (m_produce_models) {
if (!m_mc)
m_mc = alloc(bv_size_reduction_mc, m);
m_mc->insert(v->get_decl(), new_def);
}
num_reduced++;
}
}
}
}
#if 0
if (!(m_unsigned_lowers.empty() && m_unsigned_uppers.empty())) {
TRACE("bv_size_reduction",
tout << "m_unsigned_lowers: " << std::endl;
for (obj_map<app, numeral>::iterator it = m_unsigned_lowers.begin(); it != m_unsigned_lowers.end(); it++)
tout << mk_ismt2_pp(it->m_key, m) << " >= " << it->m_value.to_string() << std::endl;
tout << "m_unsigned_uppers: " << std::endl;
for (obj_map<app, numeral>::iterator it = m_unsigned_uppers.begin(); it != m_unsigned_uppers.end(); it++)
tout << mk_ismt2_pp(it->m_key, m) << " <= " << it->m_value.to_string() << std::endl;
);
obj_map<app, numeral>::iterator it = m_unsigned_uppers.begin();
obj_map<app, numeral>::iterator end = m_unsigned_uppers.end();
for (; it != end; ++it) {
app * v = it->m_key;
unsigned bv_sz = m_util.get_bv_size(v);
numeral u = m_util.norm(it->m_value, bv_sz, false);
obj_map<app, numeral>::obj_map_entry * entry = m_signed_lowers.find_core(v);
numeral l = (entry != 0) ? m_util.norm(entry->get_data().m_value, bv_sz, false) : numeral(0);
obj_map<app, numeral>::obj_map_entry * lse = m_signed_lowers.find_core(v);
obj_map<app, numeral>::obj_map_entry * use = m_signed_uppers.find_core(v);
if ((lse != 0 && lse->get_data().m_value > l) &&
(use != 0 && use->get_data().m_value < u))
continue; // Skip, we had better signed bounds.
if (lse != 0 && lse->get_data().m_value > l) l = lse->get_data().m_value;
if (use != 0 && use->get_data().m_value < u) u = use->get_data().m_value;
TRACE("bv_size_reduction", tout << l << " <= " << v->get_decl()->get_name() << " <= " << u << "\n";);
expr * new_def = 0;
if (l > u) {
g.assert_expr(m.mk_false());
return;
}
else if (l == u) {
new_def = m_util.mk_numeral(l, m.get_sort(v));
}
else {
// 0 <= l <= v <= u
unsigned u_nb = u.get_num_bits();
unsigned v_nb = m_util.get_bv_size(v);
if (u_nb < v_nb)
new_def = m_util.mk_concat(m_util.mk_numeral(numeral(0), v_nb - u_nb), m.mk_fresh_const(0, m_util.mk_sort(u_nb)));
}
if (new_def) {
subst.insert(v, new_def);
if (m_produce_models) {
if (!m_mc)
m_mc = alloc(bv_size_reduction_mc, m);
m_mc->insert(v->get_decl(), new_def);
}
num_reduced++;
TRACE("bv_size_reduction", tout << "New definition = " << mk_ismt2_pp(new_def, m) << "\n";);
}
}
}
#endif
if (subst.empty())
return;
m_replacer->set_substitution(&subst);
unsigned sz = g.size();
expr * f;
expr_ref new_f(m);
for (unsigned i = 0; i < sz; i++) {
if (g.inconsistent())
return;
f = g.form(i);
(*m_replacer)(f, new_f);
g.update(i, new_f);
}
mc = m_mc.get();
m_mc = 0;
}
report_tactic_progress(":bv-reduced", num_reduced);
TRACE("after_bv_size_reduction", g.display(tout); if (m_mc) m_mc->display(tout););
}
void set_cancel(bool f) {
m_replacer->set_cancel(f);
m_cancel = f;
}
};
bv_size_reduction_tactic::bv_size_reduction_tactic(ast_manager & m) {
m_imp = alloc(imp, m);
}
bv_size_reduction_tactic::~bv_size_reduction_tactic() {
dealloc(m_imp);
}
void bv_size_reduction_tactic::operator()(goal_ref const & g,
goal_ref_buffer & result,
model_converter_ref & mc,
proof_converter_ref & pc,
expr_dependency_ref & core) {
SASSERT(g->is_well_sorted());
fail_if_proof_generation("bv-size-reduction", g);
fail_if_unsat_core_generation("bv-size-reduction", g);
mc = 0; pc = 0; core = 0; result.reset();
m_imp->operator()(*(g.get()), mc);
g->inc_depth();
result.push_back(g.get());
SASSERT(g->is_well_sorted());
}
void bv_size_reduction_tactic::set_cancel(bool f) {
if (m_imp)
m_imp->set_cancel(f);
}
void bv_size_reduction_tactic::cleanup() {
ast_manager & m = m_imp->m;
imp * d = m_imp;
#pragma omp critical (tactic_cancel)
{
m_imp = 0;
}
dealloc(d);
d = alloc(imp, m);
#pragma omp critical (tactic_cancel)
{
m_imp = d;
}
}
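A worked instance of the example in this file's header, to make the size arithmetic concrete:

// x : (_ BitVec 8) with signed bounds -2 <= x <= 2
// u - l = 4, and (u - l).get_num_bits() = 3, so a fresh k : (_ BitVec 3)
// substitution: x := ((sign-extend 5) k), since 8 - 3 = 5
// the extension model converter then recovers x from the value of k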

@@ -0,0 +1,33 @@ bv_size_reduction.h
/*++
Copyright (c) 2012 Microsoft Corporation
Module Name:
bv_size_reduction.h
Abstract:
Reduce the number of bits used to encode constants by using signed bounds.
Example: suppose x is a bit-vector of size 8, and we have
signed bounds for x such that:
-2 <= x <= 2
Then, x can be replaced by ((sign-extend 5) k)
where k is a fresh bit-vector constant of size 3.
Author:
Leonardo (leonardo) 2012-02-19
Notes:
--*/
#ifndef _BV_SIZE_REDUCTION_TACTIC_H_
#define _BV_SIZE_REDUCTION_TACTIC_H_
#include"params.h"
class ast_manager;
class tactic;
tactic * mk_bv_size_reduction_tactic(ast_manager & m, params_ref const & p = params_ref());
#endif

@@ -0,0 +1,337 @@ max_bv_sharing_tactic.cpp
/*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
max_bv_sharing_tactic.cpp
Abstract:
Rewriter for "maximizing" the number of shared terms.
The idea is to rewrite AC terms to maximize sharing.
This rewriter is particularly useful for reducing
the number of adders and multipliers before "bit-blasting".
Author:
Leonardo de Moura (leonardo) 2011-12-29.
Revision History:
--*/
#include"tactical.h"
#include"bv_decl_plugin.h"
#include"rewriter_def.h"
#include"obj_pair_hashtable.h"
#include"ast_lt.h"
#include"cooperate.h"
class max_bv_sharing_tactic : public tactic {
struct rw_cfg : public default_rewriter_cfg {
typedef std::pair<expr *, expr *> expr_pair;
typedef obj_pair_hashtable<expr, expr> set;
bv_util m_util;
set m_add_apps;
set m_mul_apps;
set m_xor_apps;
set m_or_apps;
unsigned long long m_max_memory;
unsigned m_max_steps;
unsigned m_max_args;
ast_manager & m() const { return m_util.get_manager(); }
rw_cfg(ast_manager & m, params_ref const & p):
m_util(m) {
updt_params(p);
}
void cleanup() {
m_add_apps.finalize();
m_mul_apps.finalize();
m_or_apps.finalize();
m_xor_apps.finalize();
}
void updt_params(params_ref const & p) {
m_max_memory = megabytes_to_bytes(p.get_uint(":max-memory", UINT_MAX));
m_max_steps = p.get_uint(":max-steps", UINT_MAX);
m_max_args = p.get_uint(":max-args", 128);
}
bool max_steps_exceeded(unsigned num_steps) const {
cooperate("max bv sharing");
if (memory::get_allocation_size() > m_max_memory)
throw tactic_exception(TACTIC_MAX_MEMORY_MSG);
return num_steps > m_max_steps;
}
set & f2set(func_decl * f) {
switch (f->get_decl_kind()) {
case OP_BADD: return m_add_apps;
case OP_BMUL: return m_mul_apps;
case OP_BXOR: return m_xor_apps;
case OP_BOR: return m_or_apps;
default:
UNREACHABLE();
return m_or_apps; // avoid compilation error
}
}
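// reuse: if f(arg1, arg2), in either argument order, was already
// recorded, rebuild that application (hash-consing shares the node);
// otherwise return 0.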
expr * reuse(set & s, func_decl * f, expr * arg1, expr * arg2) {
if (s.contains(expr_pair(arg1, arg2)))
return m().mk_app(f, arg1, arg2);
if (s.contains(expr_pair(arg2, arg1)))
return m().mk_app(f, arg2, arg1);
return 0;
}
struct ref_count_lt {
bool operator()(expr * t1, expr * t2) const {
if (t1->get_ref_count() < t2->get_ref_count())
return true;
return (t1->get_ref_count() == t2->get_ref_count()) && lt(t1, t2);
}
};
br_status reduce_ac_app(func_decl * f, unsigned num_args, expr * const * args, expr_ref & result) {
set & s = f2set(f);
if (num_args == 2) {
if (!m_util.is_numeral(args[0]) && !m_util.is_numeral(args[1]))
s.insert(expr_pair(args[0], args[1]));
return BR_FAILED;
}
ptr_buffer<expr, 128> _args;
bool first = false;
expr * num = 0;
for (unsigned i = 0; i < num_args; i++) {
expr * arg = args[i];
if (num == 0 && m_util.is_numeral(arg)) {
if (i == 0) first = true;
num = arg;
}
else {
_args.push_back(arg);
}
}
num_args = _args.size();
// std::sort(_args.begin(), _args.end(), ref_count_lt());
// std::sort(_args.begin(), _args.end(), ast_to_lt());
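// Greedy (quadratic) pass: whenever some pair of the remaining arguments
// was already combined elsewhere, fold that pair into the shared
// application and rescan from the start.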
try_to_reuse:
if (num_args > 1 && num_args < m_max_args) {
for (unsigned i = 0; i < num_args - 1; i++) {
for (unsigned j = i + 1; j < num_args; j++) {
expr * r = reuse(s, f, _args[i], _args[j]);
if (r != 0) {
TRACE("bv_sharing_detail", tout << "reusing args: " << i << " " << j << "\n";);
_args[i] = r;
SASSERT(num_args > 1);
for (unsigned w = j; w < num_args - 1; w++) {
_args[w] = _args[w+1];
}
num_args--;
goto try_to_reuse;
}
}
}
}
// TODO:
// some benchmarks are more efficiently solved using a tree-like structure (better sharing)
// other benchmarks are more efficiently solved using a chain-like structure (better propagation for arguments "closer to the output").
//
// One possible solution is to do a global analysis that finds a good order that increases sharing without affecting
// propagation.
//
// Another cheap trick is to create an option, and try both for a small amount of time.
#if 0
SASSERT(num_args > 0);
if (num_args == 1) {
result = _args[0];
}
else {
// ref_count_lt is not a total order on expr's
std::stable_sort(_args.c_ptr(), _args.c_ptr() + num_args, ref_count_lt());
result = m().mk_app(f, _args[0], _args[1]);
for (unsigned i = 2; i < num_args; i++) {
result = m().mk_app(f, result.get(), _args[i]);
}
}
if (num != 0) {
if (first)
result = m().mk_app(f, num, result);
else
result = m().mk_app(f, result, num);
}
return BR_DONE;
#else
// Create "tree-like circuit"
while (true) {
TRACE("bv_sharing_detail", tout << "tree-loop: num_args: " << num_args << "\n";);
unsigned j = 0;
for (unsigned i = 0; i < num_args; i += 2, j++) {
if (i == num_args - 1) {
_args[j] = _args[i];
}
else {
s.insert(expr_pair(_args[i], _args[i+1]));
_args[j] = m().mk_app(f, _args[i], _args[i+1]);
}
}
num_args = j;
if (num_args == 1) {
if (num == 0) {
result = _args[0];
}
else {
if (first)
result = m().mk_app(f, num, _args[0]);
else
result = m().mk_app(f, _args[0], num);
}
return BR_DONE;
}
}
#endif
}
br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) {
if (f->get_family_id() != m_util.get_family_id())
return BR_FAILED;
switch (f->get_decl_kind()) {
case OP_BADD:
case OP_BMUL:
case OP_BOR:
case OP_BXOR:
result_pr = 0;
return reduce_ac_app(f, num, args, result);
default:
return BR_FAILED;
}
}
};
struct rw : public rewriter_tpl<rw_cfg> {
rw_cfg m_cfg;
rw(ast_manager & m, params_ref const & p):
rewriter_tpl<rw_cfg>(m, m.proofs_enabled(), m_cfg),
m_cfg(m, p) {
}
};
struct imp {
rw m_rw;
unsigned m_num_steps;
imp(ast_manager & m, params_ref const & p):
m_rw(m, p),
m_num_steps(0) {
}
ast_manager & m() const { return m_rw.m(); }
void set_cancel(bool f) {
m_rw.set_cancel(f);
}
void operator()(goal_ref const & g,
goal_ref_buffer & result,
model_converter_ref & mc,
proof_converter_ref & pc,
expr_dependency_ref & core) {
SASSERT(g->is_well_sorted());
mc = 0; pc = 0; core = 0;
tactic_report report("max-bv-sharing", *g);
bool produce_proofs = g->proofs_enabled();
expr_ref new_curr(m());
proof_ref new_pr(m());
unsigned size = g->size();
for (unsigned idx = 0; idx < size; idx++) {
if (g->inconsistent())
break;
expr * curr = g->form(idx);
m_rw(curr, new_curr, new_pr);
m_num_steps += m_rw.get_num_steps();
if (produce_proofs) {
proof * pr = g->pr(idx);
new_pr = m().mk_modus_ponens(pr, new_pr);
}
g->update(idx, new_curr, new_pr, g->dep(idx));
}
m_rw.cfg().cleanup();
g->inc_depth();
result.push_back(g.get());
TRACE("qe", g->display(tout););
SASSERT(g->is_well_sorted());
}
};
imp * m_imp;
params_ref m_params;
public:
max_bv_sharing_tactic(ast_manager & m, params_ref const & p):
m_params(p) {
m_imp = alloc(imp, m, p);
}
virtual tactic * translate(ast_manager & m) {
return alloc(max_bv_sharing_tactic, m, m_params);
}
virtual ~max_bv_sharing_tactic() {
dealloc(m_imp);
}
virtual void updt_params(params_ref const & p) {
m_params = p;
m_imp->m_rw.cfg().updt_params(p);
}
virtual void collect_param_descrs(param_descrs & r) {
insert_max_memory(r);
insert_max_steps(r);
r.insert(":max-args", CPK_UINT,
"(default: 128) maximum number of arguments (per application) that will be considered by the greedy (quadratic) heuristic.");
}
virtual void operator()(goal_ref const & in,
goal_ref_buffer & result,
model_converter_ref & mc,
proof_converter_ref & pc,
expr_dependency_ref & core) {
(*m_imp)(in, result, mc, pc, core);
}
virtual void cleanup() {
ast_manager & m = m_imp->m();
imp * d = m_imp;
#pragma omp critical (tactic_cancel)
{
m_imp = 0;
}
dealloc(d);
d = alloc(imp, m, m_params);
#pragma omp critical (tactic_cancel)
{
m_imp = d;
}
}
virtual void set_cancel(bool f) {
if (m_imp)
m_imp->set_cancel(f);
}
};
tactic * mk_max_bv_sharing_tactic(ast_manager & m, params_ref const & p) {
return clean(alloc(max_bv_sharing_tactic, m, p));
}
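The "tree-like circuit" loop above pairs adjacent arguments round by round, carrying a lone trailing argument into the next round, until a single term remains. Below is a self-contained sketch of the same pairing applied to plain strings, so the resulting shape is easy to see; it is illustrative only and assumes nothing beyond the C++ standard library.

#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> args = {"a", "b", "c", "d", "e"};
    // Combine adjacent pairs per round, as in reduce_ac_app's tree loop.
    while (args.size() > 1) {
        std::vector<std::string> next;
        for (std::size_t i = 0; i < args.size(); i += 2) {
            if (i == args.size() - 1)
                next.push_back(args[i]); // odd element carried over
            else
                next.push_back("(bvadd " + args[i] + " " + args[i + 1] + ")");
        }
        args = next;
    }
    // Prints: (bvadd (bvadd (bvadd a b) (bvadd c d)) e)
    std::cout << args[0] << std::endl;
    return 0;
}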

@@ -0,0 +1,32 @@ max_bv_sharing_tactic.h
/*++
Copyright (c) 2011 Microsoft Corporation
Module Name:
max_bv_sharing_tactic.h
Abstract:
Rewriter for "maximizing" the number of shared terms.
The idea is to rewrite AC terms to maximize sharing.
This rewriter is particularly useful for reducing
the number of adders and multipliers before "bit-blasting".
Author:
Leonardo de Moura (leonardo) 2011-12-29.
Revision History:
--*/
#ifndef _MAX_BV_SHARING_TACTIC_H_
#define _MAX_BV_SHARING_TACTIC_H_
#include"params.h"
class ast_manager;
class tactic;
tactic * mk_max_bv_sharing_tactic(ast_manager & m, params_ref const & p = params_ref());
#endif