3
0
Fork 0
mirror of https://github.com/Z3Prover/z3 synced 2025-04-06 17:44:08 +00:00

rewrite some simplifiers

This commit is contained in:
Nikolaj Bjorner 2022-11-30 23:15:32 +09:00
parent 23c53c6820
commit edb0fc394b
8 changed files with 81 additions and 237 deletions

View file

@ -37,13 +37,11 @@ void bit_blaster::reduce() {
expr_ref new_curr(m);
proof_ref new_pr(m);
bool change = false;
for (unsigned idx = qhead(); idx < qtail(); idx++) {
if (m_fmls.inconsistent())
break;
for (unsigned idx : indices()) {
auto [curr, d] = m_fmls[idx]();
m_rewriter(curr, new_curr, new_pr);
m_num_steps += m_rewriter.get_num_steps();
m_rewriter(curr, new_curr, new_pr);
if (curr != new_curr) {
m_num_steps += m_rewriter.get_num_steps();
change = true;
TRACE("bit_blaster", tout << mk_pp(curr, m) << " -> " << new_curr << "\n";);
m_fmls.update(idx, dependent_expr(m, new_curr, d));

View file

@ -27,7 +27,7 @@ namespace bv {
}
void slice::process_eqs() {
for (unsigned i = qhead(); i < qtail(); ++i) {
for (unsigned i : indices()) {
auto const [f, d] = m_fmls[i]();
process_eq(f);
}
@ -136,7 +136,7 @@ namespace bv {
expr_ref_vector cache(m), pin(m);
ptr_vector<expr> todo, args;
expr* c;
for (unsigned i = qhead(); i < qtail(); ++i) {
for (unsigned i : indices()) {
auto const [f, d] = m_fmls[i]();
todo.push_back(f);
pin.push_back(f);

View file

@ -29,7 +29,7 @@ void card2bv::reduce() {
expr_ref new_f1(m), new_f2(m);
proof_ref new_pr(m);
for (unsigned idx = qhead(); !m_fmls.inconsistent() && idx < qtail(); idx++) {
for (unsigned idx : indices()) {
auto [f, d] = m_fmls[idx]();
rw1(f, new_f1);
rw2(false, new_f1, new_f2, new_pr);

View file

@ -7,7 +7,7 @@ Module Name:
Abstract:
abstraction for simplification of dependent expression states.
abstraction for simplification of dependent expression states.
A dependent_expr_state is an interface to a set of dependent expressions.
Dependent expressions are formulas together with a set of dependencies that are coarse grained
proof hints or justifications for them. Input assumptions can be self-justified.

View file

@ -0,0 +1,49 @@
/*++
Copyright (c) 2022 Microsoft Corporation
Module Name:
elim_term_ite.h
Author:
Nikolaj Bjorner (nbjorner) 2022-11-24
--*/
#pragma once
#include "ast/simplifiers/dependent_expr_state.h"
#include "ast/rewriter/elim_term_ite.h"
class elim_term_ite_simplifier : public dependent_expr_simplifier {
    // Rewriter that eliminates term-level if-then-else expressions.
    // NOTE(review): the original declared this member as m_elim, initialized it as
    // m_elim_term_ite, and used it as m_rewriter - three names for one field, which
    // cannot compile. Unified here to m_rewriter, the name the method bodies use.
    elim_term_ite m_rewriter;

public:
    // \brief Construct the simplifier over the dependent-expression state fmls.
    // The params argument is accepted for interface uniformity with sibling
    // simplifiers; it is not consulted here.
    elim_term_ite_simplifier(ast_manager& m, params_ref const& p, dependent_expr_state& fmls):
        dependent_expr_simplifier(m, fmls),
        m_rewriter(m) {
    }

    // Fixed: the original returned "distribute-forall", a copy-paste artifact
    // from another simplifier; report the name matching this module.
    char const* name() const override { return "elim-term-ite"; }

    // Rewrite every formula that contains quantifiers; quantifier-free
    // formulas are skipped, and the whole pass is a no-op when the state
    // holds no quantifiers at all.
    void reduce() override {
        if (!m_fmls.has_quantifiers())
            return;
        expr_ref r(m);
        for (unsigned idx : indices()) {
            auto const& d = m_fmls[idx];
            if (!has_quantifiers(d.fml()))
                continue;
            m_rewriter(d.fml(), r);
            // Preserve the original dependency d.dep() on the rewritten formula.
            m_fmls.update(idx, dependent_expr(m, r, d.dep()));
        }
    }

    // Keep the rewriter's scope stack in lock-step with the simplifier's.
    void push() override { dependent_expr_simplifier::push(); m_rewriter.push(); }

    void pop(unsigned n) override { dependent_expr_simplifier::pop(n); m_rewriter.pop(n); }
};

View file

@ -125,7 +125,7 @@ void elim_unconstrained::init_nodes() {
m_fmls.freeze_suffix();
expr_ref_vector terms(m);
for (unsigned i = qhead(); i < qtail(); ++i)
for (unsigned i : indices())
terms.push_back(m_fmls[i].fml());
m_trail.append(terms);
m_heap.reset();
@ -201,7 +201,7 @@ void elim_unconstrained::gc(expr* t) {
*/
void elim_unconstrained::reconstruct_terms() {
expr_ref_vector terms(m);
for (unsigned i = qhead(); i < qtail(); ++i)
for (unsigned i : indices())
terms.push_back(m_fmls[i].fml());
for (expr* e : subterms_postorder::all(terms)) {
@ -234,8 +234,7 @@ void elim_unconstrained::reconstruct_terms() {
void elim_unconstrained::assert_normalized(vector<dependent_expr>& old_fmls) {
unsigned sz = qtail();
for (unsigned i = qhead(); i < sz; ++i) {
for (unsigned i : indices()) {
auto [f, d] = m_fmls[i]();
node& n = get_node(f);
expr* g = n.m_term;

View file

@ -20,234 +20,26 @@ Revision History:
--*/
#include "ast/bv_decl_plugin.h"
#include "ast/rewriter/rewriter_def.h"
#include "util/obj_pair_hashtable.h"
#include "ast/rewriter/maximize_ac_sharing.h"
#include "ast/simplifiers/dependent_expr_state.h"
#include "ast/ast_lt.h"
class max_bv_sharing : public dependent_expr_simplifier {
struct rw_cfg : public default_rewriter_cfg {
typedef std::pair<expr *, expr *> expr_pair;
typedef obj_pair_hashtable<expr, expr> set;
bv_util m_util;
set m_add_apps;
set m_mul_apps;
set m_xor_apps;
set m_or_apps;
unsigned long long m_max_memory;
unsigned m_max_steps;
unsigned m_max_args;
maximize_bv_sharing_rw m_rewriter;
unsigned m_num_steps = 0;
ast_manager & m() const { return m_util.get_manager(); }
rw_cfg(ast_manager & m, params_ref const & p):
m_util(m) {
updt_params(p);
}
void cleanup() {
m_add_apps.finalize();
m_mul_apps.finalize();
m_or_apps.finalize();
m_xor_apps.finalize();
}
void updt_params(params_ref const & p) {
m_max_memory = megabytes_to_bytes(p.get_uint("max_memory", UINT_MAX));
m_max_steps = p.get_uint("max_steps", UINT_MAX);
m_max_args = p.get_uint("max_args", 128);
}
bool max_steps_exceeded(unsigned num_steps) const {
if (memory::get_allocation_size() > m_max_memory)
throw rewriter_exception(Z3_MAX_MEMORY_MSG);
return num_steps > m_max_steps;
}
set & f2set(func_decl * f) {
switch (f->get_decl_kind()) {
case OP_BADD: return m_add_apps;
case OP_BMUL: return m_mul_apps;
case OP_BXOR: return m_xor_apps;
case OP_BOR: return m_or_apps;
default:
UNREACHABLE();
return m_or_apps; // avoid compilation error
}
}
expr * reuse(set & s, func_decl * f, expr * arg1, expr * arg2) {
if (s.contains(expr_pair(arg1, arg2)))
return m().mk_app(f, arg1, arg2);
if (s.contains(expr_pair(arg2, arg1)))
return m().mk_app(f, arg2, arg1);
return nullptr;
}
struct ref_count_lt {
bool operator()(expr * t1, expr * t2) const {
if (t1->get_ref_count() < t2->get_ref_count())
return true;
return (t1->get_ref_count() == t2->get_ref_count()) && lt(t1, t2);
}
};
br_status reduce_ac_app(func_decl * f, unsigned num_args, expr * const * args, expr_ref & result) {
set & s = f2set(f);
if (num_args == 2) {
if (!m_util.is_numeral(args[0]) && !m_util.is_numeral(args[1]))
s.insert(expr_pair(args[0], args[1]));
return BR_FAILED;
}
ptr_buffer<expr, 128> _args;
bool first = false;
expr * num = nullptr;
for (unsigned i = 0; i < num_args; i++) {
expr * arg = args[i];
if (num == nullptr && m_util.is_numeral(arg)) {
if (i == 0) first = true;
num = arg;
}
else {
_args.push_back(arg);
}
}
num_args = _args.size();
// std::sort(_args.begin(), _args.end(), ref_count_lt());
// std::sort(_args.begin(), _args.end(), ast_to_lt());
try_to_reuse:
if (num_args > 1 && num_args < m_max_args) {
for (unsigned i = 0; i < num_args - 1; i++) {
for (unsigned j = i + 1; j < num_args; j++) {
expr * r = reuse(s, f, _args[i], _args[j]);
if (r != nullptr) {
TRACE("bv_sharing_detail", tout << "reusing args: " << i << " " << j << "\n";);
_args[i] = r;
SASSERT(num_args > 1);
for (unsigned w = j; w < num_args - 1; w++) {
_args[w] = _args[w+1];
}
num_args--;
goto try_to_reuse;
}
}
}
}
// TODO:
// some benchmarks are more efficiently solved using a tree-like structure (better sharing)
// other benchmarks are more efficiently solved using a chain-like structure (better propagation for arguments "closer to the output").
//
// One possible solution is to do a global analysis that finds a good order that increases sharing without affecting
// propagation.
//
// Another cheap trick is to create an option, and try both for a small amount of time.
#if 0
SASSERT(num_args > 0);
if (num_args == 1) {
result = _args[0];
}
else {
// ref_count_lt is not a total order on expr's
std::stable_sort(_args.c_ptr(), _args.c_ptr() + num_args, ref_count_lt());
result = m().mk_app(f, _args[0], _args[1]);
for (unsigned i = 2; i < num_args; i++) {
result = m().mk_app(f, result.get(), _args[i]);
}
}
if (num != 0) {
if (first)
result = m().mk_app(f, num, result);
else
result = m().mk_app(f, result, num);
}
return BR_DONE;
#else
// Create "tree-like circuit"
while (true) {
TRACE("bv_sharing_detail", tout << "tree-loop: num_args: " << num_args << "\n";);
unsigned j = 0;
for (unsigned i = 0; i < num_args; i += 2, j++) {
if (i == num_args - 1) {
_args[j] = _args[i];
}
else {
s.insert(expr_pair(_args[i], _args[i+1]));
_args[j] = m().mk_app(f, _args[i], _args[i+1]);
}
}
num_args = j;
if (num_args == 1) {
if (num == nullptr) {
result = _args[0];
}
else {
if (first)
result = m().mk_app(f, num, _args[0]);
else
result = m().mk_app(f, _args[0], num);
}
return BR_DONE;
}
}
#endif
}
br_status reduce_app(func_decl * f, unsigned num, expr * const * args, expr_ref & result, proof_ref & result_pr) {
if (f->get_family_id() != m_util.get_family_id())
return BR_FAILED;
switch (f->get_decl_kind()) {
case OP_BADD:
case OP_BMUL:
case OP_BOR:
case OP_BXOR:
result_pr = nullptr;
return reduce_ac_app(f, num, args, result);
default:
return BR_FAILED;
}
}
};
struct rw : public rewriter_tpl<rw_cfg> {
rw_cfg m_cfg;
rw(ast_manager & m, params_ref const & p):
rewriter_tpl<rw_cfg>(m, m.proofs_enabled(), m_cfg),
m_cfg(m, p) {
}
};
rw m_rw;
unsigned m_num_steps = 0;
params_ref m_params;
public:
max_bv_sharing(ast_manager & m, params_ref const & p, dependent_expr_state& fmls):
dependent_expr_simplifier(m, fmls),
m_params(p),
m_rw(m, p) {
m_rewriter(m) {
}
void updt_params(params_ref const & p) override {
m_params.append(p);
m_rw.cfg().updt_params(m_params);
void reset_statistics() override {
m_num_steps = 0;
}
void collect_param_descrs(param_descrs & r) override {
insert_max_memory(r);
insert_max_steps(r);
r.insert("max_args", CPK_UINT,
"(default: 128) maximum number of arguments (per application) that will be considered by the greedy (quadratic) heuristic.");
void collect_statistics(statistics& st) const override {
st.update("max-sharing-steps", m_num_steps);
}
char const* name() const override { return "max-bv-sharing"; }
@ -255,15 +47,21 @@ public:
void reduce() override {
expr_ref new_curr(m);
proof_ref new_pr(m);
for (unsigned idx = qhead(); idx < qtail() && !m_fmls.inconsistent(); idx++) {
for (unsigned idx : indices()) {
auto [curr, d] = m_fmls[idx]();
m_rw(curr, new_curr, new_pr);
m_rewriter(curr, new_curr, new_pr);
// Proof reconstruction: new_pr = m.mk_modus_ponens(old_pr, new_pr);
m_num_steps += m_rw.get_num_steps();
m_fmls.update(idx, dependent_expr(m, new_curr, d));
if (new_curr != curr) {
m_num_steps += m_rewriter.get_num_steps();
m_fmls.update(idx, dependent_expr(m, new_curr, d));
}
}
m_rw.cfg().cleanup();
}
}
void push() override { dependent_expr_simplifier::push(); m_rewriter.push_scope(); }
void pop(unsigned n) override { dependent_expr_simplifier::pop(n); m_rewriter.pop_scope(n); }
};
dependent_expr_simplifier * mk_max_bv_sharing(ast_manager & m, params_ref const & p, dependent_expr_state& fmls) {

View file

@ -86,7 +86,7 @@ void propagate_values::reduce() {
for (unsigned r = 0; r < m_max_rounds && m.inc() && rw != m_stats.m_num_rewrites; ++r) {
rw = m_stats.m_num_rewrites;
init_sub();
for (unsigned i = qhead(); i < qtail() && m.inc() && !m_fmls.inconsistent(); ++i)
for (unsigned i : indices())
process_fml(i);
init_sub();
for (unsigned i = qtail(); i-- > qhead() && m.inc() && !m_fmls.inconsistent();)