3
0
Fork 0
mirror of https://github.com/Z3Prover/z3 synced 2025-04-06 17:44:08 +00:00

re-enabling model evaluation of as-array after tuning normalization

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
This commit is contained in:
Nikolaj Bjorner 2019-02-10 18:11:01 -08:00
parent 81d322b79f
commit 6cfe66c3c2
6 changed files with 56 additions and 30 deletions

View file

@@ -73,11 +73,9 @@ void act_cache::init() {
}
void act_cache::dec_refs() {
map::iterator it = m_table.begin();
map::iterator end = m_table.end();
for (; it != end; ++it) {
m_manager.dec_ref((*it).m_key);
m_manager.dec_ref(UNTAG(expr*, (*it).m_value));
for (auto & kv : m_table) {
m_manager.dec_ref(kv.m_key.first);
m_manager.dec_ref(UNTAG(expr*, kv.m_value));
}
}
@@ -105,18 +103,18 @@ act_cache::~act_cache() {
void act_cache::del_unused() {
unsigned sz = m_queue.size();
while (m_qhead < sz) {
expr * k = m_queue[m_qhead];
entry_t const& e = m_queue[m_qhead];
m_qhead++;
SASSERT(m_table.contains(k));
map::key_value * entry = m_table.find_core(k);
SASSERT(m_table.contains(e));
map::key_value * entry = m_table.find_core(e);
SASSERT(entry);
if (GET_TAG(entry->m_value) == 0) {
// Key k was never accessed by client code.
// That is, find(k) was never executed by client code.
m_unused--;
expr * v = entry->m_value;
m_table.erase(k);
m_manager.dec_ref(k);
m_table.erase(e);
m_manager.dec_ref(e.first);
m_manager.dec_ref(v);
break;
}
@@ -135,12 +133,13 @@ void act_cache::del_unused() {
/**
\brief Insert a new entry k -> v into the cache.
*/
void act_cache::insert(expr * k, expr * v) {
void act_cache::insert(expr * k, unsigned offset, expr * v) {
SASSERT(k);
entry_t e(k, offset);
if (m_unused >= m_max_unused)
del_unused();
expr * dummy = reinterpret_cast<expr*>(1);
map::key_value & entry = m_table.insert_if_not_there(k, dummy);
map::key_value & entry = m_table.insert_if_not_there(e, dummy);
#if 0
unsigned static counter = 0;
counter++;
@@ -156,7 +155,7 @@ void act_cache::insert(expr * k, expr * v) {
m_manager.inc_ref(k);
m_manager.inc_ref(v);
entry.m_value = v;
m_queue.push_back(k);
m_queue.push_back(e);
m_unused++;
DEBUG_CODE(expected_tag = 0;); // new entry
}
@@ -175,7 +174,7 @@ void act_cache::insert(expr * k, expr * v) {
}
DEBUG_CODE({
expr * v2;
SASSERT(m_table.find(k, v2));
SASSERT(m_table.find(e, v2));
SASSERT(v == UNTAG(expr*, v2));
SASSERT(expected_tag == GET_TAG(v2));
});
@@ -185,8 +184,9 @@ void act_cache::insert(expr * k, expr * v) {
\brief Search for key k in the cache.
If entry k -> (v, tag) is found, we set tag to 1.
*/
expr * act_cache::find(expr * k) {
map::key_value * entry = m_table.find_core(k);
expr * act_cache::find(expr * k, unsigned offset) {
entry_t e(k, offset);
map::key_value * entry = m_table.find_core(e);
if (entry == nullptr)
return nullptr;
if (GET_TAG(entry->m_value) == 0) {
@@ -196,7 +196,7 @@ expr * act_cache::find(expr * k) {
m_unused--;
DEBUG_CODE({
expr * v;
SASSERT(m_table.find(k, v));
SASSERT(m_table.find(e, v));
SASSERT(GET_TAG(v) == 1);
});
}

View file

@@ -26,9 +26,15 @@ Notes:
class act_cache {
ast_manager & m_manager;
typedef cmap<expr*, expr*, obj_ptr_hash<expr>, default_eq<expr*> > map;
typedef std::pair<expr*, unsigned> entry_t;
struct entry_hash {
unsigned operator()(entry_t const& e) const {
return e.first->hash() + e.second;
}
};
typedef cmap<entry_t, expr*, entry_hash, default_eq<entry_t> > map;
map m_table;
ptr_vector<expr> m_queue; // recently created queue
svector<entry_t> m_queue; // recently created queue
unsigned m_qhead;
unsigned m_unused;
unsigned m_max_unused;
@@ -42,8 +48,10 @@ public:
act_cache(ast_manager & m);
act_cache(ast_manager & m, unsigned max_unused);
~act_cache();
void insert(expr * k, expr * v);
expr * find(expr * k);
void insert(expr * k, expr * v) { insert(k, 0, v); }
expr * find(expr * k) { return find(k, 0); }
void insert(expr * k, unsigned offset, expr * v);
expr * find(expr * k, unsigned offset);
void reset();
void cleanup();
unsigned size() const { return m_table.size(); }

View file

@@ -42,7 +42,7 @@ void rewriter_core::del_cache_stack() {
}
}
void rewriter_core::cache_result(expr * k, expr * v) {
void rewriter_core::cache_shifted_result(expr * k, unsigned offset, expr * v) {
#if 0
// trace for tracking cache usage
verbose_stream() << "1 " << k->get_id() << std::endl;
@@ -53,7 +53,7 @@ void rewriter_core::cache_result(expr * k, expr * v) {
SASSERT(m().get_sort(k) == m().get_sort(v));
m_cache->insert(k, v);
m_cache->insert(k, offset, v);
#if 0
static unsigned num_cached = 0;
num_cached ++;

View file

@@ -90,8 +90,10 @@ protected:
void init_cache_stack();
void del_cache_stack();
void reset_cache();
void cache_result(expr * k, expr * v);
void cache_result(expr * k, expr * v) { cache_shifted_result(k, 0, v); }
void cache_shifted_result(expr * k, unsigned offset, expr * v);
expr * get_cached(expr * k) const { return m_cache->find(k); }
expr * get_cached(expr* k, unsigned offset) const { return m_cache->find(k, offset); }
void cache_result(expr * k, expr * v, proof * pr);
proof * get_cached_pr(expr * k) const { return static_cast<proof*>(m_cache_pr->find(k)); }

View file

@@ -38,9 +38,10 @@ void rewriter_tpl<Config>::process_var(var * v) {
if (!ProofGen) {
// bindings are only used when Proof Generation is not enabled.
unsigned idx = v->get_idx();
if (idx < m_bindings.size()) {
unsigned index = m_bindings.size() - idx - 1;
var * r = (var*)(m_bindings[index]);
expr * r = m_bindings[index];
if (r != nullptr) {
CTRACE("rewriter", v->get_sort() != m().get_sort(r),
tout << expr_ref(v, m()) << ":" << sort_ref(v->get_sort(), m()) << " != " << expr_ref(r, m()) << ":" << sort_ref(m().get_sort(r), m());
@@ -50,11 +51,18 @@ void rewriter_tpl<Config>::process_var(var * v) {
if (!is_ground(r) && m_shifts[index] != m_bindings.size()) {
unsigned shift_amount = m_bindings.size() - m_shifts[index];
expr* c = get_cached(r, shift_amount);
if (c) {
result_stack().push_back(c);
set_new_child_flag(v);
return;
}
expr_ref tmp(m());
m_shifter(r, shift_amount, tmp);
result_stack().push_back(tmp);
TRACE("rewriter", tout << "shift: " << shift_amount << " idx: " << idx << " --> " << tmp << "\n";
display_bindings(tout););
cache_shifted_result(r, shift_amount, tmp);
}
else {
result_stack().push_back(r);
@@ -380,7 +388,6 @@ void rewriter_tpl<Config>::process_app(app * t, frame & fr) {
TRACE("get_macro", display_bindings(tout););
begin_scope();
m_num_qvars += num_args;
//m_num_qvars = 0;
m_root = def;
push_frame(def, false, RW_UNBOUNDED_DEPTH);
return;
@@ -480,7 +487,7 @@ void rewriter_tpl<Config>::process_quantifier(quantifier * q, frame & fr) {
m_root = q->get_expr();
unsigned sz = m_bindings.size();
for (unsigned i = 0; i < num_decls; i++) {
m_bindings.push_back(0);
m_bindings.push_back(nullptr);
m_shifts.push_back(sz);
}
}

View file

@@ -56,6 +56,8 @@ struct evaluator_cfg : public default_rewriter_cfg {
bool m_cache;
bool m_array_equalities;
bool m_array_as_stores;
obj_map<func_decl, expr*> m_def_cache;
expr_ref_vector m_pinned;
evaluator_cfg(ast_manager & m, model_core & md, params_ref const & p):
m(m),
@@ -72,7 +74,8 @@ struct evaluator_cfg : public default_rewriter_cfg {
m_pb_rw(m),
m_f_rw(m),
m_seq_rw(m),
m_ar(m) {
m_ar(m),
m_pinned(m) {
bool flat = true;
m_b_rw.set_flat(flat);
m_a_rw.set_flat(flat);
@@ -202,23 +205,29 @@ struct evaluator_cfg : public default_rewriter_cfg {
return BR_REWRITE1;
}
}
#if 0
#if 1
if (st == BR_FAILED && num == 0 && m_ar.is_as_array(f) && m_model_completion) {
func_decl* g = nullptr;
VERIFY(m_ar.is_as_array(f, g));
expr* def = nullptr;
quantifier* q = nullptr;
proof* def_pr = nullptr;
if (m_def_cache.find(g, def)) {
result = def;
return BR_DONE;
}
if (get_macro(g, def, q, def_pr)) {
sort_ref_vector vars(m);
svector<symbol> var_names;
for (unsigned i = 0; i < g->get_arity(); ++i) {
var_names.push_back(symbol(i));
vars.push_back(g->get_domain(g->get_arity() - i - 1));
vars.push_back(g->get_domain(i));
}
result = m.mk_lambda(vars.size(), vars.c_ptr(), var_names.c_ptr(), def);
model_evaluator ev(m_model, m_params);
result = ev(result);
m_pinned.push_back(result);
m_def_cache.insert(g, result);
return BR_DONE;
}
}