Mirror of https://github.com/Z3Prover/z3

fix #1675, regression in core processing in maxres

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
commit 335d672bf1
parent 26e9321517
Nikolaj Bjorner, 2018-06-19 23:23:19 -07:00
43 changed files with 246 additions and 321 deletions
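
Most of the churn in this diff replaces the two-step "evaluate into a temporary, then ask the ast_manager" pattern with the model's own helpers, with model completion switched on once. A minimal before/after sketch, assuming z3's internal headers and meant to be compiled inside the z3 source tree (the include paths are my guess; the member functions are exactly the ones exercised below):

#include "ast/ast.h"
#include "model/model.h"

// Before: evaluate into a temporary and test the result with the ast_manager.
static bool is_true_old(ast_manager& m, model* mdl, expr* e) {
    expr_ref tmp(m);
    return mdl->eval(e, tmp, true) && m.is_true(tmp);
}

// After: enable completion once, then let the model answer directly;
// (*mdl)(e) likewise returns the completed value of e.
static bool is_true_new(model_ref& mdl, expr* e) {
    mdl->set_model_completion(true);   // the patch does this once, in update_assignment
    return mdl->is_true(e);
}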

@@ -175,10 +175,11 @@ public:
void new_assumption(expr* e, rational const& w) {
IF_VERBOSE(13, verbose_stream() << "new assumption " << mk_pp(e, m) << " " << w << "\n";);
TRACE("opt", tout << "insert: " << mk_pp(e, m) << " : " << w << "\n";);
m_asm2weight.insert(e, w);
m_asms.push_back(e);
m_trail.push_back(e);
TRACE("opt", tout << "insert: " << mk_pp(e, m) << " : " << w << "\n";
tout << m_asms << " " << "\n"; );
}
void trace() {
@@ -192,7 +193,7 @@ public:
trace();
if (is_sat != l_true) return is_sat;
while (m_lower < m_upper) {
TRACE("opt",
TRACE("opt_verbose",
display_vec(tout, m_asms);
s().display(tout);
tout << "\n";
@@ -204,7 +205,12 @@ public:
}
switch (is_sat) {
case l_true:
SASSERT(is_true(m_asms));
CTRACE("opt", !m_model->is_true(m_asms),
tout << *m_model;
tout << "assumptions: ";
for (expr* a : m_asms) tout << mk_pp(a, m) << " -> " << (*m_model)(a) << " ";
tout << "\n";);
SASSERT(m_model->is_true(m_asms));
found_optimum();
return l_true;
case l_false:
@@ -276,8 +282,7 @@ public:
/**
Give preference to cores that have large minimal values.
*/
sort_assumptions(asms);
sort_assumptions(asms);
m_last_index = std::min(m_last_index, asms.size()-1);
m_last_index = 0;
unsigned index = m_last_index>0?m_last_index-1:0;
@@ -290,8 +295,6 @@ public:
index = next_index(asms, index);
}
first = false;
IF_VERBOSE(3, verbose_stream() << "hill climb " << index << "\n";);
// IF_VERBOSE(3, verbose_stream() << "weight: " << get_weight(asms[0].get()) << " " << get_weight(asms[index-1].get()) << " num soft: " << index << "\n";);
m_last_index = index;
is_sat = check_sat(index, asms.c_ptr());
}
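
The two hunks above adjust the hill-climbing entry into core extraction: satisfiability is checked under growing prefixes of the weight-sorted assumptions, so that when unsat finally arrives the core has a large minimal weight. A rough stand-in for that idea only (next_index and the m_last_index bookkeeping are replaced by a simple doubling schedule; solver and expr_ref_vector are z3 internals):

#include <algorithm>
#include "solver/solver.h"

// Weight-guided hill climbing, sketched; asms is assumed sorted by decreasing weight.
static lbool check_sat_hill_climb_sketch(solver& s, expr_ref_vector const& asms) {
    if (asms.empty()) return s.check_sat(0, nullptr);
    unsigned index = 1;
    lbool r = s.check_sat(index, asms.c_ptr());
    while (r == l_true && index < asms.size()) {
        index = std::min(2 * index, asms.size());   // illustrative growth schedule
        r = s.check_sat(index, asms.c_ptr());
    }
    return r;   // l_false: the first 'index' (highest-weight) assumptions already contain a core
}
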
@@ -307,8 +310,9 @@ public:
if (r == l_true) {
model_ref mdl;
s().get_model(mdl);
TRACE("opt", tout << *mdl;);
if (mdl.get()) {
update_assignment(mdl.get());
update_assignment(mdl);
}
}
return r;
@@ -318,7 +322,7 @@ public:
IF_VERBOSE(1, verbose_stream() << "found optimum\n";);
m_lower.reset();
for (unsigned i = 0; i < m_soft.size(); ++i) {
m_assignment[i] = is_true(m_soft[i]);
m_assignment[i] = m_model->is_true(m_soft[i]);
if (!m_assignment[i]) {
m_lower += m_weights[i];
}
@@ -347,7 +351,6 @@ public:
lbool get_cores(vector<exprs>& cores) {
// assume m_s is unsat.
lbool is_sat = l_false;
expr_ref_vector asms(m_asms);
cores.reset();
exprs core;
while (is_sat == l_false) {
@@ -370,6 +373,10 @@ public:
m_lower = m_upper;
return l_true;
}
// 1. remove all core literals from m_asms
// 2. re-add literals of higher weight than min-weight.
// 3. 'core' stores the core literals that are split afterwards
remove_soft(core, m_asms);
split_core(core);
cores.push_back(core);
if (core.size() >= m_max_core_size) {
@@ -378,15 +385,14 @@ public:
if (cores.size() >= m_max_num_cores) {
break;
}
remove_soft(core, asms);
is_sat = check_sat_hill_climb(asms);
is_sat = check_sat_hill_climb(m_asms);
}
TRACE("opt",
tout << "num cores: " << cores.size() << "\n";
for (auto const& c : cores) {
display_vec(tout, c);
}
tout << "num satisfying: " << asms.size() << "\n";);
tout << "num satisfying: " << m_asms.size() << "\n";);
return is_sat;
}
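
The heart of the #1675 fix is in the hunks above: the core literals are removed from m_asms itself, once and before split_core re-adds those whose weight exceeds the core's minimum, and the hill climb continues on m_asms rather than on the local copy the old code kept (the separate remove_core step in process_unsat disappears further down). A paraphrase of the patched loop body, illustrative only; extract_core() is a hypothetical stand-in for the elided unsat-core query:

// Not verbatim and not compilable on its own: it refers to the surrounding class members.
while (is_sat == l_false) {
    exprs core = extract_core();              // hypothetical stand-in for the unsat-core query
    if (core.empty()) {
        m_lower = m_upper;                    // no core left: the bounds meet
        return l_true;
    }
    remove_soft(core, m_asms);                // 1. drop the core literals from the live assumption set
    split_core(core);                         // 2. re-add literals heavier than the core's minimum weight
    cores.push_back(core);                    // 3. keep the core for max-resolution afterwards
    if (core.size() >= m_max_core_size) break;
    if (cores.size() >= m_max_num_cores) break;
    is_sat = check_sat_hill_climb(m_asms);    // continue on m_asms itself, not a copy
}
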
@@ -394,7 +400,7 @@ public:
void get_current_correction_set(exprs& cs) {
model_ref mdl;
s().get_model(mdl);
update_assignment(mdl.get());
update_assignment(mdl);
get_current_correction_set(mdl.get(), cs);
}
@@ -402,10 +408,10 @@ public:
cs.reset();
if (!mdl) return;
for (expr* a : m_asms) {
if (is_false(mdl, a)) {
if (mdl->is_false(a)) {
cs.push_back(a);
}
TRACE("opt", expr_ref tmp(m); mdl->eval(a, tmp, true); tout << mk_pp(a, m) << ": " << tmp << "\n";);
// TRACE("opt", tout << mk_pp(a, m) << ": " << (*mdl)(a) << "\n";);
}
TRACE("opt", display_vec(tout << "new correction set: ", cs););
}
@@ -444,7 +450,7 @@ public:
++m_stats.m_num_cs;
expr_ref fml(m), tmp(m);
TRACE("opt", display_vec(tout << "corr_set: ", corr_set););
remove_core(corr_set);
remove_soft(corr_set, m_asms);
rational w = split_core(corr_set);
cs_max_resolve(corr_set, w);
IF_VERBOSE(2, verbose_stream() << "(opt.maxres.correction-set " << corr_set.size() << ")\n";);
@@ -484,18 +490,13 @@ public:
void update_model(expr* def, expr* value) {
SASSERT(is_uninterp_const(def));
if (m_csmodel) {
expr_ref val(m);
SASSERT(m_csmodel.get());
if (m_csmodel->eval(value, val, true)) {
m_csmodel->register_decl(to_app(def)->get_decl(), val);
}
m_csmodel->register_decl(to_app(def)->get_decl(), (*m_csmodel)(value));
}
}
void process_unsat(exprs const& core) {
IF_VERBOSE(3, verbose_stream() << "(maxres cs model valid: " << (m_csmodel.get() != nullptr) << " cs size:" << m_correction_set_size << " core: " << core.size() << ")\n";);
expr_ref fml(m);
remove_core(core);
SASSERT(!core.empty());
rational w = core_weight(core);
TRACE("opt", display_vec(tout << "minimized core: ", core););
@@ -536,7 +537,7 @@ public:
w = m_mus.get_best_model(mdl);
}
if (mdl.get() && w < m_upper) {
update_assignment(mdl.get());
update_assignment(mdl);
}
return nullptr != mdl.get();
}
@@ -707,10 +708,11 @@ public:
s().assert_expr(fml);
}
void update_assignment(model* mdl) {
void update_assignment(model_ref & mdl) {
mdl->set_model_completion(true);
unsigned correction_set_size = 0;
for (expr* a : m_asms) {
if (is_false(mdl, a)) {
if (mdl->is_false(a)) {
++correction_set_size;
}
}
@@ -719,41 +721,45 @@ public:
m_correction_set_size = correction_set_size;
}
TRACE("opt", tout << *mdl;);
rational upper(0);
expr_ref tmp(m);
unsigned i = 0;
for (expr* s : m_soft) {
if (!is_true(mdl, s)) {
TRACE("opt", tout << mk_pp(s, m) << ": " << (*mdl)(s) << " " << m_weights[i] << "\n";);
if (!mdl->is_true(s)) {
upper += m_weights[i];
}
++i;
}
if (upper > m_upper) {
TRACE("opt", tout << "new upper: " << upper << " vs existing upper: " << m_upper << "\n";);
return;
}
if (!m_c.verify_model(m_index, mdl, upper)) {
if (!m_c.verify_model(m_index, mdl.get(), upper)) {
return;
}
m_model = mdl;
m_c.model_updated(mdl);
m_c.model_updated(mdl.get());
TRACE("opt", model_smt2_pp(tout << "updated model\n", m, *m_model, 0););
TRACE("opt", tout << "updated upper: " << upper << "\nmodel\n" << *m_model;);
i = 0;
for (expr* s : m_soft) {
m_assignment[i++] = is_true(s);
m_assignment[i++] = m_model->is_true(s);
}
// DEBUG_CODE(verify_assignment(););
m_upper = upper;
trace();
add_upper_bound_block();
}
void add_upper_bound_block() {
@@ -769,54 +775,28 @@ public:
s().assert_expr(fml);
}
bool is_true(model* mdl, expr* e) {
expr_ref tmp(m);
return mdl->eval(e, tmp, true) && m.is_true(tmp);
}
bool is_false(model* mdl, expr* e) {
expr_ref tmp(m);
return mdl->eval(e, tmp, true) && m.is_false(tmp);
}
bool is_true(expr* e) {
return is_true(m_model.get(), e);
}
bool is_true(expr_ref_vector const& es) {
unsigned i = 0;
for (; i < es.size() && is_true(es[i]); ++i) { }
CTRACE("opt_bug", i < es.size(), tout << mk_pp(es[i], m) << "\n";
model_smt2_pp(tout, m, *m_model, 0););
return i == es.size();
}
void remove_soft(exprs const& core, expr_ref_vector& asms) {
for (unsigned i = 0; i < asms.size(); ++i) {
if (core.contains(asms[i].get())) {
asms[i] = asms.back();
asms.pop_back();
--i;
}
}
TRACE("opt", tout << "before remove: " << asms << "\n";);
unsigned j = 0;
for (expr* a : asms)
if (!core.contains(a))
asms[j++] = a;
asms.shrink(j);
TRACE("opt", tout << "after remove: " << asms << "\n";);
}
void remove_core(exprs const& core) {
remove_soft(core, m_asms);
}
virtual void updt_params(params_ref& p) {
maxsmt_solver_base::updt_params(p);
opt_params _p(p);
m_hill_climb = _p.maxres_hill_climb();
m_add_upper_bound_block = _p.maxres_add_upper_bound_block();
m_max_num_cores = _p.maxres_max_num_cores();
m_max_core_size = _p.maxres_max_core_size();
m_maximize_assignment = _p.maxres_maximize_assignment();
m_max_correction_set_size = _p.maxres_max_correction_set_size();
m_pivot_on_cs = _p.maxres_pivot_on_correction_set();
m_wmax = _p.maxres_wmax();
m_dump_benchmarks = _p.dump_benchmarks();
virtual void updt_params(params_ref& _p) {
maxsmt_solver_base::updt_params(_p);
opt_params p(_p);
m_hill_climb = p.maxres_hill_climb();
m_add_upper_bound_block = p.maxres_add_upper_bound_block();
m_max_num_cores = p.maxres_max_num_cores();
m_max_core_size = p.maxres_max_core_size();
m_maximize_assignment = p.maxres_maximize_assignment();
m_max_correction_set_size = p.maxres_max_correction_set_size();
m_pivot_on_cs = p.maxres_pivot_on_correction_set();
m_wmax = p.maxres_wmax();
m_dump_benchmarks = p.dump_benchmarks();
}
lbool init_local() {
@@ -828,9 +808,8 @@ public:
if (is_sat != l_true) {
return is_sat;
}
obj_map<expr, rational>::iterator it = new_soft.begin(), end = new_soft.end();
for (; it != end; ++it) {
add_soft(it->m_key, it->m_value);
for (auto const& kv : new_soft) {
add_soft(kv.m_key, kv.m_value);
}
m_max_upper = m_upper;
m_found_feasible_optimum = false;
@@ -843,10 +822,7 @@ public:
virtual void commit_assignment() {
if (m_found_feasible_optimum) {
TRACE("opt", tout << "Committing feasible solution\n";
tout << m_defs;
tout << m_asms;
);
TRACE("opt", tout << "Committing feasible solution\n" << m_defs << " " << m_asms;);
s().assert_expr(m_defs);
s().assert_expr(m_asms);
}
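
remove_soft above now does a single stable compaction pass instead of swap-with-back deletion, so the surviving assumptions keep their relative order. The idiom stands on its own; a stand-alone sketch on std::vector rather than expr_ref_vector:

#include <vector>

// Keep the elements for which keep(x) holds, preserving their relative order.
// This is the compaction shape the patched remove_soft uses (with shrink(j)).
template <typename T, typename Keep>
void keep_if_stable(std::vector<T>& v, Keep keep) {
    std::size_t j = 0;
    for (auto const& x : v)
        if (keep(x))
            v[j++] = x;
    v.resize(j);
}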

@@ -76,9 +76,7 @@ namespace opt {
m_upper.reset();
m_assignment.reset();
for (unsigned i = 0; i < m_weights.size(); ++i) {
expr_ref val(m);
if (!m_model->eval(m_soft[i], val)) return false;
m_assignment.push_back(m.is_true(val));
m_assignment.push_back(m_model->is_true(m_soft[i]));
if (!m_assignment.back()) {
m_upper += m_weights[i];
}
@@ -232,9 +230,7 @@ namespace opt {
m_msolver = nullptr;
symbol const& maxsat_engine = m_c.maxsat_engine();
IF_VERBOSE(1, verbose_stream() << "(maxsmt)\n";);
TRACE("opt", tout << "maxsmt\n";
s().display(tout); tout << "\n";
);
TRACE("opt_verbose", s().display(tout << "maxsmt\n") << "\n";);
if (m_soft_constraints.empty() || maxsat_engine == symbol("maxres") || maxsat_engine == symbol::null) {
m_msolver = mk_maxres(m_c, m_index, m_weights, m_soft_constraints);
}
@@ -455,10 +451,9 @@ namespace opt {
maxsmt.get_model(m_model, labels);
// TBD: is m_fm applied or not?
unsigned j = 0;
expr_ref tmp(m);
for (unsigned i = 0; i < soft.size(); ++i) {
if (m_model->eval(soft[i].first, tmp) && m.is_true(tmp)) {
soft[j++] = soft[i];
for (auto const& p : soft) {
if (m_model->is_true(p.first)) {
soft[j++] = p;
}
}
soft.shrink(j);

@@ -341,6 +341,7 @@ namespace opt {
void context::fix_model(model_ref& mdl) {
if (mdl && !m_model_fixed.contains(mdl.get())) {
TRACE("opt", tout << "fix-model\n";);
(*m_fm)(mdl);
apply(m_model_converter, mdl);
m_model_fixed.push_back(mdl.get());
@@ -350,7 +351,7 @@ namespace opt {
void context::get_model_core(model_ref& mdl) {
mdl = m_model;
fix_model(mdl);
TRACE("opt", model_smt2_pp(tout, m, *mdl.get(), 0););
TRACE("opt", tout << *mdl;);
}
void context::get_box_model(model_ref& mdl, unsigned index) {
@@ -502,7 +503,8 @@ namespace opt {
case O_MINIMIZE:
is_ge = !is_ge;
case O_MAXIMIZE:
if (mdl->eval(obj.m_term, val, true) && is_numeral(val, k)) {
val = (*mdl)(obj.m_term);
if (is_numeral(val, k)) {
if (is_ge) {
result = mk_ge(obj.m_term, val);
}
@@ -522,7 +524,7 @@ namespace opt {
for (unsigned i = 0; i < sz; ++i) {
terms.push_back(obj.m_terms[i]);
coeffs.push_back(obj.m_weights[i]);
if (mdl->eval(obj.m_terms[i], val, true) && m.is_true(val)) {
if (mdl->is_true(obj.m_terms[i])) {
k += obj.m_weights[i];
}
else {
@@ -1036,7 +1038,7 @@ namespace opt {
buffer << prefix << (m_model_counter++) << ".smt2";
std::ofstream out(buffer.str());
if (out) {
model_smt2_pp(out, m, *mdl, 0);
out << *mdl;
out.close();
}
}
@@ -1052,11 +1054,7 @@ namespace opt {
expr_ref val(m);
model_ref mdl = md->copy();
fix_model(mdl);
if (!mdl->eval(term, val, true)) {
TRACE("opt", tout << "Term does not evaluate " << term << "\n";);
return false;
}
val = (*mdl)(term);
unsigned bvsz;
if (!m_arith.is_numeral(val, r) && !m_bv.is_numeral(val, r, bvsz)) {
TRACE("opt", tout << "model does not evaluate objective to a value\n";);
@@ -1195,9 +1193,9 @@ namespace opt {
rational r;
switch(obj.m_type) {
case O_MINIMIZE: {
bool evaluated = m_model->eval(obj.m_term, val, true);
TRACE("opt", tout << obj.m_term << " " << val << " " << evaluated << " " << is_numeral(val, r) << "\n";);
if (evaluated && is_numeral(val, r)) {
val = (*m_model)(obj.m_term);
TRACE("opt", tout << obj.m_term << " " << val << " " << is_numeral(val, r) << "\n";);
if (is_numeral(val, r)) {
inf_eps val = inf_eps(obj.m_adjust_value(r));
TRACE("opt", tout << "adjusted value: " << val << "\n";);
if (is_lower) {
@@ -1210,9 +1208,9 @@ namespace opt {
break;
}
case O_MAXIMIZE: {
bool evaluated = m_model->eval(obj.m_term, val, true);
val = (*m_model)(obj.m_term);
TRACE("opt", tout << obj.m_term << " " << val << "\n";);
if (evaluated && is_numeral(val, r)) {
if (is_numeral(val, r)) {
inf_eps val = inf_eps(obj.m_adjust_value(r));
TRACE("opt", tout << "adjusted value: " << val << "\n";);
if (is_lower) {
@@ -1227,15 +1225,10 @@ namespace opt {
case O_MAXSMT: {
bool ok = true;
for (unsigned j = 0; ok && j < obj.m_terms.size(); ++j) {
bool evaluated = m_model->eval(obj.m_terms[j], val, true);
val = (*m_model)(obj.m_terms[j]);
TRACE("opt", tout << mk_pp(obj.m_terms[j], m) << " " << val << "\n";);
if (evaluated) {
if (!m.is_true(val)) {
r += obj.m_weights[j];
}
}
else {
ok = false;
if (!m.is_true(val)) {
r += obj.m_weights[j];
}
}
if (ok) {
@@ -1485,7 +1478,7 @@ namespace opt {
}
if (is_internal && mc) {
mc->collect(visitor);
mc->set_env(&visitor);
}
param_descrs descrs;
@@ -1531,7 +1524,9 @@ namespace opt {
if (is_internal && mc) {
mc->display(out);
}
if (is_internal && mc) {
mc->set_env(nullptr);
}
out << "(check-sat)\n";
return out.str();
}
@@ -1545,7 +1540,7 @@ namespace opt {
model_ref mdl;
get_model(mdl);
for (expr * f : fmls) {
if (!mdl->eval(f, tmp) || !m.is_true(tmp)) {
if (!mdl->is_true(f)) {
//IF_VERBOSE(0, m_fm->display(verbose_stream() << "fm\n"));
IF_VERBOSE(0, m_model_converter->display(verbose_stream() << "mc\n"));
IF_VERBOSE(0, verbose_stream() << "Failed to validate " << mk_pp(f, m) << "\n" << tmp << "\n");
@@ -1559,18 +1554,14 @@ namespace opt {
void context::validate_maxsat(symbol const& id) {
maxsmt& ms = *m_maxsmts.find(id);
TRACE("opt", tout << "Validate: " << id << "\n";);
for (unsigned i = 0; i < m_objectives.size(); ++i) {
objective const& obj = m_objectives[i];
for (objective const& obj : m_objectives) {
if (obj.m_id == id && obj.m_type == O_MAXSMT) {
SASSERT(obj.m_type == O_MAXSMT);
rational value(0);
expr_ref val(m);
for (unsigned i = 0; i < obj.m_terms.size(); ++i) {
bool evaluated = m_model->eval(obj.m_terms[i], val);
SASSERT(evaluated);
CTRACE("opt", evaluated && !m.is_true(val) && !m.is_false(val), tout << mk_pp(obj.m_terms[i], m) << " " << val << "\n";);
CTRACE("opt", !evaluated, tout << mk_pp(obj.m_terms[i], m) << "\n";);
if (evaluated && !m.is_true(val)) {
auto const& t = obj.m_terms[i];
if (!m_model->is_true(t)) {
value += obj.m_weights[i];
}
// TBD: check that optimal was not changed.
@@ -1595,14 +1586,13 @@ namespace opt {
if (m_optsmt.objective_is_model_valid(obj.m_index) &&
n.get_infinity().is_zero() &&
n.get_infinitesimal().is_zero() &&
m_model->eval(obj.m_term, val) &&
is_numeral(val, r1)) {
is_numeral((*m_model)(obj.m_term), r1)) {
rational r2 = n.get_rational();
if (obj.m_type == O_MINIMIZE) {
r1.neg();
}
CTRACE("opt", r1 != r2, tout << obj.m_term << " evaluates to " << r1 << " but has objective " << r2 << "\n";);
CTRACE("opt", r1 != r2, model_smt2_pp(tout, m, *m_model, 0););
CTRACE("opt", r1 != r2, tout << *m_model;);
SASSERT(r1 == r2);
}
break;
@@ -1610,8 +1600,7 @@ namespace opt {
case O_MAXSMT: {
rational value(0);
for (unsigned i = 0; i < obj.m_terms.size(); ++i) {
bool evaluated = m_model->eval(obj.m_terms[i], val);
if (evaluated && !m.is_true(val)) {
if (!m_model->is_true(obj.m_terms[i])) {
value += obj.m_weights[i];
}
// TBD: check that optimal was not changed.

@@ -318,7 +318,7 @@ namespace opt {
m_s->get_labels(m_labels);
for (unsigned i = 0; i < ors.size(); ++i) {
expr_ref tmp(m);
if (m_model->eval(ors[i].get(), tmp) && m.is_true(tmp)) {
if (m_model->is_true(ors[i].get())) {
m_lower[i] = m_upper[i];
ors[i] = m.mk_false();
disj[i] = m.mk_false();

@@ -179,7 +179,7 @@ namespace smt {
m_orig_model = mdl;
for (unsigned i = 0; i < m_var2decl.size(); ++i) {
expr_ref tmp(m);
m_assignment[i] = mdl->eval(m_var2decl[i], tmp) && m.is_true(tmp);
m_assignment[i] = mdl->is_true(m_var2decl[i]);
}
}
@@ -343,10 +343,7 @@ namespace smt {
for (unsigned i = 0; i < m_clauses.size(); ++i) {
if (!eval(m_clauses[i])) {
m_hard_false.insert(i);
expr_ref tmp(m);
if (!m_orig_model->eval(m_orig_clauses[i].get(), tmp)) {
return;
}
expr_ref tmp = (*m_orig_model)(m_orig_clauses[i].get());
IF_VERBOSE(0,
verbose_stream() << "original evaluation: " << tmp << "\n";
verbose_stream() << mk_pp(m_orig_clauses[i].get(), m) << "\n";
@@ -521,14 +518,13 @@ namespace smt {
literal mk_aux_literal(expr* f) {
unsigned var;
expr_ref tmp(m);
if (!m_decl2var.find(f, var)) {
var = m_hard_occ.size();
SASSERT(m_var2decl.size() == var);
SASSERT(m_soft_occ.size() == var);
m_hard_occ.push_back(unsigned_vector());
m_soft_occ.push_back(unsigned_vector());
m_assignment.push_back(m_orig_model->eval(f, tmp) && m.is_true(tmp));
m_assignment.push_back(m_orig_model->is_true(f));
m_decl2var.insert(f, var);
m_var2decl.push_back(f);
}

@@ -73,8 +73,7 @@ namespace opt {
unsigned first = 0;
it = soft.begin();
for (; it != end; ++it) {
expr_ref tmp(m);
if (m_model->eval(it->m_key, tmp) && m.is_true(tmp)) {
if (m_model->is_true(it->m_key)) {
unsigned n = it->m_value.get_unsigned();
while (n > 0) {
s().assert_expr(out[first]);
@@ -121,8 +120,7 @@ namespace opt {
}
bool is_true(expr* e) {
expr_ref tmp(m);
return m_model->eval(e, tmp) && m.is_true(tmp);
return m_model->is_true(e);
}
// definitions used for sorting network

@@ -120,8 +120,7 @@ namespace opt {
}
bool is_true(expr* e) {
expr_ref tmp(m);
return m_model->eval(e, tmp) && m.is_true(tmp);
return m_model->is_true(e);
}
void update_assignment() {
@@ -307,9 +306,8 @@ namespace opt {
}
void update_model(expr* def, expr* value) {
expr_ref val(m);
if (m_model && m_model->eval(value, val, true)) {
m_model->register_decl(to_app(def)->get_decl(), val);
if (m_model) {
m_model->register_decl(to_app(def)->get_decl(), (*m_model)(value));
}
}
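
The last hunk's update_model registers the evaluated value of a definition directly into the model. The same pattern as a small helper, again assuming z3 internals and that def is an uninterpreted constant (which the maxres version asserts):

#include "model/model.h"

// Bind 'def' (an uninterpreted constant) to the value that 'value' takes in 'mdl'.
// Mirrors the update_model helpers above; sketch only.
static void bind_definition(model_ref& mdl, expr* def, expr* value) {
    if (!mdl.get()) return;          // nothing to update yet
    SASSERT(is_uninterp_const(def));
    mdl->register_decl(to_app(def)->get_decl(), (*mdl)(value));
}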