Mirror of https://github.com/Z3Prover/z3, synced 2025-08-26 13:06:05 +00:00
Commit 9b060cace3 (parent aa5d833b38)

updates

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>

2 changed files with 128 additions and 41 deletions
@@ -41,58 +41,74 @@ namespace smt {
 namespace smt {

     void parallel::worker::run() {
-        ast_translation tr(ctx->m, m);
+        ast_translation g2l(ctx->m, m);
+        ast_translation l2g(m, ctx->m);
         while (m.inc()) {
             vector<expr_ref_vector> cubes;
-            b.get_cubes(tr, cubes);
+            b.get_cubes(g2l, cubes);
             if (cubes.empty())
                 return;
             for (auto& cube : cubes) {
                 if (!m.inc())
                     return; // stop if the main context is cancelled
                 switch (check_cube(cube)) {
-                case l_undef:
-                    // return unprocessed cubes to the batch manager
-                    // add a split literal to the batch manager.
-                    // optionally process other cubes and delay sending back unprocessed cubes to batch manager.
-                    b.m_cubes.push_back(cube); // TODO: add access funcs for m_cubes
+                case l_undef: {
+                    vector<expr_ref_vector> returned_cubes;
+                    returned_cubes.push_back(cube);
+                    auto split_atoms = get_split_atoms();
+                    b.return_cubes(l2g, returned_cubes, split_atoms);
                     break;
+                }
                 case l_true: {
                     model_ref mdl;
                     ctx->get_model(mdl);
-                    if (mdl)
-                        ctx->set_model(mdl->translate(tr));
-                    //b.set_sat(tr, *mdl);
+                    b.set_sat(l2g, *mdl);
                     return;
                 }
-                case l_false:
-                    // if unsat core only contains assumptions, then unsat
-                    // otherwise, extract lemmas that can be shared (units (and unsat core?)).
-                    // share with batch manager.
-                    // process next cube.
-                    ctx->m_unsat_core.reset();
-                    for (expr* e : pctx.unsat_core()) // TODO: move this logic to the batch manager since this is per-thread
-                        ctx->m_unsat_core.push_back(tr(e));
+                case l_false: {
+                    auto const& unsat_core = ctx->unsat_core();
+                    // If the unsat core only contains assumptions,
+                    // unsatisfiability does not depend on the current cube and the entire problem is unsat.
+                    if (any_of(unsat_core, [&](expr* e) { return asms.contains(e); })) {
+                        b.set_unsat(l2g, ctx->unsat_core());
+                        return;
+                    }
+                    // TODO: can share lemmas here, such as new units and not(and(unsat_core)), binary clauses, etc.
+                    // TODO: remember assumptions used in core so that they get used for the final core.
                     break;
+                }
                 }
                 }
             }
         }
+    }

-    parallel::worker::worker(parallel& p, context& _ctx, expr_ref_vector const& _asms): p(p), b(p.m_batch_manager), m_smt_params(_ctx.get_fparams()), asms(m) {
-        ast_translation g2l(_ctx.m, m);
+    parallel::worker::worker(unsigned id, parallel& p, expr_ref_vector const& _asms): id(id), p(p), b(p.m_batch_manager), m_smt_params(p.ctx.get_fparams()), asms(m) {
+        ast_translation g2l(p.ctx.m, m);
         for (auto e : _asms)
             asms.push_back(g2l(e));
         m_smt_params.m_preprocess = false;
-        ctx = alloc(context, m, m_smt_params, _ctx.get_params());
+        ctx = alloc(context, m, m_smt_params, p.ctx.get_params());
+        context::copy(p.ctx, *ctx, true);
+        ctx->set_random_seed(id + m_smt_params.m_random_seed);
     }


     lbool parallel::worker::check_cube(expr_ref_vector const& cube) {
-        for (auto& atom : cube) {
+        for (auto& atom : cube)
             asms.push_back(atom);
+        lbool r = l_undef;
+        try {
+            r = ctx->check(asms.size(), asms.data());
+        }
+        catch (z3_error& err) {
+            b.set_exception(err.error_code());
+        }
+        catch (z3_exception& ex) {
+            b.set_exception(ex.what());
+        }
+        catch (...) {
+            b.set_exception("unknown exception");
         }
-        lbool r = ctx->check(asms.size(), asms.data());
         asms.shrink(asms.size() - cube.size());
         return r;
     }
@@ -118,11 +134,56 @@ namespace smt {

     void parallel::batch_manager::set_sat(ast_translation& l2g, model& m) {
         std::scoped_lock lock(mux);
-        if (m_result == l_true || m_result == l_undef) {
-            m_result = l_true;
+        if (l_true == m_result)
             return;
-        }
         m_result = l_true;
+        p.ctx.set_model(m.translate(l2g));
+        cancel_workers();
+    }
+
+    void parallel::batch_manager::set_unsat(ast_translation& l2g, expr_ref_vector const& unsat_core) {
+        std::scoped_lock lock(mux);
+        if (l_false == m_result)
+            return;
+        m_result = l_false;
+        expr_ref_vector g_core(l2g.to());
+        for (auto& e : unsat_core)
+            g_core.push_back(l2g(e));
+        p.ctx.m_unsat_core.reset();
+        for (expr* e : unsat_core)
+            p.ctx.m_unsat_core.push_back(l2g(e));
+        cancel_workers();
+    }
+
+    void parallel::batch_manager::set_exception(unsigned error_code) {
+        std::scoped_lock lock(mux);
+        if (m_exception_kind != NO_EX)
+            return; // already set
+        m_exception_kind = ERROR_CODE_EX;
+        m_exception_code = error_code;
+        cancel_workers();
+    }
+
+    void parallel::batch_manager::set_exception(std::string const& msg) {
+        std::scoped_lock lock(mux);
+        if (m_exception_kind != NO_EX)
+            return; // already set
+        m_exception_kind = ERROR_MSG_EX;
+        m_exception_msg = msg;
+        cancel_workers();
+    }
+
+    lbool parallel::batch_manager::get_result() const {
+        if (m_exception_kind == ERROR_MSG_EX)
+            throw default_exception(m_exception_msg.c_str());
+        if (m_exception_kind == ERROR_CODE_EX)
+            throw z3_error(m_exception_code);
+        if (m.limit().is_canceled())
+            return l_undef; // the main context was cancelled, so we return undef.
+        return m_result;
+    }
+
+#if 0
         for (auto& c : m_cubes) {
             expr_ref_vector g_cube(l2g.to());
             for (auto& e : c) {
@@ -130,7 +191,7 @@ namespace smt {
             }
             share_lemma(l2g, mk_and(g_cube));
         }
-    }
+#endif

     void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector>const& cubes, expr_ref_vector const& split_atoms) {
         std::scoped_lock lock(mux);
@@ -168,25 +229,33 @@ namespace smt {

         expr_ref_vector top_lits(m);
         for (const auto& node : candidates) {
-            if (ctx->get_assignment(node.key) != l_undef) continue;
+            if (ctx->get_assignment(node.key) != l_undef)
+                continue;

             expr* e = ctx->bool_var2expr(node.key);
-            if (!e) continue;
+            if (!e)
+                continue;

             top_lits.push_back(expr_ref(e, m));
-            if (top_lits.size() >= k) break;
+            if (top_lits.size() >= k)
+                break;
         }
         return top_lits;
     }

     lbool parallel::new_check(expr_ref_vector const& asms) {
+
         ast_manager& m = ctx.m;
+
+        if (m.has_trace_stream())
+            throw default_exception("trace streams have to be off in parallel mode");
+
         {
             scoped_limits sl(m.limit());
             unsigned num_threads = std::min((unsigned)std::thread::hardware_concurrency(), ctx.get_fparams().m_threads);
             SASSERT(num_threads > 1);
             for (unsigned i = 0; i < num_threads; ++i)
-                m_workers.push_back(alloc(worker, *this, ctx, asms));
+                m_workers.push_back(alloc(worker, i, *this, asms));

             // THIS WILL ALLOW YOU TO CANCEL ALL THE CHILD THREADS
             // within the lexical scope of the code block, creates a data structure that allows you to push children
@@ -206,6 +275,9 @@ namespace smt {
             // Wait for all threads to finish
             for (auto& th : threads)
                 th.join();
+
+            for (auto w : m_workers)
+                w->collect_statistics(ctx.m_aux_stats);
         }
         m_workers.clear();
         return m_batch_manager.get_result();
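A note on the result-aggregation protocol visible above: each worker reports through the batch manager (set_sat, set_unsat, set_exception), the first report wins, and the remaining workers are cancelled; get_result later re-raises any stored exception on the calling thread. The standalone sketch below distills that pattern under stated assumptions; result_board, worker_main, and the plain std::atomic cancellation flag are hypothetical stand-ins, not Z3 API (the real batch_manager also translates models and unsat cores between per-thread ast_managers and cancels workers through their contexts).

// Standalone sketch (not Z3 code): "first result wins, then cancel the rest".
#include <atomic>
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

enum class outcome { unknown, sat, unsat };

class result_board {
    std::mutex mux;
    outcome m_result = outcome::unknown;
    bool m_has_exception = false;
    std::string m_exception_msg;
    std::atomic<bool>& m_cancel;            // shared cancellation flag for all workers
public:
    explicit result_board(std::atomic<bool>& cancel) : m_cancel(cancel) {}

    // First reported result wins; later calls are ignored, mirroring the
    // early-return guards in set_sat/set_unsat.
    void set_result(outcome o) {
        std::scoped_lock lock(mux);
        if (m_result != outcome::unknown)
            return;
        m_result = o;
        m_cancel = true;                    // analogous to cancel_workers()
    }

    // Mirrors set_exception: remember only the first failure and stop everyone.
    void set_exception(std::string msg) {
        std::scoped_lock lock(mux);
        if (m_has_exception)
            return;
        m_has_exception = true;
        m_exception_msg = std::move(msg);
        m_cancel = true;
    }

    // Mirrors get_result: surface a stored exception on the caller's thread.
    outcome get_result() {
        std::scoped_lock lock(mux);
        if (m_has_exception)
            throw std::runtime_error(m_exception_msg);
        return m_result;
    }
};

// A worker never touches the parent directly; it only reports to the board.
void worker_main(unsigned id, result_board& board, std::atomic<bool>& cancel) {
    try {
        while (!cancel) {
            if (id == 2) {                  // pretend this worker solves its cube
                board.set_result(outcome::sat);
                return;
            }
            std::this_thread::yield();
        }
    }
    catch (std::exception& ex) {
        board.set_exception(ex.what());
    }
}

int main() {
    std::atomic<bool> cancel{false};
    result_board board(cancel);
    std::vector<std::thread> threads;
    for (unsigned id = 0; id < 4; ++id)
        threads.emplace_back(worker_main, id, std::ref(board), std::ref(cancel));
    for (auto& th : threads)
        th.join();
    std::cout << (board.get_result() == outcome::sat ? "sat" : "unknown") << "\n";
}

The remaining hunks are in the second changed file, the header that declares the batch_manager and worker classes.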
@@ -27,6 +27,12 @@ namespace smt {
        unsigned num_threads;

        class batch_manager {
+
+            enum exception_kind {
+                NO_EX,
+                ERROR_CODE_EX,
+                ERROR_MSG_EX
+            };
            ast_manager& m;
            parallel& p;
            std::mutex mux;
@@ -34,10 +40,18 @@ namespace smt {
            vector<expr_ref_vector> m_cubes;
            lbool m_result = l_false;
            unsigned m_max_batch_size = 10;
+            exception_kind m_exception_kind = NO_EX;
+            unsigned m_exception_code = 0;
+            std::string m_exception_msg;
+
+            void cancel_workers() {
+                for (auto& w : p.m_workers)
+                    w->cancel();
+            }
+
        public:
            batch_manager(ast_manager& m, parallel& p) : m(m), p(p), m_split_atoms(m) { m_cubes.push_back(expr_ref_vector(m)); }
-            void set_unsat();
+            void set_unsat(ast_translation& l2g, expr_ref_vector const& unsat_core);
            void set_sat(ast_translation& l2g, model& m);
            void set_exception(std::string const& msg);
            void set_exception(unsigned error_code);
@@ -55,10 +69,11 @@ namespace smt {
            //
            void return_cubes(ast_translation& l2g, vector<expr_ref_vector>const& cubes, expr_ref_vector const& split_atoms);
            void share_lemma(ast_translation& l2g, expr* lemma);
-            lbool get_result() const { return m.limit().is_canceled() ? l_undef : m_result; }
+            lbool get_result() const;
        };

        class worker {
+            unsigned id; // unique identifier for the worker
            parallel& p;
            batch_manager& b;
            ast_manager m;
@@ -70,7 +85,7 @@ namespace smt {
            void share_units();
            lbool check_cube(expr_ref_vector const& cube);
        public:
-            worker(parallel& p, context& _ctx, expr_ref_vector const& _asms);
+            worker(unsigned id, parallel& p, expr_ref_vector const& _asms);
            void run();
            expr_ref_vector get_split_atoms();
            void cancel() {
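For context on the cube hand-off declared above (get_cubes, return_cubes, get_split_atoms): in a cube-and-conquer setup, a worker that cannot finish a cube hands it back together with undecided split atoms, and the manager replaces it with child cubes. The sketch below shows only that general idea under stated assumptions; cube_pool and its string literals are hypothetical, and how Z3's return_cubes actually combines returned cubes with split atoms is not shown in this diff.

// Standalone sketch (not Z3 code): splitting a returned cube on one atom.
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

using cube = std::vector<std::string>;      // a cube is a conjunction of literals

class cube_pool {
    std::mutex mux;
    std::vector<cube> m_cubes{ cube{} };    // start from the empty cube (whole problem)
public:
    // Hand out one cube to a worker; returns false when the pool is empty.
    bool pop(cube& out) {
        std::scoped_lock lock(mux);
        if (m_cubes.empty())
            return false;
        out = m_cubes.back();
        m_cubes.pop_back();
        return true;
    }
    // Take back an unfinished cube split on one undecided atom:
    // it becomes the two child cubes (cube with atom) and (cube with not atom).
    void return_split(cube const& c, std::string const& atom) {
        std::scoped_lock lock(mux);
        cube pos = c, neg = c;
        pos.push_back(atom);
        neg.push_back("!" + atom);
        m_cubes.push_back(pos);
        m_cubes.push_back(neg);
    }
};

int main() {
    cube_pool pool;
    cube c;
    pool.pop(c);                            // take the root cube
    pool.return_split(c, "x");              // give it back, split on atom x
    while (pool.pop(c)) {                   // the pool now holds the two children
        std::cout << "cube:";
        for (auto const& lit : c)
            std::cout << ' ' << lit;
        std::cout << '\n';
    }
}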