Merge branch 'parallel' of https://github.com/Z3Prover/z3 into param-tuning
Commit 78844a05e9
5 changed files with 22 additions and 25 deletions
@@ -3617,7 +3617,7 @@ namespace smt {
        \remark A logical context can only be configured at scope level 0,
        and before internalizing any formulas.
     */
-    lbool context::setup_and_check(bool reset_cancel, bool enable_parallel_param_tuning) {
+    lbool context::setup_and_check(bool reset_cancel) {
         if (!check_preamble(reset_cancel)) return l_undef;
         SASSERT(m_scope_lvl == 0);
         SASSERT(!m_setup.already_configured());
@@ -3625,7 +3625,7 @@ namespace smt {

         if (m_fparams.m_threads > 1 && !m.has_trace_stream()) {
             expr_ref_vector asms(m);
-            parallel p(*this, enable_parallel_param_tuning);
+            parallel p(*this);
             return p(asms);
         }

@@ -3685,15 +3685,15 @@ namespace smt {
             }
         }

-    lbool context::check(unsigned num_assumptions, expr * const * assumptions, bool reset_cancel, bool enable_parallel_param_tuning) {
+    lbool context::check(unsigned num_assumptions, expr * const * assumptions, bool reset_cancel) {
         if (!check_preamble(reset_cancel)) return l_undef;
         SASSERT(at_base_level());
         setup_context(false);
         search_completion sc(*this);
         if (m_fparams.m_threads > 1 && !m.has_trace_stream()) {
             expr_ref_vector asms(m, num_assumptions, assumptions);
-            IF_VERBOSE(1, verbose_stream() << "Starting parallel check with " << asms.size() << " assumptions and param tuning enabled: " << enable_parallel_param_tuning << "\n");
-            parallel p(*this, enable_parallel_param_tuning);
+            IF_VERBOSE(1, verbose_stream() << "Starting parallel check with " << asms.size() << " assumptions\n");
+            parallel p(*this);
             return p(asms);
         }
         lbool r = l_undef;
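The parallel path above is only taken when m_fparams.m_threads > 1 and no trace stream is attached. A minimal sketch of how a client reaches that configuration through the public C++ API, assuming the global parameter "smt.threads" is what backs m_fparams.m_threads and that the "smt" tactic routes to this core (both assumptions, not confirmed by this diff):

    // sketch.cpp -- hypothetical usage example, not part of this commit
    #include <iostream>
    #include "z3++.h"

    int main() {
        z3::set_param("smt.threads", 4);   // assumed to populate smt_params::m_threads

        z3::context c;
        z3::solver s = z3::tactic(c, "smt").mk_solver();  // solver backed by the SMT core

        z3::expr x = c.int_const("x");
        z3::expr y = c.int_const("y");
        s.add(x > 0 && y > 0 && x * x + y * y == 1000001);

        // With more than one thread configured, check() dispatches to smt::parallel.
        std::cout << s.check() << "\n";
        return 0;
    }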
@@ -1690,7 +1690,7 @@ namespace smt {

         void pop(unsigned num_scopes);

-        lbool check(unsigned num_assumptions = 0, expr * const * assumptions = nullptr, bool reset_cancel = true, bool enable_parallel_param_tuning = true);
+        lbool check(unsigned num_assumptions = 0, expr * const * assumptions = nullptr, bool reset_cancel = true);

         lbool check(expr_ref_vector const& cube, vector<expr_ref_vector> const& clauses);

@@ -1700,7 +1700,7 @@ namespace smt {

         lbool preferred_sat(expr_ref_vector const& asms, vector<expr_ref_vector>& cores);

-        lbool setup_and_check(bool reset_cancel = true, bool enable_parallel_param_tuning = true);
+        lbool setup_and_check(bool reset_cancel = true);

         void reduce_assertions();

@@ -990,8 +990,7 @@ namespace smt {


     void context::undo_mk_bool_var() {
-        SASSERT(!m_b_internalized_stack.empty(ue key per literal
-        m_lit_scores[lit.sign()][v] += 1.));
+        SASSERT(!m_b_internalized_stack.empty());
         m_stats.m_num_del_bool_var++;
         expr * n = m_b_internalized_stack.back();
         unsigned n_id = n->get_id();
@@ -68,7 +68,7 @@ namespace smt {
     lbool parallel::param_generator::run_prefix_step() {
         if (m.limit().is_canceled())
             return l_undef;
-        IF_VERBOSE(1, verbose_stream() << " PARAM TUNER running prefix step\n");
+        IF_VERBOSE(1, verbose_stream() << " PARAM TUNER running prefix step with conflicts " << m_max_prefix_conflicts <<"\n");
         ctx->get_fparams().m_max_conflicts = m_max_prefix_conflicts;
         ctx->get_fparams().m_threads = 1;

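The prefix step keeps each probe cheap by capping m_max_conflicts. The same budgeting is visible from the public API; a hedged sketch, assuming the global parameter "smt.max_conflicts" is what feeds smt_params::m_max_conflicts (name assumed, verify against your build):

    // conflict_budget.cpp -- hypothetical usage example, not part of this commit
    #include <iostream>
    #include "z3++.h"

    int main() {
        z3::set_param("smt.max_conflicts", 1000);  // analogous to m_max_prefix_conflicts

        z3::context c;
        z3::solver s = z3::tactic(c, "smt").mk_solver();

        z3::expr x = c.int_const("x");
        z3::expr y = c.int_const("y");
        s.add(x > 1 && y > 1 && x * x * y == 994009);

        // If the budget is exhausted, check() returns unknown instead of running on,
        // which is what keeps each prefix permutation cheap to evaluate.
        std::cout << s.check() << "\n";
        return 0;
    }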
@@ -76,7 +76,7 @@ namespace smt {
         ctx->m_recorded_cubes = &m_recorded_cubes;
         lbool r = l_undef;
         try {
-            r = ctx->check(0, nullptr, true, false);
+            r = ctx->check(0, nullptr, true);
             IF_VERBOSE(1, verbose_stream() << " PARAM TUNER: prefix step result " << r << "\n");
         }
         catch (z3_error &err) {
@@ -126,7 +126,7 @@ namespace smt {
             if (m.limit().is_canceled())
                 return;
             // the conflicts and decisions are cumulative over all cube replays inside the probe_ctx
-            lbool r = probe_ctx->check(cube.size(), cube.data(), true, false);
+            lbool r = probe_ctx->check(cube.size(), cube.data(), true);
             IF_VERBOSE(2, verbose_stream() << " PARAM TUNER " << i << ": cube replay result " << r << "\n");
         }
         unsigned conflicts = probe_ctx->m_stats.m_num_conflicts;
@@ -340,10 +340,9 @@ namespace smt {

     parallel::param_generator::param_generator(parallel& p)
         : b(p.m_batch_manager), m_p(p.ctx.get_params()), m_l2g(m, p.ctx.m) {
-        // patch fix so that ctx = alloc(context, m, p.ctx.get_fparams(), m_p); doesn't crash due to some issue with default construction of m
-        ast_translation m_g2l(p.ctx.m, m);
-        m_g2l(p.ctx.m.mk_true());

+        m.copy_families_plugins(p.ctx.m);
+        SASSERT(p.ctx.get_fparams().m_threads == 1);
         ctx = alloc(context, m, p.ctx.get_fparams(), m_p);
         context::copy(p.ctx, *ctx, true);
         // don't share initial units
@@ -539,7 +538,7 @@ namespace smt {
             << bounded_pp_exprs(cube)
             << "with max_conflicts: " << ctx->get_fparams().m_max_conflicts << "\n";);
         try {
-            r = ctx->check(asms.size(), asms.data(), true, false);
+            r = ctx->check(asms.size(), asms.data(), true);
         } catch (z3_error &err) {
             b.set_exception(err.error_code());
         } catch (z3_exception &ex) {
@@ -692,6 +691,7 @@ namespace smt {
             scoped_clear(parallel &p) : p(p) {}
             ~scoped_clear() {
                 p.m_workers.reset();
+                p.m_param_generator = nullptr;
             }
         };
         scoped_clear clear(*this);
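The added line extends the scoped_clear guard so the tuner is released together with the workers on every exit path from operator(), including exceptional ones. A standalone C++ sketch of the same RAII pattern (illustrative names, not the Z3 API):

    #include <memory>
    #include <vector>

    struct Worker {};
    struct ParamGenerator {};

    struct Parallel {
        std::vector<std::unique_ptr<Worker>> m_workers;
        std::unique_ptr<ParamGenerator> m_param_generator;

        bool run() {
            // Clears shared state whichever way run() exits, mirroring scoped_clear above.
            struct scoped_clear {
                Parallel& p;
                explicit scoped_clear(Parallel& p) : p(p) {}
                ~scoped_clear() {
                    p.m_workers.clear();           // counterpart of p.m_workers.reset()
                    p.m_param_generator = nullptr; // counterpart of the added line
                }
            } clear(*this);

            m_param_generator = std::make_unique<ParamGenerator>();
            m_workers.push_back(std::make_unique<Worker>());
            // ... run the workers; a throw here still triggers ~scoped_clear ...
            return true;
        }
    };

    int main() {
        Parallel p;
        return p.run() ? 0 : 1;
    }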
@@ -701,6 +701,8 @@ namespace smt {

         scoped_limits sl(m.limit());
         flet<unsigned> _nt(ctx.m_fparams.m_threads, 1);
+
+        m_param_generator = alloc(param_generator, *this);
         SASSERT(num_threads > 1);
         for (unsigned i = 0; i < num_threads; ++i)
             m_workers.push_back(alloc(worker, i, *this, asms));
@@ -711,13 +713,12 @@ namespace smt {
         sl.push_child(&(m_param_generator->limit()));

         // Launch threads
-        vector<std::thread> threads(m_enable_param_tuner ? num_threads + 1 : num_threads); // +1 for param generator
+        vector<std::thread> threads(num_threads + 1); // +1 for param generator
         for (unsigned i = 0; i < num_threads; ++i) {
             threads[i] = std::thread([&, i]() { m_workers[i]->run(); });
         }
         // the final thread runs the parameter generator
-        if (m_enable_param_tuner)
-            threads[num_threads] = std::thread([&]() { m_param_generator->protocol_iteration(); });
+        threads[num_threads] = std::thread([&]() { m_param_generator->protocol_iteration(); });

         // Wait for all threads to finish
         for (auto &th : threads)
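With the m_enable_param_tuner flag gone, operator() always sizes the pool as num_threads + 1 and unconditionally starts the tuner thread. A standalone C++ sketch of that launch-and-join shape (illustrative, not the Z3 API):

    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
        unsigned num_threads = 4;

        // One slot per worker plus one for the parameter tuner.
        std::vector<std::thread> threads(num_threads + 1);

        for (unsigned i = 0; i < num_threads; ++i)
            threads[i] = std::thread([i]() { std::printf("worker %u\n", i); });

        // The final thread plays the role of the parameter generator.
        threads[num_threads] = std::thread([]() { std::printf("param tuner\n"); });

        // Wait for all threads to finish.
        for (auto &th : threads)
            th.join();
        return 0;
    }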
@@ -36,7 +36,6 @@ namespace smt {
     class parallel {
         context& ctx;
         unsigned num_threads;
-        bool m_enable_param_tuner;

         struct shared_clause {
             unsigned source_worker_id;
@@ -136,7 +135,7 @@ namespace smt {
             scoped_ptr<context> ctx;

             unsigned N = 4; // number of prefix permutations to test (including current)
-            unsigned m_max_prefix_conflicts = 1000;
+            unsigned m_max_prefix_conflicts = 1000; // todo- maybe make this adaptive to grow up to 1000 but start with a smaller base

             scoped_ptr<context> m_prefix_solver;
             vector<expr_ref_vector> m_recorded_cubes;
@@ -222,14 +221,12 @@ namespace smt {
         scoped_ptr<param_generator> m_param_generator;

     public:
-        parallel(context& ctx, bool enable_param_tuner = true) :
+        parallel(context& ctx) :
             ctx(ctx),
             num_threads(std::min(
                 (unsigned)std::thread::hardware_concurrency(),
                 ctx.get_fparams().m_threads)),
-            m_enable_param_tuner(enable_param_tuner),
-            m_batch_manager(ctx.m, *this),
-            m_param_generator(enable_param_tuner ? alloc(param_generator, *this) : nullptr) {}
+            m_batch_manager(ctx.m, *this) {}

         lbool operator()(expr_ref_vector const& asms);
     };
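After this change the constructor no longer decides whether the tuner exists: m_param_generator starts out empty and is allocated inside operator() once a parallel check actually begins (see the @@ -701,6 +701,8 @@ hunk). A standalone C++ sketch of that lazy-allocation choice, including the std::min thread-count clamp (illustrative, not the Z3 API):

    #include <algorithm>
    #include <memory>
    #include <thread>

    struct ParamGenerator {};

    struct Parallel {
        unsigned num_threads;
        std::unique_ptr<ParamGenerator> m_param_generator; // empty until a check starts

        explicit Parallel(unsigned configured_threads)
            : num_threads(std::min(std::thread::hardware_concurrency(),
                                   configured_threads)) {}

        void operator()() {
            // Allocated only when parallel search actually runs.
            m_param_generator = std::make_unique<ParamGenerator>();
            // ... launch num_threads workers plus the tuner thread, then join ...
        }
    };

    int main() {
        Parallel p(8);
        p();
        return p.m_param_generator ? 0 : 1;
    }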