Mirror of https://github.com/Z3Prover/z3, synced 2025-08-26 13:06:05 +00:00

Commit 445339d2d4 (parent 58e312190d): merge
1 changed file with 6 additions and 4 deletions
@@ -50,7 +50,7 @@ namespace smt {
                 return;
             for (auto& cube : cubes) {
                 if (!m.inc())
-                    return; // stop if the main context is cancelled
+                    return; // stop if the main context (i.e. parent thread) is cancelled
                 switch (check_cube(cube)) {
                 case l_undef: {
                     // return unprocessed cubes to the batch manager
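The `m.inc()` guard is the cooperative cancellation check the amended comment describes: workers poll it between cubes rather than being interrupted. A minimal standalone sketch of the same pattern, with a `std::atomic<bool>` standing in for Z3's manager limit check (all names here are illustrative, not Z3 API):

```cpp
#include <atomic>
#include <vector>

// Illustrative only: g_cancelled plays the role of m.inc() returning false
// once the parent context is cancelled or a resource limit is hit.
std::atomic<bool> g_cancelled{false};

void process_cubes(std::vector<int> const& cubes) {
    for (int cube : cubes) {
        if (g_cancelled.load(std::memory_order_relaxed))
            return; // stop if the parent thread requested cancellation
        (void)cube; // ... check_cube(cube) would run here ...
    }
}
```

Relaxed ordering suffices in this sketch because the flag only gates termination; no data is published through it.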
@@ -152,7 +152,7 @@ namespace smt {
     void parallel::batch_manager::share_lemma(ast_translation& l2g, expr* lemma) {
         std::scoped_lock lock(mux);
         expr_ref g_lemma(l2g(lemma), l2g.to());
-        p.ctx.assert_expr(g_lemma); // QUESTION: where does this get shared with the local thread contexts?
+        p.ctx.assert_expr(g_lemma); // QUESTION: where does this get shared with the local thread contexts? -- doesn't right now, we will build the scaffolding for this later!
     }
 
 
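`share_lemma` translates a worker-local lemma into the global `ast_manager` via `ast_translation` and asserts it on the main context; the amended comment is explicit that nothing flows back to the worker contexts yet. A hypothetical sketch of such scaffolding, with `std::string` standing in for translated expressions (nothing here is Z3's actual design):

```cpp
#include <mutex>
#include <string>
#include <vector>

// Hypothetical lemma pool: share_lemma would publish into it, and each
// worker would periodically drain the lemmas it has not yet consumed.
struct lemma_pool {
    std::mutex mux;
    std::vector<std::string> lemmas; // stand-in for global-manager exprs

    void share(std::string lemma) {
        std::scoped_lock lock(mux);
        lemmas.push_back(std::move(lemma));
    }

    // Each worker keeps its own cursor counting lemmas already consumed.
    std::vector<std::string> fetch_new(size_t& cursor) {
        std::scoped_lock lock(mux);
        std::vector<std::string> fresh(lemmas.begin() + cursor, lemmas.end());
        cursor = lemmas.size();
        return fresh;
    }
};
```

A real implementation would also re-translate each lemma into every worker's own `ast_manager` before asserting it there, mirroring the `l2g` translation above in the opposite direction.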
@@ -256,6 +256,8 @@ namespace smt {
         }
 #endif
 
+    // frugal strategy: only split on returned cubes
+    //
     void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms) {
         std::scoped_lock lock(mux);
         for (auto & c : cubes) {
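The new comment names a "frugal" policy: only the cubes being returned are split on the split atoms, while cubes already queued are left untouched. One possible reading of that policy, sketched with signed ints as literals (an illustration, not the actual `return_cubes` logic):

```cpp
#include <vector>

using Cube = std::vector<int>; // literals as signed ints, -x meaning ¬x

// Frugal policy sketch: each returned cube is refined on each split atom
// into the 2^k combinations over the k atoms; previously queued cubes are
// not revisited.
void return_cubes_frugal(std::vector<Cube>& queue,
                         std::vector<Cube> const& returned,
                         std::vector<int> const& split_atoms) {
    for (Cube const& c : returned) {
        std::vector<Cube> frontier{c};
        for (int atom : split_atoms) {
            std::vector<Cube> next;
            for (Cube const& cube : frontier) {
                Cube neg = cube, pos = cube;
                neg.push_back(-atom); // cube ∧ ¬atom
                pos.push_back(atom);  // cube ∧ atom
                next.push_back(std::move(neg));
                next.push_back(std::move(pos));
            }
            frontier = std::move(next);
        }
        for (Cube& cube : frontier)
            queue.push_back(std::move(cube));
    }
}
```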
@@ -272,6 +274,7 @@ namespace smt {
                 continue;
 
             // Split base: one copy with ¬atom, one with atom
+            // TODO FIX: THIS CAN RESULT IN SEGFAULT because it's a pointer to a pointer vector, which may have changed!
             m_cubes.push_back(base); // push new copy of base cube
             m_cubes.back().push_back(m.mk_not(atom)); // add ¬atom to new copy
             base.push_back(atom); // add atom to base cube
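The new TODO flags a reference-invalidation hazard: if `base` refers to an element of `m_cubes`, then `m_cubes.push_back(base)` may reallocate the vector's storage and leave `base` dangling, making the later `base.push_back(atom)` undefined behavior. A standalone demonstration with `std::vector` (Z3's own `vector` template likewise reallocates on growth):

```cpp
#include <vector>

int main() {
    std::vector<std::vector<int>> cubes{{1, 2}};
    std::vector<int>& base = cubes[0]; // reference into cubes

    cubes.push_back(base);   // may reallocate; base can now dangle
    // base.push_back(3);    // undefined behavior if reallocation happened

    // Safe variant: take a value copy before growing, then re-index.
    std::vector<int> copy = cubes[0];
    copy.push_back(-3);      // the copy gets ¬atom
    cubes.push_back(std::move(copy));
    cubes[0].push_back(3);   // re-index instead of using the stale reference
    return 0;
}
```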
@@ -326,7 +329,6 @@ namespace smt {
 
         {
             scoped_limits sl(m.limit());
-            unsigned num_threads = std::min((unsigned)std::thread::hardware_concurrency(), ctx.get_fparams().m_threads);
             SASSERT(num_threads > 1);
             for (unsigned i = 0; i < num_threads; ++i)
                 m_workers.push_back(alloc(worker, i, *this, asms)); // i.e. "new worker(i, *this, asms)"
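The deleted line clamped the worker count to the smaller of the hardware's reported concurrency and the configured `m_threads`; since the `SASSERT` below it still uses `num_threads`, the value presumably now comes from elsewhere in this branch. A minimal sketch of that clamp, including the guard for `hardware_concurrency()` returning 0 when the value cannot be determined:

```cpp
#include <algorithm>
#include <thread>

// Clamp a configured thread count to what the hardware reports.
// hardware_concurrency() may return 0 if it cannot determine a value.
unsigned clamp_num_threads(unsigned configured) {
    unsigned hw = std::thread::hardware_concurrency();
    if (hw == 0)
        hw = 1; // fall back to a single worker when unknown
    return std::max(1u, std::min(hw, configured));
}
```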
@@ -354,7 +356,7 @@ namespace smt {
             w->collect_statistics(ctx.m_aux_stats);
         }
         m_workers.clear();
-        return m_batch_manager.get_result();
+        return m_batch_manager.get_result(); // i.e. all threads have finished all of their cubes -- so if state::is_running is still true, it means the entire formula is unsat (otherwise a thread would have returned l_undef)
     }
 
     lbool parallel::operator()(expr_ref_vector const& asms) {
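The expanded comment encodes an invariant rather than a code change: `get_result()` runs only after every worker has joined, so a state still reading `is_running` means no thread found a model or gave up, i.e. every cube was refuted and the formula is unsat. A hypothetical sketch of that mapping (all names invented for illustration):

```cpp
#include <mutex>

enum class lbool_s { l_false, l_undef, l_true }; // stand-in for Z3's lbool
enum class state { is_running, sat, unsat, cancelled };

struct batch_manager_sketch {
    std::mutex mux;
    state m_state = state::is_running;

    lbool_s get_result() {
        std::scoped_lock lock(mux);
        switch (m_state) {
        case state::is_running: // all cubes refuted; no thread flipped the state
        case state::unsat:
            return lbool_s::l_false;
        case state::sat:
            return lbool_s::l_true;
        default: // cancelled or resource limit reached
            return lbool_s::l_undef;
        }
    }
};
```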