mirror of https://github.com/Z3Prover/z3

fix parallel solving bugs

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
Nikolaj Bjorner 2017-02-11 15:35:13 -05:00
parent 4220432ac3
commit 690689424d
6 changed files with 43 additions and 56 deletions

View file

@@ -327,6 +327,7 @@ public:
m_found_feasible_optimum = true;
}
virtual lbool operator()() {
m_defs.reset();
switch(m_st) {
@@ -731,7 +732,7 @@ public:
m_assignment[i] = is_true(m_soft[i]);
}
DEBUG_CODE(verify_assignment(););
// DEBUG_CODE(verify_assignment(););
m_upper = upper;
trace();
@@ -866,6 +867,7 @@ public:
if (is_sat == l_false) {
IF_VERBOSE(0, verbose_stream() << "assignment is infeasible\n";);
}
IF_VERBOSE(1, verbose_stream() << "verified\n";);
}
};
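
The hunks above comment out the per-update debug check (DEBUG_CODE(verify_assignment());), reset m_defs at the start of operator()(), and make the verification routine announce success at verbosity level 1. In Z3, DEBUG_CODE runs its body only in debug builds, while IF_VERBOSE(n, ...) runs its body whenever the configured verbosity is at least n, so the outcome of the check is now reported rather than asserted. A minimal standalone illustration of that reporting pattern follows; the macro and helper are hypothetical stand-ins, not Z3's own definitions.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for the IF_VERBOSE(level, code) idiom: run the
    // diagnostic code only when the configured verbosity is high enough.
    static unsigned g_verbosity = 1;
    #define IF_VERBOSE_SKETCH(LVL, CODE) do { if (g_verbosity >= (LVL)) { CODE; } } while (0)

    // Toy stand-in for verify_assignment: check that every soft constraint
    // claimed to be satisfied really is satisfied by the final assignment.
    static bool verify(const std::vector<bool>& claimed, const std::vector<bool>& actual) {
        for (std::size_t i = 0; i < claimed.size(); ++i)
            if (claimed[i] && !actual[i]) return false;
        return true;
    }

    int main() {
        std::vector<bool> claimed = {true, false, true};
        std::vector<bool> actual  = {true, false, true};
        if (!verify(claimed, actual))
            IF_VERBOSE_SKETCH(0, std::cout << "assignment is infeasible\n");
        IF_VERBOSE_SKETCH(1, std::cout << "verified\n");
    }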

View file

@@ -297,7 +297,7 @@ namespace sat {
}
// TBD: need proper check for overflow.
if (offset > (1 << 12)) {
IF_VERBOSE(2, verbose_stream() << "offset: " << offset << "\n";
IF_VERBOSE(12, verbose_stream() << "offset: " << offset << "\n";
active2pb(m_A);
display(verbose_stream(), m_A);
);
@@ -698,7 +698,7 @@ namespace sat {
extension* card_extension::copy(solver* s) {
card_extension* result = alloc(card_extension);
result->set_solver(s);
for (unsigned i = 1; i < m_constraints.size(); ++i) {
for (unsigned i = 0; i < m_constraints.size(); ++i) {
literal_vector lits;
card& c = *m_constraints[i];
for (unsigned i = 0; i < c.size(); ++i) {
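
The card_extension::copy hunk above fixes an off-by-one in the clone used for parallel search: starting the loop at i = 1 apparently skipped m_constraints[0], so every cloned solver worked with a weaker set of cardinality constraints than the original. (The earlier hunk also raises the verbosity level of the large-offset diagnostic from 2 to 12, making it quieter by default.) A small hypothetical illustration of that kind of clone bug, with toy Card and Extension types rather than Z3's classes:

    #include <cassert>
    #include <vector>

    // Toy stand-ins for a cardinality constraint and the extension that owns them.
    struct Card { std::vector<int> lits; unsigned k; };

    struct Extension {
        std::vector<Card> constraints;

        // Deep-copy every constraint into a clone for a child solver.
        Extension copy() const {
            Extension result;
            // Starting this loop at i = 1 (the bug fixed above) would silently
            // drop constraints[0] from every clone, so the spawned solvers
            // would explore a different problem than the original.
            for (unsigned i = 0; i < constraints.size(); ++i)
                result.constraints.push_back(constraints[i]);
            return result;
        }
    };

    int main() {
        Extension ext;
        ext.constraints.push_back({{1, 2, 3}, 2});
        ext.constraints.push_back({{4, 5}, 1});
        Extension clone = ext.copy();
        assert(clone.constraints.size() == ext.constraints.size());
    }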

View file

@@ -39,33 +39,22 @@ namespace sat {
m_vectors.resize(sz, 0);
m_heads.reset();
m_heads.resize(num_threads, 0);
m_at_end.reset();
m_at_end.resize(num_threads, true);
m_tail = 0;
m_size = sz;
}
void parallel::vector_pool::begin_add_vector(unsigned owner, unsigned n) {
SASSERT(m_tail < m_size);
unsigned capacity = n + 2;
m_vectors.reserve(m_size + capacity, 0);
IF_VERBOSE(3, verbose_stream() << owner << ": begin-add " << n << " tail: " << m_tail << " size: " << m_size << "\n";);
if (m_tail >= m_size) {
// move tail to the front.
for (unsigned i = 0; i < m_heads.size(); ++i) {
// the tail could potentially loop around full circle before one of the heads picks up anything.
// in this case we miss the newly inserted record.
while (m_heads[i] < capacity) {
next(m_heads[i]);
}
IF_VERBOSE(3, verbose_stream() << owner << ": head: " << m_heads[i] << "\n";);
}
m_tail = 0;
}
else {
for (unsigned i = 0; i < m_heads.size(); ++i) {
while (m_tail < m_heads[i] && m_heads[i] < m_tail + capacity) {
next(m_heads[i]);
}
IF_VERBOSE(3, verbose_stream() << owner << ": head: " << m_heads[i] << "\n";);
for (unsigned i = 0; i < m_heads.size(); ++i) {
while (m_tail < m_heads[i] && m_heads[i] < m_tail + capacity) {
next(m_heads[i]);
}
m_at_end[i] = false;
}
m_vectors[m_tail++] = owner;
m_vectors[m_tail++] = n;
@@ -75,18 +64,23 @@ namespace sat {
m_vectors[m_tail++] = e;
}
void parallel::vector_pool::end_add_vector() {
if (m_tail >= m_size) {
m_tail = 0;
}
}
bool parallel::vector_pool::get_vector(unsigned owner, unsigned& n, unsigned const*& ptr) {
unsigned head = m_heads[owner];
unsigned iterations = 0;
while (head != m_tail) {
while (head != m_tail || !m_at_end[owner]) {
++iterations;
if (head == 0 && m_tail >= m_size) {
break;
}
SASSERT(head < m_size);
IF_VERBOSE(static_cast<unsigned>(iterations > m_size ? 0 : 3), verbose_stream() << owner << ": head: " << head << " tail: " << m_tail << "\n";);
SASSERT(head < m_size && m_tail < m_size);
bool is_self = owner == get_owner(head);
next(m_heads[owner]);
IF_VERBOSE(static_cast<unsigned>(iterations > m_size ? 0 : 3), verbose_stream() << owner << ": [" << head << ":" << m_heads[owner] << "] tail: " << m_tail << "\n";);
m_at_end[owner] = (m_heads[owner] == m_tail);
if (!is_self) {
n = get_length(head);
ptr = get_ptr(head);
@@ -156,6 +150,7 @@ namespace sat {
m_pool.begin_add_vector(s.m_par_id, 2);
m_pool.add_vector_elem(l1.index());
m_pool.add_vector_elem(l2.index());
m_pool.end_add_vector();
}
}
@@ -171,6 +166,7 @@ namespace sat {
for (unsigned i = 0; i < n; ++i) {
m_pool.add_vector_elem(c[i].index());
}
m_pool.end_add_vector();
}
}
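
The vector_pool hunks above are the heart of the fix. The pool is essentially a fixed circular buffer through which the parallel solvers exchange vectors of literal indices: each record is written at the shared tail as [owner, length, elements...], and every consumer advances its own head over it. Before this change, a consumer whose head had caught up with the tail could not distinguish "nothing left for me to read" from "the tail wrapped around back to my position", which is what the new per-consumer m_at_end flags and the explicit end_add_vector() call address. The sketch below shows that head/tail disambiguation in a heavily simplified form: one whole vector per slot instead of Z3's flat packed buffer, with synchronization assumed to be handled by the caller; the class and its names are illustrative only.

    #include <cstdio>
    #include <utility>
    #include <vector>

    class VectorPoolSketch {
        std::vector<std::pair<unsigned, std::vector<unsigned>>> m_ring; // (owner, data)
        std::vector<unsigned> m_heads;   // one read position per consumer
        std::vector<bool>     m_at_end;  // consumer i has read everything up to m_tail
        unsigned m_tail = 0;

        void next(unsigned& index) const { index = (index + 1) % m_ring.size(); }
    public:
        VectorPoolSketch(unsigned num_consumers, unsigned num_slots)
            : m_ring(num_slots), m_heads(num_consumers, 0), m_at_end(num_consumers, true) {}

        void add_vector(unsigned owner, std::vector<unsigned> data) {
            for (unsigned i = 0; i < m_heads.size(); ++i) {
                // a consumer about to read this slot gets lapped: that record is dropped for it
                if (m_heads[i] == m_tail && !m_at_end[i]) next(m_heads[i]);
                m_at_end[i] = false;            // everyone now has something unread
            }
            m_ring[m_tail] = {owner, std::move(data)};
            next(m_tail);
        }

        // Return the next vector produced by some *other* owner, if any.
        bool get_vector(unsigned consumer, std::vector<unsigned>& out) {
            while (m_heads[consumer] != m_tail || !m_at_end[consumer]) {
                unsigned head = m_heads[consumer];
                next(m_heads[consumer]);
                m_at_end[consumer] = (m_heads[consumer] == m_tail);
                if (m_ring[head].first != consumer) {   // skip records we produced ourselves
                    out = m_ring[head].second;
                    return true;
                }
            }
            return false;
        }
    };

    int main() {
        VectorPoolSketch pool(2, 4);
        pool.add_vector(0, {7, 8});     // produced by consumer 0
        pool.add_vector(1, {9});        // produced by consumer 1
        std::vector<unsigned> v;
        while (pool.get_vector(0, v))   // consumer 0 only sees the vector from owner 1
            std::printf("consumer 0 got a vector of size %zu\n", v.size());
    }

Without the flag, get_vector's loop condition would be head != tail alone, which is the pre-fix behavior described in the removed comment: if the tail wraps a full circle and lands back on a consumer's head before that consumer polls, the consumer sees head == tail and silently misses everything that was inserted.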

View file

@@ -16,8 +16,8 @@ Author:
Revision History:
--*/
#ifndef SAT_PAR_H_
#define SAT_PAR_H_
#ifndef SAT_PARALLEL_H_
#define SAT_PARALLEL_H_
#include"sat_types.h"
#include"hashtable.h"
@@ -34,6 +34,7 @@ namespace sat {
unsigned m_size;
unsigned m_tail;
unsigned_vector m_heads;
svector<bool> m_at_end;
void next(unsigned& index);
unsigned get_owner(unsigned index) const { return m_vectors[index]; }
unsigned get_length(unsigned index) const { return m_vectors[index+1]; }
@@ -42,6 +43,7 @@ namespace sat {
vector_pool() {}
void reserve(unsigned num_owners, unsigned sz);
void begin_add_vector(unsigned owner, unsigned n);
void end_add_vector();
void add_vector_elem(unsigned e);
bool get_vector(unsigned owner, unsigned& n, unsigned const*& ptr);
};

View file

@@ -100,11 +100,10 @@ namespace sat {
if (src.get_extension()) {
m_ext = src.get_extension()->copy(this);
}
{
unsigned sz = src.init_trail_size();
for (unsigned i = 0; i < sz; ++i) {
assign(src.m_trail[i], justification());
}
unsigned trail_sz = src.init_trail_size();
for (unsigned i = 0; i < trail_sz; ++i) {
assign(src.m_trail[i], justification());
}
// copy binary clauses
@@ -365,12 +364,6 @@ namespace sat {
}
unsigned some_idx = c.size() >> 1;
literal block_lit = c[some_idx];
if (m_watches.size() <= (~c[0]).index()) std::cout << c << "\n";
if (m_watches.size() <= (~c[1]).index()) std::cout << c << "\n";
if (m_watches[(~c[0]).index()].size() >= 20000) {
std::cout << m_par_id << ": " << c << "\n";
enable_trace("sat");
}
m_watches[(~c[0]).index()].push_back(watched(block_lit, cls_off));
m_watches[(~c[1]).index()].push_back(watched(block_lit, cls_off));
return reinit;
@@ -843,7 +836,6 @@ namespace sat {
int num_threads = static_cast<int>(m_config.m_num_threads);
int num_extra_solvers = num_threads - 1;
sat::parallel par(*this);
// par.reserve(num_threads, 1 << 16);
par.reserve(num_threads, 1 << 9);
par.init_solvers(*this, num_extra_solvers);
int finished_id = -1;
@@ -872,13 +864,6 @@ namespace sat {
}
}
if (first) {
if (r == l_true && i < num_extra_solvers) {
set_model(par.get_solver(i).get_model());
}
else if (r == l_false && i < num_extra_solvers) {
m_core.reset();
m_core.append(par.get_solver(i).get_core());
}
for (int j = 0; j < num_extra_solvers; ++j) {
if (i != j) {
par.cancel_solver(j);
@@ -901,14 +886,21 @@ namespace sat {
ex_kind = DEFAULT_EX;
}
}
set_par(0, 0);
if (finished_id != -1 && finished_id < num_extra_solvers) {
m_stats = par.get_solver(finished_id).m_stats;
}
if (result == l_true && finished_id != -1 && finished_id < num_extra_solvers) {
set_model(par.get_solver(finished_id).get_model());
}
else if (result == l_false && finished_id != -1 && finished_id < num_extra_solvers) {
m_core.reset();
m_core.append(par.get_solver(finished_id).get_core());
}
if (!canceled) {
rlimit().reset_cancel();
}
set_par(0, 0);
if (finished_id == -1) {
switch (ex_kind) {
case ERROR_EX: throw z3_error(error_code);
@@ -925,8 +917,7 @@ namespace sat {
void solver::exchange_par() {
if (m_par && at_base_lvl()) m_par->get_clauses(*this);
if (m_par && at_base_lvl()) {
// std::cout << scope_lvl() << " " << search_lvl() << "\n";
SASSERT(scope_lvl() == search_lvl());
// SASSERT(scope_lvl() == search_lvl());
// TBD: import also dependencies of assumptions.
unsigned sz = init_trail_size();
unsigned num_in = 0, num_out = 0;
@@ -1681,7 +1672,6 @@ namespace sat {
return false;
default:
if (new_sz != sz) {
std::cout << "shrinking " << c << "\n";
if (m_config.m_drat) m_drat.del(c);
c.shrink(new_sz);
if (m_config.m_drat) m_drat.add(c, true);
@@ -2320,7 +2310,6 @@ namespace sat {
}
#if 0
if (!drop && i >= bound) {
// std::cout << "drop\n";
j = sz;
break;
}
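
The largest change in this file reworks how the winning thread's answer is extracted. Previously the first solver to finish had its model or core copied inside the parallel loop; now that branch only cancels the remaining solvers, and the statistics, model, or unsat core appear to be pulled from par.get_solver(finished_id) once the loop has completed. The file also comments out the scope_lvl() == search_lvl() assertion in exchange_par and deletes leftover commented-out code and debugging std::cout statements. A stripped-down sketch of that portfolio pattern follows; Result, Worker, and solve() are toy stand-ins, not Z3's solver API.

    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    enum class Result { undef, sat, unsat };

    struct Worker {
        std::atomic<bool> cancel{false};
        Result result = Result::undef;
        std::vector<int> model;            // filled on sat, by analogy with get_model()

        void solve(int id) {
            // pretend to search, polling the cancellation flag like a resource limit
            for (int step = 0; step < 100 && !cancel; ++step)
                std::this_thread::sleep_for(std::chrono::milliseconds(1));
            if (!cancel) { result = Result::sat; model = {id}; }
        }
    };

    int main() {
        const int num_workers = 4;
        std::vector<Worker> workers(num_workers);
        std::mutex mux;
        int finished_id = -1;
        Result result = Result::undef;

        std::vector<std::thread> threads;
        for (int i = 0; i < num_workers; ++i) {
            threads.emplace_back([&, i] {
                workers[i].solve(i);
                std::lock_guard<std::mutex> lock(mux);
                if (finished_id == -1 && workers[i].result != Result::undef) {
                    finished_id = i;                     // remember the winner only
                    result = workers[i].result;
                    for (int j = 0; j < num_workers; ++j)
                        if (j != i) workers[j].cancel = true;
                }
            });
        }
        for (auto& t : threads) t.join();

        // Only now, with every worker stopped, copy the winner's model (or core).
        if (finished_id != -1 && result == Result::sat)
            std::printf("winner %d, model size %zu\n",
                        finished_id, workers[finished_id].model.size());
    }

Copying the result only after every thread has joined avoids reading a worker's model or core while its siblings are still running and possibly still pushing clauses through the shared pool.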

View file

@@ -91,7 +91,6 @@ public:
virtual ~inc_sat_solver() {}
virtual solver* translate(ast_manager& dst_m, params_ref const& p) {
std::cout << "translate\n";
ast_translation tr(m, dst_m);
if (m_num_scopes > 0) {
throw default_exception("Cannot translate sat solver at non-base level");
@@ -219,7 +218,6 @@ public:
m_params.append(p);
sat_params p1(p);
m_params.set_bool("elim_vars", false);
std::cout << m_params << "\n";
m_solver.updt_params(m_params);
m_optimize_model = m_params.get_bool("optimize_model", false);
}