mirror of https://github.com/Z3Prover/z3
synced 2025-08-26 21:16:02 +00:00

commit e520a42a05 ("merge")
4 changed files with 205 additions and 22 deletions
@@ -199,7 +199,8 @@ namespace smt {
         };

         lit_node* m_dll_lits;

-        svector<std::array<double, 2>> m_lit_scores;
+        // svector<std::array<double, 2>> m_lit_scores;
+        svector<double> m_lit_scores[2];

         clause_vector m_aux_clauses;
         clause_vector m_lemmas;
@@ -947,11 +948,14 @@ namespace smt {
         void dump_axiom(unsigned n, literal const* lits);
         void add_scores(unsigned n, literal const* lits);
         void reset_scores() {
-            for (auto& s : m_lit_scores) s[0] = s[1] = 0.0;
+            for (auto& e : m_lit_scores[0])
+                e = 0;
+            for (auto& e : m_lit_scores[1])
+                e = 0;
             m_pq_scores.clear(); // Clear the priority queue heap as well
         }
         double get_score(literal l) const {
-            return m_lit_scores[l.var()][l.sign()];
+            return m_lit_scores[l.sign()][l.var()];
         }
     public:
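Reader's note on the two hunks above: they replace one svector of std::array<double, 2> with two parallel svectors indexed phase-first, so a score is now read as m_lit_scores[sign][var] instead of m_lit_scores[var][sign]. A minimal standalone sketch of the resulting layout, using std::vector and hypothetical names rather than Z3's svector:

// Sketch only, not Z3 source: phase-major score table.
#include <cassert>
#include <vector>

struct lit_scores {
    std::vector<double> m_scores[2]; // m_scores[sign][var]; sign 0 = positive phase

    void reserve_var(unsigned v) {
        for (auto& s : m_scores)
            if (s.size() <= v)
                s.resize(v + 1, 0.0);
        m_scores[0][v] = m_scores[1][v] = 0.0;
    }
    double get_score(unsigned var, bool sign) const {
        return m_scores[sign][var]; // same index order as the patched get_score
    }
    void reset_scores() {
        for (auto& s : m_scores)
            for (auto& e : s)
                e = 0.0;
    }
};

int main() {
    lit_scores t;
    t.reserve_var(3);
    t.m_scores[1][3] += 0.5;             // bump the negative phase of var 3
    assert(t.get_score(3, true) == 0.5); // sign = true reads the negative phase
    t.reset_scores();
    assert(t.get_score(3, true) == 0.0);
    return 0;
}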
@@ -931,8 +931,10 @@ namespace smt {
         set_bool_var(id, v);
         m_bdata.reserve(v+1);
         m_activity.reserve(v+1);
-        m_lit_scores.reserve(v + 1);
-        m_lit_scores[v][0] = m_lit_scores[v][1] = 0.0;
+        m_lit_scores[0].reserve(v + 1);
+        m_lit_scores[1].reserve(v + 1);
+
+        m_lit_scores[0][v] = m_lit_scores[1][v] = 0.0;
         m_bool_var2expr.reserve(v+1);
         m_bool_var2expr[v] = n;
         literal l(v, false);
@@ -1543,10 +1545,9 @@ namespace smt {
             auto lit = lits[i];
             unsigned v = lit.var(); // unique key per literal

-            auto curr_score = m_lit_scores[v][0] * m_lit_scores[v][1];
-            m_lit_scores[v][lit.sign()] += 1.0 / n;
+            m_lit_scores[lit.sign()][v] += 1.0 / n;

-            auto new_score = m_lit_scores[v][0] * m_lit_scores[v][1];
+            auto new_score = m_lit_scores[0][v] * m_lit_scores[1][v];
             m_pq_scores.set(v, new_score);

         }
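The add_scores hunk is easier to read with the update rule spelled out: each literal of an n-literal clause earns 1.0/n for its phase, and the priority-queue key for a variable is the product of its positive- and negative-phase scores, so variables exercised in both phases rank highest. A standalone sketch of that arithmetic (hypothetical names, not Z3 code):

// Sketch only: the 1/n bump and the product-of-phases priority.
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> score[2] = { {0.0, 0.0}, {0.0, 0.0} }; // [sign][var], two vars
    auto bump = [&](unsigned var, bool sign, unsigned n) {
        score[sign][var] += 1.0 / n; // each literal of an n-literal clause earns 1/n
    };
    // clause (x0 or not x1) has n = 2: each literal earns 0.5
    bump(0, false, 2);
    bump(1, true, 2);
    // unit clause (x1) has n = 1: full credit
    bump(1, false, 1);
    for (unsigned v = 0; v < 2; ++v)
        std::printf("var %u priority = %g\n", v, score[0][v] * score[1][v]);
    // var 0: 0.5 * 0.0 = 0; var 1: 1.0 * 0.5 = 0.5, so var 1 would be preferred
    return 0;
}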
@@ -40,6 +40,132 @@ namespace smt {

 namespace smt {

+    void parallel::worker::run() {
+        ast_translation tr(ctx->m, m);
+        while (m.inc()) {
+            vector<expr_ref_vector> cubes;
+            b.get_cubes(tr, cubes);
+            if (cubes.empty())
+                return;
+            for (auto& cube : cubes) {
+                if (!m.inc())
+                    return; // stop if the main context is cancelled
+                switch (check_cube(cube)) {
+                case l_undef:
+                    // return unprocessed cubes to the batch manager.
+                    // add a split literal to the batch manager.
+                    // optionally process other cubes and delay sending back unprocessed cubes to the batch manager.
+                    break;
+                case l_true: {
+                    model_ref mdl;
+                    ctx->get_model(mdl);
+                    //b.set_sat(tr, *mdl);
+                    return;
+                }
+                case l_false:
+                    // if the unsat core only contains assumptions, then unsat.
+                    // otherwise, extract lemmas that can be shared (units (and unsat core?)).
+                    // share with the batch manager.
+                    // process the next cube.
+                    break;
+                }
+            }
+        }
+    }
+
+    parallel::worker::worker(parallel& p, context& _ctx, expr_ref_vector const& _asms): p(p), b(p.m_batch_manager), m_smt_params(_ctx.get_fparams()), asms(m) {
+        ast_translation g2l(_ctx.m, m);
+        for (auto e : _asms)
+            asms.push_back(g2l(e));
+        m_smt_params.m_preprocess = false;
+        ctx = alloc(context, m, m_smt_params, _ctx.get_params());
+    }
+
+    lbool parallel::worker::check_cube(expr_ref_vector const& cube) {
+        return l_undef;
+    }
+
+    void parallel::batch_manager::get_cubes(ast_translation& g2l, vector<expr_ref_vector>& cubes) {
+        std::scoped_lock lock(mux);
+        if (m_cubes.size() == 1 && m_cubes[0].size() == 0) {
+            // special initialization: the first cube is empty; have the worker work on an empty cube.
+            cubes.push_back(expr_ref_vector(g2l.to()));
+            return;
+        }
+        // TODO: adjust to the number of worker threads running.
+        // if the size of m_cubes is less than m_max_batch_size / num_threads, then return fewer cubes.
+        for (unsigned i = 0; i < m_max_batch_size && !m_cubes.empty(); ++i) {
+            auto& cube = m_cubes.back();
+            expr_ref_vector l_cube(g2l.to());
+            for (auto& e : cube) {
+                l_cube.push_back(g2l(e));
+            }
+            cubes.push_back(l_cube);
+            m_cubes.pop_back();
+        }
+    }
+
+    void parallel::batch_manager::return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms) {
+        std::scoped_lock lock(mux);
+        for (auto& c : cubes) {
+            expr_ref_vector g_cube(l2g.to());
+            for (auto& e : c) {
+                g_cube.push_back(l2g(e));
+            }
+            // TODO: split this g_cube on the m_split_atoms that do not already occur in g_cube as literals.
+            m_cubes.push_back(g_cube);
+        }
+        // TODO: avoid making m_cubes too large.
+        for (auto& atom : split_atoms) {
+            expr_ref g_atom(l2g.to()); // the translated atom lives in the target (global) manager
+            g_atom = l2g(atom);
+            if (m_split_atoms.contains(g_atom))
+                continue;
+            m_split_atoms.push_back(g_atom);
+            unsigned sz = m_cubes.size();
+            for (unsigned i = 0; i < sz; ++i) {
+                m_cubes.push_back(m_cubes[i]);              // copy the existing cube
+                m_cubes.back().push_back(m.mk_not(g_atom)); // the copy takes the negated split atom
+                m_cubes[i].push_back(g_atom);               // the original takes the positive split atom
+            }
+        }
+    }
+
+    lbool parallel::new_check(expr_ref_vector const& asms) {
+        ast_manager& m = ctx.m;
+        {
+            scoped_limits sl(m.limit());
+            unsigned num_threads = std::min((unsigned)std::thread::hardware_concurrency(), ctx.get_fparams().m_threads);
+            SASSERT(num_threads > 1);
+            for (unsigned i = 0; i < num_threads; ++i)
+                m_workers.push_back(alloc(worker, *this, ctx, asms));
+            for (auto w : m_workers)
+                sl.push_child(&(w->limit()));
+
+            // Launch threads
+            vector<std::thread> threads(num_threads);
+            for (unsigned i = 0; i < num_threads; ++i) {
+                threads[i] = std::thread([&, i]() {
+                    m_workers[i]->run();
+                });
+            }
+
+            // Wait for all threads to finish
+            for (auto& th : threads)
+                th.join();
+        }
+        m_workers.clear();
+        return m_batch_manager.get_result();
+    }
+
     lbool parallel::operator()(expr_ref_vector const& asms) {

         lbool result = l_undef;
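The splitting loop at the end of return_cubes is the core of the batching scheme: each fresh split atom a replaces every queued cube C by the pair C with a and C with not-a, so k fresh atoms multiply the queue size by 2^k, which is what the TODO about m_cubes growing too large refers to. A standalone sketch with integers standing in for atoms (a negative value means the negated atom; hypothetical, not Z3 code):

// Sketch only: doubling the cube queue per split atom, as in return_cubes.
#include <cstdio>
#include <vector>

using cube = std::vector<int>;

void split(std::vector<cube>& cubes, int atom) {
    size_t sz = cubes.size();
    for (size_t i = 0; i < sz; ++i) {
        cubes.push_back(cubes[i]);     // copy the existing cube
        cubes.back().push_back(-atom); // the copy takes the negated atom
        cubes[i].push_back(atom);      // the original takes the positive atom
    }
}

int main() {
    std::vector<cube> cubes = { {} };  // start from the single empty cube
    split(cubes, 1);
    split(cubes, 2);
    // 2 split atoms -> 4 cubes: {1,2} {-1,2} {1,-2} {-1,-2}
    for (auto& c : cubes) {
        for (int l : c)
            std::printf("%d ", l);
        std::printf("\n");
    }
    return 0;
}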
@@ -101,16 +227,6 @@ namespace smt {
         }

-        auto cube = [](context& ctx, expr_ref_vector& lasms, expr_ref& c) {
-            lookahead lh(ctx);
-            c = lh.choose();
-            if (c) {
-                if ((ctx.get_random_value() % 2) == 0)
-                    c = c.get_manager().mk_not(c);
-                lasms.push_back(c);
-            }
-        };
-
         auto cube_pq = [&](context& ctx, expr_ref_vector& lasms, expr_ref& c) {
             unsigned k = 3; // Number of top literals you want
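The surviving cube_pq lambda picks the k highest-scoring literals (k = 3 above) instead of the removed lookahead choice. The hunk cuts off before the selection itself, so the following is only a guess at its shape: a standalone top-k pick by score, assuming the scores come from the get_score/m_pq_scores machinery patched earlier (hypothetical names, not Z3 code):

// Sketch only: select the k variables with the highest product scores.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
    // (variable, score) pairs; score = positive * negative phase, as in add_scores
    std::vector<std::pair<unsigned, double>> scored = {
        {0, 0.05}, {1, 0.50}, {2, 0.20}, {3, 0.35}
    };
    unsigned k = 3; // number of top-scoring literals to take
    std::partial_sort(scored.begin(), scored.begin() + k, scored.end(),
                      [](auto& a, auto& b) { return a.second > b.second; });
    for (unsigned i = 0; i < k; ++i)
        std::printf("var %u score %g\n", scored[i].first, scored[i].second);
    // prints vars 1, 3, 2: the three highest product scores
    return 0;
}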
@@ -256,7 +372,6 @@ namespace smt {
                   verbose_stream() << " :cube " << mk_bounded_pp(mk_and(cube), pm, 3);
                   verbose_stream() << ")\n";);

         lbool r = pctx.check(lasms_copy.size(), lasms_copy.data());
-        std::cout << "Thread " << i << " finished cube " << mk_bounded_pp(mk_and(cube), pm, 3) << " with result: " << r << "\n";
         results.push_back(r);
@@ -421,9 +536,9 @@ namespace smt {
         }

         std::cout << "Cubes out:\n";
-        for (size_t j = 0; j < cube_batch.size(); ++j) {
+        for (unsigned j = 0; j < cube_batch.size(); ++j) {
             std::cout << "  [" << j << "]\n";
-            for (size_t k = 0; k < cube_batch[j].size(); ++k) {
+            for (unsigned k = 0; k < cube_batch[j].size(); ++k) {
                 std::cout << "    [" << k << "] " << mk_pp(cube_batch[j][k].get(), m) << "\n";
             }
         }
@@ -24,8 +24,71 @@ namespace smt {

     class parallel {
         context& ctx;

+        class batch_manager {
+            ast_manager& m;
+            parallel& p;
+            std::mutex mux;
+            expr_ref_vector m_split_atoms; // atoms to split on
+            vector<expr_ref_vector> m_cubes;
+            lbool m_result = l_false;
+            unsigned m_max_batch_size = 10;
+
+        public:
+            batch_manager(ast_manager& m, parallel& p) : m(m), p(p), m_split_atoms(m) { m_cubes.push_back(expr_ref_vector(m)); }
+            void set_unsat();
+            void set_sat(ast_translation& l2g, model& m);
+            void set_exception(std::string const& msg);
+            void set_exception(unsigned error_code);
+
+            //
+            // Worker threads ask the batch manager for a supply of cubes to check.
+            // They pass in a translation function, g2l, from the global context to the local context (ast_manager).
+            // The batch manager returns a list of cubes to solve.
+            //
+            void get_cubes(ast_translation& g2l, vector<expr_ref_vector>& cubes);
+
+            //
+            // Worker threads return unprocessed cubes to the batch manager together with split-literal candidates.
+            // The batch manager re-enqueues unprocessed cubes and optionally splits them using the split_atoms returned by this and other workers.
+            //
+            void return_cubes(ast_translation& l2g, vector<expr_ref_vector> const& cubes, expr_ref_vector const& split_atoms);
+            void share_lemma(ast_translation& l2g, expr* lemma);
+            lbool get_result() const { return m.limit().is_canceled() ? l_undef : m_result; }
+        };
+
+        class worker {
+            parallel& p;
+            batch_manager& b;
+            ast_manager m;
+            expr_ref_vector asms;
+            smt_params m_smt_params;
+            scoped_ptr<context> ctx;
+            unsigned m_max_conflicts = 100;
+            unsigned m_num_shared_units = 0;
+            void share_units();
+            lbool check_cube(expr_ref_vector const& cube);
+        public:
+            worker(parallel& p, context& _ctx, expr_ref_vector const& _asms);
+            void run();
+            void cancel() {
+                m.limit().cancel();
+            }
+            void collect_statistics(::statistics& st) const {
+                ctx->collect_statistics(st);
+            }
+            reslimit& limit() {
+                return m.limit();
+            }
+        };
+
+        batch_manager m_batch_manager;
+        ptr_vector<worker> m_workers;
+
+        lbool new_check(expr_ref_vector const& asms);
+
     public:
-        parallel(context& ctx): ctx(ctx) {}
+        parallel(context& ctx): ctx(ctx), m_batch_manager(ctx.m, *this) {}

         lbool operator()(expr_ref_vector const& asms);
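new_check in the .cpp hunk ties these declarations together: one worker per available hardware thread, each worker's reslimit registered as a child of the main limit so cancellation propagates, and all workers run on std::thread and joined before the batch manager's result is read. A standalone sketch of that fan-out/join-with-cancellation shape, with an atomic flag standing in for Z3's scoped_limits/reslimit (hypothetical, standard library only):

// Sketch only: launch N workers, let the first result cancel the rest, join all.
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

int main() {
    unsigned num_threads = std::max(2u, std::thread::hardware_concurrency());
    std::atomic<bool> cancel{false}; // stands in for the shared reslimit
    std::mutex mux;                  // batch-manager style lock around shared state
    std::vector<int> results;

    std::vector<std::thread> threads;
    for (unsigned i = 0; i < num_threads; ++i) {
        threads.emplace_back([&, i]() {
            while (!cancel.load()) {        // workers poll cancellation, like m.inc()
                std::scoped_lock lock(mux); // critical section, as in get_cubes
                results.push_back((int)i);  // "solve a cube" and record the outcome
                cancel.store(true);         // first finished worker cancels the rest
            }
        });
    }
    for (auto& th : threads)
        th.join(); // join all workers before reading the shared result
    std::printf("joined %u workers, %zu result(s)\n", num_threads, results.size());
    return 0;
}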