3
0
Fork 0
mirror of https://github.com/Z3Prover/z3 synced 2025-11-23 06:01:26 +00:00

Add toggleable params to smt_parallel_params.pyg for the param-tuning experiments on QF_RDL, and set up the corresponding logic in the smt_parallel files.

This commit is contained in:
Ilana Shapiro 2025-11-11 22:08:07 -08:00
parent 78844a05e9
commit 602d69daf9
3 changed files with 62 additions and 12 deletions

View file

@ -21,5 +21,7 @@ def_module_params('smt_parallel',
('cubetree', BOOL, False, 'use cube tree data structure for storing cubes'), ('cubetree', BOOL, False, 'use cube tree data structure for storing cubes'),
('searchtree', BOOL, False, 'use search tree implementation (parallel2)'), ('searchtree', BOOL, False, 'use search tree implementation (parallel2)'),
('inprocessing', BOOL, False, 'integrate in-processing as a heuristic simplification'), ('inprocessing', BOOL, False, 'integrate in-processing as a heuristic simplification'),
('inprocessing_delay', UINT, 0, 'number of undef before invoking simplification') ('inprocessing_delay', UINT, 0, 'number of undef before invoking simplification'),
('param_tuning', BOOL, False, 'whether to tune params online during solving'),
('tunable_params', STRING, '', 'comma-separated key=value list for online param tuning, e.g. \"smt.arith.nl.horner=false,smt.arith.nl.delay=8\"')
)) ))

View file

@ -163,11 +163,9 @@ namespace smt {
m_param_state.push_back({symbol("smt.arith.nl.expensive_patching"), smtp.arith_nl_expensive_patching()}); m_param_state.push_back({symbol("smt.arith.nl.expensive_patching"), smtp.arith_nl_expensive_patching()});
m_param_state.push_back({symbol("smt.arith.nl.gb"), smtp.arith_nl_grobner()}); m_param_state.push_back({symbol("smt.arith.nl.gb"), smtp.arith_nl_grobner()});
m_param_state.push_back({symbol("smt.arith.nl.horner"), smtp.arith_nl_horner()}); m_param_state.push_back({symbol("smt.arith.nl.horner"), smtp.arith_nl_horner()});
m_param_state.push_back({symbol("smt.arith.nl.horner_frequency"), unsigned_value({smtp.arith_nl_horner_frequency(), 2, 6}) m_param_state.push_back({symbol("smt.arith.nl.horner_frequency"), unsigned_value({smtp.arith_nl_horner_frequency(), 2, 6})});
});
m_param_state.push_back({symbol("smt.arith.nl.optimize_bounds"), smtp.arith_nl_optimize_bounds()}); m_param_state.push_back({symbol("smt.arith.nl.optimize_bounds"), smtp.arith_nl_optimize_bounds()});
m_param_state.push_back( m_param_state.push_back({symbol("smt.arith.nl.propagate_linear_monomials"), smtp.arith_nl_propagate_linear_monomials()});
{symbol("smt.arith.nl.propagate_linear_monomials"), smtp.arith_nl_propagate_linear_monomials()});
m_param_state.push_back({symbol("smt.arith.nl.tangents"), smtp.arith_nl_tangents()}); m_param_state.push_back({symbol("smt.arith.nl.tangents"), smtp.arith_nl_tangents()});
}; };
@ -699,10 +697,12 @@ namespace smt {
m_batch_manager.initialize(); m_batch_manager.initialize();
m_workers.reset(); m_workers.reset();
smt_parallel_params pp(ctx.m_params);
m_should_tune_params = pp.param_tuning();
scoped_limits sl(m.limit()); scoped_limits sl(m.limit());
flet<unsigned> _nt(ctx.m_fparams.m_threads, 1); flet<unsigned> _nt(ctx.m_fparams.m_threads, 1);
m_param_generator = alloc(param_generator, *this);
SASSERT(num_threads > 1); SASSERT(num_threads > 1);
for (unsigned i = 0; i < num_threads; ++i) for (unsigned i = 0; i < num_threads; ++i)
m_workers.push_back(alloc(worker, i, *this, asms)); m_workers.push_back(alloc(worker, i, *this, asms));
@ -710,15 +710,62 @@ namespace smt {
for (auto w : m_workers) for (auto w : m_workers)
sl.push_child(&(w->limit())); sl.push_child(&(w->limit()));
if (m_should_tune_params) {
m_param_generator = alloc(param_generator, *this);
sl.push_child(&(m_param_generator->limit())); sl.push_child(&(m_param_generator->limit()));
}
std::string tuned = pp.tunable_params();
if (!tuned.empty()) {
auto trim = [](std::string &s) {
s.erase(0, s.find_first_not_of(" \t\n\r"));
s.erase(s.find_last_not_of(" \t\n\r") + 1);
};
std::stringstream ss(tuned);
std::string kv;
while (std::getline(ss, kv, ',')) {
size_t eq = kv.find('=');
if (eq == std::string::npos)
continue;
std::string key = kv.substr(0, eq);
std::string val = kv.substr(eq + 1);
trim(key);
trim(val);
if (val == "true" || val == "1") {
ctx.m_params.set_bool(symbol(key.c_str()), true);
}
else if (val == "false" || val == "0") {
ctx.m_params.set_bool(symbol(key.c_str()), false);
}
else if (std::all_of(val.begin(), val.end(), ::isdigit)) {
ctx.m_params.set_uint(symbol(key.c_str()),
static_cast<unsigned>(std::stoul(val)));
}
else {
// if non-numeric and non-bool, just store as string/symbol
ctx.m_params.set_str(symbol(key.c_str()), val.c_str());
}
}
IF_VERBOSE(1,
verbose_stream() << "Applied parameter overrides:\n";
ctx.m_params.display(verbose_stream());
);
}
// Launch threads // Launch threads
vector<std::thread> threads(num_threads + 1); // +1 for param generator vector<std::thread> threads(m_should_tune_params ? num_threads + 1 : num_threads); // +1 for param generator
for (unsigned i = 0; i < num_threads; ++i) { for (unsigned i = 0; i < num_threads; ++i) {
threads[i] = std::thread([&, i]() { m_workers[i]->run(); }); threads[i] = std::thread([&, i]() { m_workers[i]->run(); });
} }
// the final thread runs the parameter generator // the final thread runs the parameter generator
if (m_should_tune_params) {
threads[num_threads] = std::thread([&]() { m_param_generator->protocol_iteration(); }); threads[num_threads] = std::thread([&]() { m_param_generator->protocol_iteration(); });
}
// Wait for all threads to finish // Wait for all threads to finish
for (auto &th : threads) for (auto &th : threads)

View file

@ -36,6 +36,7 @@ namespace smt {
class parallel { class parallel {
context& ctx; context& ctx;
unsigned num_threads; unsigned num_threads;
bool m_should_tune_params;
struct shared_clause { struct shared_clause {
unsigned source_worker_id; unsigned source_worker_id;
@ -73,7 +74,7 @@ namespace smt {
void cancel_background_threads() { void cancel_background_threads() {
cancel_workers(); cancel_workers();
cancel_param_generator(); if (p.m_should_tune_params) cancel_param_generator();
} }
// called from batch manager to cancel other workers if we've reached a verdict // called from batch manager to cancel other workers if we've reached a verdict