diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp index 0d65d745e..c78c8ad3b 100644 --- a/src/smt/smt_parallel.cpp +++ b/src/smt/smt_parallel.cpp @@ -171,6 +171,15 @@ namespace smt { m_num_shared_units = sz; } + void parallel::worker::collect_statistics(::statistics& st) const { + ctx->collect_statistics(st); + } + + void parallel::worker::cancel() { + LOG_WORKER(1, " canceling\n"); + m.limit().cancel(); + } + void parallel::batch_manager::init_parameters_state() { auto& smt_params = p.ctx.get_fparams(); std::function<std::function<void(void)>(unsigned&)> inc = [](unsigned& v) { return [&]() -> void { ++v; }; }; @@ -425,6 +434,8 @@ namespace smt { m_cubes.push_back(cube); m_cubes.back().push_back(m.mk_not(atom)); m_cubes[i].push_back(atom); + m_stats.m_max_cube_size = std::max(m_stats.m_max_cube_size, m_cubes.back().size()); + m_stats.m_num_cubes += 2; } }; @@ -530,6 +541,12 @@ namespace smt { m_config.m_never_cube = sp.never_cube(); } + void parallel::batch_manager::collect_statistics(::statistics& st) const { + //ctx->collect_statistics(st); + st.update("parallel-num_cubes", m_stats.m_num_cubes); + st.update("parallel-max-cube-size", m_stats.m_max_cube_size); + } + lbool parallel::operator()(expr_ref_vector const& asms) { ast_manager& m = ctx.m; @@ -573,7 +590,8 @@ namespace smt { th.join(); for (auto w : m_workers) - w->collect_statistics(ctx.m_aux_stats); + w->collect_statistics(ctx.m_aux_stats); + m_batch_manager.collect_statistics(ctx.m_aux_stats); } return m_batch_manager.get_result(); // i.e.
all threads have finished all of their cubes -- so if state::is_running is still true, means the entire formula is unsat (otherwise a thread would have returned l_undef) diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h index 3275cc4da..dd9a15c82 100644 --- a/src/smt/smt_parallel.h +++ b/src/smt/smt_parallel.h @@ -50,12 +50,17 @@ namespace smt { bool m_frugal_cube_only = false; bool m_never_cube = false; }; + struct stats { + unsigned m_max_cube_size = 0; + unsigned m_num_cubes = 0; + }; ast_manager& m; parallel& p; std::mutex mux; state m_state = state::is_running; config m_config; + stats m_stats; expr_ref_vector m_split_atoms; // atoms to split on vector<expr_ref_vector> m_cubes; unsigned m_max_batch_size = 10; @@ -99,6 +104,8 @@ namespace smt { void report_assumption_used(ast_translation& l2g, expr* assumption); void collect_clause(ast_translation& l2g, unsigned source_worker_id, expr* e); expr_ref_vector return_shared_clauses(ast_translation& g2l, unsigned& worker_limit, unsigned worker_id); + + void collect_statistics(::statistics& st) const; lbool get_result() const; }; @@ -114,6 +121,7 @@ namespace smt { bool m_share_units_initial_only = false; bool m_cube_initial_only = false; }; + unsigned id; // unique identifier for the worker parallel& p; batch_manager& b; @@ -137,13 +145,9 @@ namespace smt { expr_ref_vector get_split_atoms(); void collect_shared_clauses(ast_translation& g2l); - void cancel() { - IF_VERBOSE(1, verbose_stream() << "Worker " << id << " canceling\n"); - m.limit().cancel(); - } - void collect_statistics(::statistics& st) const { - ctx->collect_statistics(st); - } + void cancel(); + void collect_statistics(::statistics& st) const; + reslimit& limit() { return m.limit(); }