3
0
Fork 0
mirror of https://github.com/Z3Prover/z3 synced 2025-04-12 12:08:18 +00:00

cleanup thread pool of scoped_timer on memory finalize

but keep it alive on Z3_memory_reset()
This commit is contained in:
Nuno Lopes 2020-10-24 12:46:50 +01:00
parent 0213af3c61
commit 4e9035d4b9
5 changed files with 37 additions and 22 deletions

View file

@@ -426,7 +426,7 @@ extern "C" {
void Z3_API Z3_reset_memory(void) { void Z3_API Z3_reset_memory(void) {
LOG_Z3_reset_memory(); LOG_Z3_reset_memory();
memory::finalize(); memory::finalize(false);
memory::initialize(0); memory::initialize(0);
} }

View file

@@ -27,6 +27,7 @@ Copyright (c) 2015 Microsoft Corporation
// rational::finalize(); // rational::finalize();
void mem_initialize(); void mem_initialize();
void mem_finalize(); void mem_finalize();
void finalize_scoped_timer();
// If PROFILE_MEMORY is defined, Z3 will display the amount of memory used, and the number of synchronization steps during finalization // If PROFILE_MEMORY is defined, Z3 will display the amount of memory used, and the number of synchronization steps during finalization
// #define PROFILE_MEMORY // #define PROFILE_MEMORY
@@ -130,7 +131,7 @@ void memory::set_max_alloc_count(size_t max_count) {
static bool g_finalizing = false; static bool g_finalizing = false;
void memory::finalize() { void memory::finalize(bool shutdown) {
if (g_memory_initialized) { if (g_memory_initialized) {
g_finalizing = true; g_finalizing = true;
mem_finalize(); mem_finalize();
@@ -139,6 +140,10 @@ void memory::finalize() {
//delete g_memory_mux; //delete g_memory_mux;
g_memory_initialized = false; g_memory_initialized = false;
g_finalizing = false; g_finalizing = false;
if (shutdown) {
finalize_scoped_timer();
}
} }
} }

View file

@@ -55,7 +55,7 @@ public:
static bool above_high_watermark(); static bool above_high_watermark();
static void set_max_size(size_t max_size); static void set_max_size(size_t max_size);
static void set_max_alloc_count(size_t max_count); static void set_max_alloc_count(size_t max_count);
static void finalize(); static void finalize(bool shutdown = true);
static void display_max_usage(std::ostream& os); static void display_max_usage(std::ostream& os);
static void display_i_max_usage(std::ostream& os); static void display_i_max_usage(std::ostream& os);
static void deallocate(void* p); static void deallocate(void* p);

View file

@@ -20,6 +20,7 @@ Revision History:
--*/ --*/
#include "util/scoped_timer.h" #include "util/scoped_timer.h"
#include "util/mutex.h"
#include "util/util.h" #include "util/util.h"
#include "util/vector.h" #include "util/vector.h"
#include <chrono> #include <chrono>
@@ -32,20 +33,15 @@ Revision History:
struct state { struct state {
std::thread * m_thread { nullptr }; std::thread * m_thread { nullptr };
std::timed_mutex m_mutex; std::timed_mutex m_mutex;
unsigned ms { 0 }; event_handler * eh;
event_handler * eh { nullptr }; unsigned ms;
int work { 0 }; int work;
std::condition_variable_any cv; std::condition_variable_any cv;
}; };
/*
* NOTE: this implementation deliberately leaks threads when Z3
* exits. this is preferable to deallocating on exit, because
* destructing threads blocked on condition variables leads to
* deadlock.
*/
static std::vector<state*> available_workers; static std::vector<state*> available_workers;
static std::mutex workers; static std::mutex workers;
static atomic<unsigned> num_workers(0);
static void thread_func(state *s) { static void thread_func(state *s) {
workers.lock(); workers.lock();
@@ -53,6 +49,10 @@ static void thread_func(state *s) {
s->cv.wait(workers, [=]{ return s->work > 0; }); s->cv.wait(workers, [=]{ return s->work > 0; });
workers.unlock(); workers.unlock();
// exiting..
if (s->work == 2)
return;
auto end = std::chrono::steady_clock::now() + std::chrono::milliseconds(s->ms); auto end = std::chrono::steady_clock::now() + std::chrono::milliseconds(s->ms);
while (!s->m_mutex.try_lock_until(end)) { while (!s->m_mutex.try_lock_until(end)) {
@@ -81,6 +81,7 @@ public:
if (available_workers.empty()) { if (available_workers.empty()) {
workers.unlock(); workers.unlock();
s = new state; s = new state;
++num_workers;
} }
else { else {
s = available_workers.back(); s = available_workers.back();
@@ -93,7 +94,6 @@ public:
s->work = 1; s->work = 1;
if (!s->m_thread) { if (!s->m_thread) {
s->m_thread = new std::thread(thread_func, s); s->m_thread = new std::thread(thread_func, s);
s->m_thread->detach();
} }
else { else {
s->cv.notify_one(); s->cv.notify_one();
@@ -117,7 +117,23 @@ scoped_timer::~scoped_timer() {
} }
void finalize_scoped_timer() { void finalize_scoped_timer() {
// TODO: stub to collect idle allocated timers. unsigned deleted = 0;
// if a timer is not idle, we treat this as abnormal
// termination and don't commit to collecting the state. while (deleted < num_workers) {
workers.lock();
for (auto w : available_workers) {
w->work = 2;
w->cv.notify_one();
}
decltype(available_workers) cleanup_workers;
std::swap(available_workers, cleanup_workers);
workers.unlock();
for (auto w : cleanup_workers) {
++deleted;
w->m_thread->join();
delete w->m_thread;
delete w;
}
}
} }

View file

@@ -27,9 +27,3 @@ public:
scoped_timer(unsigned ms, event_handler * eh); scoped_timer(unsigned ms, event_handler * eh);
~scoped_timer(); ~scoped_timer();
}; };
/*
ADD_FINALIZER('finalize_scoped_timer();')
*/
void finalize_scoped_timer();