Mirror of https://github.com/Z3Prover/z3, synced 2025-08-15 07:15:26 +00:00
Fix a race condition in scoped_timer::finalize
scoped_timer::finalize is called from fork. However, it may race with other threads that are creating or freeing timer threads.

This patch reworks scoped_timer::finalize so that it runs only once (there is no need to repeat it). The assignments "num_workers = 0" and "available_workers.clear();" are removed: they were performed without holding any lock and were therefore subject to race conditions. The variable num_workers is deleted entirely because it is no longer needed.

There was another bug in scoped_timer::finalize: if some workers were busy, the function would spin-wait for them to terminate. This patch changes it so that busy workers are simply ignored.

Signed-off-by: Mikulas Patocka <mikulas@twibright.com>
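To illustrate the "runs only once" guarantee, here is a minimal, self-contained sketch of the run-once idiom the patch applies to the pthread_atfork registration in scoped_timer::initialize. It is not the Z3 source; registered, register_handler and do_once are made-up names. The point is that std::atomic<bool>::exchange sets the flag and returns its previous value in a single atomic step, so exactly one caller ever observes false.

    // Sketch only: the run-once idiom; 'registered', 'register_handler' and
    // 'do_once' are illustrative names, not Z3 code.
    #include <atomic>
    #include <iostream>

    static std::atomic<bool> registered{false};

    static void register_handler() {
        // stands in for pthread_atfork(finalize, nullptr, nullptr)
        std::cout << "registered once\n";
    }

    void do_once() {
        // exchange() atomically sets the flag and returns its previous value,
        // so exactly one caller sees 'false' and performs the registration.
        if (!registered.exchange(true))
            register_handler();
    }

    int main() {
        do_once();   // performs the registration
        do_once();   // no-op, even when called concurrently from another thread
    }

The old code checked a plain bool and set it afterwards in two separate, unsynchronized steps, which is exactly the window this commit closes.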
This commit is contained in:
parent a4e7bf82da
commit 95d90a7be5

1 changed file with 14 additions and 23 deletions
@@ -48,7 +48,6 @@ struct scoped_timer_state {
 
 static std::vector<scoped_timer_state*> available_workers;
 static std::mutex workers;
-static atomic<unsigned> num_workers(0);
 
 static void thread_func(scoped_timer_state *s) {
     workers.lock();
@@ -94,7 +93,6 @@ scoped_timer::scoped_timer(unsigned ms, event_handler * eh) {
         // start new thead
         workers.unlock();
         s = new scoped_timer_state;
-        ++num_workers;
         init_state(ms, eh);
         s->m_thread = std::thread(thread_func, s);
     }
@@ -122,34 +120,27 @@ scoped_timer::~scoped_timer() {
 
 void scoped_timer::initialize() {
 #ifndef _WINDOWS
-    static bool pthread_atfork_set = false;
-    if (!pthread_atfork_set) {
+    static std::atomic<bool> pthread_atfork_set = false;
+    if (!pthread_atfork_set.exchange(true)) {
         pthread_atfork(finalize, nullptr, nullptr);
-        pthread_atfork_set = true;
     }
 #endif
 }
 
 void scoped_timer::finalize() {
-    unsigned deleted = 0;
-    while (deleted < num_workers) {
-        workers.lock();
-        for (auto w : available_workers) {
-            w->work = EXITING;
-            w->cv.notify_one();
-        }
-        decltype(available_workers) cleanup_workers;
-        std::swap(available_workers, cleanup_workers);
-        workers.unlock();
-
-        for (auto w : cleanup_workers) {
-            ++deleted;
-            w->m_thread.join();
-            delete w;
-        }
+    workers.lock();
+    for (auto w : available_workers) {
+        w->work = EXITING;
+        w->cv.notify_one();
+    }
+    decltype(available_workers) cleanup_workers;
+    std::swap(available_workers, cleanup_workers);
+    workers.unlock();
+
+    for (auto w : cleanup_workers) {
+        w->m_thread.join();
+        delete w;
     }
-    num_workers = 0;
-    available_workers.clear();
 }
 
 void scoped_timer::init_state(unsigned ms, event_handler * eh) {
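The reworked finalize() above follows a standard teardown pattern: signal the idle workers and swap the shared list into a local variable while holding the mutex, then join and delete the threads after releasing it, so the lock is never held across a join; busy workers are not on the idle list and, as the commit message notes, are now simply ignored. Below is a rough, self-contained sketch of that pattern under assumed names (worker_state, idle_workers, shutdown_idle_workers are not the actual Z3 identifiers).

    // Sketch only: swap-under-lock teardown in the style of the reworked finalize().
    // All identifiers here are illustrative, not the actual Z3 code.
    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct worker_state {
        std::thread             m_thread;
        std::mutex              m;
        std::condition_variable cv;
        bool                    exiting = false;
    };

    static std::vector<worker_state*> idle_workers;   // guarded by workers_mutex
    static std::mutex                 workers_mutex;

    static void worker_loop(worker_state* s) {
        std::unique_lock<std::mutex> lock(s->m);
        s->cv.wait(lock, [s] { return s->exiting; });  // idle until asked to exit
    }

    void shutdown_idle_workers() {
        decltype(idle_workers) cleanup;
        {
            std::lock_guard<std::mutex> lock(workers_mutex);
            for (auto w : idle_workers) {
                { std::lock_guard<std::mutex> wl(w->m); w->exiting = true; }
                w->cv.notify_one();
            }
            std::swap(idle_workers, cleanup);   // busy workers are not on this list
        }                                       // global mutex released before joining
        for (auto w : cleanup) {
            w->m_thread.join();                 // the thread is leaving its wait, so this returns
            delete w;
        }
    }

    int main() {
        for (int i = 0; i < 3; ++i) {
            auto w = new worker_state;
            w->m_thread = std::thread(worker_loop, w);
            std::lock_guard<std::mutex> lock(workers_mutex);
            idle_workers.push_back(w);
        }
        shutdown_idle_workers();   // all three workers exit, are joined and freed
    }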