Mirror of https://github.com/Z3Prover/z3
Fix a race condition in scoped_timer::finalize (#7618)
scoped_timer::finalize is called from fork. However, it may race with other threads that are creating or freeing timer threads. This patch removes the loop in scoped_timer::finalize (it is not needed and it may spin) and also removes two unlocked assignments.

An idle worker is added to "available_workers" in the scoped_timer::~scoped_timer destructor. If "finalize" is called as part of total memory cleanup, all scoped_timer destructors have already run and every worker thread is already on the "available_workers" vector, so there is no need to loop: the first loop iteration cleans up all the threads. If "finalize" is called from a single-threaded program's fork(), all scoped_timer destructors have likewise already been called and the case is analogous to the previous one.

If "finalize" is called from a multi-threaded program's fork(), the loop breaks down: "num_workers" is the total number of workers (both sleeping and busy), and the loop runs until "num_workers" threads have been terminated, so if the number of sleeping workers is less than "num_workers" the function just spins. In addition, the unlocked assignment "num_workers = 0" and the unlocked call "available_workers.clear()" can race with other threads doing Z3 work and corrupt memory. available_workers.clear() is not needed anyway, because the vector was already emptied by std::swap(available_workers, cleanup_workers), and that swap was correctly locked.

Signed-off-by: Mikulas Patocka <mikulas@twibright.com>
Parent: a83efa68eb
Commit: 6ecc7a2dd4
1 changed file with 12 additions and 20 deletions
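The commit message hinges on finalize() being reachable from fork(). Below is a minimal sketch of how such a hook can be wired up with pthread_atfork; it is an illustration only — cleanup_on_fork is a hypothetical stand-in for scoped_timer::finalize, and where Z3 actually registers its fork handler is not shown in this commit.

// Illustration only, not Z3 code: a fork handler runs while the rest of a
// multi-threaded process is in an arbitrary state, so it must not wait for
// busy worker threads and must not write shared state without holding the lock.
#include <pthread.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void cleanup_on_fork() {
    // Hypothetical stand-in for scoped_timer::finalize(). Only sleeping
    // workers sit on available_workers; busy ones do not, so a loop that
    // keeps waiting until num_workers threads have been joined can spin.
}

int main() {
    pthread_atfork(cleanup_on_fork, nullptr, nullptr);  // prepare handler, runs before fork()
    pid_t pid = fork();
    if (pid == 0)
        _exit(0);              // child exits immediately
    waitpid(pid, nullptr, 0);  // parent reaps the child
    return 0;
}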
src/util/scoped_timer.cpp

@@ -48,7 +48,6 @@ struct scoped_timer_state {
 
 static std::vector<scoped_timer_state*> available_workers;
 static std::mutex workers;
-static atomic<unsigned> num_workers(0);
 
 static void thread_func(scoped_timer_state *s) {
     workers.lock();
@@ -94,7 +93,6 @@ scoped_timer::scoped_timer(unsigned ms, event_handler * eh) {
         // start new thead
         workers.unlock();
         s = new scoped_timer_state;
-        ++num_workers;
         init_state(ms, eh);
         s->m_thread = std::thread(thread_func, s);
     }
@@ -131,25 +129,19 @@ void scoped_timer::initialize() {
 }
 
 void scoped_timer::finalize() {
-    unsigned deleted = 0;
-    while (deleted < num_workers) {
-        workers.lock();
-        for (auto w : available_workers) {
-            w->work = EXITING;
-            w->cv.notify_one();
-        }
-        decltype(available_workers) cleanup_workers;
-        std::swap(available_workers, cleanup_workers);
-        workers.unlock();
-
-        for (auto w : cleanup_workers) {
-            ++deleted;
-            w->m_thread.join();
-            delete w;
-        }
+    workers.lock();
+    for (auto w : available_workers) {
+        w->work = EXITING;
+        w->cv.notify_one();
+    }
+    decltype(available_workers) cleanup_workers;
+    std::swap(available_workers, cleanup_workers);
+    workers.unlock();
+
+    for (auto w : cleanup_workers) {
+        w->m_thread.join();
+        delete w;
     }
-    num_workers = 0;
-    available_workers.clear();
 }
 
 void scoped_timer::init_state(unsigned ms, event_handler * eh) {