From c68de0be2fb649f91b31080224fb6e48084eaaee Mon Sep 17 00:00:00 2001
From: Rich Felker
Date: Tue, 2 Aug 2011 20:31:15 -0400
Subject: avoid accessing mutex memory after atomic unlock

this change is needed to fix a race condition and ensure that it's
possible to unlock and destroy or unmap the mutex as soon as
pthread_mutex_lock succeeds. POSIX explicitly gives such an example
in the rationale and requires an implementation to allow such
usage.
---
 src/thread/pthread_mutex_unlock.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index 6c4d7f22..0f4a5e6c 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -3,6 +3,8 @@
 int pthread_mutex_unlock(pthread_mutex_t *m)
 {
 	pthread_t self;
+	int waiters = m->_m_waiters;
+	int cont;
 
 	if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
 		if (!m->_m_lock)
@@ -16,15 +18,14 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
 			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
-			a_store(&m->_m_lock, 0);
+			cont = a_swap(&m->_m_lock, 0);
 			self->robust_list.pending = 0;
-		} else {
-			a_store(&m->_m_lock, 0);
+			goto wake;
 		}
-	} else {
-		a_store(&m->_m_lock, 0);
 	}
-
-	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
+	cont = a_swap(&m->_m_lock, 0);
+wake:
+	if (waiters || cont<0)
+		__wake(&m->_m_lock, 1, 0);
 	return 0;
 }
-- 
cgit v1.2.1
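
For context, the usage pattern the commit message alludes to (the example POSIX gives in its rationale) looks roughly like the sketch below. The struct obj type and obj_put() function are illustrative names, not part of musl or of this patch; the point is only that the thread performing the earlier unlock must not touch the mutex memory afterwards, because another thread may already have locked it, unlocked it, destroyed it and freed the memory containing it.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical reference-counted object: the mutex lives inside the
 * memory it protects, and the last reference holder frees that memory. */
struct obj {
	pthread_mutex_t lock;
	int refcnt;
};

void obj_put(struct obj *o)
{
	int last;
	pthread_mutex_lock(&o->lock);
	last = (--o->refcnt == 0);
	pthread_mutex_unlock(&o->lock);
	if (last) {
		/* POSIX allows destroying the mutex as soon as it is
		 * unlocked and no thread will lock it again. A thread
		 * whose earlier unlock handed the lock to us may still
		 * be inside pthread_mutex_unlock; if that unlock reads
		 * the mutex (e.g. its waiter count) after the atomic
		 * release, it can touch freed memory here. */
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
}

That is why the patch samples _m_waiters before the release and replaces a_store with a_swap: the value swapped out of the lock word lets the unlocker detect contention that arrived after the waiter count was sampled, so nothing in the mutex needs to be read after the atomic unlock.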