summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Rich Felker <dalias@aerifal.cx> 2011-03-30 09:06:00 -0400
committer: Rich Felker <dalias@aerifal.cx> 2011-03-30 09:06:00 -0400
commit: 02084109f0f0d6e0a7fe4a8cb3a90a422725e264 (patch)
tree: d0c40b22c52dc87c62de4848f77d1bf48bf343fb
parent: 124b4ebc8a293e616cc0a7eaba3587c9b7ff13ec (diff)
download: musl-02084109f0f0d6e0a7fe4a8cb3a90a422725e264.tar.gz
streamline mutex unlock to remove a useless branch, use a_store to unlock
this roughly halves the cost of pthread_mutex_unlock, at least for non-robust, normal-type mutexes. the a_store change is in preparation for future support of archs which require a memory barrier or special atomic store operation, and also should prevent the possibility of the compiler misordering writes.
-rw-r--r--  src/thread/pthread_mutex_unlock.c  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index 67aa7ba5..5855db0b 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -14,11 +14,15 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
self->robust_list.pending = &m->_m_next;
*(void **)m->_m_prev = m->_m_next;
if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+ a_store(&m->_m_lock, 0);
+ self->robust_list.pending = 0;
+ } else {
+ a_store(&m->_m_lock, 0);
}
+ } else {
+ a_store(&m->_m_lock, 0);
}
- m->_m_lock = 0;
if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
- if (m->_m_type >= 4) self->robust_list.pending = 0;
return 0;
}