path: root/src/thread/pthread_mutex_unlock.c
diff options
authorRich Felker <>2019-02-12 19:56:49 -0500
committerRich Felker <>2019-02-12 19:56:49 -0500
commit099b89d3840c30d7dd962e18668c2e6d39f0c626 (patch)
treeeba766cb65c04a65be534979394b6f7eeb6b6b1e /src/thread/pthread_mutex_unlock.c
parent042b3ee452f542e0e16d847f90777e8c3a012375 (diff)
redesign robust mutex states to eliminate data races on type field
in order to implement ENOTRECOVERABLE, the implementation has traditionally used a bit of the mutex type field to indicate that it's recovered after EOWNERDEAD and will go into ENOTRECOVERABLE state if pthread_mutex_consistent is not called before unlocking. while it's only the thread that holds the lock that needs access to this information (except possibly for the sake of pthread_mutex_consistent choosing between EINVAL and EPERM for erroneous calls), the change to the type field is formally a data race with all other threads that perform any operation on the mutex. no individual bits race, and no write races are possible, so things are "okay" in some sense, but it's still not good. this patch moves the recovery/consistency state to the mutex owner/lock field which is rightfully mutable. bit 30, the same bit the kernel uses with a zero owner to indicate that the previous owner died holding the lock, is now used with a nonzero owner to indicate that the mutex is held but has not yet been marked consistent. note that the kernel ABI also reserves bit 29 not to appear in any tid, so the sentinel value we use for ENOTRECOVERABLE, 0x7fffffff, does not clash with any tid plus bit 30.
Diffstat (limited to 'src/thread/pthread_mutex_unlock.c')
1 file changed, 7 insertions, 2 deletions
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index 7dd00d27..ea9f54dd 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -7,13 +7,18 @@ int __pthread_mutex_unlock(pthread_mutex_t *m)
int cont;
int type = m->_m_type & 15;
int priv = (m->_m_type & 128) ^ 128;
+ int new = 0;
self = __pthread_self();
- if ((m->_m_lock&0x7fffffff) != self->tid)
+ int old = m->_m_lock;
+ int own = old & 0x3fffffff;
+ if (own != self->tid)
return EPERM;
if ((type&3) == PTHREAD_MUTEX_RECURSIVE && m->_m_count)
return m->_m_count--, 0;
+ if ((type&4) && (old&0x40000000))
+ new = 0x7fffffff;
if (!priv) {
self->robust_list.pending = &m->_m_next;
@@ -24,7 +29,7 @@ int __pthread_mutex_unlock(pthread_mutex_t *m)
if (next != &self->robust_list.head) *(volatile void *volatile *)
((char *)next - sizeof(void *)) = prev;
- cont = a_swap(&m->_m_lock, (type & 8) ? 0x7fffffff : 0);
+ cont = a_swap(&m->_m_lock, new);
if (type != PTHREAD_MUTEX_NORMAL && !priv) {
self->robust_list.pending = 0;