fix extremely rare but dangerous race condition in robust mutexes
author    Rich Felker <dalias@aerifal.cx>
          Fri, 17 Aug 2012 21:13:53 +0000 (17:13 -0400)
committer Rich Felker <dalias@aerifal.cx>
          Fri, 17 Aug 2012 21:13:53 +0000 (17:13 -0400)
if new shared mappings of files/devices/shared memory can be made
between the time a robust mutex is unlocked and its subsequent removal
from the pending slot in the robust list header, the kernel can
inadvertently corrupt data in the newly-mapped pages when the process
terminates. i am fixing the bug by using the same global vm lock
mechanism that was used to fix the race condition with unmapping
barriers after pthread_barrier_wait returns.

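The window in question is the span between the atomic release of the mutex word and the clearing of the pending slot in the thread's robust list head. If the process dies inside that window, the kernel still examines the address recorded in the pending slot; if a new shared mapping of a file, device, or shared memory object has meanwhile appeared at that address, the kernel's exit-time write can land in unrelated, possibly persistent data. Holding the vm lock across the window excludes such map changes, assuming (as the commit message implies) that the code paths which create those mappings take the opposite side of the same lock. That mapping side is not part of this commit; purely as an illustration, a path installing a new shared mapping would take the negative side of the lock roughly as in the hypothetical sketch below (the __vm_lock/__vm_unlock declarations match the new vmlock.c; map_shared_sketch is invented for this example and is not musl's mmap):

#include <sys/mman.h>

/* internal lock from src/thread/vmlock.c (added by this commit) */
void __vm_lock(int);
void __vm_unlock(void);

/* hypothetical illustration only: creating a new shared mapping takes the
 * negative side of the lock, so it cannot overlap a window in which any
 * thread holds the positive side (robust mutex unlock, barrier exit) */
static void *map_shared_sketch(void *addr, size_t len, int prot, int fd, off_t off)
{
	void *p;
	__vm_lock(-1);                /* wait out any positive-side holders */
	p = mmap(addr, len, prot, MAP_SHARED, fd, off);
	__vm_unlock();                /* wake waiters once the count returns to zero */
	return p;
}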
src/thread/pthread_barrier_wait.c
src/thread/pthread_mutex_unlock.c
src/thread/vmlock.c [new file with mode: 0644]

diff --git a/src/thread/pthread_barrier_wait.c b/src/thread/pthread_barrier_wait.c
index 6052925..5e60338 100644
--- a/src/thread/pthread_barrier_wait.c
+++ b/src/thread/pthread_barrier_wait.c
@@ -1,22 +1,7 @@
 #include "pthread_impl.h"
 
-static int vmlock[2];
-
-void __vm_lock(int inc)
-{
-       for (;;) {
-               int v = vmlock[0];
-               if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
-               else if (a_cas(vmlock, v, v+inc)==v) break;
-       }
-}
-
-void __vm_unlock(void)
-{
-       int inc = vmlock[0]>0 ? -1 : 1;
-       if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
-               __wake(vmlock, -1, 1);
-}
+void __vm_lock_impl(int);
+void __vm_unlock_impl(void);
 
 static int pshared_barrier_wait(pthread_barrier_t *b)
 {
@@ -41,7 +26,7 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
                        __wait(&b->_b_count, &b->_b_waiters2, v, 0);
        }
 
-       __vm_lock(+1);
+       __vm_lock_impl(+1);
 
        /* Ensure all threads have a vm lock before proceeding */
        if (a_fetch_add(&b->_b_count, -1)==1-limit) {
@@ -62,7 +47,7 @@ static int pshared_barrier_wait(pthread_barrier_t *b)
        if (v==INT_MIN+1 || (v==1 && w))
                __wake(&b->_b_lock, 1, 0);
 
-       __vm_unlock();
+       __vm_unlock_impl();
 
        return ret;
 }
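A note on the renamed entry points: __vm_lock_impl and __vm_unlock_impl are only forward declarations here; vmlock.c (below) defines them as weak aliases of __vm_lock and __vm_unlock. The indirection presumably serves static linking: the thread code references a name that only vmlock.o provides, so that object is always pulled in whenever barriers or robust mutexes are used, while map-changing code can keep calling __vm_lock/__vm_unlock through overridable no-op weak definitions and picks up the real lock only when it is present. A hypothetical sketch of that caller-side pattern (not a file from this commit) follows:

/* weak_alias as in musl's internal libc.h */
#define weak_alias(old, new) \
	extern __typeof(old) new __attribute__((__weak__, __alias__(#old)))

/* no-op fallbacks; the strong definitions in vmlock.c override them
 * whenever vmlock.o is linked in */
static void dummy_lock(int inc) { }
static void dummy_unlock(void) { }
weak_alias(dummy_lock, __vm_lock);
weak_alias(dummy_unlock, __vm_unlock);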
diff --git a/src/thread/pthread_mutex_unlock.c b/src/thread/pthread_mutex_unlock.c
index fdf9fc1..5fc0f4e 100644
--- a/src/thread/pthread_mutex_unlock.c
+++ b/src/thread/pthread_mutex_unlock.c
@@ -1,5 +1,8 @@
 #include "pthread_impl.h"
 
+void __vm_lock_impl(int);
+void __vm_unlock_impl(void);
+
 int pthread_mutex_unlock(pthread_mutex_t *m)
 {
        pthread_t self;
@@ -20,11 +23,14 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
                        self->robust_list.pending = &m->_m_next;
                        *(void **)m->_m_prev = m->_m_next;
                        if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
+                       __vm_lock_impl(+1);
                }
        }
        cont = a_swap(&m->_m_lock, 0);
-       if (robust)
+       if (robust) {
                self->robust_list.pending = 0;
+               __vm_unlock_impl();
+       }
        if (waiters || cont<0)
                __wake(&m->_m_lock, 1, 0);
        return 0;
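The ordering in the unlock path is what matters: the pending slot is set and the mutex is unlinked from the robust list, the vm lock is acquired, and only then is _m_lock atomically released; the pending slot is cleared and the vm lock dropped afterwards. For reference (not part of the diff), the structure that self->robust_list mirrors is declared in the Linux UAPI header <linux/futex.h>; at exit the kernel walks the list and also inspects list_op_pending, and may set the FUTEX_OWNER_DIED bit (0x40000000) in the futex word those pointers lead to:

/* from <linux/futex.h> (kernel ABI) */
struct robust_list {
	struct robust_list *next;
};

struct robust_list_head {
	struct robust_list list;             /* head of the per-thread robust list */
	long futex_offset;                   /* offset from a list entry to its futex word */
	struct robust_list *list_op_pending; /* entry currently being locked/unlocked */
};

As long as the vm lock is held, the address stored in list_op_pending keeps referring to the original mutex, so an exit-time write by the kernel cannot hit a freshly created shared mapping.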
diff --git a/src/thread/vmlock.c b/src/thread/vmlock.c
new file mode 100644
index 0000000..aba9e31
--- /dev/null
+++ b/src/thread/vmlock.c
@@ -0,0 +1,22 @@
+#include "pthread_impl.h"
+
+static int vmlock[2];
+
+void __vm_lock(int inc)
+{
+       for (;;) {
+               int v = vmlock[0];
+               if (inc*v < 0) __wait(vmlock, vmlock+1, v, 1);
+               else if (a_cas(vmlock, v, v+inc)==v) break;
+       }
+}
+
+void __vm_unlock(void)
+{
+       int inc = vmlock[0]>0 ? -1 : 1;
+       if (a_fetch_add(vmlock, inc)==-inc && vmlock[1])
+               __wake(vmlock, -1, 1);
+}
+
+weak_alias(__vm_lock, __vm_lock_impl);
+weak_alias(__vm_unlock, __vm_unlock_impl);
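The lock itself is an asymmetric counter rather than a mutex: callers pass +1 or -1, any number of same-sign holders may coexist (the count simply grows away from zero), and an opposite-sign caller waits until the count returns to zero. That is why many threads can unlock robust mutexes or leave barriers concurrently without serializing against one another; only code changing the memory map has to wait. The following stand-alone model of the same protocol, using C11 atomics and a yield loop in place of musl's a_cas/__wait/__wake, is an illustration only, not the musl code:

#include <stdatomic.h>
#include <sched.h>

static _Atomic int vmcount; /* >0: held by unlock/barrier paths, <0: held by map changers */

static void model_vm_lock(int inc)   /* inc is +1 or -1 */
{
	for (;;) {
		int v = atomic_load(&vmcount);
		if (inc*v < 0) { sched_yield(); continue; } /* opposite sign held: wait */
		if (atomic_compare_exchange_weak(&vmcount, &v, v + inc)) break;
	}
}

static void model_vm_unlock(void)
{
	/* step the count back toward zero, as __vm_unlock does */
	int v = atomic_load(&vmcount);
	atomic_fetch_add(&vmcount, v > 0 ? -1 : 1);
}

In the real implementation the second word of vmlock[] counts waiters, so the last holder of either sign can issue a single futex wake instead of leaving waiters to spin.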