
avoid accessing mutex memory after atomic unlock

this change is needed to fix a race condition and ensure that it's
possible to unlock and destroy or unmap the mutex as soon as
pthread_mutex_lock succeeds. POSIX explicitly gives such an example in
the rationale and requires an implementation to allow such usage.
Rich Felker, 13 years ago
parent
commit
c68de0be2f
4 changed files with 31 additions and 34 deletions

  1. src/thread/pthread_mutex_lock.c (+1, -13)
  2. src/thread/pthread_mutex_timedlock.c (+15, -7)
  3. src/thread/pthread_mutex_trylock.c (+7, -7)
  4. src/thread/pthread_mutex_unlock.c (+8, -7)
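The usage the commit message and the POSIX rationale describe looks roughly like the sketch below (hypothetical code, not part of the patch): the thread whose pthread_mutex_lock just succeeded may drop the last reference, destroy the mutex and free the memory right away, so the other thread's pthread_mutex_unlock must not touch the mutex object after its atomic release.

    #include <pthread.h>
    #include <stdlib.h>

    /* Hypothetical reference-counted object guarded by its own mutex. */
    struct refobj {
    	pthread_mutex_t lock;
    	int refcnt;
    };

    void refobj_put(struct refobj *o)
    {
    	pthread_mutex_lock(&o->lock);
    	int last = --o->refcnt == 0;
    	pthread_mutex_unlock(&o->lock);
    	if (last) {
    		/* Another thread's unlock may still be in flight: its atomic
    		 * release is what let our lock succeed.  If that unlock kept
    		 * reading the mutex afterwards, it would race with the
    		 * destroy/free below. */
    		pthread_mutex_destroy(&o->lock);
    		free(o);
    	}
    }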

+ 1 - 13
src/thread/pthread_mutex_lock.c

@@ -2,17 +2,5 @@
 
 int pthread_mutex_lock(pthread_mutex_t *m)
 {
-	int r;
-
-	if (m->_m_type == PTHREAD_MUTEX_NORMAL && !a_swap(&m->_m_lock, EBUSY))
-		return 0;
-
-	while ((r=pthread_mutex_trylock(m)) == EBUSY) {
-		if (!(r=m->_m_lock) || (r&0x40000000)) continue;
-		if ((m->_m_type&3) == PTHREAD_MUTEX_ERRORCHECK
-		 && (r&0x1fffffff) == pthread_self()->tid)
-			return EDEADLK;
-		__wait(&m->_m_lock, &m->_m_waiters, r, 0);
-	}
-	return r;
+	return pthread_mutex_timedlock(m, 0);
 }

+ 15 - 7
src/thread/pthread_mutex_timedlock.c

@@ -2,15 +2,23 @@
 
 int pthread_mutex_timedlock(pthread_mutex_t *m, const struct timespec *at)
 {
-	int r, w=0;
+	int r, t;
+
+	if (m->_m_type == PTHREAD_MUTEX_NORMAL && !a_cas(&m->_m_lock, 0, EBUSY))
+		return 0;
+
 	while ((r=pthread_mutex_trylock(m)) == EBUSY) {
 		if (!(r=m->_m_lock) || (r&0x40000000)) continue;
-		if (!w) a_inc(&m->_m_waiters), w++;
-		if (__timedwait(&m->_m_lock, r, CLOCK_REALTIME, at, 0) == ETIMEDOUT) {
-			if (w) a_dec(&m->_m_waiters);
-			return ETIMEDOUT;
-		}
+		if ((m->_m_type&3) == PTHREAD_MUTEX_ERRORCHECK
+		 && (r&0x1fffffff) == pthread_self()->tid)
+			return EDEADLK;
+
+		a_inc(&m->_m_waiters);
+		t = r | 0x80000000;
+		a_cas(&m->_m_lock, r, t);
+		r = __timedwait(&m->_m_lock, t, CLOCK_REALTIME, at, 0);
+		a_dec(&m->_m_waiters);
+		if (r && r != EINTR) break;
 	}
-	if (w) a_dec(&m->_m_waiters);
 	return r;
 }
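
The flag bits used in this and the following hunks can be read as below. This is an interpretation based on the Linux robust-futex convention, not something spelled out in the patch; the macro names are invented for the illustration.

    /* Assumed layout of m->_m_lock (Linux robust-futex convention): */
    #define MUTEX_WAITERS_BIT 0x80000000u /* set by a waiter before it sleeps, so the
                                             value swapped out at unlock already
                                             reports that someone may be waiting */
    #define MUTEX_OWNER_DEAD  0x40000000u /* robust mutex whose previous owner died;
                                             trylock is allowed to take it over */
    #define MUTEX_OWNER_MASK  0x7fffffffu /* owner tid plus the owner-died bit;
                                             0 when unlocked */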

+ 7 - 7
src/thread/pthread_mutex_trylock.c

@@ -2,15 +2,14 @@
 
 int pthread_mutex_trylock(pthread_mutex_t *m)
 {
-	int tid;
-	int own;
+	int tid, old, own;
 	pthread_t self;
 
 	if (m->_m_type == PTHREAD_MUTEX_NORMAL)
-		return a_swap(&m->_m_lock, EBUSY);
+		return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY;
 
 	self = pthread_self();
-	tid = self->tid | 0x80000000;
+	tid = self->tid;
 
 	if (m->_m_type >= 4) {
 		if (!self->robust_list.off)
@@ -20,14 +19,15 @@ int pthread_mutex_trylock(pthread_mutex_t *m)
 		self->robust_list.pending = &m->_m_next;
 	}
 
-	if (m->_m_lock == tid && (m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE) {
+	old = m->_m_lock;
+	own = old & 0x7fffffff;
+	if (own == tid && (m->_m_type&3) == PTHREAD_MUTEX_RECURSIVE) {
 		if ((unsigned)m->_m_count >= INT_MAX) return EAGAIN;
 		m->_m_count++;
 		return 0;
 	}
 
-	own = m->_m_lock;
-	if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, own, tid)!=own)
+	if ((own && !(own & 0x40000000)) || a_cas(&m->_m_lock, old, tid)!=old)
 		return EBUSY;
 
 	m->_m_count = 1;

+ 8 - 7
src/thread/pthread_mutex_unlock.c

@@ -3,6 +3,8 @@
 int pthread_mutex_unlock(pthread_mutex_t *m)
 {
 	pthread_t self;
+	int waiters = m->_m_waiters;
+	int cont;
 
 	if (m->_m_type != PTHREAD_MUTEX_NORMAL) {
 		if (!m->_m_lock)
@@ -16,15 +18,14 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			self->robust_list.pending = &m->_m_next;
 			*(void **)m->_m_prev = m->_m_next;
 			if (m->_m_next) ((void **)m->_m_next)[-1] = m->_m_prev;
-			a_store(&m->_m_lock, 0);
+			cont = a_swap(&m->_m_lock, 0);
 			self->robust_list.pending = 0;
-		} else {
-			a_store(&m->_m_lock, 0);
+			goto wake;
 		}
-	} else {
-		a_store(&m->_m_lock, 0);
 	}
-
-	if (m->_m_waiters) __wake(&m->_m_lock, 1, 0);
+	cont = a_swap(&m->_m_lock, 0);
+wake:
+	if (waiters || cont<0)
+		__wake(&m->_m_lock, 1, 0);
 	return 0;
 }
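
Putting the waiter side (pthread_mutex_timedlock) and the release side (pthread_mutex_unlock) together, the protocol after this patch can be sketched as the stand-alone toy below. The struct, names, and futex wrapper are invented for the illustration and Linux-specific; this is a simplified model of the technique, not the musl code.

    #include <linux/futex.h>
    #include <stdatomic.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define WAITERS_BIT 0x80000000u

    /* Toy mutex: a lock word plus a waiter counter, as in the real code. */
    struct toy_mutex {
    	_Atomic unsigned lock;   /* 0 = free, else owner tid, bit 31 = waiters */
    	_Atomic int waiters;
    };

    static long futex(void *addr, int op, unsigned val)
    {
    	return syscall(SYS_futex, addr, op, val, NULL, NULL, 0);
    }

    static void toy_lock(struct toy_mutex *m, unsigned tid)
    {
    	for (;;) {
    		unsigned old = 0;
    		if (atomic_compare_exchange_strong(&m->lock, &old, tid))
    			return;                      /* acquired */
    		atomic_fetch_add(&m->waiters, 1);
    		/* Mark the lock word itself so the owner can tell, from the
    		 * value it swaps out at unlock, that a waiter may exist. */
    		unsigned marked = old | WAITERS_BIT;
    		if (old != marked)
    			atomic_compare_exchange_strong(&m->lock, &old, marked);
    		futex(&m->lock, FUTEX_WAIT, marked); /* sleeps only if still marked */
    		atomic_fetch_sub(&m->waiters, 1);
    	}
    }

    static void toy_unlock(struct toy_mutex *m)
    {
    	/* Read the waiter count while the mutex is still owned, then
    	 * release with a single atomic swap.  The wake decision uses only
    	 * the pre-read count and the swapped-out value, so the mutex
    	 * memory is never read after the release: the new owner may
    	 * destroy or unmap it immediately. */
    	int waiters = atomic_load(&m->waiters);
    	unsigned prev = atomic_exchange(&m->lock, 0);
    	if (waiters || (prev & WAITERS_BIT))
    		futex(&m->lock, FUTEX_WAKE, 1);  /* address passed to the kernel,
    		                                    not dereferenced here */
    }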