浏览代码

optimize out setting up robust list with kernel when not needed

as a result of commit 12e1e324683a1d381b7f15dd36c99b37dd44d940, kernel
processing of the robust list is only needed for process-shared
mutexes. previously the first attempt to lock any owner-tracked mutex
resulted in robust list initialization and a set_robust_list syscall.
this is no longer necessary, and since the kernel's record of the
robust list must now be cleared at thread exit time for detached
threads, optimizing it out is more worthwhile than before too.
Rich Felker 10 年之前
父节点
当前提交
4e98cce1c5
共有 4 个文件被更改,包括 8 次插入和 7 次删除
  1. 1 0
      src/env/__init_tls.c
  2. 2 1
      src/process/fork.c
  3. 1 0
      src/thread/pthread_create.c
  4. 4 6
      src/thread/pthread_mutex_trylock.c

+ 1 - 0
src/env/__init_tls.c

@@ -18,6 +18,7 @@ int __init_tp(void *p)
 	libc.has_thread_pointer = 1;
 	td->tid = __syscall(SYS_set_tid_address, &td->tid);
 	td->locale = &libc.global_locale;
+	td->robust_list.head = &td->robust_list.head;
 	return 0;
 }

+ 2 - 1
src/process/fork.c

@@ -25,7 +25,8 @@ pid_t fork(void)
 	if (libc.has_thread_pointer && !ret) {
 		pthread_t self = __pthread_self();
 		self->tid = __syscall(SYS_gettid);
-		memset(&self->robust_list, 0, sizeof self->robust_list);
+		self->robust_list.off = 0;
+		self->robust_list.pending = 0;
 		libc.threads_minus_1 = 0;
 	}
 	__restore_sigs(&set);

+ 1 - 0
src/thread/pthread_create.c

@@ -268,6 +268,7 @@ int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict att
 		do_sched = new->startlock[0] = 1;
 		__block_app_sigs(new->sigmask);
 	}
+	new->robust_list.head = &new->robust_list.head;
 	new->unblock_cancel = self->cancel;
 	new->canary = self->canary;
 

+ 4 - 6
src/thread/pthread_mutex_trylock.c

@@ -7,12 +7,6 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 	pthread_t self = __pthread_self();
 	int tid = self->tid;
 
-	if (!self->robust_list.off) {
-		__syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
-		self->robust_list.head = &self->robust_list.head;
-		self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
-	}
-
 	old = m->_m_lock;
 	own = old & 0x7fffffff;
 	if (own == tid && (type&3) == PTHREAD_MUTEX_RECURSIVE) {
@@ -23,6 +17,10 @@ int __pthread_mutex_trylock_owner(pthread_mutex_t *m)
 	if (own == 0x40000000) return ENOTRECOVERABLE;
 
 	if (m->_m_type & 128) {
+		if (!self->robust_list.off) {
+			self->robust_list.off = (char*)&m->_m_lock-(char *)&m->_m_next;
+			__syscall(SYS_set_robust_list, &self->robust_list, 3*sizeof(long));
+		}
 		if (m->_m_waiters) tid |= 0x80000000;
 		self->robust_list.pending = &m->_m_next;
 	}
 	}