
add working a_spin() atomic for non-x86 targets

conceptually, a_spin needs to be at least a compiler barrier, so the
compiler will not optimize out loops (and the load on each iteration)
while spinning. it should also be a memory barrier, or the spinning
thread might keep spinning without noticing stores from other threads,
thus delaying for longer than it should.
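
as a concrete illustration of the compiler-barrier half (a standalone
sketch, not musl code; the flag variable and the gcc-style inline asm
are assumptions of the sketch):

	static int flag; /* deliberately not volatile */

	static void spin_wait(void)
	{
		/* the empty asm with a "memory" clobber forces the
		 * compiler to reload flag on every iteration; without
		 * it, the load could be hoisted out of the loop and the
		 * thread would spin on a stale register value. the
		 * clobber emits no hardware fence, which is why the
		 * full memory barrier (e.g. gcc's __sync_synchronize())
		 * is the stronger property asked for above. */
		while (!flag) __asm__ __volatile__ ("" ::: "memory");
	}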

ideally, an optimal a_spin implementation that avoids unnecessary
cache/memory contention should be chosen for each arch, but for now,
the easiest thing is to perform a useless a_cas on the calling
thread's stack.
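
the &(int){0} in the hunks below is a C99 compound literal: each call
creates a fresh zero-initialized int on the caller's stack and takes
its address, so the cas always succeeds and the memory traffic stays
on the calling thread's own stack. a rough equivalent without the
literal (on arm the hunk spells the operation __k_cas, presumably the
kernel-assisted cas helper rather than the generic a_cas wrapper):

	static inline void a_spin()
	{
		int tmp = 0;
		/* tmp == 0, so the compare succeeds and the "store"
		 * writes back the same value; the cas is performed
		 * purely for its barrier effect */
		a_cas(&tmp, 0, 0);
	}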
Rich Felker 10 years ago
parent
commit
ea818ea834

+ 1 - 0
arch/arm/atomic.h

@@ -103,6 +103,7 @@ static inline void a_store(volatile int *p, int x)
 
 static inline void a_spin()
 {
+	__k_cas(&(int){0}, 0, 0);
 }
 
 static inline void a_crash()

+ 1 - 0
arch/microblaze/atomic.h

@@ -97,6 +97,7 @@ static inline void a_store(volatile int *p, int x)
 
 static inline void a_spin()
 {
+	a_cas(&(int){0}, 0, 0);
 }
 
 static inline void a_crash()

+ 1 - 0
arch/mips/atomic.h

@@ -137,6 +137,7 @@ static inline void a_store(volatile int *p, int x)
 
 static inline void a_spin()
 {
+	a_cas(&(int){0}, 0, 0);
 }
 
 static inline void a_crash()

+ 1 - 0
arch/or1k/atomic.h

@@ -74,6 +74,7 @@ static inline void a_store(volatile int *p, int x)
 
 static inline void a_spin()
 {
+	a_cas(&(int){0}, 0, 0);
 }
 
 static inline void a_crash()

+ 1 - 0
arch/powerpc/atomic.h

@@ -80,6 +80,7 @@ static inline void a_store(volatile int *p, int x)
 
 static inline void a_spin()
 {
+	a_cas(&(int){0}, 0, 0);
 }
 
 static inline void a_crash()

+ 1 - 0
arch/sh/atomic.h

@@ -53,6 +53,7 @@ static inline void a_dec(volatile int *x)
 
 static inline void a_spin()
 {
+	a_cas(&(int){0}, 0, 0);
 }
 
 static inline void a_crash()
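
for context on how a_spin is consumed: musl's spin loops call it
between failed acquisition attempts, so an empty a_spin left those
loops spinning without the barrier described above. a sketch of an
acquire loop in the style of pthread_spin_lock (reconstructed for
illustration; not part of this commit):

	#include <errno.h>

	/* a_cas and a_spin as declared in musl's atomic.h */

	int spin_lock(volatile int *s)
	{
		/* spin on a plain load and retry the cas only when the
		 * lock looks free; a_spin between attempts keeps the
		 * waiter from delaying after the owner's unlocking
		 * store */
		while (*s || a_cas(s, 0, EBUSY)) a_spin();
		return 0;
	}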