/* atomic.h — MIPS (32-bit) inline atomic primitives: LL/SC-based CAS,
 * swap, fetch-add, inc/dec, and/or, plus barrier and store helpers. */
  1. #ifndef _INTERNAL_ATOMIC_H
  2. #define _INTERNAL_ATOMIC_H
  3. #include <stdint.h>
  4. static inline int a_ctz_l(unsigned long x)
  5. {
  6. static const char debruijn32[32] = {
  7. 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  8. 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  9. };
  10. return debruijn32[(x&-x)*0x076be629 >> 27];
  11. }
  12. static inline int a_ctz_64(uint64_t x)
  13. {
  14. uint32_t y = x;
  15. if (!y) {
  16. y = x>>32;
  17. return 32 + a_ctz_l(y);
  18. }
  19. return a_ctz_l(y);
  20. }
/* Atomic compare-and-swap: if *p == t, store s into *p.
 * Returns the value of *p observed by the ll (callers compare the
 * result against t to detect success). Implemented as an LL/SC retry
 * loop bracketed by sync barriers.
 * NOTE(review): on the mismatch path the bne branches forward past the
 * trailing sync, so the failure path executes only the leading sync. */
static inline int a_cas(volatile int *p, int t, int s)
{
	int dummy;
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* ll/sc/sync require at least MIPS II */
	".set noreorder\n"       /* delay slots are filled explicitly below */
	" sync\n"
	"1: ll %0, %2\n"         /* load-linked current value */
	" bne %0, %3, 1f\n"      /* != t: skip the store (forward label 1) */
	" addu %1, %4, $0\n"     /* delay slot: scratch = s */
	" sc %1, %2\n"           /* store-conditional; scratch = 1 on success */
	" beq %1, $0, 1b\n"      /* sc failed: retry from ll */
	" nop\n"                 /* delay slot */
	" sync\n"
	"1: \n"
	".set pop\n"
	: "=&r"(t), "=&r"(dummy), "+m"(*p) : "r"(t), "r"(s) : "memory" );
	return t;
}
/* Pointer-sized compare-and-swap, implemented by round-tripping the
 * pointer values through int. Assumes pointers are 32-bit (true for
 * the MIPS o32 ABI this file targets — confirm if reused elsewhere). */
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	return (void *)a_cas(p, (int)t, (int)s);
}
/* Atomic exchange: store v into *x and return the previous value.
 * LL/SC retry loop with sync barriers on both sides. */
static inline int a_swap(volatile int *x, int v)
{
	int old, dummy;
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* ll/sc/sync require at least MIPS II */
	".set noreorder\n"
	" sync\n"
	"1: ll %0, %2\n"         /* load-linked old value */
	" addu %1, %3, $0\n"     /* scratch = v */
	" sc %1, %2\n"           /* attempt the store */
	" beq %1, $0, 1b\n"      /* sc failed: retry */
	" nop\n"                 /* delay slot */
	" sync\n"
	".set pop\n"
	: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
	return old;
}
/* Atomic fetch-and-add: *x += v, returning the value *x held before
 * the addition. LL/SC retry loop with sync barriers on both sides. */
static inline int a_fetch_add(volatile int *x, int v)
{
	int old, dummy;
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* ll/sc/sync require at least MIPS II */
	".set noreorder\n"
	" sync\n"
	"1: ll %0, %2\n"         /* load-linked old value */
	" addu %1, %0, %3\n"     /* scratch = old + v */
	" sc %1, %2\n"           /* attempt the store */
	" beq %1, $0, 1b\n"      /* sc failed: reload and redo the add */
	" nop\n"                 /* delay slot */
	" sync\n"
	".set pop\n"
	: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
	return old;
}
/* Atomic increment of *x; no value is returned, so a single scratch
 * register holds the loaded value, the sum, and the sc result flag. */
static inline void a_inc(volatile int *x)
{
	int dummy;
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* ll/sc/sync require at least MIPS II */
	".set noreorder\n"
	" sync\n"
	"1: ll %0, %1\n"         /* load-linked current value */
	" addu %0, %0, 1\n"      /* increment in place */
	" sc %0, %1\n"           /* attempt the store; %0 = 1 on success */
	" beq %0, $0, 1b\n"      /* sc failed: reload and redo */
	" nop\n"                 /* delay slot */
	" sync\n"
	".set pop\n"
	: "=&r"(dummy), "+m"(*x) : : "memory" );
}
/* Atomic decrement of *x; mirror image of a_inc using subu. */
static inline void a_dec(volatile int *x)
{
	int dummy;
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* ll/sc/sync require at least MIPS II */
	".set noreorder\n"
	" sync\n"
	"1: ll %0, %1\n"         /* load-linked current value */
	" subu %0, %0, 1\n"      /* decrement in place */
	" sc %0, %1\n"           /* attempt the store; %0 = 1 on success */
	" beq %0, $0, 1b\n"      /* sc failed: reload and redo */
	" nop\n"                 /* delay slot */
	" sync\n"
	".set pop\n"
	: "=&r"(dummy), "+m"(*x) : : "memory" );
}
/* Atomic (ordered) store: a plain word store bracketed by sync
 * barriers so it is not reordered against surrounding accesses.
 * No LL/SC needed — an aligned sw is itself single-copy atomic. */
static inline void a_store(volatile int *p, int x)
{
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* sync requires at least MIPS II */
	".set noreorder\n"
	" sync\n"
	" sw %1, %0\n"           /* the store itself */
	" sync\n"
	".set pop\n"
	: "+m"(*p) : "r"(x) : "memory" );
}
  127. #define a_spin a_barrier
  128. static inline void a_barrier()
  129. {
  130. a_cas(&(int){0}, 0, 0);
  131. }
  132. static inline void a_crash()
  133. {
  134. *(volatile char *)0=0;
  135. }
/* Atomic bitwise AND: *p &= v. LL/SC retry loop; no value returned,
 * so one scratch register serves as load target, result, and sc flag. */
static inline void a_and(volatile int *p, int v)
{
	int dummy;
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* ll/sc/sync require at least MIPS II */
	".set noreorder\n"
	" sync\n"
	"1: ll %0, %1\n"         /* load-linked current value */
	" and %0, %0, %2\n"      /* apply the mask */
	" sc %0, %1\n"           /* attempt the store; %0 = 1 on success */
	" beq %0, $0, 1b\n"      /* sc failed: reload and redo */
	" nop\n"                 /* delay slot */
	" sync\n"
	".set pop\n"
	: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
/* Atomic bitwise OR: *p |= v. Mirror image of a_and using or. */
static inline void a_or(volatile int *p, int v)
{
	int dummy;
	__asm__ __volatile__(
	".set push\n"
	".set mips2\n"           /* ll/sc/sync require at least MIPS II */
	".set noreorder\n"
	" sync\n"
	"1: ll %0, %1\n"         /* load-linked current value */
	" or %0, %0, %2\n"       /* apply the mask */
	" sc %0, %1\n"           /* attempt the store; %0 = 1 on success */
	" beq %0, $0, 1b\n"      /* sc failed: reload and redo */
	" nop\n"                 /* delay slot */
	" sync\n"
	".set pop\n"
	: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
/* Atomic OR on a long-sized object. Assumes long and int are the same
 * width (true for the 32-bit MIPS ABI this file targets — confirm if
 * reused elsewhere); the volatile void * converts implicitly. */
static inline void a_or_l(volatile void *p, long v)
{
	a_or(p, v);
}
  174. static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  175. {
  176. union { uint64_t v; uint32_t r[2]; } u = { v };
  177. a_and((int *)p, u.r[0]);
  178. a_and((int *)p+1, u.r[1]);
  179. }
  180. static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  181. {
  182. union { uint64_t v; uint32_t r[2]; } u = { v };
  183. a_or((int *)p, u.r[0]);
  184. a_or((int *)p+1, u.r[1]);
  185. }
  186. #endif