/* atomic.h — MIPS atomic primitives (ll/sc based) */
#ifndef _INTERNAL_ATOMIC_H
#define _INTERNAL_ATOMIC_H

#include <stdint.h>
  4. static inline int a_ctz_l(unsigned long x)
  5. {
  6. static const char debruijn32[32] = {
  7. 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  8. 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  9. };
  10. return debruijn32[(x&-x)*0x076be629 >> 27];
  11. }
  12. static inline int a_ctz_64(uint64_t x)
  13. {
  14. uint32_t y = x;
  15. if (!y) {
  16. y = x>>32;
  17. return 32 + a_ctz_l(y);
  18. }
  19. return a_ctz_l(y);
  20. }
/* Atomic compare-and-swap: if *p == t, store s into *p.
 * Returns the value read from *p — equal to t exactly when the
 * swap happened.  Implemented as an ll/sc retry loop bracketed by
 * sync instructions for barrier semantics.  The GNU as local
 * label "1" is defined twice: "1b" (in the sc retry branch)
 * resolves backward to the ll, while "1f" (in the bne) resolves
 * forward to the exit label after the final sync. */
static inline int a_cas(volatile int *p, int t, int s)
{
int dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"                  /* ll/sc require at least MIPS II */
".set noreorder\n"              /* delay slots are filled by hand below */
" sync\n"
"1: ll %0, %2\n"                /* load-linked current value of *p */
" bne %0, %3, 1f\n"             /* value != t: skip the store */
" addu %1, %4, $0\n"            /* (delay slot) copy s into dummy */
" sc %1, %2\n"                  /* store-conditional; dummy=0 if it failed */
" beq %1, $0, 1b\n"             /* sc failed: retry from the ll */
" nop\n"                        /* (delay slot) */
" sync\n"
"1: \n"
".set pop\n"
: "=&r"(t), "=&r"(dummy), "+m"(*p) : "r"(t), "r"(s) : "memory" );
return t;
}
  41. static inline void *a_cas_p(volatile void *p, void *t, void *s)
  42. {
  43. return (void *)a_cas(p, (int)t, (int)s);
  44. }
  45. static inline long a_cas_l(volatile void *p, long t, long s)
  46. {
  47. return a_cas(p, t, s);
  48. }
/* Atomically replaces *x with v and returns the value *x held
 * before the exchange.  ll/sc retry loop with sync barriers on
 * both sides. */
static inline int a_swap(volatile int *x, int v)
{
int old, dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"                  /* ll/sc require at least MIPS II */
".set noreorder\n"
" sync\n"
"1: ll %0, %2\n"                /* old = *x (load-linked) */
" addu %1, %3, $0\n"            /* dummy = v */
" sc %1, %2\n"                  /* try to store v; dummy=0 on failure */
" beq %1, $0, 1b\n"             /* retry if interrupted */
" nop\n"                        /* (delay slot) */
" sync\n"
".set pop\n"
: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
return old;
}
/* Atomically adds v to *x and returns the value *x held before
 * the addition (fetch-and-add).  ll/sc retry loop with sync
 * barriers on both sides. */
static inline int a_fetch_add(volatile int *x, int v)
{
int old, dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"                  /* ll/sc require at least MIPS II */
".set noreorder\n"
" sync\n"
"1: ll %0, %2\n"                /* old = *x (load-linked) */
" addu %1, %0, %3\n"            /* dummy = old + v */
" sc %1, %2\n"                  /* try to store the sum */
" beq %1, $0, 1b\n"             /* retry if interrupted */
" nop\n"                        /* (delay slot) */
" sync\n"
".set pop\n"
: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
return old;
}
/* Atomically increments *x by one.  No return value; use
 * a_fetch_add if the previous value is needed. */
static inline void a_inc(volatile int *x)
{
int dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"                  /* ll/sc require at least MIPS II */
".set noreorder\n"
" sync\n"
"1: ll %0, %1\n"                /* dummy = *x (load-linked) */
" addu %0, %0, 1\n"             /* dummy += 1 */
" sc %0, %1\n"                  /* try to store back */
" beq %0, $0, 1b\n"             /* retry if interrupted */
" nop\n"                        /* (delay slot) */
" sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*x) : : "memory" );
}
/* Atomically decrements *x by one.  Mirror image of a_inc using
 * subu instead of addu. */
static inline void a_dec(volatile int *x)
{
int dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"                  /* ll/sc require at least MIPS II */
".set noreorder\n"
" sync\n"
"1: ll %0, %1\n"                /* dummy = *x (load-linked) */
" subu %0, %0, 1\n"             /* dummy -= 1 */
" sc %0, %1\n"                  /* try to store back */
" beq %0, $0, 1b\n"             /* retry if interrupted */
" nop\n"                        /* (delay slot) */
" sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*x) : : "memory" );
}
/* Stores x into *p with sync barriers on both sides of the plain
 * sw, so the store is ordered with respect to surrounding memory
 * operations.  A single aligned word store needs no ll/sc loop. */
static inline void a_store(volatile int *p, int x)
{
__asm__ __volatile__(
".set push\n"
".set mips2\n"
".set noreorder\n"
" sync\n"
" sw %1, %0\n"                  /* *p = x */
" sync\n"
".set pop\n"
: "+m"(*p) : "r"(x) : "memory" );
}
  131. static inline void a_spin()
  132. {
  133. }
/* Forces an immediate crash by storing through a null pointer —
 * used to abort on unrecoverable internal errors.  The volatile
 * qualifier keeps the compiler from optimizing the store away.
 * NOTE(review): the empty parameter list is a K&R-style
 * unspecified-arguments prototype; (void) would be stricter. */
static inline void a_crash()
{
*(volatile char *)0=0;
}
/* Atomically performs *p &= v.  No return value.  ll/sc retry
 * loop with sync barriers on both sides. */
static inline void a_and(volatile int *p, int v)
{
int dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"                  /* ll/sc require at least MIPS II */
".set noreorder\n"
" sync\n"
"1: ll %0, %1\n"                /* dummy = *p (load-linked) */
" and %0, %0, %2\n"             /* dummy &= v */
" sc %0, %1\n"                  /* try to store back */
" beq %0, $0, 1b\n"             /* retry if interrupted */
" nop\n"                        /* (delay slot) */
" sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
/* Atomically performs *p |= v.  No return value.  Mirror image of
 * a_and using or instead of and. */
static inline void a_or(volatile int *p, int v)
{
int dummy;
__asm__ __volatile__(
".set push\n"
".set mips2\n"                  /* ll/sc require at least MIPS II */
".set noreorder\n"
" sync\n"
"1: ll %0, %1\n"                /* dummy = *p (load-linked) */
" or %0, %0, %2\n"              /* dummy |= v */
" sc %0, %1\n"                  /* try to store back */
" beq %0, $0, 1b\n"             /* retry if interrupted */
" nop\n"                        /* (delay slot) */
" sync\n"
".set pop\n"
: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
  172. static inline void a_or_l(volatile void *p, long v)
  173. {
  174. a_or(p, v);
  175. }
/* 64-bit atomic AND, emulated as two independent 32-bit atomic
 * ANDs (one per word).  The full 64-bit update is NOT a single
 * atomic operation — only each half is.  Which array element is
 * the low word depends on endianness, but since both halves are
 * and-ed with their matching word of v the result is the same
 * either way.  The (int *) casts drop volatile; a_and's parameter
 * type restores it. */
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
union { uint64_t v; uint32_t r[2]; } u = { v };  /* split v into two 32-bit words */
a_and((int *)p, u.r[0]);
a_and((int *)p+1, u.r[1]);
}
/* 64-bit atomic OR, emulated as two independent 32-bit atomic
 * ORs — see a_and_64 for the atomicity and endianness caveats;
 * the same reasoning applies here with or in place of and. */
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
union { uint64_t v; uint32_t r[2]; } u = { v };  /* split v into two 32-bit words */
a_or((int *)p, u.r[0]);
a_or((int *)p+1, u.r[1]);
}
#endif