/* atomic.h — MIPS (32-bit) atomic primitives built on ll/sc. */
  1. #ifndef _INTERNAL_ATOMIC_H
  2. #define _INTERNAL_ATOMIC_H
  3. #include <stdint.h>
  4. static inline int a_ctz_l(unsigned long x)
  5. {
  6. static const char debruijn32[32] = {
  7. 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  8. 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  9. };
  10. return debruijn32[(x&-x)*0x076be629 >> 27];
  11. }
  12. static inline int a_ctz_64(uint64_t x)
  13. {
  14. uint32_t y = x;
  15. if (!y) {
  16. y = x>>32;
  17. return 32 + a_ctz_l(y);
  18. }
  19. return a_ctz_l(y);
  20. }
/* Atomic compare-and-swap: if *p == t, store s into *p; otherwise leave
 * *p unchanged.  Returns the value of *p observed by the ll, which
 * equals t exactly when the swap took place.
 *
 * Uses a MIPS ll/sc (load-linked/store-conditional) retry loop;
 * ".set mips2" permits ll/sc even when the compiler baseline is mips1.
 * ".set noreorder" because the branch delay slots are filled by hand.
 * Note the local label "1" is defined twice: "bne ... 1f" jumps forward
 * to the final "1:" on mismatch (skipping the sc and the trailing sync),
 * while "beq ... 1b" jumps back to the first "1:" to retry a failed sc.
 * The "addu %1, %4, $0" (copy s into the scratch register) sits in the
 * bne delay slot, so it executes on both paths. */
static inline int a_cas(volatile int *p, int t, int s)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"		/* full barrier before the operation */
		"1: ll %0, %2\n"	/* t = *p, linked */
		" bne %0, %3, 1f\n"	/* mismatch: skip the store */
		" addu %1, %4, $0\n"	/* (delay slot) dummy = s */
		" sc %1, %2\n"		/* try *p = dummy; dummy = success flag */
		" beq %1, $0, 1b\n"	/* sc failed: retry from ll */
		" nop\n"		/* (delay slot) */
		" sync\n"		/* full barrier after a successful swap */
		"1: \n"
		".set pop\n"
		: "=&r"(t), "=&r"(dummy), "+m"(*p) : "r"(t), "r"(s) : "memory" );
	return t;
}
  41. static inline void *a_cas_p(volatile void *p, void *t, void *s)
  42. {
  43. return (void *)a_cas(p, (int)t, (int)s);
  44. }
/* Atomically replace *x with v; returns the previous value of *x.
 * ll/sc retry loop bracketed by full "sync" barriers. */
static inline int a_swap(volatile int *x, int v)
{
	int old, dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"		/* allow ll/sc on a mips1 baseline */
		".set noreorder\n"	/* delay slots filled by hand */
		" sync\n"
		"1: ll %0, %2\n"	/* old = *x, linked */
		" addu %1, %3, $0\n"	/* dummy = v */
		" sc %1, %2\n"		/* try *x = dummy; dummy = success flag */
		" beq %1, $0, 1b\n"	/* sc failed: retry */
		" nop\n"		/* (delay slot) */
		" sync\n"
		".set pop\n"
		: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
	return old;
}
/* Atomically add v to *x; returns the value *x held before the add.
 * ll/sc retry loop bracketed by full "sync" barriers. */
static inline int a_fetch_add(volatile int *x, int v)
{
	int old, dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"		/* allow ll/sc on a mips1 baseline */
		".set noreorder\n"	/* delay slots filled by hand */
		" sync\n"
		"1: ll %0, %2\n"	/* old = *x, linked */
		" addu %1, %0, %3\n"	/* dummy = old + v */
		" sc %1, %2\n"		/* try *x = dummy; dummy = success flag */
		" beq %1, $0, 1b\n"	/* sc failed: retry */
		" nop\n"		/* (delay slot) */
		" sync\n"
		".set pop\n"
		: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
	return old;
}
/* Atomically increment *x by 1 (no return value).
 * ll/sc retry loop bracketed by full "sync" barriers. */
static inline void a_inc(volatile int *x)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"		/* allow ll/sc on a mips1 baseline */
		".set noreorder\n"	/* delay slots filled by hand */
		" sync\n"
		"1: ll %0, %1\n"	/* dummy = *x, linked */
		" addu %0, %0, 1\n"	/* dummy += 1 */
		" sc %0, %1\n"		/* try *x = dummy; dummy = success flag */
		" beq %0, $0, 1b\n"	/* sc failed: retry */
		" nop\n"		/* (delay slot) */
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*x) : : "memory" );
}
/* Atomically decrement *x by 1 (no return value).
 * ll/sc retry loop bracketed by full "sync" barriers. */
static inline void a_dec(volatile int *x)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"		/* allow ll/sc on a mips1 baseline */
		".set noreorder\n"	/* delay slots filled by hand */
		" sync\n"
		"1: ll %0, %1\n"	/* dummy = *x, linked */
		" subu %0, %0, 1\n"	/* dummy -= 1 */
		" sc %0, %1\n"		/* try *x = dummy; dummy = success flag */
		" beq %0, $0, 1b\n"	/* sc failed: retry */
		" nop\n"		/* (delay slot) */
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*x) : : "memory" );
}
/* Store x into *p with full "sync" barriers before and after the
 * store, giving the write seq-cst-like ordering.  No ll/sc needed:
 * an aligned 32-bit sw is a single indivisible store. */
static inline void a_store(volatile int *p, int x)
{
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		" sw %1, %0\n"		/* *p = x */
		" sync\n"
		".set pop\n"
		: "+m"(*p) : "r"(x) : "memory" );
}
  127. static inline void a_spin()
  128. {
  129. }
  130. static inline void a_crash()
  131. {
  132. *(volatile char *)0=0;
  133. }
/* Atomically AND v into *p (no return value).
 * ll/sc retry loop bracketed by full "sync" barriers. */
static inline void a_and(volatile int *p, int v)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"		/* allow ll/sc on a mips1 baseline */
		".set noreorder\n"	/* delay slots filled by hand */
		" sync\n"
		"1: ll %0, %1\n"	/* dummy = *p, linked */
		" and %0, %0, %2\n"	/* dummy &= v */
		" sc %0, %1\n"		/* try *p = dummy; dummy = success flag */
		" beq %0, $0, 1b\n"	/* sc failed: retry */
		" nop\n"		/* (delay slot) */
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
/* Atomically OR v into *p (no return value).
 * ll/sc retry loop bracketed by full "sync" barriers. */
static inline void a_or(volatile int *p, int v)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"		/* allow ll/sc on a mips1 baseline */
		".set noreorder\n"	/* delay slots filled by hand */
		" sync\n"
		"1: ll %0, %1\n"	/* dummy = *p, linked */
		" or %0, %0, %2\n"	/* dummy |= v */
		" sc %0, %1\n"		/* try *p = dummy; dummy = success flag */
		" beq %0, $0, 1b\n"	/* sc failed: retry */
		" nop\n"		/* (delay slot) */
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}
  168. static inline void a_or_l(volatile void *p, long v)
  169. {
  170. a_or(p, v);
  171. }
  172. static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  173. {
  174. union { uint64_t v; uint32_t r[2]; } u = { v };
  175. a_and((int *)p, u.r[0]);
  176. a_and((int *)p+1, u.r[1]);
  177. }
  178. static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  179. {
  180. union { uint64_t v; uint32_t r[2]; } u = { v };
  181. a_or((int *)p, u.r[0]);
  182. a_or((int *)p+1, u.r[1]);
  183. }
  184. #endif