/* atomic.h */

#ifndef _INTERNAL_ATOMIC_H
#define _INTERNAL_ATOMIC_H

#include <stdint.h>
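
/* Count trailing zeros via bsf, which stores the index of the lowest
 * set bit. bsf leaves its destination undefined when the source is
 * zero, so callers must pass a nonzero value. On x86_64 long is 64
 * bits, so the two variants below are equivalent. */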
static inline int a_ctz_64(uint64_t x)
{
	long r;
	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
	return r;
}

static inline int a_ctz_l(unsigned long x)
{
	long r;
	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
	return r;
}
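
/* Atomic bitwise and/or on a 64-bit word. The lock prefix makes the
 * read-modify-write atomic; the "+m" constraint tells the compiler
 * the memory operand is both read and written. */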
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; andq %1, %0"
		: "+m"(*p) : "r"(v) : "memory" );
}

static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; orq %1, %0"
		: "+m"(*p) : "r"(v) : "memory" );
}
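
/* Long-sized (64-bit here) store and bitwise-or. The plain store is
 * atomic for an aligned long on x86; the "memory" clobber is a
 * compiler barrier, and hardware ordering relies on x86's strong
 * memory model. */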
static inline void a_store_l(volatile void *p, long x)
{
	__asm__( "movq %1, %0" : "=m"(*(volatile long *)p) : "r"(x) : "memory" );
}

static inline void a_or_l(volatile void *p, long v)
{
	__asm__( "lock ; orq %1, %0"
		: "+m"(*(volatile long *)p) : "r"(v) : "memory" );
}
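
/* Compare-and-swap family. cmpxchg compares the accumulator (loaded
 * with the expected value t) against *p; if they match, s is stored
 * to *p. Either way the accumulator ends up holding the value that
 * was in *p, so each a_cas*() returns the old value and the swap
 * succeeded iff the return value equals t. */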
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "+m"(*(void *volatile *)p) : "a"(t), "r"(s) : "memory" );
	return t;
}

static inline long a_cas_l(volatile void *p, long t, long s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "+m"(*(volatile long *)p) : "a"(t), "r"(s) : "memory" );
	return t;
}

static inline int a_cas(volatile int *p, int t, int s)
{
	__asm__( "lock ; cmpxchgl %3, %1"
		: "=a"(t), "+m"(*p) : "a"(t), "r"(s) : "memory" );
	return t;
}
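
/* Atomic exchange. xchg with a memory operand asserts the bus lock
 * implicitly, so no lock prefix is needed; the old contents of the
 * memory location are returned in v. */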
static inline void *a_swap_p(void *volatile *x, void *v)
{
	__asm__( "xchg %0, %1" : "=r"(v), "+m"(*x) : "0"(v) : "memory" );
	return v;
}

static inline long a_swap_l(volatile void *x, long v)
{
	__asm__( "xchg %0, %1" : "=r"(v), "+m"(*(volatile long *)x) : "0"(v) : "memory" );
	return v;
}
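
/* 32-bit variants of the atomic or/and above. */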
static inline void a_or(volatile void *p, int v)
{
	__asm__( "lock ; orl %1, %0"
		: "+m"(*(volatile int *)p) : "r"(v) : "memory" );
}

static inline void a_and(volatile void *p, int v)
{
	__asm__( "lock ; andl %1, %0"
		: "+m"(*(volatile int *)p) : "r"(v) : "memory" );
}

static inline int a_swap(volatile int *x, int v)
{
	__asm__( "xchg %0, %1" : "=r"(v), "+m"(*x) : "0"(v) : "memory" );
	return v;
}
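
/* a_xchg is an alias for a_swap. a_fetch_add uses xadd, which adds v
 * to *x and returns the previous value of *x in v. */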
#define a_xchg a_swap

static inline int a_fetch_add(volatile int *x, int v)
{
	__asm__( "lock ; xadd %0, %1" : "=r"(v), "+m"(*x) : "0"(v) : "memory" );
	return v;
}
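
/* Atomic increment and decrement; no value is returned. */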
static inline void a_inc(volatile int *x)
{
	__asm__( "lock ; incl %0" : "+m"(*x) : : "memory" );
}

static inline void a_dec(volatile int *x)
{
	__asm__( "lock ; decl %0" : "+m"(*x) : : "memory" );
}
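
/* Plain 32-bit store; an aligned store is atomic on x86, and the
 * "memory" clobber keeps the compiler from reordering it against
 * surrounding accesses. */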
static inline void a_store(volatile int *p, int x)
{
	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
}
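
/* pause is the CPU hint for spin-wait loops; it reduces power use and
 * avoids starving a hyperthread sibling while spinning. */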
static inline void a_spin(void)
{
	__asm__ __volatile__( "pause" : : : "memory" );
}
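
/* Deliberate crash: hlt is a privileged instruction, so executing it
 * in user mode raises a fault that kills the process. */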
static inline void a_crash(void)
{
	__asm__ __volatile__( "hlt" : : : "memory" );
}

#endif
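
/* Usage sketch (illustrative only, not part of this header): a minimal
 * test-and-set spinlock built from a_swap, a_spin and a_store. The
 * spin_lock/spin_unlock names and the lock variable are hypothetical.
 *
 *	static volatile int lock;
 *
 *	static void spin_lock(void)
 *	{
 *		while (a_swap(&lock, 1))	// old value 1: already held
 *			a_spin();		// back off before retrying
 *	}
 *
 *	static void spin_unlock(void)
 *	{
 *		a_store(&lock, 0);	// compiler barrier + x86 store ordering
 *	}
 */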