atomic_arch.h

#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
	/* Atomic compare-and-swap: if *p == t, store s into *p; returns the
	 * previous value of *p (left in eax by cmpxchg). */
	__asm__ __volatile__ (
		"lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
	return t;
}

#define a_swap a_swap
static inline int a_swap(volatile int *p, int v)
{
	/* Atomic exchange; xchg with a memory operand is implicitly locked.
	 * Returns the old value of *p. */
	__asm__ __volatile__(
		"xchg %0, %1"
		: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
	return v;
}

#define a_fetch_add a_fetch_add
static inline int a_fetch_add(volatile int *p, int v)
{
	/* Atomic fetch-and-add; returns the value *p held before the addition. */
	__asm__ __volatile__(
		"lock ; xadd %0, %1"
		: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
	return v;
}

#define a_and a_and
static inline void a_and(volatile int *p, int v)
{
	/* Atomically *p &= v. */
	__asm__ __volatile__(
		"lock ; and %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or a_or
static inline void a_or(volatile int *p, int v)
{
	/* Atomically *p |= v. */
	__asm__ __volatile__(
		"lock ; or %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_and_64 a_and_64
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	/* 64-bit atomic and; the 64-bit register operand selects the quadword form. */
	__asm__ __volatile__(
		"lock ; and %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or_64 a_or_64
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	/* 64-bit atomic or. */
	__asm__ __volatile__(
		"lock ; or %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_inc a_inc
static inline void a_inc(volatile int *p)
{
	/* Atomic increment. */
	__asm__ __volatile__(
		"lock ; incl %0"
		: "=m"(*p) : "m"(*p) : "memory" );
}

#define a_dec a_dec
static inline void a_dec(volatile int *p)
{
	/* Atomic decrement. */
	__asm__ __volatile__(
		"lock ; decl %0"
		: "=m"(*p) : "m"(*p) : "memory" );
}

#define a_store a_store
static inline void a_store(volatile int *p, int x)
{
	/* Store followed by a locked no-op on the stack, which acts as a full
	 * barrier so the store cannot be reordered past later loads. */
	__asm__ __volatile__(
		"mov %1, %0 ; lock ; orl $0,(%%rsp)"
		: "=m"(*p) : "r"(x) : "memory" );
}

#define a_barrier a_barrier
static inline void a_barrier()
{
	/* Compiler barrier (empty asm with a memory clobber). */
	__asm__ __volatile__( "" : : : "memory" );
}

#define a_spin a_spin
static inline void a_spin()
{
	/* Spin-wait hint; reduces power and pipeline pressure in busy loops. */
	__asm__ __volatile__( "pause" : : : "memory" );
}

#define a_crash a_crash
static inline void a_crash()
{
	/* hlt is privileged, so executing it from user mode faults and kills the process. */
	__asm__ __volatile__( "hlt" : : : "memory" );
}

#define a_ctz_64 a_ctz_64
static inline int a_ctz_64(uint64_t x)
{
	/* Count trailing zeros; result is undefined for x == 0. */
	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
	return x;
}

#define a_ctz_32 a_ctz_32
static inline int a_ctz_32(uint32_t x)
{
	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
	return x;
}

#define a_clz_64 a_clz_64
static inline int a_clz_64(uint64_t x)
{
	/* bsr yields the index of the highest set bit; xor with 63 converts it
	 * to a leading-zero count. Undefined for x == 0. */
	__asm__( "bsr %1,%0 ; xor $63,%0" : "=r"(x) : "r"(x) );
	return x;
}