atomic_arch.h
/* x86_64 atomic primitives.  Defining each name to itself marks the
 * operation as provided by this arch header; the uint64_t type is
 * expected to be in scope via the including file. */

/* Count trailing zero bits of a nonzero 64-bit value (bsf leaves the
 * destination undefined when the input is 0). */
#define a_ctz_64 a_ctz_64
static inline int a_ctz_64(uint64_t x)
{
	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
	return x;
}

/* Atomic bitwise AND/OR on 64-bit and long-sized objects. */
#define a_and_64 a_and_64
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; and %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or_64 a_or_64
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; or %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or_l a_or_l
static inline void a_or_l(volatile void *p, long v)
{
	__asm__( "lock ; or %1, %0"
		: "=m"(*(long *)p) : "r"(v) : "memory" );
}

/* Atomic compare-and-swap on pointer-sized and int-sized objects.
 * Both return the value previously stored at *p; the swap took place
 * iff that value equals the expected argument t. */
#define a_cas_p a_cas_p
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
	return t;
}

#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
	return t;
}
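
/* Hypothetical usage sketch (not part of atomic_arch.h): a typical
 * compare-and-swap retry loop built on a_cas above.  The helper name
 * atomic_store_max is an illustration only.  It atomically raises *p
 * to at least v and returns the value observed before the update. */
static inline int atomic_store_max(volatile int *p, int v)
{
	int old = *p;
	while (old < v) {
		int seen = a_cas(p, old, v);
		if (seen == old) break;  /* swap succeeded */
		old = seen;              /* another thread won; retry with its value */
	}
	return old;
}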
/* Atomic bitwise OR/AND on int-sized objects. */
#define a_or a_or
static inline void a_or(volatile int *p, int v)
{
	__asm__( "lock ; or %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_and a_and
static inline void a_and(volatile int *p, int v)
{
	__asm__( "lock ; and %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

/* Atomic exchange: stores v and returns the old value.  xchg with a
 * memory operand is implicitly locked, so no lock prefix is needed. */
#define a_swap a_swap
static inline int a_swap(volatile int *x, int v)
{
	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
	return v;
}

/* Atomic fetch-and-add: adds v and returns the value *x held before. */
#define a_fetch_add a_fetch_add
static inline int a_fetch_add(volatile int *x, int v)
{
	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
	return v;
}
/* Atomic increment/decrement. */
#define a_inc a_inc
static inline void a_inc(volatile int *x)
{
	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
}

#define a_dec a_dec
static inline void a_dec(volatile int *x)
{
	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
}

/* Store with full ordering: the locked orl to the stack acts as a
 * full barrier after the plain mov. */
#define a_store a_store
static inline void a_store(volatile int *p, int x)
{
	__asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
}

/* CPU hint for spin-wait loops. */
#define a_spin a_spin
static inline void a_spin()
{
	__asm__ __volatile__( "pause" : : : "memory" );
}

/* Compiler barrier only; no fence instruction is emitted, relying on
 * the strongly ordered x86 memory model. */
#define a_barrier a_barrier
static inline void a_barrier()
{
	__asm__ __volatile__( "" : : : "memory" );
}

/* Force a crash: hlt is privileged, so executing it from user space
 * faults and kills the process. */
#define a_crash a_crash
static inline void a_crash()
{
	__asm__ __volatile__( "hlt" : : : "memory" );
}
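
/* Hypothetical usage sketch (not part of atomic_arch.h): a minimal
 * spinlock combining the primitives above.  a_cas returns the expected
 * value 0 only when this thread installed the 1; a_spin issues PAUSE
 * while waiting; a_store releases the lock with its trailing barrier.
 * The names spin_lock/spin_unlock are illustrations only. */
static inline void spin_lock(volatile int *lock)
{
	while (a_cas(lock, 0, 1) != 0)
		a_spin();
}

static inline void spin_unlock(volatile int *lock)
{
	a_store(lock, 0);
}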