/* atomic_arch.h: 32-bit x86 atomic primitives implemented with inline assembly */

#include <stdint.h>	/* for uint64_t, used by the 64-bit helpers below */

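/* a_ctz_64: count trailing zeros of a 64-bit value. The low 32 bits are
   scanned with bsf first; if they are all zero, the high 32 bits are scanned
   and 32 is added. The result is undefined when x is 0, as with bsf itself. */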
#define a_ctz_64 a_ctz_64
static inline int a_ctz_64(uint64_t x)
{
	int r;
	__asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
		: "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
	return r;
}
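
/* a_ctz_l: count trailing zeros of an unsigned long via bsf; undefined for x == 0 */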
#define a_ctz_l a_ctz_l
static inline int a_ctz_l(unsigned long x)
{
	long r;
	__asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
	return r;
}
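
/* a_and_64: and a 64-bit value into *p as two separate locked 32-bit ands.
   Each half is atomic on its own, but the pair is not one atomic 64-bit update. */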
#define a_and_64 a_and_64
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
}
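
/* a_or_64: or a 64-bit value into *p, likewise as two independent locked 32-bit ors */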
#define a_or_64 a_or_64
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	__asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
}
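
/* a_or_l: atomically or v into the long-sized object at p */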
#define a_or_l a_or_l
static inline void a_or_l(volatile void *p, long v)
{
	__asm__( "lock ; orl %1, %0"
		: "=m"(*(long *)p) : "r"(v) : "memory" );
}
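
/* a_cas: atomic compare-and-swap. If *p equals t, s is stored into *p;
   the value *p held before the operation is returned in either case. */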
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
	return t;
}
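
/* a_or: atomically or v into *p */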
#define a_or a_or
static inline void a_or(volatile int *p, int v)
{
	__asm__( "lock ; orl %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}
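
/* a_and: atomically and v into *p */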
#define a_and a_and
static inline void a_and(volatile int *p, int v)
{
	__asm__( "lock ; andl %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}
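
/* a_swap: atomically exchange *x with v and return the old value.
   xchg with a memory operand is implicitly locked, so no lock prefix is needed. */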
#define a_swap a_swap
static inline int a_swap(volatile int *x, int v)
{
	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
	return v;
}
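
/* a_fetch_add: atomically add v to *x and return the value *x held before the add */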
#define a_fetch_add a_fetch_add
static inline int a_fetch_add(volatile int *x, int v)
{
	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
	return v;
}
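
/* a_inc: atomically increment *x */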
#define a_inc a_inc
static inline void a_inc(volatile int *x)
{
	__asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
}
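
/* a_dec: atomically decrement *x */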
#define a_dec a_dec
static inline void a_dec(volatile int *x)
{
	__asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
}
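
/* a_store: store x to *p, then perform a locked no-op on the stack so the
   store is ordered before any later loads. The locked or serves as a full
   barrier that also works on CPUs without SSE2's mfence. */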
#define a_store a_store
static inline void a_store(volatile int *p, int x)
{
	__asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory" );
}
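
/* a_spin: pause hint for spin-wait loops; reduces power use and pipeline
   penalties while busy-waiting */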
#define a_spin a_spin
static inline void a_spin()
{
	__asm__ __volatile__( "pause" : : : "memory" );
}
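
/* a_barrier: compiler-only barrier. x86's strong memory ordering makes a CPU
   fence unnecessary here; the one reordering that matters (store then load)
   is handled by the fence in a_store. */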
#define a_barrier a_barrier
static inline void a_barrier()
{
	__asm__ __volatile__( "" : : : "memory" );
}
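
/* a_crash: force abnormal termination. hlt is a privileged instruction, so
   executing it in user mode raises an exception that kills the process. */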
#define a_crash a_crash
static inline void a_crash()
{
	__asm__ __volatile__( "hlt" : : : "memory" );
}
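
/* Illustrative sketch, not part of the original header: one way the
 * primitives above compose into a minimal test-and-set spinlock. The names
 * example_lock and example_unlock are hypothetical and exist only to show
 * intended usage of a_swap, a_spin, and a_store. */
static inline void example_lock(volatile int *l)
{
	/* a_swap returns the previous value; keep trying until it was 0 (unlocked) */
	while (a_swap(l, 1))
		a_spin();
}
static inline void example_unlock(volatile int *l)
{
	/* a_store includes a full barrier, so prior critical-section writes are
	   visible before the lock is observed as released */
	a_store(l, 0);
}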