atomic.c

#include "libc.h"
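
/* two strategies are used below: on cpus whose hwcap word (the kernel's
 * AT_HWCAP value, saved in __hwcap) advertises CPU_HAS_LLSC, the sh4a
 * movli.l/movco.l load-linked/store-conditional pair and the synco barrier
 * implement the atomics directly; older cpus fall back to the kernel's gusa
 * restartable sequences described below. the a_* macros in the arch's
 * atomic.h are expected to dispatch to these __sh_* functions. */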
#define LLSC_CLOBBERS "r0", "t", "memory"
#define LLSC_START(mem) \
	"0:	movli.l @" mem ", r0\n"
#define LLSC_END(mem) \
	"1:	movco.l r0, @" mem "\n" \
	"	bf 0b\n" \
	"	synco\n"
/* gusa is a hack in the kernel which lets you create a sequence of instructions
 * which will be restarted if the process is preempted in the middle of the
 * sequence. It will do for implementing atomics on non-smp systems. ABI is:
 * r0  = address of first instruction after the atomic sequence
 * r1  = original stack pointer
 * r15 = -1 * length of atomic sequence in bytes
 */
#define GUSA_CLOBBERS "r0", "r1", "memory"
#define GUSA_START(mem,old,nop) \
	"	.align 2\n" \
	"	mova 1f, r0\n" \
	nop \
	"	mov r15, r1\n" \
	"	mov #(0f-1f), r15\n" \
	"0:	mov.l @" mem ", " old "\n"
/* the target of mova must be 4 byte aligned, so we may need a nop */
#define GUSA_START_ODD(mem,old)  GUSA_START(mem,old,"")
#define GUSA_START_EVEN(mem,old) GUSA_START(mem,old,"\tnop\n")
#define GUSA_END(mem,new) \
	"	mov.l " new ", @" mem "\n" \
	"1:	mov r1, r15\n"

#define CPU_HAS_LLSC 0x0040
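
/* atomic compare-and-swap: if *p equals t, store s into *p; either way,
 * return the value *p held when the sequence started */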
int __sh_cas(volatile int *p, int t, int s)
{
	int old;
	if (__hwcap & CPU_HAS_LLSC) {
		__asm__ __volatile__(
			LLSC_START("%1")
			"	mov r0, %0\n"
			"	cmp/eq %0, %2\n"
			"	bf 1f\n"
			"	mov %3, r0\n"
			LLSC_END("%1")
			: "=&r"(old) : "r"(p), "r"(t), "r"(s) : LLSC_CLOBBERS);
	} else {
		__asm__ __volatile__(
			GUSA_START_EVEN("%1", "%0")
			"	cmp/eq %0, %2\n"
			"	bf 1f\n"
			GUSA_END("%1", "%3")
			: "=&r"(old) : "r"(p), "r"(t), "r"(s) : GUSA_CLOBBERS, "t");
	}
	return old;
}
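
/* atomically replace *x with v, returning the previous value */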
int __sh_swap(volatile int *x, int v)
{
	int old;
	if (__hwcap & CPU_HAS_LLSC) {
		__asm__ __volatile__(
			LLSC_START("%1")
			"	mov r0, %0\n"
			"	mov %2, r0\n"
			LLSC_END("%1")
			: "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
	} else {
		__asm__ __volatile__(
			GUSA_START_EVEN("%1", "%0")
			GUSA_END("%1", "%2")
			: "=&r"(old) : "r"(x), "r"(v) : GUSA_CLOBBERS);
	}
	return old;
}
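
/* atomically add v to *x, returning the previous value; the gusa form
 * computes the sum in a scratch register so the loaded value survives */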
int __sh_fetch_add(volatile int *x, int v)
{
	int old, dummy;
	if (__hwcap & CPU_HAS_LLSC) {
		__asm__ __volatile__(
			LLSC_START("%1")
			"	mov r0, %0\n"
			"	add %2, r0\n"
			LLSC_END("%1")
			: "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
	} else {
		__asm__ __volatile__(
			GUSA_START_EVEN("%2", "%0")
			"	mov %0, %1\n"
			"	add %3, %1\n"
			GUSA_END("%2", "%1")
			: "=&r"(old), "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
	}
	return old;
}
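
/* plain store; the synco barrier is needed only on smp-capable (llsc)
 * cpus, since cpus limited to gusa are non-smp */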
void __sh_store(volatile int *p, int x)
{
	if (__hwcap & CPU_HAS_LLSC) {
		__asm__ __volatile__(
			"	mov.l %1, @%0\n"
			"	synco\n"
			: : "r"(p), "r"(x) : "memory");
	} else {
		__asm__ __volatile__(
			"	mov.l %1, @%0\n"
			: : "r"(p), "r"(x) : "memory");
	}
}
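
/* atomically and v into *x; no value is returned */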
void __sh_and(volatile int *x, int v)
{
	int dummy;
	if (__hwcap & CPU_HAS_LLSC) {
		__asm__ __volatile__(
			LLSC_START("%0")
			"	and %1, r0\n"
			LLSC_END("%0")
			: : "r"(x), "r"(v) : LLSC_CLOBBERS);
	} else {
		__asm__ __volatile__(
			GUSA_START_ODD("%1", "%0")
			"	and %2, %0\n"
			GUSA_END("%1", "%0")
			: "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
	}
}
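
/* atomically or v into *x; no value is returned */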
void __sh_or(volatile int *x, int v)
{
	int dummy;
	if (__hwcap & CPU_HAS_LLSC) {
		__asm__ __volatile__(
			LLSC_START("%0")
			"	or %1, r0\n"
			LLSC_END("%0")
			: : "r"(x), "r"(v) : LLSC_CLOBBERS);
	} else {
		__asm__ __volatile__(
			GUSA_START_ODD("%1", "%0")
			"	or %2, %0\n"
			GUSA_END("%1", "%0")
			: "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
	}
}