spinlock_32.h
/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller ([email protected])
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub [%0], %%g2\n\t"		/* atomic test-and-set of the lock byte */
	"orcc %%g2, 0x0, %%g0\n\t"	/* was it already held? */
	"bne,a 2f\n\t"			/* yes: spin out of line */
	" ldub [%0], %%g2\n\t"
	".subsection 2\n"
	"2:\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"	/* wait with plain loads until it looks free, */
	"bne,a 2b\n\t"
	" ldub [%0], %%g2\n\t"
	"b,a 1b\n\t"			/* then retry the ldstub */
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;

	/* A single ldstub: returns the old lock byte, 0 means we got it. */
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* A plain zero-byte store releases the lock. */
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                        8  7    0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
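
/* Illustrative only, not part of this header: a plain-C model of the writer
 * protocol the comment above describes.  The real slow paths live in the
 * out-of-line assembly helpers (___rw_write_enter and friends) called below;
 * the names c_model_write_lock and rw_model_t, and the use of GCC __atomic
 * builtins, are assumptions made for this sketch.  byte[3] is the wlock
 * byte only on a big-endian machine like Sparc.
 */
#if 0
typedef union {
	unsigned int word;		/* 24-bit counter | 8-bit wlock */
	unsigned char byte[4];		/* byte[3] is wlock (big-endian) */
} rw_model_t;

static inline void c_model_write_lock(volatile rw_model_t *rw)
{
	for (;;) {
		/* grab the wlock byte (the ldstub on [rw + 3]) */
		while (__atomic_exchange_n(&rw->byte[3], 0xff, __ATOMIC_ACQUIRE) != 0)
			;
		/* no readers left?  Then we own the lock. */
		if ((rw->word & ~0xffU) == 0)
			break;
		/* readers still hold it: drop wlock and try again */
		__atomic_store_n(&rw->byte[3], 0, __ATOMIC_RELEASE);
	}
	rw->word = ~0U;			/* mark the whole word write-held */
}
#endif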
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_enter\n\t"	/* out-of-line slow path */
	" ldstub [%%g1 + 3], %%g2\n"	/* grab the wlock byte in the delay slot */
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)
static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_exit\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_write_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	/* We now own the lock; mark the whole word as write-held. */
	*(volatile __u32 *)&lp->lock = ~0U;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
	"	st %%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* Try to grab the wlock byte. */
	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			/* Readers still hold the lock: back off. */
			((volatile u8*)&rw->lock)[3] = 0;
		else
			/* No readers: mark the whole word as write-held. */
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_try\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* A read lock is available unless a writer holds the wlock byte;
 * a write lock needs the whole word (counter and wlock) to be zero.
 */
#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */