root/arch/arm/include/asm/spinlock.h


DEFINITIONS

This source file includes the following definitions.
  1. dsb_sev
  2. arch_spin_lock
  3. arch_spin_trylock
  4. arch_spin_unlock
  5. arch_spin_value_unlocked
  6. arch_spin_is_locked
  7. arch_spin_is_contended
  8. arch_write_lock
  9. arch_write_trylock
  10. arch_write_unlock
  11. arch_read_lock
  12. arch_read_unlock
  13. arch_read_trylock

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef __ASM_SPINLOCK_H
   3 #define __ASM_SPINLOCK_H
   4 
   5 #if __LINUX_ARM_ARCH__ < 6
   6 #error SMP not supported on pre-ARMv6 CPUs
   7 #endif
   8 
   9 #include <linux/prefetch.h>
  10 #include <asm/barrier.h>
  11 #include <asm/processor.h>
  12 
  13 /*
  14  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
  15  * extensions, so when running on UP, we have to patch these instructions away.
  16  */
  17 #ifdef CONFIG_THUMB2_KERNEL
  18 /*
  19  * For Thumb-2, special care is needed to ensure that the conditional WFE
  20  * instruction really does assemble to exactly 4 bytes (as required by
  21  * the SMP_ON_UP fixup code).   By itself "wfene" might cause the
  22  * assembler to insert an extra (16-bit) IT instruction, depending on the
  23  * presence or absence of neighbouring conditional instructions.
  24  *
  25  * To avoid this unpredictability, an appropriate IT is inserted explicitly:
  26  * the assembler won't change IT instructions which are explicitly present
  27  * in the input.
  28  */
  29 #define WFE(cond)       __ALT_SMP_ASM(          \
  30         "it " cond "\n\t"                       \
  31         "wfe" cond ".n",                        \
  32                                                 \
  33         "nop.w"                                 \
  34 )
  35 #else
  36 #define WFE(cond)       __ALT_SMP_ASM("wfe" cond, "nop")
  37 #endif
  38 
  39 #define SEV             __ALT_SMP_ASM(WASM(sev), WASM(nop))
  40 
  41 static inline void dsb_sev(void)
  42 {
  43 
  44         dsb(ishst);
  45         __asm__(SEV);
  46 }
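
dsb_sev() is the wake-up half of the WFE/SEV pairing used throughout this file: a waiting CPU parks in wfe(), and the CPU that updates the value being waited on calls dsb_sev() so the store is visible before the wake-up event is sent. A minimal sketch of that pairing, using kernel primitives that do exist (READ_ONCE, WRITE_ONCE, smp_mb, wfe) but with a made-up flag and made-up helper names:

/* Illustrative only -- not part of this header. */
static inline void wait_for_flag(int *flag)
{
        while (!READ_ONCE(*flag))
                wfe();          /* sleep until some CPU executes SEV */
        smp_mb();               /* order the flag read before later accesses */
}

static inline void raise_flag(int *flag)
{
        smp_mb();               /* order earlier accesses before the flag write */
        WRITE_ONCE(*flag, 1);
        dsb_sev();              /* drain the store, then wake all waiters */
}
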
  47 
  48 /*
  49  * ARMv6 ticket-based spin-locking.
  50  *
  51  * A memory barrier is required after we get a lock, and before we
  52  * release it, because V6 CPUs are assumed to have weakly ordered
  53  * memory.
  54  */
  55 
  56 static inline void arch_spin_lock(arch_spinlock_t *lock)
  57 {
  58         unsigned long tmp;
  59         u32 newval;
  60         arch_spinlock_t lockval;
  61 
  62         prefetchw(&lock->slock);
  63         __asm__ __volatile__(
  64 "1:     ldrex   %0, [%3]\n"
  65 "       add     %1, %0, %4\n"
  66 "       strex   %2, %1, [%3]\n"
  67 "       teq     %2, #0\n"
  68 "       bne     1b"
  69         : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
  70         : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
  71         : "cc");
  72 
  73         while (lockval.tickets.next != lockval.tickets.owner) {
  74                 wfe();
  75                 lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
  76         }
  77 
  78         smp_mb();
  79 }
  80 
  81 static inline int arch_spin_trylock(arch_spinlock_t *lock)
  82 {
  83         unsigned long contended, res;
  84         u32 slock;
  85 
  86         prefetchw(&lock->slock);
  87         do {
  88                 __asm__ __volatile__(
  89                 "       ldrex   %0, [%3]\n"
  90                 "       mov     %2, #0\n"
  91                 "       subs    %1, %0, %0, ror #16\n"
  92                 "       addeq   %0, %0, %4\n"
  93                 "       strexeq %2, %0, [%3]"
  94                 : "=&r" (slock), "=&r" (contended), "=&r" (res)
  95                 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
  96                 : "cc");
  97         } while (res);
  98 
  99         if (!contended) {
 100                 smp_mb();
 101                 return 1;
 102         } else {
 103                 return 0;
 104         }
 105 }
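
The "subs %1, %0, %0, ror #16" line is the contention test: rotating the lock word by 16 bits swaps the owner and next halves, so the subtraction yields zero exactly when they are equal, i.e. when the lock is free. A hedged sketch of the same test and conditional ticket grab using C11 atomics, assuming the little-endian layout (owner in the low 16 bits, next in the high 16 bits, TICKET_SHIFT == 16); the function name is invented:

#include <stdatomic.h>
#include <stdint.h>

static int ticket_trylock(_Atomic uint32_t *slock)
{
        uint32_t old = atomic_load_explicit(slock, memory_order_relaxed);

        /* the "subs ..., ror #16" test: free only when next == owner */
        if ((old >> 16) != (old & 0xffffu))
                return 0;

        /* grab the next ticket only if nobody raced us (strexeq's job) */
        return atomic_compare_exchange_strong_explicit(slock, &old,
                        old + (1u << 16),
                        memory_order_acquire, memory_order_relaxed);
}
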
 106 
 107 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 108 {
 109         smp_mb();
 110         lock->tickets.owner++;
 111         dsb_sev();
 112 }
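
Taken together, arch_spin_lock(), arch_spin_trylock() and arch_spin_unlock() implement a standard ticket lock: lock atomically takes the next ticket and waits for owner to catch up, and unlock simply advances owner (only the holder writes it, so no atomic read-modify-write is needed). For readers who prefer not to decode the ldrex/strex sequences, a rough portable equivalent in C11 atomics follows; it is only a sketch, with invented names, and it busy-polls where the kernel sleeps in WFE:

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
        _Atomic uint16_t next;          /* ticket handed to the next arrival */
        _Atomic uint16_t owner;         /* ticket currently allowed to hold the lock */
};

static void ticket_lock(struct ticket_lock *l)
{
        /* take a ticket; the ldrex/add/strex loop does this atomically */
        uint16_t me = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);

        /* wait for our turn; acquire ordering plays the role of smp_mb() */
        while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
                ;                       /* the kernel sleeps in WFE here */
}

static void ticket_unlock(struct ticket_lock *l)
{
        uint16_t next_owner = atomic_load_explicit(&l->owner, memory_order_relaxed) + 1;

        /* release ordering plays the role of the smp_mb() before the store */
        atomic_store_explicit(&l->owner, next_owner, memory_order_release);
}
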
 113 
 114 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 115 {
 116         return lock.tickets.owner == lock.tickets.next;
 117 }
 118 
 119 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 120 {
 121         return !arch_spin_value_unlocked(READ_ONCE(*lock));
 122 }
 123 
 124 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 125 {
 126         struct __raw_tickets tickets = READ_ONCE(lock->tickets);
 127         return (tickets.next - tickets.owner) > 1;
 128 }
 129 #define arch_spin_is_contended  arch_spin_is_contended
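
arch_spin_is_contended() reports whether anyone is queued behind the current holder: next - owner is 0 for a free lock, 1 for a lock held with no waiters, and greater than 1 when other CPUs are spinning. A one-line sketch with the same little-endian layout assumption as above and an invented helper name:

static inline int ticket_queue_depth(uint32_t slock)
{
        /* 0: free, 1: held and uncontended, >1: contended */
        return (uint16_t)((slock >> 16) - (slock & 0xffffu));
}
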
 130 
 131 /*
 132  * RWLOCKS
 133  *
 134  *
 135  * Write locks are easy - we just set bit 31.  When unlocking, we can
 136  * just write zero since the lock is exclusively held.
 137  */
 138 
 139 static inline void arch_write_lock(arch_rwlock_t *rw)
 140 {
 141         unsigned long tmp;
 142 
 143         prefetchw(&rw->lock);
 144         __asm__ __volatile__(
 145 "1:     ldrex   %0, [%1]\n"
 146 "       teq     %0, #0\n"
 147         WFE("ne")
 148 "       strexeq %0, %2, [%1]\n"
 149 "       teq     %0, #0\n"
 150 "       bne     1b"
 151         : "=&r" (tmp)
 152         : "r" (&rw->lock), "r" (0x80000000)
 153         : "cc");
 154 
 155         smp_mb();
 156 }
 157 
 158 static inline int arch_write_trylock(arch_rwlock_t *rw)
 159 {
 160         unsigned long contended, res;
 161 
 162         prefetchw(&rw->lock);
 163         do {
 164                 __asm__ __volatile__(
 165                 "       ldrex   %0, [%2]\n"
 166                 "       mov     %1, #0\n"
 167                 "       teq     %0, #0\n"
 168                 "       strexeq %1, %3, [%2]"
 169                 : "=&r" (contended), "=&r" (res)
 170                 : "r" (&rw->lock), "r" (0x80000000)
 171                 : "cc");
 172         } while (res);
 173 
 174         if (!contended) {
 175                 smp_mb();
 176                 return 1;
 177         } else {
 178                 return 0;
 179         }
 180 }
 181 
 182 static inline void arch_write_unlock(arch_rwlock_t *rw)
 183 {
 184         smp_mb();
 185 
 186         __asm__ __volatile__(
 187         "str    %1, [%0]\n"
 188         :
 189         : "r" (&rw->lock), "r" (0)
 190         : "cc");
 191 
 192         dsb_sev();
 193 }
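
As the RWLOCKS comment says, the write side is just bit 31 of a 32-bit word: a writer may claim the lock only when the whole word is zero (no readers and no writer), and releases it by storing zero because it holds the lock exclusively. A portable sketch of the write path in C11 atomics, with invented names and busy-polling where the kernel uses WFE:

#include <stdatomic.h>
#include <stdint.h>

#define RW_WRITER_BIT   0x80000000u     /* bit 31, as in the asm above */

static void rw_write_lock(_Atomic uint32_t *lock)
{
        uint32_t expected;

        do {
                expected = 0;           /* the word must be completely idle */
        } while (!atomic_compare_exchange_weak_explicit(lock, &expected,
                        RW_WRITER_BIT,
                        memory_order_acquire, memory_order_relaxed));
}

static int rw_write_trylock(_Atomic uint32_t *lock)
{
        uint32_t expected = 0;

        return atomic_compare_exchange_strong_explicit(lock, &expected,
                        RW_WRITER_BIT,
                        memory_order_acquire, memory_order_relaxed);
}

static void rw_write_unlock(_Atomic uint32_t *lock)
{
        /* exclusively held, so a plain store of zero is enough */
        atomic_store_explicit(lock, 0, memory_order_release);
}
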
 194 
 195 /*
 196  * Read locks are a bit more hairy:
 197  *  - Exclusively load the lock value.
 198  *  - Increment it.
 199  *  - Store new lock value if positive, and we still own this location.
 200  *    If the value is negative, we've already failed.
 201  *  - If we failed to store the value, we want a negative result.
 202  *  - If we failed, try again.
 203  * Unlocking is similarly hairy.  We may have multiple read locks
 204  * currently active.  However, we know we won't have any write
 205  * locks.
 206  */
 207 static inline void arch_read_lock(arch_rwlock_t *rw)
 208 {
 209         unsigned long tmp, tmp2;
 210 
 211         prefetchw(&rw->lock);
 212         __asm__ __volatile__(
 213 "       .syntax unified\n"
 214 "1:     ldrex   %0, [%2]\n"
 215 "       adds    %0, %0, #1\n"
 216 "       strexpl %1, %0, [%2]\n"
 217         WFE("mi")
 218 "       rsbspl  %0, %1, #0\n"
 219 "       bmi     1b"
 220         : "=&r" (tmp), "=&r" (tmp2)
 221         : "r" (&rw->lock)
 222         : "cc");
 223 
 224         smp_mb();
 225 }
 226 
 227 static inline void arch_read_unlock(arch_rwlock_t *rw)
 228 {
 229         unsigned long tmp, tmp2;
 230 
 231         smp_mb();
 232 
 233         prefetchw(&rw->lock);
 234         __asm__ __volatile__(
 235 "1:     ldrex   %0, [%2]\n"
 236 "       sub     %0, %0, #1\n"
 237 "       strex   %1, %0, [%2]\n"
 238 "       teq     %1, #0\n"
 239 "       bne     1b"
 240         : "=&r" (tmp), "=&r" (tmp2)
 241         : "r" (&rw->lock)
 242         : "cc");
 243 
 244         if (tmp == 0)
 245                 dsb_sev();
 246 }
 247 
 248 static inline int arch_read_trylock(arch_rwlock_t *rw)
 249 {
 250         unsigned long contended, res;
 251 
 252         prefetchw(&rw->lock);
 253         do {
 254                 __asm__ __volatile__(
 255                 "       ldrex   %0, [%2]\n"
 256                 "       mov     %1, #0\n"
 257                 "       adds    %0, %0, #1\n"
 258                 "       strexpl %1, %0, [%2]"
 259                 : "=&r" (contended), "=&r" (res)
 260                 : "r" (&rw->lock)
 261                 : "cc");
 262         } while (res);
 263 
 264         /* If the lock is negative, then it is already held for write. */
 265         if (contended < 0x80000000) {
 266                 smp_mb();
 267                 return 1;
 268         } else {
 269                 return 0;
 270         }
 271 }
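
The read side treats the same word as a reader count: taking a read lock increments it, which only succeeds while the result stays non-negative (bit 31 set by a writer makes it negative), and releasing decrements it; the last reader out, when the count returns to zero, issues dsb_sev() to wake a waiting writer. A matching C11 sketch of the read path, using the same 0x80000000 writer bit as the write-side sketch above (names invented, busy-polling instead of WFE):

#include <stdatomic.h>
#include <stdint.h>

static void rw_read_lock(_Atomic uint32_t *lock)
{
        uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);

        for (;;) {
                if (old & 0x80000000u) {
                        /* writer present: reload and retry (kernel sleeps in WFE) */
                        old = atomic_load_explicit(lock, memory_order_relaxed);
                        continue;
                }
                /* no writer: try to add ourselves as one more reader */
                if (atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
                                memory_order_acquire, memory_order_relaxed))
                        return;
                /* CAS failure refreshed 'old'; just loop and retry */
        }
}

static void rw_read_unlock(_Atomic uint32_t *lock)
{
        /* drop one reader; release ordering replaces the smp_mb() */
        atomic_fetch_sub_explicit(lock, 1, memory_order_release);
}

static int rw_read_trylock(_Atomic uint32_t *lock)
{
        uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);

        if (old & 0x80000000u)          /* already held for write */
                return 0;

        return atomic_compare_exchange_strong_explicit(lock, &old, old + 1,
                        memory_order_acquire, memory_order_relaxed);
}
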
 272 
 273 #endif /* __ASM_SPINLOCK_H */
