root/arch/powerpc/include/asm/spinlock.h


DEFINITIONS

This source file includes the following definitions.
  1. vcpu_is_preempted
  2. arch_spin_value_unlocked
  3. arch_spin_is_locked
  4. __arch_spin_trylock
  5. arch_spin_trylock
  6. splpar_spin_yield
  7. splpar_rw_yield
  8. is_shared_processor
  9. spin_yield
  10. rw_yield
  11. arch_spin_lock
  12. arch_spin_lock_flags
  13. arch_spin_unlock
  14. __arch_read_trylock
  15. __arch_write_trylock
  16. arch_read_lock
  17. arch_write_lock
  18. arch_read_trylock
  19. arch_write_trylock
  20. arch_read_unlock
  21. arch_write_unlock

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *      Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN      1
#endif
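
/*
 * Worked example (editorial note, not in the original source): with the
 * 64-bit layouts above, the paca's constant 0x8000 lock_token sits next to
 * paca_index, so a lock held by logical CPU 3 reads back as 0x80000003 --
 * the token in the upper half and the holder's CPU number in the lower
 * half.  On 32-bit kernels the lock word simply holds 1.
 */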

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
        if (!static_branch_unlikely(&shared_processor))
                return false;
        return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif
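
/*
 * Editorial note on vcpu_is_preempted() above: under PAPR the hypervisor
 * bumps a virtual processor's yield_count each time it is dispatched or
 * preempted, so an odd count means the vcpu is not currently running on a
 * physical processor.
 */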

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        smp_mb();
        return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, token;

        token = LOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"
        : "=&r" (tmp)
        : "r" (token), "r" (&lock->slock)
        : "cr0", "memory");

        return tmp;
}
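
/*
 * Editorial note: the lwarx/stwcx. pair above is a load-and-reserve /
 * store-conditional sequence, so the lock token is only stored if no other
 * CPU has touched the lock word since the load; the trailing ",1" in
 * PPC_LWARX sets the exclusive-access hint (EH) where the CPU supports it,
 * and PPC_ACQUIRE_BARRIER orders the critical section after a successful
 * acquisition.
 */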

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
#endif
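
/*
 * Illustrative sketch (editorial addition, not part of this header): the
 * out-of-line splpar_spin_yield() is defined in arch/powerpc/lib/locks.c
 * and roughly follows the shape below -- it recovers the holder CPU from
 * the lock word described above and confers the rest of our timeslice to
 * it with the H_CONFER hypercall.  This is a simplified approximation
 * under those assumptions, not the authoritative implementation.
 */
#if 0   /* example only, never compiled */
void example_splpar_spin_yield(arch_spinlock_t *lock)
{
        unsigned int lock_value, holder_cpu, yield_count;

        lock_value = lock->slock;
        if (lock_value == 0)
                return;                         /* just got released */
        holder_cpu = lock_value & 0xffff;       /* low bits name the holder */
        yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
        if ((yield_count & 1) == 0)
                return;                         /* holder vcpu is running */
        rmb();
        if (lock->slock != lock_value)
                return;                         /* the lock changed hands */
        /* give the rest of our timeslice to the holder */
        plpar_hcall_norets(H_CONFER,
                get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif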

static inline bool is_shared_processor(void)
{
/*
 * LPPACA is only available on Pseries, so guard anything LPPACA-related to
 * allow other platforms (which include this common header) to compile.
 */
#ifdef CONFIG_PPC_PSERIES
        return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
                lppaca_shared_proc(local_paca->lppaca_ptr));
#else
        return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
        if (is_shared_processor())
                splpar_spin_yield(lock);
        else
                barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
        if (is_shared_processor())
                splpar_rw_yield(lock);
        else
                barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
}
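
/*
 * Editorial note: HMT_low()/HMT_medium() adjust the hardware thread's SMT
 * priority, so a CPU busy-waiting in the loop above hands more of the
 * core's resources to its sibling threads until the lock looks free again.
 */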

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
}
#define arch_spin_lock_flags arch_spin_lock_flags
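
/*
 * Editorial note: while the lock is contended, arch_spin_lock_flags() saves
 * the current (interrupts-off) state, restores the caller's flags -- which
 * may re-enable interrupts -- for the duration of the busy-wait, and then
 * disables interrupts again before retrying the trylock.
 */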

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __asm__ __volatile__("# arch_spin_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        lock->slock = 0;
}
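
/*
 * Editorial note: unlock is a plain store of 0; PPC_RELEASE_BARRIER
 * (lwsync where available) makes sure everything done inside the critical
 * section is visible before the lock word is seen as free.
 */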

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND        "extsw  %0,%0\n"
#define WRLOCK_TOKEN            LOCK_TOKEN      /* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN            (-1)
#endif
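
/*
 * Editorial note: the rwlock word encodes its state in its sign -- zero
 * means unlocked, a positive value is the number of active readers, and a
 * negative value (WRLOCK_TOKEN: the 0x8000xxxx lock token read as a signed
 * 32-bit quantity on 64-bit, or -1 on 32-bit) means write-locked.  A
 * hypothetical decoder, for illustration only (not part of the kernel API):
 */
#if 0   /* example only, never compiled */
static inline long example_rwlock_state(const arch_rwlock_t *rw)
{
        long v = rw->lock;

        if (v == 0)
                return 0;       /* unlocked */
        if (v > 0)
                return v;       /* number of readers currently holding it */
        return -1;              /* held by a writer */
}
#endif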

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%1,1) "\n"
        __DO_SIGN_EXTEND
"       addic.          %0,%0,1\n\
        ble-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}
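
/*
 * Editorial note: on 64-bit the 32-bit lock word is sign-extended first
 * (__DO_SIGN_EXTEND) so that a write-held lock appears negative; the
 * "addic. ; ble-" pair then refuses the read lock whenever the incremented
 * value is not strictly positive, i.e. whenever a writer holds the lock.
 */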

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
        long tmp, token;

        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (token), "r" (&rw->lock)
        : "cr0", "memory");

        return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_rw_yield(rw);
                } while (unlikely(rw->lock < 0));
                HMT_medium();
        }
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_rw_yield(rw);
                } while (unlikely(rw->lock != 0));
                HMT_medium();
        }
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "# read_unlock\n\t"
        PPC_RELEASE_BARRIER
"1:     lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "xer", "memory");
}
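
/*
 * Editorial note: read_unlock atomically decrements the reader count with
 * another lwarx/stwcx. loop; the release barrier in front of it ensures the
 * critical section completes before the count is seen to drop.
 */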

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __asm__ __volatile__("# write_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        rw->lock = 0;
}

#define arch_spin_relax(lock)   spin_yield(lock)
#define arch_read_relax(lock)   rw_yield(lock)
#define arch_write_relax(lock)  rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()
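
/*
 * Editorial note: lock acquisition above provides only acquire ordering,
 * which on powerpc does not order earlier stores against later loads, so
 * smp_mb__after_spinlock() must be a full smp_mb() for callers that need
 * full ordering across the lock.
 */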

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
