arch/ia64/include/asm/spinlock.h


DEFINITIONS

This source file includes the following definitions.
  1. __ticket_spin_lock
  2. __ticket_spin_trylock
  3. __ticket_spin_unlock
  4. __ticket_spin_is_locked
  5. __ticket_spin_is_contended
  6. arch_spin_value_unlocked
  7. arch_spin_is_locked
  8. arch_spin_is_contended
  9. arch_spin_lock
  10. arch_spin_trylock
  11. arch_spin_unlock
  12. arch_spin_lock_flags
  13. arch_read_lock_flags
  14. arch_write_lock_flags
  15. arch_write_unlock
  16. arch_write_unlock
  17. arch_read_trylock

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define arch_spin_lock_init(x)                  ((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT    17
#define TICKET_BITS     15
#define TICKET_MASK     ((1 << TICKET_BITS) - 1)

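/*
 * For example, a lock word of 0x00020003 decodes as now_serving == 1
 * (bits 17-31) and next_ticket == 3 (bits 0-14): tickets 0-2 have been
 * handed out, the CPU holding ticket 1 owns the lock, and the CPU
 * holding ticket 2 is still queued, so the lock reads as both locked
 * and contended by the helpers below.
 */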
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
        int     *p = (int *)&lock->lock, ticket, serve;

        ticket = ia64_fetchadd(1, p, acq);

        if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
                return;

        ia64_invala();

        for (;;) {
                asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

                if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
                        return;
                cpu_relax();
        }
}

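/*
 * Trylock only attempts the atomic update when the lock currently looks
 * free (now_serving == next_ticket).  On success the cmpxchg bumps
 * next_ticket by one, making the caller the ticket being served; if the
 * word changed in the meantime the cmpxchg fails and we report failure
 * instead of queueing.
 */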
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
        int tmp = READ_ONCE(lock->lock);

        if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
                return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
        return 0;
}

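/*
 * Unlock works on the upper halfword only: bit 0 of that halfword is the
 * bit-16 pad bit and bits 1-15 are now_serving, so "(tmp + 2) & ~1"
 * advances now_serving by one while keeping the pad bit clear.  ld2.bias
 * hints that the cache line should be obtained for exclusive ownership
 * before the read-modify-write, and mmiowb() orders MMIO writes issued
 * inside the critical section before the lock is seen as released.
 */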
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
        unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;

        /* This could be optimised with ARCH_HAS_MMIOWB */
        mmiowb();
        asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
        WRITE_ONCE(*p, (tmp + 2) & ~1);
}

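/*
 * Status helpers: the lock is held whenever now_serving and next_ticket
 * disagree, and contended when they differ by more than one, i.e. when
 * at least one CPU is queued behind the current owner.
 */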
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
        long tmp = READ_ONCE(lock->lock);

        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
        long tmp = READ_ONCE(lock->lock);

        return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

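/*
 * arch_spin_value_unlocked() applies the same now_serving/next_ticket
 * test to a lock passed by value; the remaining arch_spin_*() entry
 * points are thin wrappers around the ticket helpers above.
 */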
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        __ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
{
        arch_spin_lock(lock);
}
#define arch_spin_lock_flags    arch_spin_lock_flags

#ifdef ASM_SUPPORTED

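/*
 * Reader side of the rwlock: bit 31 of the word is the writer bit and
 * the low bits count readers.  fetchadd4.acq optimistically bumps the
 * count; if the value read back is negative a writer is active, so the
 * increment is backed out with fetchadd4.rel and we spin until the word
 * turns non-negative.  While spinning, interrupts are re-enabled
 * (ssm psr.i) if the PSR.I bit in the caller-supplied flags says they
 * were enabled before the lock was taken, and disabled again
 * (rsm psr.i) before retrying.
 */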
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1,%2\n"
                "br.few 3f\n"
                "1:\n"
                "fetchadd4.rel r2 = [%0], -1;;\n"
                "(p6) ssm psr.i\n"
                "2:\n"
                "hint @pause\n"
                "ld4 r2 = [%0];;\n"
                "cmp4.lt p7,p0 = r2, r0\n"
                "(p7) br.cond.spnt.few 2b\n"
                "(p6) rsm psr.i\n"
                ";;\n"
                "3:\n"
                "fetchadd4.acq r2 = [%0], 1;;\n"
                "cmp4.lt p7,p0 = r2, r0\n"
                "(p7) br.cond.spnt.few 1b\n"
                : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
                : "p6", "p7", "r2", "memory");
}

#define arch_read_lock_flags arch_read_lock_flags
#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

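/*
 * C-only fallback for toolchains without ia64 inline-asm support: the
 * same reader protocol as above, minus the trick of re-enabling
 * interrupts while waiting.
 */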
#define arch_read_lock(rw)                                                              \
do {                                                                                    \
        arch_rwlock_t *__read_lock_ptr = (rw);                                          \
                                                                                        \
        while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {          \
                ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                        \
                while (*(volatile int *)__read_lock_ptr < 0)                            \
                        cpu_relax();                                                    \
        }                                                                               \
} while (0)

#endif /* !ASM_SUPPORTED */

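/* A reader drops the lock by decrementing the count with release semantics. */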
#define arch_read_unlock(rw)                                    \
do {                                                            \
        arch_rwlock_t *__read_lock_ptr = (rw);                  \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
} while (0)

#ifdef ASM_SUPPORTED

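/*
 * Writer side: r29 is built as 0x80000000 (a single set bit deposited at
 * position 31) and installed with cmpxchg4.acq against an expected value
 * of 0, so a writer only gets in when there are no readers and no other
 * writer.  On failure we spin reading the word until it is zero, using
 * the same conditional psr.i re-enable as the reader path, then retry
 * the cmpxchg.
 */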
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1, %2\n"
                "mov ar.ccv = r0\n"
                "dep r29 = -1, r0, 31, 1\n"
                "br.few 3f;;\n"
                "1:\n"
                "(p6) ssm psr.i\n"
                "2:\n"
                "hint @pause\n"
                "ld4 r2 = [%0];;\n"
                "cmp4.eq p0,p7 = r0, r2\n"
                "(p7) br.cond.spnt.few 2b\n"
                "(p6) rsm psr.i\n"
                ";;\n"
                "3:\n"
                "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
                "cmp4.eq p0,p7 = r0, r2\n"
                "(p7) br.cond.spnt.few 1b;;\n"
                : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
                : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock_flags arch_write_lock_flags
#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

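/*
 * Write trylock is a single cmpxchg attempt: it succeeds only if the
 * lock word was exactly 0 (no readers, no writer) and could be swapped
 * to 0x80000000 in one go.
 */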
#define arch_write_trylock(rw)                                                  \
({                                                                              \
        register long result;                                                   \
                                                                                \
        __asm__ __volatile__ (                                                  \
                "mov ar.ccv = r0\n"                                             \
                "dep r29 = -1, r0, 31, 1;;\n"                                   \
                "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"                         \
                : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");          \
        (result == 0);                                                          \
})

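/*
 * Write unlock only has to clear the writer bit, which lives in the most
 * significant byte of the (little-endian) lock word, so a single st1.rel
 * to byte 3 releases the lock; .nta is merely a non-temporal locality
 * hint.
 */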
static inline void arch_write_unlock(arch_rwlock_t *x)
{
        u8 *y = (u8 *)x;
        barrier();
        asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

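/*
 * C-only fallbacks for the writer side: spin until the word reads zero,
 * then try to cmpxchg in the writer bit (ia64_dep_mi(-1, 0, 31, 1)
 * builds the same 0x80000000 constant as the assembly version).
 */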
#define arch_write_lock(l)                                                              \
({                                                                                      \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
        __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
        do {                                                                            \
                while (*ia64_write_lock_ptr)                                            \
                        ia64_barrier();                                                 \
                ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);     \
        } while (ia64_val);                                                             \
})

#define arch_write_trylock(rw)                                          \
({                                                                      \
        __u64 ia64_val;                                                 \
        __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1);                  \
        ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);   \
        (ia64_val == 0);                                                \
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
        barrier();
        x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

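/*
 * Read trylock: snapshot the lock, build an expected value with the
 * writer bit cleared and a new value with the reader count bumped, and
 * attempt a single cmpxchg.  It fails if a writer is active or if the
 * lock changed between the snapshot and the cmpxchg.
 */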
static inline int arch_read_trylock(arch_rwlock_t *x)
{
        union {
                arch_rwlock_t lock;
                __u32 word;
        } old, new;
        old.lock = new.lock = *x;
        old.lock.write_lock = new.lock.write_lock = 0;
        ++new.lock.read_counter;
        return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#endif /*  _ASM_IA64_SPINLOCK_H */
