root/arch/arm/include/asm/cmpxchg.h

DEFINITIONS

This source file includes the following definitions:
  1. __xchg
  2. __cmpxchg
  3. __cmpxchg_local
  4. __cmpxchg64

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
        unsigned int tmp;
#endif

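        /*
         * prefetchw() fetches the cache line with intent to write (PLDW
         * where the CPU supports it), which makes the exclusive stores
         * below less likely to fail and loop.
         */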
        prefetchw((const void *)ptr);

        switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
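        /*
         * Each ARMv6+ case is a load-exclusive/store-exclusive pair:
         * ldrex reads the old value, strex stores the new one and sets
         * its status operand to 0 on success or 1 if exclusivity was
         * lost in between, in which case we branch back and retry.
         */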
#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
        case 1:
                asm volatile("@ __xchg1\n"
                "1:     ldrexb  %0, [%3]\n"
                "       strexb  %1, %2, [%3]\n"
                "       teq     %1, #0\n"
                "       bne     1b"
                        : "=&r" (ret), "=&r" (tmp)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
        case 2:
                asm volatile("@ __xchg2\n"
                "1:     ldrexh  %0, [%3]\n"
                "       strexh  %1, %2, [%3]\n"
                "       teq     %1, #0\n"
                "       bne     1b"
                        : "=&r" (ret), "=&r" (tmp)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
#endif
        case 4:
                asm volatile("@ __xchg4\n"
                "1:     ldrex   %0, [%3]\n"
                "       strex   %1, %2, [%3]\n"
                "       teq     %1, #0\n"
                "       bne     1b"
                        : "=&r" (ret), "=&r" (tmp)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
        case 1:
                raw_local_irq_save(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                raw_local_irq_restore(flags);
                break;

        case 4:
                raw_local_irq_save(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                raw_local_irq_restore(flags);
                break;
#else
        case 1:
                asm volatile("@ __xchg1\n"
                "       swpb    %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
        case 4:
                asm volatile("@ __xchg4\n"
                "       swp     %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory", "cc");
                break;
#endif
        default:
                /* Cause a link-time error, the xchg() size is not supported */
                __bad_xchg(ptr, size), ret = 0;
                break;
        }

        return ret;
}

#define xchg_relaxed(ptr, x) ({                                         \
        (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),           \
                                   sizeof(*(ptr)));                     \
})
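
/*
 * Usage sketch (illustrative only, not part of this header): claiming a
 * one-shot flag with xchg_relaxed().  The function name is hypothetical.
 * xchg_relaxed() returns the previous value, so exactly one caller
 * observes 0; the _relaxed form implies no barrier semantics.
 */
static inline int example_try_claim(unsigned int *flag)
{
        /* a non-zero previous value means someone else claimed it first */
        return xchg_relaxed(flag, 1U) == 0;
}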

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define xchg xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) ({                                     \
        (__typeof(*ptr))__cmpxchg_local_generic((ptr),                  \
                                                (unsigned long)(o),     \
                                                (unsigned long)(n),     \
                                                sizeof(*(ptr)));        \
})

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#include <asm-generic/cmpxchg.h>

#else   /* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long oldval, res;

        prefetchw((const void *)ptr);

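        /*
         * Pattern for each case below: load-exclusive the current
         * value, clear the status word, and store-exclusive the new
         * value only if the comparison succeeded.  res ends up
         * non-zero only when a matching store-exclusive failed, so
         * the do/while loop retries until the store sticks or the
         * values no longer match.
         */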
        switch (size) {
#ifndef CONFIG_CPU_V6   /* min ARCH >= ARMv6K */
        case 1:
                do {
                        asm volatile("@ __cmpxchg1\n"
                        "       ldrexb  %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexbeq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
        case 2:
                do {
                        asm volatile("@ __cmpxchg2\n"
                        "       ldrexh  %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexheq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
#endif
        case 4:
                do {
                        asm volatile("@ __cmpxchg4\n"
                        "       ldrex   %1, [%2]\n"
                        "       mov     %0, #0\n"
                        "       teq     %1, %3\n"
                        "       strexeq %0, %4, [%2]\n"
                                : "=&r" (res), "=&r" (oldval)
                                : "r" (ptr), "Ir" (old), "r" (new)
                                : "memory", "cc");
                } while (res);
                break;
        default:
                __bad_cmpxchg(ptr, size);
                oldval = 0;
        }

        return oldval;
}

#define cmpxchg_relaxed(ptr, o, n) ({                                   \
        (__typeof__(*(ptr)))__cmpxchg((ptr),                            \
                                      (unsigned long)(o),               \
                                      (unsigned long)(n),               \
                                      sizeof(*(ptr)));                  \
})
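
/*
 * Usage sketch (illustrative only, not part of this header): the
 * canonical compare-and-swap retry loop on top of cmpxchg_relaxed().
 * The function name is hypothetical, and READ_ONCE() from
 * <linux/compiler.h> is assumed visible.  cmpxchg_relaxed() returns
 * the value it found, so the update succeeded exactly when that value
 * equals the "old" we passed in.  With CONFIG_CPU_V6 only 32-bit
 * operands are supported, hence the unsigned int counter.
 */
static inline unsigned int example_saturating_inc(unsigned int *ctr,
                                                  unsigned int max)
{
        unsigned int old, new;

        do {
                old = READ_ONCE(*ctr);
                if (old == max)
                        break;          /* already saturated */
                new = old + 1;
                /* retry if another CPU modified *ctr meanwhile */
        } while (cmpxchg_relaxed(ctr, old, new) != old);

        return old;
}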

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        unsigned long ret;

        switch (size) {
#ifdef CONFIG_CPU_V6    /* min ARCH == ARMv6 */
        case 1:
        case 2:
                ret = __cmpxchg_local_generic(ptr, old, new, size);
                break;
#endif
        default:
                ret = __cmpxchg(ptr, old, new, size);
        }

        return ret;
}

#define cmpxchg_local(ptr, o, n) ({                                     \
        (__typeof(*ptr))__cmpxchg_local((ptr),                          \
                                        (unsigned long)(o),             \
                                        (unsigned long)(n),             \
                                        sizeof(*(ptr)));                \
})
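
/*
 * Usage sketch (illustrative only, not part of this header):
 * cmpxchg_local() need only be atomic with respect to the current CPU,
 * which suits per-CPU statistics raced against interrupt context.  The
 * names below are hypothetical; READ_ONCE() is assumed visible.
 */
static inline void example_local_record_max(unsigned long *pcpu_high,
                                            unsigned long sample)
{
        unsigned long old;

        do {
                old = READ_ONCE(*pcpu_high);
                if (sample <= old)
                        return;         /* current maximum still stands */
        } while (cmpxchg_local(pcpu_high, old, sample) != old);
}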

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
                                             unsigned long long old,
                                             unsigned long long new)
{
        unsigned long long oldval;
        unsigned long res;

        prefetchw(ptr);

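        /*
         * ldrexd/strexd move a doubleword through an even/odd register
         * pair; the %H operand modifier names the high register of a
         * 64-bit operand, and the "+Qo" output tells the compiler that
         * *ptr itself is both read and written here.
         */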
        __asm__ __volatile__(
"1:     ldrexd          %1, %H1, [%3]\n"
"       teq             %1, %4\n"
"       teqeq           %H1, %H4\n"
"       bne             2f\n"
"       strexd          %0, %5, %H5, [%3]\n"
"       teq             %0, #0\n"
"       bne             1b\n"
"2:"
        : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
        : "r" (ptr), "r" (old), "r" (new)
        : "cc");

        return oldval;
}

#define cmpxchg64_relaxed(ptr, o, n) ({                                 \
        (__typeof__(*(ptr)))__cmpxchg64((ptr),                          \
                                        (unsigned long long)(o),        \
                                        (unsigned long long)(n));       \
})

#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
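
/*
 * Usage sketch (illustrative only, not part of this header): a 64-bit
 * compare-and-swap works even on 32-bit ARM because ldrexd/strexd
 * transfer the doubleword atomically.  The function name is
 * hypothetical; the plain load below may tear, but a torn value simply
 * makes cmpxchg64_relaxed() fail and the loop retry.
 */
static inline void example_advance_stamp(unsigned long long *stamp,
                                         unsigned long long now)
{
        unsigned long long old;

        do {
                old = *(volatile unsigned long long *)stamp;
                if (now <= old)
                        return;         /* already moved forward */
        } while (cmpxchg64_relaxed(stamp, old, now) != old);
}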

#endif  /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */
