lib/atomic64.c


DEFINITIONS

This source file includes the following definitions.
  1. lock_addr
  2. atomic64_read
  3. atomic64_set
  4. ATOMIC64_OP
  5. ATOMIC64_OP_RETURN
  6. ATOMIC64_FETCH_OP
  7. ATOMIC64_OPS
  8. atomic64_dec_if_positive
  9. atomic64_cmpxchg
  10. atomic64_xchg
  11. atomic64_fetch_add_unless

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
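
The hash above discards the low cacheline-offset bits, folds the higher address bits together, and masks the result down to an index into the 16-entry lock array, so nearby atomic64_t variables tend to spread across different locks. A minimal user-space sketch of the same hashed-lock idea (the names and the 64-byte cache-line assumption are illustrative, not taken from this file):

#include <pthread.h>
#include <stdint.h>

#define MY_NR_LOCKS	16
#define MY_CACHE_SHIFT	6	/* assumed 64-byte cache lines */

/* One mutex per hash bucket; initialize each with pthread_mutex_init(). */
static pthread_mutex_t my_locks[MY_NR_LOCKS];

/* Map a variable's address to one of the hashed locks. */
static pthread_mutex_t *my_lock_for(const void *p)
{
	uintptr_t addr = (uintptr_t)p >> MY_CACHE_SHIFT;

	addr ^= (addr >> 8) ^ (addr >> 16);
	return &my_locks[addr & (MY_NR_LOCKS - 1)];
}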

s64 atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
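
Note that even the plain read and set take the hashed lock: on a processor without 64-bit atomic instructions, a 64-bit load or store is performed as two 32-bit accesses, so an unlocked reader could observe half of an in-progress update. A hypothetical caller simply uses the normal API:

	atomic64_t cnt = ATOMIC64_INIT(0);

	atomic64_set(&cnt, 5);
	s64 now = atomic64_read(&cnt);	/* observes 5, never a torn value */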

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(s64 a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
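
For reference, expanding ATOMIC64_OPS(add, +=) generates atomic64_add(), atomic64_add_return() and atomic64_fetch_add(). The middle one expands roughly as follows (a sketch of the preprocessor output, reformatted):

s64 atomic64_add_return(s64 a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = (v->counter += a);	/* apply "+= a" and capture the new value */
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);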

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

s64 atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
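
atomic64_dec_if_positive() returns the would-be decremented value whether or not the store happened, so a negative return means the counter was already at zero (or below) and was left untouched. A hypothetical caller using it as a "take one token if any are left" primitive (not part of this file):

/* Hypothetical helper: claim a token if the pool is non-empty. */
static bool take_token(atomic64_t *tokens)
{
	/* >= 0 exactly once per available token; -1 means the pool was empty. */
	return atomic64_dec_if_positive(tokens) >= 0;
}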

s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
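
atomic64_cmpxchg() returns the value it found, so a caller can tell whether its swap won by comparing the return value with the expected old value. A sketch of the usual compare-and-swap retry loop, here building an "atomic max" helper (illustrative, not part of this file):

/* Hypothetical helper: raise *v to at least "new", never lower it. */
static void atomic64_max(atomic64_t *v, s64 new)
{
	s64 old = atomic64_read(v);

	while (old < new) {
		s64 seen = atomic64_cmpxchg(v, old, new);

		if (seen == old)
			break;		/* our swap won */
		old = seen;		/* lost a race; retry with the fresh value */
	}
}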

s64 atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);
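
Because atomic64_xchg() hands back the previous contents in the same locked step that installs the new value, it suits read-and-reset patterns. A hypothetical example (not part of this file):

/* Hypothetical helper: atomically read and clear an accumulated total. */
static s64 drain_total(atomic64_t *total)
{
	return atomic64_xchg(total, 0);
}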

s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(atomic64_fetch_add_unless);
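
atomic64_fetch_add_unless() returns the value it observed and only applies the addition when that value differs from u. The common "increment a refcount unless it has already dropped to zero" pattern can be expressed on top of it; the kernel's atomic headers provide atomic64_add_unless() and atomic64_inc_not_zero() with this behaviour, re-derived here purely for illustration:

/* Hypothetical re-derivation of the usual fallbacks, for illustration only. */
static inline bool my_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	return atomic64_fetch_add_unless(v, a, u) != u;
}

static inline bool my_atomic64_inc_not_zero(atomic64_t *v)
{
	return my_atomic64_add_unless(v, 1, 0);
}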
