kernel/locking/osq_lock.c


DEFINITIONS

This source file includes the following definitions:
  1. encode_cpu
  2. node_cpu
  3. decode_cpu
  4. osq_wait_next
  5. osq_lock
  6. osq_unlock
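
For reference, the data structures this file operates on are declared in
include/linux/osq_lock.h. A minimal sketch (field comments paraphrased; the
exact layout may differ between kernel versions):

struct optimistic_spin_node {
        struct optimistic_spin_node *next, *prev;
        int locked;    /* 1 if lock acquired */
        int cpu;       /* encoded CPU # + 1 value; 0 means "no CPU" */
};

struct optimistic_spin_queue {
        /*
         * Stores an encoded value of the CPU # of the tail node in the
         * queue. If the queue is empty, it is set to OSQ_UNLOCKED_VAL.
         */
        atomic_t tail;
};

#define OSQ_UNLOCKED_VAL (0)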

// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
        return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
        return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
        int cpu_nr = encoded_cpu_val - 1;

        return per_cpu_ptr(&osq_node, cpu_nr);
}
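
/*
 * Example (illustrative): on CPU 3, encode_cpu() returns 4; decode_cpu(4)
 * maps back to per_cpu_ptr(&osq_node, 3), and node_cpu() on that node
 * yields 3 again. The encoded value 0 (OSQ_UNLOCKED_VAL) denotes "no CPU".
 */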

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
              struct optimistic_spin_node *node,
              struct optimistic_spin_node *prev)
{
        struct optimistic_spin_node *next = NULL;
        int curr = encode_cpu(smp_processor_id());
        int old;

        /*
         * If there is a prev node in queue, then the 'old' value will be
         * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
         * we're currently last in queue, the queue will become empty.
         */
        old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

        for (;;) {
                if (atomic_read(&lock->tail) == curr &&
                    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
                         * unlock()/unqueue().
                         */
                        break;
                }

                /*
                 * We must xchg() the @node->next value, because if we were to
                 * leave it in, a concurrent unlock()/unqueue() from
                 * @node->next might complete Step-A and think its @prev is
                 * still valid.
                 *
                 * If the concurrent unlock()/unqueue() wins the race, we'll
                 * wait for either @lock to point to us, through its Step-B, or
                 * wait for a new @node->next from its Step-C.
                 */
                if (node->next) {
                        next = xchg(&node->next, NULL);
                        if (next)
                                break;
                }

                cpu_relax();
        }

        return next;
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
        struct optimistic_spin_node *prev, *next;
        int curr = encode_cpu(smp_processor_id());
        int old;

        node->locked = 0;
        node->next = NULL;
        node->cpu = curr;

        /*
         * We need both ACQUIRE (pairs with corresponding RELEASE in
         * unlock() uncontended, or fastpath) and RELEASE (to publish
         * the node fields we just initialised) semantics when updating
         * the lock tail.
         */
        old = atomic_xchg(&lock->tail, curr);
        if (old == OSQ_UNLOCKED_VAL)
                return true;

        prev = decode_cpu(old);
        node->prev = prev;

        /*
         * osq_lock()                   unqueue
         *
         * node->prev = prev            osq_wait_next()
         * WMB                          MB
         * prev->next = node            next->prev = prev // unqueue-C
         *
         * Here 'node->prev' and 'next->prev' are the same variable and we need
         * to ensure these stores happen in-order to avoid corrupting the list.
         */
        smp_wmb();

        WRITE_ONCE(prev->next, node);

        /*
         * Normally @prev is untouchable after the above store, because at that
         * moment unlock can proceed and wipe the node element from the stack.
         *
         * However, since our nodes are static per-cpu storage, we're
         * guaranteed their existence -- this allows us to apply
         * cmpxchg in an attempt to undo our queueing.
         */

        while (!READ_ONCE(node->locked)) {
                /*
                 * If we need to reschedule, bail out so we can block.
                 * Use vcpu_is_preempted() to avoid waiting for a preempted
                 * lock holder:
                 */
                if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
                        goto unqueue;

                cpu_relax();
        }
        return true;

unqueue:
        /*
         * Step - A  -- stabilize @prev
         *
         * Undo our @prev->next assignment; this will make @prev's
         * unlock()/unqueue() wait for a next pointer since @lock points to us
         * (or later).
         */

        for (;;) {
                if (prev->next == node &&
                    cmpxchg(&prev->next, node, NULL) == node)
                        break;

                /*
                 * We can only fail the cmpxchg() racing against an unlock(),
                 * in which case we should observe @node->locked becoming
                 * true.
                 */
                if (smp_load_acquire(&node->locked))
                        return true;

                cpu_relax();

                /*
                 * Or we race against a concurrent unqueue()'s step-B, in which
                 * case its step-C will write us a new @node->prev pointer.
                 */
                prev = READ_ONCE(node->prev);
        }

        /*
         * Step - B -- stabilize @next
         *
         * Similar to unlock(), wait for @node->next or move @lock from @node
         * back to @prev.
         */

        next = osq_wait_next(lock, node, prev);
        if (!next)
                return false;

        /*
         * Step - C -- unlink
         *
         * @prev is stable because it's still waiting for a new @prev->next
         * pointer, @next is stable because our @node->next pointer is NULL and
         * it will wait in Step-A.
         */

        WRITE_ONCE(next->prev, prev);
        WRITE_ONCE(prev->next, next);

        return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
        struct optimistic_spin_node *node, *next;
        int curr = encode_cpu(smp_processor_id());

        /*
         * Fast path for the uncontended case.
         */
        if (likely(atomic_cmpxchg_release(&lock->tail, curr,
                                          OSQ_UNLOCKED_VAL) == curr))
                return;

        /*
         * Second most likely case.
         */
        node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                WRITE_ONCE(next->locked, 1);
                return;
        }

        next = osq_wait_next(lock, node, NULL);
        if (next)
                WRITE_ONCE(next->locked, 1);
}
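
/*
 * Illustrative usage sketch (not part of this file): a sleeping lock's
 * optimistic-spin path typically wraps osq_lock()/osq_unlock() as below,
 * with preemption disabled for the whole spin (cf. mutex_optimistic_spin()
 * in kernel/locking/mutex.c). The struct my_lock, my_lock_trylock() and
 * my_lock_owner_running() names are hypothetical stand-ins for the
 * lock-specific pieces.
 *
 *	static bool my_lock_optimistic_spin(struct my_lock *lock)
 *	{
 *		bool taken = false;
 *
 *		preempt_disable();
 *
 *		// Queue on this lock's OSQ; osq_lock() returns false if we
 *		// unqueued because we should reschedule instead of spinning.
 *		if (!osq_lock(&lock->osq))
 *			goto out;
 *
 *		// Spin until we take the lock or spinning stops being useful.
 *		while (!(taken = my_lock_trylock(lock))) {
 *			if (need_resched() || !my_lock_owner_running(lock))
 *				break;
 *			cpu_relax();
 *		}
 *
 *		osq_unlock(&lock->osq);
 *	out:
 *		preempt_enable();
 *		return taken;
 *	}
 */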
