root/kernel/locking/mcs_spinlock.h

DEFINITIONS

This source file includes the following definitions.
  1. mcs_spin_lock
  2. mcs_spin_unlock

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the
 * MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair and of having each CPU spin
 * on a local variable while waiting to acquire the lock. It avoids the
 * expensive cache-line bouncing that common test-and-set spin-lock
 * implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
        struct mcs_spinlock *next;
        int locked; /* 1 if lock acquired */
        int count;  /* nesting count, see qspinlock.c */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_cond_load_acquire() provides the acquire semantics
 * required so that subsequent operations happen after the
 * lock is acquired. Additionally, some architectures such as
 * ARM64 would like to do spin-waiting instead of purely
 * spinning, and smp_cond_load_acquire() provides that behavior.
 */
#define arch_mcs_spin_lock_contended(l)                                 \
do {                                                                    \
        smp_cond_load_acquire(l, VAL);                                  \
} while (0)
#endif
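
/*
 * For illustration only (a sketch, not part of the original header): with
 * the generic smp_cond_load_acquire() from <asm-generic/barrier.h>, the
 * contended wait above behaves roughly like the open-coded loop below,
 * where smp_acquire__after_ctrl_dep() supplies the ACQUIRE ordering:
 *
 *      while (!READ_ONCE(*l))
 *              cpu_relax();
 *      smp_acquire__after_ctrl_dep();
 */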

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)                               \
        smp_store_release((l), 1)
#endif
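
/*
 * Illustrative sketch (an addition for exposition; shared_data is a
 * hypothetical variable): the release above guarantees that a
 * critical-section store such as
 *
 *      *shared_data = 42;
 *      arch_mcs_spin_unlock_contended(&next->locked);
 *
 * is visible to the next lock holder by the time it observes
 * node->locked == 1, pairing with the acquire semantics of
 * arch_mcs_spin_lock_contended().
 */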

/*
 * Note: the smp_load_acquire()/smp_store_release() pair is not
 * sufficient to form a full memory barrier across CPUs on many
 * architectures (x86 being an exception) for an mcs_unlock() followed
 * by an mcs_lock(). Callers that need a full barrier across multiple
 * CPUs for such an unlock/lock pair should use
 * smp_mb__after_unlock_lock() after mcs_lock().
 */
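
/*
 * A sketch of the pattern described above (illustrative only; lock,
 * node and next_node are hypothetical caller-side names). The final
 * call upgrades the unlock+lock sequence to a full barrier:
 *
 *      mcs_spin_unlock(&lock, &node);
 *      mcs_spin_lock(&lock, &next_node);
 *      smp_mb__after_unlock_lock();
 */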

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to the node to this function in addition to the lock.
 * If the lock has already been acquired, then this will spin on
 * node->locked until the previous lock holder sets it in
 * mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
        struct mcs_spinlock *prev;

        /* Init node */
        node->locked = 0;
        node->next   = NULL;

        /*
         * We rely on the full barrier with global transitivity implied by the
         * below xchg() to order the initialization stores above against any
         * observation of @node, and to provide the ACQUIRE ordering associated
         * with a LOCK primitive.
         */
        prev = xchg(lock, node);
        if (likely(prev == NULL)) {
                /*
                 * Lock acquired; there is no need to set node->locked to 1.
                 * Threads spin only on their own node->locked value for lock
                 * acquisition. However, since this thread can acquire the
                 * lock immediately and does not proceed to spin on its own
                 * node->locked, this value won't be used. If a debug mode is
                 * needed to audit lock status, then set node->locked here.
                 */
                return;
        }
        /* There was a previous tail: queue behind it. */
        WRITE_ONCE(prev->next, node);

        /* Wait until the lock holder passes the lock down. */
        arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
        struct mcs_spinlock *next = READ_ONCE(node->next);

        if (likely(!next)) {
                /*
                 * Release the lock by setting it to NULL.
                 */
                if (likely(cmpxchg_release(lock, node, NULL) == node))
                        return;
                /* Wait until the next pointer is set. */
                while (!(next = READ_ONCE(node->next)))
                        cpu_relax();
        }

        /* Pass lock to next waiter. */
        arch_mcs_spin_unlock_contended(&next->locked);
}
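
/*
 * Illustrative usage sketch (an addition for exposition, not part of the
 * original header; mcs_example_critical_section() is a hypothetical
 * helper). The queue node lives on the caller's stack, so it must remain
 * valid until mcs_spin_unlock() returns; each waiter queues behind the
 * previous tail and spins only on its own node, which is what makes the
 * lock fair and cache-friendly.
 */
static inline void mcs_example_critical_section(struct mcs_spinlock **lock)
{
        struct mcs_spinlock node;       /* per-acquisition queue node */

        mcs_spin_lock(lock, &node);
        /* ... critical section protected by the MCS lock ... */
        mcs_spin_unlock(lock, &node);
}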

#endif /* __LINUX_MCS_SPINLOCK_H */
