root/include/asm-generic/bitops/lock.h

DEFINITIONS

This source file includes the following definitions:
  1. test_and_set_bit_lock
  2. clear_bit_unlock
  3. __clear_bit_unlock
  4. clear_bit_unlock_is_negative_byte

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @p: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(unsigned int nr,
					volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	if (READ_ONCE(*p) & mask)
		return 1;

	old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
	return !!(old & mask);
}
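
/*
 * Illustrative sketch, not part of this header: a minimal bit spinlock
 * built on test_and_set_bit_lock(). The names below (example_flags,
 * EXAMPLE_LOCK_BIT, example_lock) are hypothetical. Because the
 * READ_ONCE() fast path above bails out without an atomic RMW, this
 * loop behaves as test-and-test-and-set: spinning waiters mostly just
 * read the cacheline until the bit looks clear.
 *
 *	static unsigned long example_flags;
 *	#define EXAMPLE_LOCK_BIT	0
 *
 *	static void example_lock(void)
 *	{
 *		while (test_and_set_bit_lock(EXAMPLE_LOCK_BIT, &example_flags))
 *			cpu_relax();
 *	}
 */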

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
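
/*
 * Illustrative sketch, not part of this header: the release half of the
 * hypothetical bit spinlock sketched above. Stores made while the lock
 * was held are ordered before the bit is cleared, so the next acquirer
 * observes the critical section's writes.
 *
 *	static void example_unlock(void)
 *	{
 *		clear_bit_unlock(EXAMPLE_LOCK_BIT, &example_flags);
 *	}
 */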

/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
 * the bits in the word are protected by this lock, some archs can use weaker
 * ops to safely unlock.
 *
 * See for example x86's implementation.
 */
static inline void __clear_bit_unlock(unsigned int nr,
				      volatile unsigned long *p)
{
	unsigned long old;

	p += BIT_WORD(nr);
	old = READ_ONCE(*p);
	old &= ~BIT_MASK(nr);
	atomic_long_set_release((atomic_long_t *)p, old);
}
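
/*
 * Illustrative sketch, not part of this header, of when the weaker form
 * is safe: every bit in example_flags other than the lock bit is only
 * written while the lock is held, so the non-atomic read-modify-write
 * above cannot lose a concurrent update. All names are hypothetical,
 * and state_bit is assumed to differ from EXAMPLE_LOCK_BIT.
 *
 *	static void example_set_state_and_unlock(unsigned int state_bit)
 *	{
 *		example_flags |= BIT(state_bit);
 *		__clear_bit_unlock(EXAMPLE_LOCK_BIT, &example_flags);
 *	}
 */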

/**
 * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
 *                                     byte is negative, for unlock.
 * @nr: the bit to clear
 * @p: the address to start counting from
 *
 * This is a bit of a one-trick-pony for the filemap code, which clears
 * PG_locked and tests PG_waiters.
 */
#ifndef clear_bit_unlock_is_negative_byte
static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
						     volatile unsigned long *p)
{
	long old;
	unsigned long mask = BIT_MASK(nr);

	p += BIT_WORD(nr);
	old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
	return !!(old & BIT(7));
}
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
#endif
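
/*
 * Illustrative sketch, not part of this header: the filemap-style usage
 * this helper exists for. A single atomic op releases the lock bit and
 * reports whether bit 7, the sign bit of the bottom byte, was set; the
 * caller uses that to decide whether sleepers need waking. The names
 * (PG_locked_example and example_wake_up_waiters) are hypothetical
 * stand-ins for PG_locked/PG_waiters and the filemap wakeup path.
 *
 *	#define PG_locked_example	0
 *
 *	static void example_unlock_page(unsigned long *flags)
 *	{
 *		if (clear_bit_unlock_is_negative_byte(PG_locked_example, flags))
 *			example_wake_up_waiters(flags);
 *	}
 */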

#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
