root/include/linux/percpu-rwsem.h

DEFINITIONS

This source file includes the following definitions:
  1. percpu_down_read
  2. percpu_down_read_trylock
  3. percpu_up_read
  4. percpu_rwsem_release
  5. percpu_rwsem_acquire

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

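/*
 * Overview (editorial summary of the code below): a percpu_rw_semaphore
 * keeps a per-CPU reader count so that, while no writer is active, the
 * reader side touches only a CPU-local counter. The rcu_sync state says
 * whether a writer is (or recently was) around; when it is, readers fall
 * back to the rw_sem slowpath, and a blocked writer sleeps on the rcuwait
 * until all per-CPU reader counts have drained.
 */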
struct percpu_rw_semaphore {
        struct rcu_sync         rss;
        unsigned int __percpu   *read_count;
        struct rw_semaphore     rw_sem; /* slowpath */
        struct rcuwait          writer; /* blocked writer */
        int                     readers_block;
};

#define __DEFINE_PERCPU_RWSEM(name, is_static)                          \
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);          \
is_static struct percpu_rw_semaphore name = {                           \
        .rss = __RCU_SYNC_INITIALIZER(name.rss),                        \
        .read_count = &__percpu_rwsem_rc_##name,                        \
        .rw_sem = __RWSEM_INITIALIZER(name.rw_sem),                     \
        .writer = __RCUWAIT_INITIALIZER(name.writer),                   \
}
#define DEFINE_PERCPU_RWSEM(name)               \
        __DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name)        \
        __DEFINE_PERCPU_RWSEM(name, static)
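
/*
 * Illustrative sketch (hypothetical name, not part of this header):
 * a compile-time instance needs no percpu_init_rwsem() call, e.g.
 *
 *        DEFINE_STATIC_PERCPU_RWSEM(foo_rwsem);
 *
 * while DEFINE_PERCPU_RWSEM() gives the instance external linkage so it
 * can be declared extern elsewhere.
 */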

extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
        might_sleep();

        rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

        preempt_disable();
        /*
         * We are in an RCU-sched read-side critical section, so the writer
         * cannot both take sem->rss out of its idle (readers-fast) state and
         * start checking counters while we are here. So if we see that
         * rcu_sync_is_idle(&sem->rss) holds, we know that the writer won't
         * be checking until we're past the preempt_enable() and that once
         * the synchronize_rcu() is done, the writer will see anything we did
         * within this RCU-sched read-side critical section.
         */
        __this_cpu_inc(*sem->read_count);
        if (unlikely(!rcu_sync_is_idle(&sem->rss)))
                __percpu_down_read(sem, false); /* Unconditional memory barrier */
        /*
         * The preempt_enable() prevents the compiler from
         * bleeding the critical section out.
         */
        preempt_enable();
}

static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
        int ret = 1;

        preempt_disable();
        /*
         * Same as in percpu_down_read().
         */
        __this_cpu_inc(*sem->read_count);
        if (unlikely(!rcu_sync_is_idle(&sem->rss)))
                ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
        preempt_enable();
        /*
         * The barrier() from preempt_enable() prevents the compiler from
         * bleeding the critical section out.
         */

        if (ret)
                rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);

        return ret;
}
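
/*
 * Illustrative sketch (hypothetical name, not part of this header):
 * the trylock form returns 1 on success and 0 when a writer is active;
 * it does not block waiting for the writer, so the caller must be
 * prepared to back off:
 *
 *        if (!percpu_down_read_trylock(&foo_rwsem))
 *                return -EBUSY;
 *        ... read-side work ...
 *        percpu_up_read(&foo_rwsem);
 */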

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
        preempt_disable();
        /*
         * Same as in percpu_down_read().
         */
        if (likely(rcu_sync_is_idle(&sem->rss)))
                __this_cpu_dec(*sem->read_count);
        else
                __percpu_up_read(sem); /* Unconditional memory barrier */
        preempt_enable();

        rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}
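
/*
 * Illustrative reader sketch (hypothetical name, not part of this
 * header). In the common writer-free case the pair below costs one
 * CPU-local increment and one CPU-local decrement, with no shared
 * cacheline traffic:
 *
 *        percpu_down_read(&foo_rwsem);
 *        ... any number of readers may run here concurrently ...
 *        percpu_up_read(&foo_rwsem);
 */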

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
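
/*
 * Illustrative writer sketch (hypothetical name, not part of this
 * header). percpu_down_write() forces rcu_sync out of its idle state,
 * waits for the per-CPU reader counts to drain, and then excludes both
 * readers and other writers until percpu_up_write():
 *
 *        percpu_down_write(&foo_rwsem);
 *        ... exclusive access ...
 *        percpu_up_write(&foo_rwsem);
 */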

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
                                const char *, struct lock_class_key *);

extern void percpu_free_rwsem(struct percpu_rw_semaphore *);

#define percpu_init_rwsem(sem)                                  \
({                                                              \
        static struct lock_class_key rwsem_key;                 \
        __percpu_init_rwsem(sem, #sem, &rwsem_key);             \
})
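
/*
 * Illustrative dynamic-lifetime sketch (hypothetical struct and names,
 * not part of this header): percpu_init_rwsem() allocates the per-CPU
 * reader counters and can fail, so its return value must be checked,
 * and percpu_free_rwsem() must be called before the object is freed:
 *
 *        struct foo *foo_alloc(void)
 *        {
 *                struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *                if (!f)
 *                        return NULL;
 *                if (percpu_init_rwsem(&f->rwsem)) {
 *                        kfree(f);
 *                        return NULL;
 *                }
 *                return f;
 *        }
 */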

#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)

#define percpu_rwsem_assert_held(sem)                           \
        lockdep_assert_held(&(sem)->rw_sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
{
        lock_release(&sem->rw_sem.dep_map, 1, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        if (!read)
                atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
#endif
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
{
        lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        if (!read)
                atomic_long_set(&sem->rw_sem.owner, (long)current);
#endif
}
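
/*
 * Illustrative sketch (hypothetical flow, not part of this header):
 * percpu_rwsem_release()/percpu_rwsem_acquire() only update the lockdep
 * and optimistic-spin bookkeeping, which lets write ownership be handed
 * from one task to another, e.g. when the lock is taken in one syscall
 * and released from a later one:
 *
 *        percpu_down_write(&foo_rwsem);
 *        percpu_rwsem_release(&foo_rwsem, false, _RET_IP_);
 *        ... lock stays write-held while no task owns it for lockdep ...
 *        percpu_rwsem_acquire(&foo_rwsem, false, _RET_IP_);
 *        percpu_up_write(&foo_rwsem);
 */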

#endif
