root/include/linux/percpu_counter.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. percpu_counter_compare
  2. percpu_counter_add
  3. percpu_counter_sum_positive
  4. percpu_counter_sum
  5. percpu_counter_read
  6. percpu_counter_read_positive
  7. percpu_counter_initialized
  8. percpu_counter_init
  9. percpu_counter_destroy
  10. percpu_counter_set
  11. percpu_counter_compare
  12. __percpu_counter_compare
  13. percpu_counter_add
  14. percpu_counter_add_batch
  15. percpu_counter_read
  16. percpu_counter_read_positive
  17. percpu_counter_sum_positive
  18. percpu_counter_sum
  19. percpu_counter_initialized
  20. percpu_counter_inc
  21. percpu_counter_dec
  22. percpu_counter_sub

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 #ifndef _LINUX_PERCPU_COUNTER_H
   3 #define _LINUX_PERCPU_COUNTER_H
   4 /*
   5  * A simple "approximate counter" for use in ext2 and ext3 superblocks.
   6  *
   7  * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
   8  */
   9 
  10 #include <linux/spinlock.h>
  11 #include <linux/smp.h>
  12 #include <linux/list.h>
  13 #include <linux/threads.h>
  14 #include <linux/percpu.h>
  15 #include <linux/types.h>
  16 #include <linux/gfp.h>
  17 
  18 #ifdef CONFIG_SMP
  19 
/*
 * SMP flavour: a central s64 total plus a per-CPU array of s32 deltas.
 * Reads of ->count alone are fast but approximate; an exact value
 * requires folding in the per-CPU parts (see __percpu_counter_sum()).
 */
struct percpu_counter {
        raw_spinlock_t lock;            /* serializes updates to ->count */
        s64 count;                      /* central (approximate) total */
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters;         /* per-CPU deltas, NULL until init */
};
  28 
  29 extern int percpu_counter_batch;
  30 
  31 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
  32                           struct lock_class_key *key);
  33 
/*
 * Initialize a counter to @value.  Each invocation site gets its own
 * static lock_class_key, so lock debugging (lockdep) can tell the
 * spinlocks of different counter users apart.
 */
#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })
  40 
  41 void percpu_counter_destroy(struct percpu_counter *fbc);
  42 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
  43 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
  44                               s32 batch);
  45 s64 __percpu_counter_sum(struct percpu_counter *fbc);
  46 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
  47 
/*
 * Compare the counter against @rhs using the global default batch size.
 * Returns the ordering result from __percpu_counter_compare()
 * (negative / zero / positive, as in the UP variant below).
 */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
  52 
/*
 * Add @amount to the counter, folding per-CPU deltas into ->count
 * whenever they exceed the global default batch size.
 */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
  57 
  58 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
  59 {
  60         s64 ret = __percpu_counter_sum(fbc);
  61         return ret < 0 ? 0 : ret;
  62 }
  63 
/*
 * Exact (but slower) sum of the counter; may be negative.
 */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}
  68 
/*
 * Fast, approximate read: returns only the central ->count, ignoring
 * any per-CPU deltas that have not been folded in yet.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}
  73 
  74 /*
  75  * It is possible for the percpu_counter_read() to return a small negative
  76  * number for some counter which should never be negative.
  77  *
  78  */
  79 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
  80 {
  81         /* Prevent reloads of fbc->count */
  82         s64 ret = READ_ONCE(fbc->count);
  83 
  84         if (ret >= 0)
  85                 return ret;
  86         return 0;
  87 }
  88 
  89 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
  90 {
  91         return (fbc->counters != NULL);
  92 }
  93 
  94 #else /* !CONFIG_SMP */
  95 
/* UP flavour: a single CPU, so a plain s64 holds the exact value. */
struct percpu_counter {
        s64 count;
};
  99 
/*
 * Initialize the counter to @amount.  Nothing is allocated in the UP
 * case, so @gfp is accepted for interface parity but unused, and the
 * function cannot fail (always returns 0).
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                                      gfp_t gfp)
{
        fbc->count = amount;
        return 0;
}
 106 
/* Nothing to tear down in the UP case; counterpart of init() above. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
 110 
/* Overwrite the counter with @amount. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}
 115 
 116 static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 117 {
 118         if (fbc->count > rhs)
 119                 return 1;
 120         else if (fbc->count < rhs)
 121                 return -1;
 122         else
 123                 return 0;
 124 }
 125 
/*
 * UP stub: the counter is always exact, so @batch is irrelevant and
 * the comparison delegates straight to percpu_counter_compare().
 */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        return percpu_counter_compare(fbc, rhs);
}
 131 
/*
 * Add @amount to the counter.  Preemption is disabled so the
 * read-modify-write of ->count cannot be interleaved with another
 * task's update on this CPU.
 *
 * NOTE(review): preempt_disable() does not mask interrupts; updates
 * from IRQ context would need their own protection — confirm callers.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}
 139 
/*
 * UP stub: there is no per-CPU batching, so @batch is ignored and the
 * add happens directly.
 */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}
 145 
/* Read the counter; exact in the UP case. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}
 150 
 151 /*
 152  * percpu_counter is intended to track positive numbers. In the UP case the
 153  * number should never be negative.
 154  */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        /* No clamping: per the comment above, the UP value stays >= 0. */
        return fbc->count;
}
 159 
 160 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 161 {
 162         return percpu_counter_read_positive(fbc);
 163 }
 164 
 165 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 166 {
 167         return percpu_counter_read(fbc);
 168 }
 169 
/* UP init performs no allocation, so a counter is always "initialized". */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
        return true;
}
 174 
 175 #endif  /* CONFIG_SMP */
 176 
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}
 181 
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}
 186 
/*
 * Subtract @amount from the counter.
 * NOTE(review): negating S64_MIN overflows s64; callers presumably
 * never pass it — confirm if a caller can feed untrusted values here.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}
 191 
 192 #endif /* _LINUX_PERCPU_COUNTER_H */

/* [<][>][^][v][top][bottom][index][help] */