root/include/linux/sbitmap.h


DEFINITIONS

This source file includes the following definitions:
  1. sbitmap_free
  2. __sbitmap_for_each_set
  3. sbitmap_for_each_set
  4. __sbitmap_word
  5. sbitmap_set_bit
  6. sbitmap_clear_bit
  7. sbitmap_deferred_clear_bit
  8. sbitmap_clear_bit_unlock
  9. sbitmap_test_bit
  10. sbitmap_queue_free
  11. sbitmap_queue_get
  12. sbitmap_queue_get_shallow
  13. sbq_index_inc
  14. sbq_index_atomic_inc
  15. sbq_wait_ptr

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
        /**
         * @depth: Number of bits being used in @word/@cleared
         */
        unsigned long depth;

        /**
         * @word: word holding free bits
         */
        unsigned long word ____cacheline_aligned_in_smp;

        /**
         * @cleared: word holding cleared bits
         */
        unsigned long cleared ____cacheline_aligned_in_smp;

        /**
         * @swap_lock: Held while swapping word <-> cleared
         */
        spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
        /**
         * @depth: Number of bits used in the whole bitmap.
         */
        unsigned int depth;

        /**
         * @shift: log2(number of bits used per word)
         */
        unsigned int shift;

        /**
         * @map_nr: Number of words (cachelines) being used for the bitmap.
         */
        unsigned int map_nr;

        /**
         * @map: Allocated bitmap.
         */
        struct sbitmap_word *map;
};
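
/*
 * Illustrative sketch (not part of the API): the relationship between @depth,
 * @shift, and @map_nr. With @shift == 6 (64 bits per word, a plausible default
 * on a 64-bit machine) and @depth == 200:
 *
 *	map_nr = DIV_ROUND_UP(200, 1U << 6) = 4 words
 *
 * The first three words use all 64 bits; the last word's per-word @depth
 * covers only the remaining 200 - 192 = 8 bits.
 */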

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
        /**
         * @wait_cnt: Number of frees remaining before we wake up.
         */
        atomic_t wait_cnt;

        /**
         * @wait: Wait queue.
         */
        wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
        /**
         * @sb: Scalable bitmap.
         */
        struct sbitmap sb;

        /**
         * @alloc_hint: Cache of last successfully allocated or freed bit.
         *
         * This is per-cpu, which allows multiple users to stick to different
         * cachelines until the map is exhausted.
         */
        unsigned int __percpu *alloc_hint;

        /**
         * @wake_batch: Number of bits which must be freed before we wake up any
         * waiters.
         */
        unsigned int wake_batch;

        /**
         * @wake_index: Next wait queue in @ws to wake up.
         */
        atomic_t wake_index;

        /**
         * @ws: Wait queues.
         */
        struct sbq_wait_state *ws;

        /**
         * @ws_active: count of currently active ws waitqueues
         */
        atomic_t ws_active;

        /**
         * @round_robin: Allocate bits in strict round-robin order.
         */
        bool round_robin;

        /**
         * @min_shallow_depth: The minimum shallow depth which may be passed to
         * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
         */
        unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
                      gfp_t flags, int node);

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
        kfree(sb->map);
        sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
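
/*
 * Illustrative sketch (not part of this header): a minimal allocate/free
 * cycle. The depth of 128 and the -1 shift (pick a good default) are
 * arbitrary example values.
 *
 *	struct sbitmap sb;
 *	int nr;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 *	nr = sbitmap_get(&sb, 0, false);
 *	if (nr >= 0) {
 *		... nr is now an exclusively owned bit ...
 *		sbitmap_clear_bit(&sb, nr);
 *	}
 *
 *	sbitmap_free(&sb);
 */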

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
                        unsigned long shallow_depth);
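
/*
 * Illustrative sketch of the two-class scheme described above; the split into
 * a high- and a low-priority caller is the hypothetical part:
 *
 *	high-priority allocation, may use every bit of a word:
 *		nr = sbitmap_get(sb, hint, false);
 *
 *	low-priority allocation, limited to half of each word:
 *		nr = sbitmap_get_shallow(sb, hint, 1UL << (sb->shift - 1));
 */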

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

/**
 * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
 * sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is clear, false otherwise.
 */
bool sbitmap_any_bit_clear(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
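
/*
 * Worked example (illustrative only): with (sb)->shift == 6, bit number 75
 * decomposes as
 *
 *	SB_NR_TO_INDEX(sb, 75) = 75 >> 6 = 1	(second word)
 *	SB_NR_TO_BIT(sb, 75)   = 75 & 63 = 11	(bit 11 within that word)
 *
 * and (index << shift) + bit = (1 << 6) + 11 = 75 recovers the bit number,
 * as __sbitmap_for_each_set() does below.
 */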

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
                                          unsigned int start,
                                          sb_for_each_fn fn, void *data)
{
        unsigned int index;
        unsigned int nr;
        unsigned int scanned = 0;

        if (start >= sb->depth)
                start = 0;
        index = SB_NR_TO_INDEX(sb, start);
        nr = SB_NR_TO_BIT(sb, start);

        while (scanned < sb->depth) {
                unsigned long word;
                unsigned int depth = min_t(unsigned int,
                                           sb->map[index].depth - nr,
                                           sb->depth - scanned);

                scanned += depth;
                word = sb->map[index].word & ~sb->map[index].cleared;
                if (!word)
                        goto next;

                /*
                 * On the first iteration of the outer loop, we need to add the
                 * bit offset back to the size of the word for find_next_bit().
                 * On all other iterations, nr is zero, so this is a noop.
                 */
                depth += nr;
                while (1) {
                        nr = find_next_bit(&word, depth, nr);
                        if (nr >= depth)
                                break;
                        if (!fn(sb, (index << sb->shift) + nr, data))
                                return;

                        nr++;
                }
next:
                nr = 0;
                if (++index >= sb->map_nr)
                        index = 0;
        }
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
                                        void *data)
{
        __sbitmap_for_each_set(sb, 0, fn, data);
}
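
/*
 * Illustrative sketch (not part of this header): counting set bits with a
 * callback. The callback returns true to keep iterating.
 *
 *	static bool count_set(struct sbitmap *sb, unsigned int bitnr, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_set, &count);
 */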

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
                                            unsigned int bitnr)
{
        return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
        set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
        clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
        unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

        set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
                                            unsigned int bitnr)
{
        clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
        return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
                            int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
        kfree(sbq->ws);
        free_percpu(sbq->alloc_hint);
        sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
                                unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
                                    unsigned int *cpu)
{
        int nr;

        *cpu = get_cpu();
        nr = __sbitmap_queue_get(sbq);
        put_cpu();
        return nr;
}
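
/*
 * Illustrative sketch (not part of this header): allocating and freeing a bit
 * through the queue interface. The depth of 64 is an arbitrary example value.
 *
 *	struct sbitmap_queue sbq;
 *	unsigned int cpu;
 *	int nr;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 *	nr = sbitmap_queue_get(&sbq, &cpu);
 *	if (nr >= 0) {
 *		... use bit nr as a tag ...
 *		sbitmap_queue_clear(&sbq, nr, cpu);
 *	}
 *
 *	sbitmap_queue_free(&sbq);
 */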

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
                                            unsigned int *cpu,
                                            unsigned int shallow_depth)
{
        int nr;

        *cpu = get_cpu();
        nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
        put_cpu();
        return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
                                     unsigned int min_shallow_depth);
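
/*
 * Illustrative sketch (not part of this header): pairing
 * sbitmap_queue_min_shallow_depth() with the shallow getters. The depth of 8
 * is an arbitrary example value; the point is that no later call passes a
 * smaller shallow depth than the one announced here.
 *
 *	sbitmap_queue_min_shallow_depth(&sbq, 8);
 *	...
 *	nr = sbitmap_queue_get_shallow(&sbq, &cpu, 8);
 */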

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu);

/*
 * Advance a wait queue index with wraparound. The masking works because
 * SBQ_WAIT_QUEUES is a power of two.
 */
static inline int sbq_index_inc(int index)
{
        return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

/*
 * Best-effort atomic increment: if the cmpxchg loses a race, someone else
 * already advanced the index, which is good enough for a rotation hint.
 */
static inline void sbq_index_atomic_inc(atomic_t *index)
{
        int old = atomic_read(index);
        int new = sbq_index_inc(old);
        atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
                                                  atomic_t *wait_index)
{
        struct sbq_wait_state *ws;

        ws = &sbq->ws[atomic_read(wait_index)];
        sbq_index_atomic_inc(wait_index);
        return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
        struct sbitmap_queue *sbq;      /* if set, sbq_wait is accounted */
        struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)                                                   \
        struct sbq_wait name = {                                                \
                .sbq = NULL,                                                    \
                .wait = {                                                       \
                        .private        = current,                              \
                        .func           = autoremove_wake_function,             \
                        .entry          = LIST_HEAD_INIT((name).wait.entry),    \
                }                                                               \
        }

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
                                struct sbq_wait_state *ws,
                                struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
                                struct sbq_wait *sbq_wait);
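
/*
 * Illustrative sketch (not part of this header): sleeping until a bit becomes
 * free, roughly in the style of blk-mq's tag waiting. The my_wait_index
 * counter is a hypothetical per-user atomic_t as described at sbq_wait_ptr().
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	unsigned int cpu;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(sbq, &my_wait_index);
 *	for (;;) {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = sbitmap_queue_get(sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	}
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */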

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
                            struct sbq_wait_state *ws,
                            struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */
