root/mm/mempool.c


DEFINITIONS

This source file includes the following definitions.
  1. poison_error
  2. __check_element
  3. check_element
  4. __poison_element
  5. poison_element
  6. check_element
  7. poison_element
  8. kasan_poison_element
  9. kasan_unpoison_element
  10. add_element
  11. remove_element
  12. mempool_exit
  13. mempool_destroy
  14. mempool_init_node
  15. mempool_init
  16. mempool_create
  17. mempool_create_node
  18. mempool_resize
  19. mempool_alloc
  20. mempool_free
  21. mempool_alloc_slab
  22. mempool_free_slab
  23. mempool_kmalloc
  24. mempool_kfree
  25. mempool_alloc_pages
  26. mempool_free_pages

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *  linux/mm/mempool.c
   4  *
   5  *  memory buffer pool support. Such pools are mostly used
   6  *  for guaranteed, deadlock-free memory allocations during
   7  *  extreme VM load.
   8  *
   9  *  started by Ingo Molnar, Copyright (C) 2001
  10  *  debugging by David Rientjes, Copyright (C) 2015
  11  */
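
/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * lifecycle of a mempool backed by a slab cache.  The names io_cache,
 * io_pool and struct io_elem are hypothetical.
 *
 *	struct kmem_cache *io_cache;
 *	mempool_t *io_pool;
 *	void *elem;
 *
 *	io_cache = kmem_cache_create("io_elem", sizeof(struct io_elem),
 *				     0, 0, NULL);
 *	io_pool = mempool_create(16, mempool_alloc_slab,
 *				 mempool_free_slab, io_cache);
 *
 *	elem = mempool_alloc(io_pool, GFP_NOIO);
 *	...
 *	mempool_free(elem, io_pool);
 *	mempool_destroy(io_pool);
 */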
  12 
  13 #include <linux/mm.h>
  14 #include <linux/slab.h>
  15 #include <linux/highmem.h>
  16 #include <linux/kasan.h>
  17 #include <linux/kmemleak.h>
  18 #include <linux/export.h>
  19 #include <linux/mempool.h>
  20 #include <linux/blkdev.h>
  21 #include <linux/writeback.h>
  22 #include "slab.h"
  23 
  24 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
  25 static void poison_error(mempool_t *pool, void *element, size_t size,
  26                          size_t byte)
  27 {
  28         const int nr = pool->curr_nr;
  29         const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
  30         const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
  31         int i;
  32 
  33         pr_err("BUG: mempool element poison mismatch\n");
  34         pr_err("Mempool %p size %zu\n", pool, size);
  35         pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
  36         for (i = start; i < end; i++)
  37                 pr_cont("%x ", *(u8 *)(element + i));
  38         pr_cont("%s\n", end < size ? "..." : "");
  39         dump_stack();
  40 }
  41 
  42 static void __check_element(mempool_t *pool, void *element, size_t size)
  43 {
  44         u8 *obj = element;
  45         size_t i;
  46 
  47         for (i = 0; i < size; i++) {
  48                 u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
  49 
  50                 if (obj[i] != exp) {
  51                         poison_error(pool, element, size, i);
  52                         return;
  53                 }
  54         }
  55         memset(obj, POISON_INUSE, size);
  56 }
  57 
  58 static void check_element(mempool_t *pool, void *element)
  59 {
  60         /* Mempools backed by slab allocator */
  61         if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
  62                 __check_element(pool, element, ksize(element));
  63 
  64         /* Mempools backed by page allocator */
  65         if (pool->free == mempool_free_pages) {
  66                 int order = (int)(long)pool->pool_data;
  67                 void *addr = kmap_atomic((struct page *)element);
  68 
  69                 __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
  70                 kunmap_atomic(addr);
  71         }
  72 }
  73 
  74 static void __poison_element(void *element, size_t size)
  75 {
  76         u8 *obj = element;
  77 
  78         memset(obj, POISON_FREE, size - 1);
  79         obj[size - 1] = POISON_END;
  80 }
  81 
  82 static void poison_element(mempool_t *pool, void *element)
  83 {
  84         /* Mempools backed by slab allocator */
  85         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
  86                 __poison_element(element, ksize(element));
  87 
  88         /* Mempools backed by page allocator */
  89         if (pool->alloc == mempool_alloc_pages) {
  90                 int order = (int)(long)pool->pool_data;
  91                 void *addr = kmap_atomic((struct page *)element);
  92 
  93                 __poison_element(addr, 1UL << (PAGE_SHIFT + order));
  94                 kunmap_atomic(addr);
  95         }
  96 }
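
/*
 * Illustration (added for clarity, not from the original file): for an
 * element of size N, __poison_element() writes and __check_element()
 * verifies the following byte layout:
 *
 *	offset:  0            1            ...  N-2          N-1
 *	value:   POISON_FREE  POISON_FREE  ...  POISON_FREE  POISON_END
 *
 * The first mismatching offset is reported through poison_error(), which
 * dumps the bytes surrounding it.
 */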
  97 #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
  98 static inline void check_element(mempool_t *pool, void *element)
  99 {
 100 }
 101 static inline void poison_element(mempool_t *pool, void *element)
 102 {
 103 }
 104 #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 105 
 106 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 107 {
 108         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 109                 kasan_poison_kfree(element, _RET_IP_);
 110         if (pool->alloc == mempool_alloc_pages)
 111                 kasan_free_pages(element, (unsigned long)pool->pool_data);
 112 }
 113 
 114 static void kasan_unpoison_element(mempool_t *pool, void *element)
 115 {
 116         if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 117                 kasan_unpoison_slab(element);
 118         if (pool->alloc == mempool_alloc_pages)
 119                 kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 120 }
 121 
 122 static __always_inline void add_element(mempool_t *pool, void *element)
 123 {
 124         BUG_ON(pool->curr_nr >= pool->min_nr);
 125         poison_element(pool, element);
 126         kasan_poison_element(pool, element);
 127         pool->elements[pool->curr_nr++] = element;
 128 }
 129 
 130 static void *remove_element(mempool_t *pool)
 131 {
 132         void *element = pool->elements[--pool->curr_nr];
 133 
 134         BUG_ON(pool->curr_nr < 0);
 135         kasan_unpoison_element(pool, element);
 136         check_element(pool, element);
 137         return element;
 138 }
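
/*
 * Note (added for clarity): pool->elements is used as a LIFO stack --
 * add_element() pushes at index curr_nr and remove_element() pops the
 * most recently added element, so the reserve is reused cache-warm.
 */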
 139 
 140 /**
 141  * mempool_exit - exit a mempool initialized with mempool_init()
 142  * @pool:      pointer to the memory pool which was initialized with
 143  *             mempool_init().
 144  *
 145  * Free all reserved elements in @pool (but not @pool itself).  This
 146  * function only sleeps if the free_fn() function sleeps.
 147  *
 148  * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 149  * kzalloc()).
 150  */
 151 void mempool_exit(mempool_t *pool)
 152 {
 153         while (pool->curr_nr) {
 154                 void *element = remove_element(pool);
 155                 pool->free(element, pool->pool_data);
 156         }
 157         kfree(pool->elements);
 158         pool->elements = NULL;
 159 }
 160 EXPORT_SYMBOL(mempool_exit);
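
/*
 * Usage sketch (illustrative, not part of the original file): tearing
 * down a pool embedded in a larger object.  struct my_dev is
 * hypothetical; the pattern relies on the guarantee above that
 * mempool_exit() may run on a zeroed, never-initialized pool.
 *
 *	struct my_dev {
 *		mempool_t elem_pool;
 *	};
 *
 *	static void my_dev_free(struct my_dev *dev)
 *	{
 *		mempool_exit(&dev->elem_pool);
 *		kfree(dev);
 *	}
 */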
 161 
 162 /**
 163  * mempool_destroy - deallocate a memory pool
 164  * @pool:      pointer to the memory pool which was allocated via
 165  *             mempool_create().
 166  *
 167  * Free all reserved elements in @pool and @pool itself.  This function
 168  * only sleeps if the free_fn() function sleeps.
 169  */
 170 void mempool_destroy(mempool_t *pool)
 171 {
 172         if (unlikely(!pool))
 173                 return;
 174 
 175         mempool_exit(pool);
 176         kfree(pool);
 177 }
 178 EXPORT_SYMBOL(mempool_destroy);
 179 
 180 int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
 181                       mempool_free_t *free_fn, void *pool_data,
 182                       gfp_t gfp_mask, int node_id)
 183 {
 184         spin_lock_init(&pool->lock);
 185         pool->min_nr    = min_nr;
 186         pool->pool_data = pool_data;
 187         pool->alloc     = alloc_fn;
 188         pool->free      = free_fn;
 189         init_waitqueue_head(&pool->wait);
 190 
 191         pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
 192                                             gfp_mask, node_id);
 193         if (!pool->elements)
 194                 return -ENOMEM;
 195 
 196         /*
 197          * First pre-allocate the guaranteed number of buffers.
 198          */
 199         while (pool->curr_nr < pool->min_nr) {
 200                 void *element;
 201 
 202                 element = pool->alloc(gfp_mask, pool->pool_data);
 203                 if (unlikely(!element)) {
 204                         mempool_exit(pool);
 205                         return -ENOMEM;
 206                 }
 207                 add_element(pool, element);
 208         }
 209 
 210         return 0;
 211 }
 212 EXPORT_SYMBOL(mempool_init_node);
 213 
 214 /**
 215  * mempool_init - initialize a memory pool
 216  * @pool:      pointer to the memory pool that should be initialized
 217  * @min_nr:    the minimum number of elements guaranteed to be
 218  *             allocated for this pool.
 219  * @alloc_fn:  user-defined element-allocation function.
 220  * @free_fn:   user-defined element-freeing function.
 221  * @pool_data: optional private data available to the user-defined functions.
 222  *
 223  * Like mempool_create(), but initializes the pool in place (i.e. embedded
 224  * in another structure).
 225  *
 226  * Return: %0 on success, negative error code otherwise.
 227  */
 228 int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
 229                  mempool_free_t *free_fn, void *pool_data)
 230 {
 231         return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
 232                                  pool_data, GFP_KERNEL, NUMA_NO_NODE);
 234 }
 235 EXPORT_SYMBOL(mempool_init);
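
/*
 * Usage sketch (illustrative, not part of the original file):
 * initializing the embedded pool from the teardown example above;
 * my_cache is hypothetical.
 *
 *	int err;
 *
 *	err = mempool_init(&dev->elem_pool, 4, mempool_alloc_slab,
 *			   mempool_free_slab, my_cache);
 *	if (err)
 *		return err;
 */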
 236 
 237 /**
 238  * mempool_create - create a memory pool
 239  * @min_nr:    the minimum number of elements guaranteed to be
 240  *             allocated for this pool.
 241  * @alloc_fn:  user-defined element-allocation function.
 242  * @free_fn:   user-defined element-freeing function.
 243  * @pool_data: optional private data available to the user-defined functions.
 244  *
 245  * This function creates and allocates a guaranteed-size, preallocated
 246  * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 247  * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 248  * functions might sleep - as long as the mempool_alloc() function is not called
 249  * from IRQ contexts.
 250  *
 251  * Return: pointer to the created memory pool object or %NULL on error.
 252  */
 253 mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 254                                 mempool_free_t *free_fn, void *pool_data)
 255 {
 256         return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
 257                                    GFP_KERNEL, NUMA_NO_NODE);
 258 }
 259 EXPORT_SYMBOL(mempool_create);
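
/*
 * Note (added for clarity): include/linux/mempool.h wraps this call for
 * the common backends defined at the bottom of this file, e.g.:
 *
 *	pool = mempool_create_slab_pool(min_nr, cache);
 *	pool = mempool_create_kmalloc_pool(min_nr, size);
 *	pool = mempool_create_page_pool(min_nr, order);
 */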
 260 
 261 mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 262                                mempool_free_t *free_fn, void *pool_data,
 263                                gfp_t gfp_mask, int node_id)
 264 {
 265         mempool_t *pool;
 266 
 267         pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
 268         if (!pool)
 269                 return NULL;
 270 
 271         if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
 272                               gfp_mask, node_id)) {
 273                 kfree(pool);
 274                 return NULL;
 275         }
 276 
 277         return pool;
 278 }
 279 EXPORT_SYMBOL(mempool_create_node);
 280 
 281 /**
 282  * mempool_resize - resize an existing memory pool
 283  * @pool:       pointer to the memory pool which was allocated via
 284  *              mempool_create().
 285  * @new_min_nr: the new minimum number of elements guaranteed to be
 286  *              allocated for this pool.
 287  *
 288  * This function shrinks/grows the pool. In the case of growing,
 289  * it cannot be guaranteed that the pool will be grown to the new
 290  * size immediately, but new mempool_free() calls will refill it.
 291  * This function may sleep.
 292  *
 293  * Note that the caller must guarantee that no mempool_destroy() is called
 294  * while this function is running. mempool_alloc() & mempool_free()
 295  * might be called (e.g. from IRQ contexts) while this function executes.
 296  *
 297  * Return: %0 on success, negative error code otherwise.
 298  */
 299 int mempool_resize(mempool_t *pool, int new_min_nr)
 300 {
 301         void *element;
 302         void **new_elements;
 303         unsigned long flags;
 304 
 305         BUG_ON(new_min_nr <= 0);
 306         might_sleep();
 307 
 308         spin_lock_irqsave(&pool->lock, flags);
 309         if (new_min_nr <= pool->min_nr) {
 310                 while (new_min_nr < pool->curr_nr) {
 311                         element = remove_element(pool);
 312                         spin_unlock_irqrestore(&pool->lock, flags);
 313                         pool->free(element, pool->pool_data);
 314                         spin_lock_irqsave(&pool->lock, flags);
 315                 }
 316                 pool->min_nr = new_min_nr;
 317                 goto out_unlock;
 318         }
 319         spin_unlock_irqrestore(&pool->lock, flags);
 320 
 321         /* Grow the pool */
 322         new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
 323                                      GFP_KERNEL);
 324         if (!new_elements)
 325                 return -ENOMEM;
 326 
 327         spin_lock_irqsave(&pool->lock, flags);
 328         if (unlikely(new_min_nr <= pool->min_nr)) {
 329                 /* Raced, other resize will do our work */
 330                 spin_unlock_irqrestore(&pool->lock, flags);
 331                 kfree(new_elements);
 332                 goto out;
 333         }
 334         memcpy(new_elements, pool->elements,
 335                         pool->curr_nr * sizeof(*new_elements));
 336         kfree(pool->elements);
 337         pool->elements = new_elements;
 338         pool->min_nr = new_min_nr;
 339 
 340         while (pool->curr_nr < pool->min_nr) {
 341                 spin_unlock_irqrestore(&pool->lock, flags);
 342                 element = pool->alloc(GFP_KERNEL, pool->pool_data);
 343                 if (!element)
 344                         goto out;
 345                 spin_lock_irqsave(&pool->lock, flags);
 346                 if (pool->curr_nr < pool->min_nr) {
 347                         add_element(pool, element);
 348                 } else {
 349                         spin_unlock_irqrestore(&pool->lock, flags);
 350                         pool->free(element, pool->pool_data);   /* Raced */
 351                         goto out;
 352                 }
 353         }
 354 out_unlock:
 355         spin_unlock_irqrestore(&pool->lock, flags);
 356 out:
 357         return 0;
 358 }
 359 EXPORT_SYMBOL(mempool_resize);
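
/*
 * Usage sketch (illustrative, not part of the original file): doubling a
 * pool's reserve; req_pool and depth are hypothetical.
 *
 *	if (mempool_resize(dev->req_pool, 2 * depth))
 *		pr_warn("request pool could not be grown\n");
 *
 * Shrinking frees surplus elements before returning; growing may return
 * 0 before the reserve is fully refilled (see the comment above), with
 * later mempool_free() calls topping it up.
 */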
 360 
 361 /**
 362  * mempool_alloc - allocate an element from a specific memory pool
 363  * @pool:      pointer to the memory pool which was allocated via
 364  *             mempool_create().
 365  * @gfp_mask:  the usual allocation bitmask.
 366  *
 367  * This function only sleeps if the alloc_fn() function sleeps or
 368  * returns NULL. Note that due to preallocation, this function
 369  * *never* fails when called from process context with a gfp_mask
 370  * that allows direct reclaim (it might fail from IRQ context).
 371  * Note: using __GFP_ZERO is not supported.
 372  *
 373  * Return: pointer to the allocated element or %NULL on error.
 374  */
 375 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
 376 {
 377         void *element;
 378         unsigned long flags;
 379         wait_queue_entry_t wait;
 380         gfp_t gfp_temp;
 381 
 382         VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
 383         might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
 384 
 385         gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
 386         gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
 387         gfp_mask |= __GFP_NOWARN;       /* failures are OK */
 388 
 389         gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
 390 
 391 repeat_alloc:
 392 
 393         element = pool->alloc(gfp_temp, pool->pool_data);
 394         if (likely(element != NULL))
 395                 return element;
 396 
 397         spin_lock_irqsave(&pool->lock, flags);
 398         if (likely(pool->curr_nr)) {
 399                 element = remove_element(pool);
 400                 spin_unlock_irqrestore(&pool->lock, flags);
 401                 /* paired with rmb in mempool_free(), read comment there */
 402                 smp_wmb();
 403                 /*
 404                  * Update the allocation stack trace as this is more useful
 405                  * for debugging.
 406                  */
 407                 kmemleak_update_trace(element);
 408                 return element;
 409         }
 410 
 411         /*
 412          * We use gfp mask w/o direct reclaim or IO for the first round.  If
 413          * alloc failed with that and @pool was empty, retry immediately.
 414          */
 415         if (gfp_temp != gfp_mask) {
 416                 spin_unlock_irqrestore(&pool->lock, flags);
 417                 gfp_temp = gfp_mask;
 418                 goto repeat_alloc;
 419         }
 420 
 421         /* We must not sleep if !__GFP_DIRECT_RECLAIM */
 422         if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
 423                 spin_unlock_irqrestore(&pool->lock, flags);
 424                 return NULL;
 425         }
 426 
 427         /* Let's wait for someone else to return an element to @pool */
 428         init_wait(&wait);
 429         prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
 430 
 431         spin_unlock_irqrestore(&pool->lock, flags);
 432 
 433         /*
 434          * FIXME: this should be io_schedule().  The timeout is there as a
 435          * workaround for some DM problems in 2.6.18.
 436          */
 437         io_schedule_timeout(5*HZ);
 438 
 439         finish_wait(&pool->wait, &wait);
 440         goto repeat_alloc;
 441 }
 442 EXPORT_SYMBOL(mempool_alloc);
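
/*
 * Usage sketch (illustrative, not part of the original file).  With
 * __GFP_DIRECT_RECLAIM set (e.g. GFP_NOIO) the loop above waits for a
 * freed element instead of failing:
 *
 *	elem = mempool_alloc(pool, GFP_NOIO);
 *
 * Without it (e.g. GFP_NOWAIT) the caller must check for NULL:
 *
 *	elem = mempool_alloc(pool, GFP_NOWAIT);
 *	if (!elem)
 *		return -ENOMEM;
 */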
 443 
 444 /**
 445  * mempool_free - return an element to the pool.
 446  * @element:   pool element pointer.
 447  * @pool:      pointer to the memory pool which was allocated via
 448  *             mempool_create().
 449  *
 450  * This function only sleeps if the free_fn() function sleeps.
 451  */
 452 void mempool_free(void *element, mempool_t *pool)
 453 {
 454         unsigned long flags;
 455 
 456         if (unlikely(element == NULL))
 457                 return;
 458 
 459         /*
 460          * Paired with the wmb in mempool_alloc().  The preceding read is
 461          * for @element and the following @pool->curr_nr.  This ensures
 462          * that the visible value of @pool->curr_nr is from after the
 463          * allocation of @element.  This is necessary for fringe cases
 464          * where @element was passed to this task without going through
 465          * barriers.
 466          *
 467          * For example, assume @p is %NULL at the beginning and one task
 468          * performs "p = mempool_alloc(...);" while another task is doing
 469          * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
 470          * may end up using curr_nr value which is from before allocation
 471          * of @p without the following rmb.
 472          */
 473         smp_rmb();
 474 
 475         /*
 476          * For correctness, we need a test which is guaranteed to trigger
 477          * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
 478          * without locking achieves that and refilling as soon as possible
 479          * is desirable.
 480          *
 481          * Because curr_nr visible here is always a value after the
 482          * allocation of @element, any task which decremented curr_nr below
 483          * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
 484          * incremented to min_nr afterwards.  If curr_nr gets incremented
 485          * to min_nr after the allocation of @element, the elements
 486          * allocated after that are subject to the same guarantee.
 487          *
 488          * Waiters happen iff curr_nr is 0 and the above guarantee also
 489          * ensures that there will be frees which return elements to the
 490          * pool waking up the waiters.
 491          */
 492         if (unlikely(pool->curr_nr < pool->min_nr)) {
 493                 spin_lock_irqsave(&pool->lock, flags);
 494                 if (likely(pool->curr_nr < pool->min_nr)) {
 495                         add_element(pool, element);
 496                         spin_unlock_irqrestore(&pool->lock, flags);
 497                         wake_up(&pool->wait);
 498                         return;
 499                 }
 500                 spin_unlock_irqrestore(&pool->lock, flags);
 501         }
 502         pool->free(element, pool->pool_data);
 503 }
 504 EXPORT_SYMBOL(mempool_free);
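
/*
 * Note (added for clarity): elements obtained with mempool_alloc() must
 * be returned through mempool_free() on the same pool -- freeing them
 * directly with kfree(), kmem_cache_free() or __free_pages() would
 * bypass the refill path above and starve waiters.
 */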
 505 
 506 /*
 507  * A commonly used alloc and free fn.
 508  */
 509 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
 510 {
 511         struct kmem_cache *mem = pool_data;
 512         VM_BUG_ON(mem->ctor);
 513         return kmem_cache_alloc(mem, gfp_mask);
 514 }
 515 EXPORT_SYMBOL(mempool_alloc_slab);
 516 
 517 void mempool_free_slab(void *element, void *pool_data)
 518 {
 519         struct kmem_cache *mem = pool_data;
 520         kmem_cache_free(mem, element);
 521 }
 522 EXPORT_SYMBOL(mempool_free_slab);
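
/*
 * Usage sketch (illustrative, not part of the original file); my_cache
 * is hypothetical and, per the VM_BUG_ON() above, must have been created
 * without a constructor:
 *
 *	pool = mempool_create(8, mempool_alloc_slab,
 *			      mempool_free_slab, my_cache);
 */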
 523 
 524 /*
 525  * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 526  * specified by pool_data
 527  */
 528 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
 529 {
 530         size_t size = (size_t)pool_data;
 531         return kmalloc(size, gfp_mask);
 532 }
 533 EXPORT_SYMBOL(mempool_kmalloc);
 534 
 535 void mempool_kfree(void *element, void *pool_data)
 536 {
 537         kfree(element);
 538 }
 539 EXPORT_SYMBOL(mempool_kfree);
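
/*
 * Usage sketch (illustrative, not part of the original file): the
 * element size travels through pool_data as a casted integer, mirroring
 * the cast in mempool_kmalloc():
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(size_t)256);
 */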
 540 
 541 /*
 542  * A simple mempool-backed page allocator that allocates pages
 543  * of the order specified by pool_data.
 544  */
 545 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
 546 {
 547         int order = (int)(long)pool_data;
 548         return alloc_pages(gfp_mask, order);
 549 }
 550 EXPORT_SYMBOL(mempool_alloc_pages);
 551 
 552 void mempool_free_pages(void *element, void *pool_data)
 553 {
 554         int order = (int)(long)pool_data;
 555         __free_pages(element, order);
 556 }
 557 EXPORT_SYMBOL(mempool_free_pages);
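
/*
 * Usage sketch (illustrative, not part of the original file): pool_data
 * carries the page order, so each element is a struct page * covering
 * 2^order pages; order is hypothetical:
 *
 *	pool = mempool_create(4, mempool_alloc_pages,
 *			      mempool_free_pages, (void *)(long)order);
 */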
