root/include/linux/slab.h


DEFINITIONS

This source file includes the following definitions.
  1. __check_heap_object
  2. kmalloc_type
  3. kmalloc_index
  4. __kmalloc
  5. __kmalloc_node
  6. kmem_cache_alloc_node
  7. kmem_cache_alloc_trace (CONFIG_TRACING variant)
  8. kmem_cache_alloc_trace (!CONFIG_TRACING variant)
  9. kmem_cache_alloc_node_trace
  10. kmalloc_order
  11. kmalloc_large
  12. kmalloc
  13. kmalloc_size
  14. kmalloc_node
  15. kmalloc_array
  16. kcalloc
  17. kmalloc_array_node
  18. kcalloc_node
  19. kmem_cache_zalloc
  20. kzalloc
  21. kzalloc_node

   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 /*
   3  * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
   4  *
   5  * (C) SGI 2006, Christoph Lameter
   6  *      Cleaned up and restructured to ease the addition of alternative
   7  *      implementations of SLAB allocators.
   8  * (C) Linux Foundation 2008-2013
   9  *      Unified interface for all slab allocators
  10  */
  11 
  12 #ifndef _LINUX_SLAB_H
  13 #define _LINUX_SLAB_H
  14 
  15 #include <linux/gfp.h>
  16 #include <linux/overflow.h>
  17 #include <linux/types.h>
  18 #include <linux/workqueue.h>
  19 #include <linux/percpu-refcount.h>
  20 
  21 
  22 /*
  23  * Flags to pass to kmem_cache_create().
  24  * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
  25  */
  26 /* DEBUG: Perform (expensive) checks on alloc/free */
  27 #define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
  28 /* DEBUG: Red zone objs in a cache */
  29 #define SLAB_RED_ZONE           ((slab_flags_t __force)0x00000400U)
  30 /* DEBUG: Poison objects */
  31 #define SLAB_POISON             ((slab_flags_t __force)0x00000800U)
  32 /* Align objs on cache lines */
  33 #define SLAB_HWCACHE_ALIGN      ((slab_flags_t __force)0x00002000U)
  34 /* Use GFP_DMA memory */
  35 #define SLAB_CACHE_DMA          ((slab_flags_t __force)0x00004000U)
  36 /* Use GFP_DMA32 memory */
  37 #define SLAB_CACHE_DMA32        ((slab_flags_t __force)0x00008000U)
  38 /* DEBUG: Store the last owner for bug hunting */
  39 #define SLAB_STORE_USER         ((slab_flags_t __force)0x00010000U)
  40 /* Panic if kmem_cache_create() fails */
  41 #define SLAB_PANIC              ((slab_flags_t __force)0x00040000U)
  42 /*
  43  * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
  44  *
   45  * This delays freeing the SLAB page by an RCU grace period; it does _NOT_
  46  * delay object freeing. This means that if you do kmem_cache_free()
  47  * that memory location is free to be reused at any time. Thus it may
  48  * be possible to see another object there in the same RCU grace period.
  49  *
  50  * This feature only ensures the memory location backing the object
  51  * stays valid, the trick to using this is relying on an independent
  52  * object validation pass. Something like:
  53  *
  54  *  rcu_read_lock()
  55  * again:
  56  *  obj = lockless_lookup(key);
  57  *  if (obj) {
  58  *    if (!try_get_ref(obj)) // might fail for free objects
  59  *      goto again;
  60  *
  61  *    if (obj->key != key) { // not the object we expected
  62  *      put_ref(obj);
  63  *      goto again;
  64  *    }
  65  *  }
  66  *  rcu_read_unlock();
  67  *
  68  * This is useful if we need to approach a kernel structure obliquely,
  69  * from its address obtained without the usual locking. We can lock
  70  * the structure to stabilize it and check it's still at the given address,
  71  * only if we can be sure that the memory has not been meanwhile reused
  72  * for some other kind of object (which our subsystem's lock might corrupt).
  73  *
   74  * Take rcu_read_lock() before reading the address, then rcu_read_unlock()
   75  * after taking the spinlock within the structure expected at that address.
  76  *
  77  * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
  78  */
  79 /* Defer freeing slabs to RCU */
  80 #define SLAB_TYPESAFE_BY_RCU    ((slab_flags_t __force)0x00080000U)
  81 /* Spread some memory over cpuset */
  82 #define SLAB_MEM_SPREAD         ((slab_flags_t __force)0x00100000U)
  83 /* Trace allocations and frees */
  84 #define SLAB_TRACE              ((slab_flags_t __force)0x00200000U)
  85 
  86 /* Flag to prevent checks on free */
  87 #ifdef CONFIG_DEBUG_OBJECTS
  88 # define SLAB_DEBUG_OBJECTS     ((slab_flags_t __force)0x00400000U)
  89 #else
  90 # define SLAB_DEBUG_OBJECTS     0
  91 #endif
  92 
  93 /* Avoid kmemleak tracing */
  94 #define SLAB_NOLEAKTRACE        ((slab_flags_t __force)0x00800000U)
  95 
  96 /* Fault injection mark */
  97 #ifdef CONFIG_FAILSLAB
  98 # define SLAB_FAILSLAB          ((slab_flags_t __force)0x02000000U)
  99 #else
 100 # define SLAB_FAILSLAB          0
 101 #endif
 102 /* Account to memcg */
 103 #ifdef CONFIG_MEMCG_KMEM
 104 # define SLAB_ACCOUNT           ((slab_flags_t __force)0x04000000U)
 105 #else
 106 # define SLAB_ACCOUNT           0
 107 #endif
 108 
 109 #ifdef CONFIG_KASAN
 110 #define SLAB_KASAN              ((slab_flags_t __force)0x08000000U)
 111 #else
 112 #define SLAB_KASAN              0
 113 #endif
 114 
 115 /* The following flags affect the page allocator grouping pages by mobility */
 116 /* Objects are reclaimable */
 117 #define SLAB_RECLAIM_ACCOUNT    ((slab_flags_t __force)0x00020000U)
 118 #define SLAB_TEMPORARY          SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
 119 
 120 /* Slab deactivation flag */
 121 #define SLAB_DEACTIVATED        ((slab_flags_t __force)0x10000000U)
 122 
 123 /*
 124  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 125  *
 126  * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 127  *
 128  * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 129  * Both make kfree a no-op.
 130  */
 131 #define ZERO_SIZE_PTR ((void *)16)
 132 
 133 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 134                                 (unsigned long)ZERO_SIZE_PTR)
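     /*
      * A hedged usage sketch (the 'len' and 'buf' names are hypothetical):
      * a zero-length request returns ZERO_SIZE_PTR rather than NULL, so
      * callers that treat NULL as out-of-memory should test with
      * ZERO_OR_NULL_PTR() instead:
      *
      *  buf = kmalloc(len, GFP_KERNEL);   // len may be 0
      *  if (ZERO_OR_NULL_PTR(buf))
      *          return len ? -ENOMEM : 0; // no usable allocation
      *  ...
      *  kfree(buf);                       // fine for ZERO_SIZE_PTR too
      */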
 135 
 136 #include <linux/kasan.h>
 137 
 138 struct mem_cgroup;
 139 /*
 140  * struct kmem_cache related prototypes
 141  */
 142 void __init kmem_cache_init(void);
 143 bool slab_is_available(void);
 144 
 145 extern bool usercopy_fallback;
 146 
 147 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
 148                         unsigned int align, slab_flags_t flags,
 149                         void (*ctor)(void *));
 150 struct kmem_cache *kmem_cache_create_usercopy(const char *name,
 151                         unsigned int size, unsigned int align,
 152                         slab_flags_t flags,
 153                         unsigned int useroffset, unsigned int usersize,
 154                         void (*ctor)(void *));
 155 void kmem_cache_destroy(struct kmem_cache *);
 156 int kmem_cache_shrink(struct kmem_cache *);
 157 
 158 void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
 159 void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
 160 
 161 /*
 162  * Please use this macro to create slab caches. Simply specify the
 163  * name of the structure and maybe some flags that are listed above.
 164  *
  165  * The alignment of the struct determines object alignment. If you,
  166  * for example, add ____cacheline_aligned_in_smp to the struct declaration,
  167  * then the objects will be properly aligned in SMP configurations.
 168  */
 169 #define KMEM_CACHE(__struct, __flags)                                   \
 170                 kmem_cache_create(#__struct, sizeof(struct __struct),   \
 171                         __alignof__(struct __struct), (__flags), NULL)
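     /*
      * A minimal lifecycle sketch (the struct and variable names are
      * hypothetical, not part of this header):
      *
      *  struct foo_req {
      *          int id;
      *          struct list_head node;
      *  };
      *
      *  static struct kmem_cache *foo_cache;
      *
      *  foo_cache = KMEM_CACHE(foo_req, SLAB_HWCACHE_ALIGN);
      *  if (!foo_cache)
      *          return -ENOMEM;
      *  req = kmem_cache_alloc(foo_cache, GFP_KERNEL);
      *  ...
      *  kmem_cache_free(foo_cache, req);
      *  kmem_cache_destroy(foo_cache);
      */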
 172 
 173 /*
  174  * To whitelist a single field for copying to/from userspace, use this
  175  * macro instead of KMEM_CACHE() above.
 176  */
 177 #define KMEM_CACHE_USERCOPY(__struct, __flags, __field)                 \
 178                 kmem_cache_create_usercopy(#__struct,                   \
 179                         sizeof(struct __struct),                        \
 180                         __alignof__(struct __struct), (__flags),        \
 181                         offsetof(struct __struct, __field),             \
 182                         sizeof_field(struct __struct, __field), NULL)
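     /*
      * A hedged sketch (struct 'foo_msg' is hypothetical): only the
      * whitelisted 'data' field may be copied to/from userspace on
      * hardened-usercopy kernels; the surrounding fields stay off limits.
      *
      *  struct foo_msg {
      *          spinlock_t lock;
      *          char data[128];         // whitelisted region
      *          struct list_head node;
      *  };
      *
      *  foo_msg_cache = KMEM_CACHE_USERCOPY(foo_msg, 0, data);
      */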
 183 
 184 /*
 185  * Common kmalloc functions provided by all allocators
 186  */
 187 void * __must_check __krealloc(const void *, size_t, gfp_t);
 188 void * __must_check krealloc(const void *, size_t, gfp_t);
 189 void kfree(const void *);
 190 void kzfree(const void *);
 191 size_t __ksize(const void *);
 192 size_t ksize(const void *);
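     /*
      * A hedged sketch for krealloc() ('buf', 'new' and 'new_len' are
      * hypothetical): on failure krealloc() returns NULL and leaves the
      * old allocation intact, so assign through a temporary to avoid
      * leaking it:
      *
      *  new = krealloc(buf, new_len, GFP_KERNEL);
      *  if (!new) {
      *          kfree(buf);
      *          return -ENOMEM;
      *  }
      *  buf = new;
      */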
 193 
 194 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
 195 void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 196                         bool to_user);
 197 #else
 198 static inline void __check_heap_object(const void *ptr, unsigned long n,
 199                                        struct page *page, bool to_user) { }
 200 #endif
 201 
 202 /*
 203  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 204  * alignment larger than the alignment of a 64-bit integer.
 205  * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 206  */
 207 #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
 208 #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
 209 #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 210 #define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
 211 #else
 212 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 213 #endif
 214 
 215 /*
 216  * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 217  * Intended for arches that get misalignment faults even for 64 bit integer
 218  * aligned buffers.
 219  */
 220 #ifndef ARCH_SLAB_MINALIGN
 221 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 222 #endif
 223 
 224 /*
 225  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 226  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 227  * aligned pointers.
 228  */
 229 #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
 230 #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
 231 #define __assume_page_alignment __assume_aligned(PAGE_SIZE)
 232 
 233 /*
 234  * Kmalloc array related definitions
 235  */
 236 
 237 #ifdef CONFIG_SLAB
 238 /*
 239  * The largest kmalloc size supported by the SLAB allocators is
  240  * 32 megabytes (2^25), or the size given by the maximum allocatable
  241  * page order, whichever is less.
 242  *
  243  * WARNING: It's not easy to increase this value since the allocators have
 244  * to do various tricks to work around compiler limitations in order to
 245  * ensure proper constant folding.
 246  */
 247 #define KMALLOC_SHIFT_HIGH      ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
 248                                 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
 249 #define KMALLOC_SHIFT_MAX       KMALLOC_SHIFT_HIGH
 250 #ifndef KMALLOC_SHIFT_LOW
 251 #define KMALLOC_SHIFT_LOW       5
 252 #endif
 253 #endif
 254 
 255 #ifdef CONFIG_SLUB
 256 /*
  257  * SLUB directly allocates requests fitting into an order-1 page
 258  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 259  */
 260 #define KMALLOC_SHIFT_HIGH      (PAGE_SHIFT + 1)
 261 #define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT - 1)
 262 #ifndef KMALLOC_SHIFT_LOW
 263 #define KMALLOC_SHIFT_LOW       3
 264 #endif
 265 #endif
 266 
 267 #ifdef CONFIG_SLOB
 268 /*
 269  * SLOB passes all requests larger than one page to the page allocator.
 270  * No kmalloc array is necessary since objects of different sizes can
 271  * be allocated from the same page.
 272  */
 273 #define KMALLOC_SHIFT_HIGH      PAGE_SHIFT
 274 #define KMALLOC_SHIFT_MAX       (MAX_ORDER + PAGE_SHIFT - 1)
 275 #ifndef KMALLOC_SHIFT_LOW
 276 #define KMALLOC_SHIFT_LOW       3
 277 #endif
 278 #endif
 279 
 280 /* Maximum allocatable size */
 281 #define KMALLOC_MAX_SIZE        (1UL << KMALLOC_SHIFT_MAX)
 282 /* Maximum size for which we actually use a slab cache */
 283 #define KMALLOC_MAX_CACHE_SIZE  (1UL << KMALLOC_SHIFT_HIGH)
  285 /* Maximum order allocatable via the slab allocator */
 285 #define KMALLOC_MAX_ORDER       (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
 286 
 287 /*
 288  * Kmalloc subsystem.
 289  */
 290 #ifndef KMALLOC_MIN_SIZE
 291 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 292 #endif
 293 
 294 /*
  295  * This restriction comes from the byte-sized index implementation.
  296  * Page size is normally 2^12 bytes and, in this case, if we want to use
  297  * a byte-sized index which can represent 2^8 entries, the size of the
  298  * object must be greater than or equal to 2^12 / 2^8 = 2^4 = 16.
  299  * If the minimum kmalloc size is less than 16, we use it as the minimum
  300  * object size and give up on using a byte-sized index.
 301  */
 302 #define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
 303                                (KMALLOC_MIN_SIZE) : 16)
 304 
 305 /*
 306  * Whenever changing this, take care of that kmalloc_type() and
 307  * create_kmalloc_caches() still work as intended.
 308  */
 309 enum kmalloc_cache_type {
 310         KMALLOC_NORMAL = 0,
 311         KMALLOC_RECLAIM,
 312 #ifdef CONFIG_ZONE_DMA
 313         KMALLOC_DMA,
 314 #endif
 315         NR_KMALLOC_TYPES
 316 };
 317 
 318 #ifndef CONFIG_SLOB
 319 extern struct kmem_cache *
 320 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
 321 
 322 static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
 323 {
 324 #ifdef CONFIG_ZONE_DMA
 325         /*
 326          * The most common case is KMALLOC_NORMAL, so test for it
 327          * with a single branch for both flags.
 328          */
 329         if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
 330                 return KMALLOC_NORMAL;
 331 
 332         /*
 333          * At least one of the flags has to be set. If both are, __GFP_DMA
 334          * is more important.
 335          */
 336         return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
 337 #else
 338         return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
 339 #endif
 340 }
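     /*
      * For instance, assuming CONFIG_ZONE_DMA is enabled:
      *
      *  kmalloc_type(GFP_KERNEL)                      == KMALLOC_NORMAL
      *  kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)  == KMALLOC_RECLAIM
      *  kmalloc_type(GFP_KERNEL | __GFP_DMA)          == KMALLOC_DMA
      *  kmalloc_type(__GFP_DMA | __GFP_RECLAIMABLE)   == KMALLOC_DMA  // DMA wins
      */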
 341 
 342 /*
 343  * Figure out which kmalloc slab an allocation of a certain size
 344  * belongs to.
 345  * 0 = zero alloc
 346  * 1 =  65 .. 96 bytes
 347  * 2 = 129 .. 192 bytes
  348  * n = 2^(n-1)+1 .. 2^n (for n >= 3)
 349  */
 350 static __always_inline unsigned int kmalloc_index(size_t size)
 351 {
 352         if (!size)
 353                 return 0;
 354 
 355         if (size <= KMALLOC_MIN_SIZE)
 356                 return KMALLOC_SHIFT_LOW;
 357 
 358         if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
 359                 return 1;
 360         if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
 361                 return 2;
 362         if (size <=          8) return 3;
 363         if (size <=         16) return 4;
 364         if (size <=         32) return 5;
 365         if (size <=         64) return 6;
 366         if (size <=        128) return 7;
 367         if (size <=        256) return 8;
 368         if (size <=        512) return 9;
 369         if (size <=       1024) return 10;
 370         if (size <=   2 * 1024) return 11;
 371         if (size <=   4 * 1024) return 12;
 372         if (size <=   8 * 1024) return 13;
 373         if (size <=  16 * 1024) return 14;
 374         if (size <=  32 * 1024) return 15;
 375         if (size <=  64 * 1024) return 16;
 376         if (size <= 128 * 1024) return 17;
 377         if (size <= 256 * 1024) return 18;
 378         if (size <= 512 * 1024) return 19;
 379         if (size <= 1024 * 1024) return 20;
 380         if (size <=  2 * 1024 * 1024) return 21;
 381         if (size <=  4 * 1024 * 1024) return 22;
 382         if (size <=  8 * 1024 * 1024) return 23;
 383         if (size <=  16 * 1024 * 1024) return 24;
 384         if (size <=  32 * 1024 * 1024) return 25;
 385         if (size <=  64 * 1024 * 1024) return 26;
 386         BUG();
 387 
 388         /* Will never be reached. Needed because the compiler may complain */
 389         return -1;
 390 }
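     /*
      * A few illustrative values, assuming KMALLOC_MIN_SIZE <= 32:
      *
      *  kmalloc_index(0)   == 0   // handled as a zero-size allocation
      *  kmalloc_index(70)  == 1   // 96-byte cache
      *  kmalloc_index(150) == 2   // 192-byte cache
      *  kmalloc_index(200) == 8   // 256-byte cache
      */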
 391 #endif /* !CONFIG_SLOB */
 392 
 393 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
 394 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
 395 void kmem_cache_free(struct kmem_cache *, void *);
 396 
 397 /*
 398  * Bulk allocation and freeing operations. These are accelerated in an
 399  * allocator specific way to avoid taking locks repeatedly or building
 400  * metadata structures unnecessarily.
 401  *
 402  * Note that interrupts must be enabled when calling these functions.
 403  */
 404 void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 405 int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 406 
 407 /*
 408  * Caller must not use kfree_bulk() on memory not originally allocated
 409  * by kmalloc(), because the SLOB allocator cannot handle this.
 410  */
 411 static __always_inline void kfree_bulk(size_t size, void **p)
 412 {
 413         kmem_cache_free_bulk(NULL, size, p);
 414 }
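     /*
      * A hedged batch sketch ('foo_cache' and 'objs' are hypothetical):
      * kmem_cache_alloc_bulk() returns nonzero on success and 0 on
      * failure, so the batch can be treated as all-or-nothing:
      *
      *  void *objs[16];
      *
      *  if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs))
      *          return -ENOMEM;
      *  ...
      *  kmem_cache_free_bulk(foo_cache, 16, objs);
      */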
 415 
 416 #ifdef CONFIG_NUMA
 417 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
 418 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
 419 #else
 420 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 421 {
 422         return __kmalloc(size, flags);
 423 }
 424 
 425 static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
 426 {
 427         return kmem_cache_alloc(s, flags);
 428 }
 429 #endif
 430 
 431 #ifdef CONFIG_TRACING
 432 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
 433 
 434 #ifdef CONFIG_NUMA
 435 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 436                                            gfp_t gfpflags,
 437                                            int node, size_t size) __assume_slab_alignment __malloc;
 438 #else
 439 static __always_inline void *
 440 kmem_cache_alloc_node_trace(struct kmem_cache *s,
 441                               gfp_t gfpflags,
 442                               int node, size_t size)
 443 {
 444         return kmem_cache_alloc_trace(s, gfpflags, size);
 445 }
 446 #endif /* CONFIG_NUMA */
 447 
 448 #else /* CONFIG_TRACING */
 449 static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
 450                 gfp_t flags, size_t size)
 451 {
 452         void *ret = kmem_cache_alloc(s, flags);
 453 
 454         ret = kasan_kmalloc(s, ret, size, flags);
 455         return ret;
 456 }
 457 
 458 static __always_inline void *
 459 kmem_cache_alloc_node_trace(struct kmem_cache *s,
 460                               gfp_t gfpflags,
 461                               int node, size_t size)
 462 {
 463         void *ret = kmem_cache_alloc_node(s, gfpflags, node);
 464 
 465         ret = kasan_kmalloc(s, ret, size, gfpflags);
 466         return ret;
 467 }
 468 #endif /* CONFIG_TRACING */
 469 
 470 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 471 
 472 #ifdef CONFIG_TRACING
 473 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 474 #else
 475 static __always_inline void *
 476 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 477 {
 478         return kmalloc_order(size, flags, order);
 479 }
 480 #endif
 481 
 482 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 483 {
 484         unsigned int order = get_order(size);
 485         return kmalloc_order_trace(size, flags, order);
 486 }
 487 
 488 /**
 489  * kmalloc - allocate memory
 490  * @size: how many bytes of memory are required.
 491  * @flags: the type of memory to allocate.
 492  *
 493  * kmalloc is the normal method of allocating memory
 494  * for objects smaller than page size in the kernel.
 495  *
 496  * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
  497  * bytes. When @size is a power of two, the alignment is also guaranteed
  498  * to be at least @size.
 499  *
 500  * The @flags argument may be one of the GFP flags defined at
 501  * include/linux/gfp.h and described at
 502  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 503  *
 504  * The recommended usage of the @flags is described at
 505  * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
 506  *
 507  * Below is a brief outline of the most useful GFP flags
 508  *
 509  * %GFP_KERNEL
  510  *      Allocate normal kernel RAM. May sleep.
 511  *
 512  * %GFP_NOWAIT
 513  *      Allocation will not sleep.
 514  *
 515  * %GFP_ATOMIC
 516  *      Allocation will not sleep.  May use emergency pools.
 517  *
 518  * %GFP_HIGHUSER
 519  *      Allocate memory from high memory on behalf of user.
 520  *
 521  * Also it is possible to set different flags by OR'ing
 522  * in one or more of the following additional @flags:
 523  *
 524  * %__GFP_HIGH
 525  *      This allocation has high priority and may use emergency pools.
 526  *
 527  * %__GFP_NOFAIL
 528  *      Indicate that this allocation is in no way allowed to fail
 529  *      (think twice before using).
 530  *
 531  * %__GFP_NORETRY
 532  *      If memory is not immediately available,
 533  *      then give up at once.
 534  *
 535  * %__GFP_NOWARN
 536  *      If allocation fails, don't issue any warnings.
 537  *
 538  * %__GFP_RETRY_MAYFAIL
  539  *      Try really hard to make the allocation succeed, but eventually
  540  *      give up and fail.
 541  */
 542 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 543 {
 544         if (__builtin_constant_p(size)) {
 545 #ifndef CONFIG_SLOB
 546                 unsigned int index;
 547 #endif
 548                 if (size > KMALLOC_MAX_CACHE_SIZE)
 549                         return kmalloc_large(size, flags);
 550 #ifndef CONFIG_SLOB
 551                 index = kmalloc_index(size);
 552 
 553                 if (!index)
 554                         return ZERO_SIZE_PTR;
 555 
 556                 return kmem_cache_alloc_trace(
 557                                 kmalloc_caches[kmalloc_type(flags)][index],
 558                                 flags, size);
 559 #endif
 560         }
 561         return __kmalloc(size, flags);
 562 }
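     /*
      * Illustration: with a compile-time constant size the lookup above
      * folds away, e.g. kmalloc(64, GFP_KERNEL) effectively becomes
      * kmem_cache_alloc_trace(kmalloc_caches[KMALLOC_NORMAL][6], ...),
      * while a non-constant size such as kmalloc(nr * 16, GFP_KERNEL)
      * (with 'nr' only known at run time) falls through to __kmalloc().
      */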
 563 
 564 /*
  565  * Determine the size used for the nth kmalloc cache.
  566  * Return the size, or 0 if a kmalloc cache for that
  567  * size does not exist.
 568  */
 569 static __always_inline unsigned int kmalloc_size(unsigned int n)
 570 {
 571 #ifndef CONFIG_SLOB
 572         if (n > 2)
 573                 return 1U << n;
 574 
 575         if (n == 1 && KMALLOC_MIN_SIZE <= 32)
 576                 return 96;
 577 
 578         if (n == 2 && KMALLOC_MIN_SIZE <= 64)
 579                 return 192;
 580 #endif
 581         return 0;
 582 }
 583 
 584 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 585 {
 586 #ifndef CONFIG_SLOB
 587         if (__builtin_constant_p(size) &&
 588                 size <= KMALLOC_MAX_CACHE_SIZE) {
 589                 unsigned int i = kmalloc_index(size);
 590 
 591                 if (!i)
 592                         return ZERO_SIZE_PTR;
 593 
 594                 return kmem_cache_alloc_node_trace(
 595                                 kmalloc_caches[kmalloc_type(flags)][i],
 596                                                 flags, node, size);
 597         }
 598 #endif
 599         return __kmalloc_node(size, flags, node);
 600 }
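     /*
      * A hedged sketch ('ring' and 'dev' are hypothetical): allocate
      * per-device state on the device's local NUMA node:
      *
      *  ring = kmalloc_node(sizeof(*ring), GFP_KERNEL, dev_to_node(dev));
      *  if (!ring)
      *          return -ENOMEM;
      */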
 601 
 602 int memcg_update_all_caches(int num_memcgs);
 603 
 604 /**
 605  * kmalloc_array - allocate memory for an array.
 606  * @n: number of elements.
 607  * @size: element size.
 608  * @flags: the type of memory to allocate (see kmalloc).
 609  */
 610 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
 611 {
 612         size_t bytes;
 613 
 614         if (unlikely(check_mul_overflow(n, size, &bytes)))
 615                 return NULL;
 616         if (__builtin_constant_p(n) && __builtin_constant_p(size))
 617                 return kmalloc(bytes, flags);
 618         return __kmalloc(bytes, flags);
 619 }
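     /*
      * For example ('items' and 'n' are hypothetical), this returns NULL
      * instead of silently wrapping when n * sizeof(*items) overflows,
      * which an open-coded kmalloc(n * sizeof(*items), ...) would not
      * catch:
      *
      *  items = kmalloc_array(n, sizeof(*items), GFP_KERNEL);
      *  if (!items)                     // covers the overflow case too
      *          return -ENOMEM;
      */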
 620 
 621 /**
 622  * kcalloc - allocate memory for an array. The memory is set to zero.
 623  * @n: number of elements.
 624  * @size: element size.
 625  * @flags: the type of memory to allocate (see kmalloc).
 626  */
 627 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 628 {
 629         return kmalloc_array(n, size, flags | __GFP_ZERO);
 630 }
 631 
 632 /*
  633  * kmalloc_track_caller is a special version of kmalloc that records, for
  634  * slab leak tracking, the caller of the routine invoking it instead of
  635  * just the immediate calling function (confusing, eh?).
 636  * It's useful when the call to kmalloc comes from a widely-used standard
 637  * allocator where we care about the real place the memory allocation
 638  * request comes from.
 639  */
 640 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 641 #define kmalloc_track_caller(size, flags) \
 642         __kmalloc_track_caller(size, flags, _RET_IP_)
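     /*
      * A hedged wrapper sketch ('foo_alloc' is hypothetical): because the
      * macro expands inside the wrapper, _RET_IP_ is the wrapper's return
      * address, so leak reports blame foo_alloc()'s caller rather than
      * foo_alloc() itself:
      *
      *  void *foo_alloc(size_t len)
      *  {
      *          return kmalloc_track_caller(len, GFP_KERNEL);
      *  }
      */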
 643 
 644 static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
 645                                        int node)
 646 {
 647         size_t bytes;
 648 
 649         if (unlikely(check_mul_overflow(n, size, &bytes)))
 650                 return NULL;
 651         if (__builtin_constant_p(n) && __builtin_constant_p(size))
 652                 return kmalloc_node(bytes, flags, node);
 653         return __kmalloc_node(bytes, flags, node);
 654 }
 655 
 656 static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
 657 {
 658         return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
 659 }
 660 
 661 
 662 #ifdef CONFIG_NUMA
 663 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 664 #define kmalloc_node_track_caller(size, flags, node) \
 665         __kmalloc_node_track_caller(size, flags, node, \
 666                         _RET_IP_)
 667 
 668 #else /* CONFIG_NUMA */
 669 
 670 #define kmalloc_node_track_caller(size, flags, node) \
 671         kmalloc_track_caller(size, flags)
 672 
 673 #endif /* CONFIG_NUMA */
 674 
 675 /*
 676  * Shortcuts
 677  */
 678 static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
 679 {
 680         return kmem_cache_alloc(k, flags | __GFP_ZERO);
 681 }
 682 
 683 /**
 684  * kzalloc - allocate memory. The memory is set to zero.
 685  * @size: how many bytes of memory are required.
 686  * @flags: the type of memory to allocate (see kmalloc).
 687  */
 688 static inline void *kzalloc(size_t size, gfp_t flags)
 689 {
 690         return kmalloc(size, flags | __GFP_ZERO);
 691 }
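     /*
      * Equivalent in effect to kmalloc() followed by memset(ptr, 0, size),
      * e.g. (the 'st' name is hypothetical):
      *
      *  st = kzalloc(sizeof(*st), GFP_KERNEL);
      *  if (!st)
      *          return -ENOMEM;
      */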
 692 
 693 /**
 694  * kzalloc_node - allocate zeroed memory from a particular memory node.
 695  * @size: how many bytes of memory are required.
 696  * @flags: the type of memory to allocate (see kmalloc).
 697  * @node: memory node from which to allocate
 698  */
 699 static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 700 {
 701         return kmalloc_node(size, flags | __GFP_ZERO, node);
 702 }
 703 
 704 unsigned int kmem_cache_size(struct kmem_cache *s);
 705 void __init kmem_cache_init_late(void);
 706 
 707 #if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
 708 int slab_prepare_cpu(unsigned int cpu);
 709 int slab_dead_cpu(unsigned int cpu);
 710 #else
 711 #define slab_prepare_cpu        NULL
 712 #define slab_dead_cpu           NULL
 713 #endif
 714 
 715 #endif  /* _LINUX_SLAB_H */
