root/mm/slab_common.c

DEFINITIONS

This source file includes the following definitions.
  1. setup_slab_nomerge
  2. kmem_cache_size
  3. kmem_cache_sanity_check
  4. kmem_cache_sanity_check
  5. __kmem_cache_free_bulk
  6. __kmem_cache_alloc_bulk
  7. slab_init_memcg_params
  8. init_memcg_params
  9. destroy_memcg_params
  10. free_memcg_params
  11. update_memcg_params
  12. memcg_update_all_caches
  13. memcg_link_cache
  14. memcg_unlink_cache
  15. init_memcg_params
  16. destroy_memcg_params
  17. memcg_unlink_cache
  18. calculate_alignment
  19. slab_unmergeable
  20. find_mergeable
  21. create_cache
  22. kmem_cache_create_usercopy
  23. kmem_cache_create
  24. slab_caches_to_rcu_destroy_workfn
  25. shutdown_cache
  26. memcg_create_kmem_cache
  27. kmemcg_workfn
  28. kmemcg_rcufn
  29. kmemcg_cache_shutdown_fn
  30. kmemcg_cache_shutdown
  31. kmemcg_cache_deactivate_after_rcu
  32. kmemcg_cache_deactivate
  33. memcg_deactivate_kmem_caches
  34. shutdown_memcg_caches
  35. flush_memcg_workqueue
  36. shutdown_memcg_caches
  37. flush_memcg_workqueue
  38. slab_kmem_cache_release
  39. kmem_cache_destroy
  40. kmem_cache_shrink
  41. kmem_cache_shrink_all
  42. slab_is_available
  43. create_boot_cache
  44. create_kmalloc_cache
  45. size_index_elem
  46. kmalloc_slab
  47. setup_kmalloc_cache_index_table
  48. kmalloc_cache_name
  49. new_kmalloc_cache
  50. create_kmalloc_caches
  51. kmalloc_order
  52. kmalloc_order_trace
  53. freelist_randomize
  54. cache_random_seq_create
  55. cache_random_seq_destroy
  56. print_slabinfo_header
  57. slab_start
  58. slab_next
  59. slab_stop
  60. memcg_accumulate_slabinfo
  61. cache_show
  62. slab_show
  63. dump_unreclaimable_slab
  64. memcg_slab_start
  65. memcg_slab_next
  66. memcg_slab_stop
  67. memcg_slab_show
  68. slabinfo_open
  69. slab_proc_init
  70. memcg_slabinfo_show
  71. memcg_slabinfo_init
  72. __do_krealloc
  73. __krealloc
  74. krealloc
  75. kzfree
  76. ksize
  77. should_failslab

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Slab allocator functions that are independent of the allocator strategy
   4  *
   5  * (C) 2012 Christoph Lameter <cl@linux.com>
   6  */
   7 #include <linux/slab.h>
   8 
   9 #include <linux/mm.h>
  10 #include <linux/poison.h>
  11 #include <linux/interrupt.h>
  12 #include <linux/memory.h>
  13 #include <linux/cache.h>
  14 #include <linux/compiler.h>
  15 #include <linux/module.h>
  16 #include <linux/cpu.h>
  17 #include <linux/uaccess.h>
  18 #include <linux/seq_file.h>
  19 #include <linux/proc_fs.h>
  20 #include <linux/debugfs.h>
  21 #include <asm/cacheflush.h>
  22 #include <asm/tlbflush.h>
  23 #include <asm/page.h>
  24 #include <linux/memcontrol.h>
  25 
  26 #define CREATE_TRACE_POINTS
  27 #include <trace/events/kmem.h>
  28 
  29 #include "slab.h"
  30 
  31 enum slab_state slab_state;
  32 LIST_HEAD(slab_caches);
  33 DEFINE_MUTEX(slab_mutex);
  34 struct kmem_cache *kmem_cache;
  35 
  36 #ifdef CONFIG_HARDENED_USERCOPY
  37 bool usercopy_fallback __ro_after_init =
  38                 IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
  39 module_param(usercopy_fallback, bool, 0400);
  40 MODULE_PARM_DESC(usercopy_fallback,
  41                 "WARN instead of reject usercopy whitelist violations");
  42 #endif
  43 
  44 static LIST_HEAD(slab_caches_to_rcu_destroy);
  45 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
  46 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  47                     slab_caches_to_rcu_destroy_workfn);
  48 
  49 /*
  50  * Set of flags that will prevent slab merging
  51  */
  52 #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  53                 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
  54                 SLAB_FAILSLAB | SLAB_KASAN)
  55 
  56 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
  57                          SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
  58 
  59 /*
  60  * Merge control. If this is set, then no merging of slab caches will occur.
  61  */
  62 static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
  63 
  64 static int __init setup_slab_nomerge(char *str)
  65 {
  66         slab_nomerge = true;
  67         return 1;
  68 }
  69 
  70 #ifdef CONFIG_SLUB
  71 __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
  72 #endif
  73 
  74 __setup("slab_nomerge", setup_slab_nomerge);
  75 
  76 /*
  77  * Determine the size of a slab object
  78  */
  79 unsigned int kmem_cache_size(struct kmem_cache *s)
  80 {
  81         return s->object_size;
  82 }
  83 EXPORT_SYMBOL(kmem_cache_size);
  84 
  85 #ifdef CONFIG_DEBUG_VM
  86 static int kmem_cache_sanity_check(const char *name, unsigned int size)
  87 {
  88         if (!name || in_interrupt() || size < sizeof(void *) ||
  89                 size > KMALLOC_MAX_SIZE) {
  90                 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
  91                 return -EINVAL;
  92         }
  93 
  94         WARN_ON(strchr(name, ' '));     /* It confuses parsers */
  95         return 0;
  96 }
  97 #else
  98 static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
  99 {
 100         return 0;
 101 }
 102 #endif
 103 
 104 void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 105 {
 106         size_t i;
 107 
 108         for (i = 0; i < nr; i++) {
 109                 if (s)
 110                         kmem_cache_free(s, p[i]);
 111                 else
 112                         kfree(p[i]);
 113         }
 114 }
 115 
 116 int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 117                                                                 void **p)
 118 {
 119         size_t i;
 120 
 121         for (i = 0; i < nr; i++) {
 122                 void *x = p[i] = kmem_cache_alloc(s, flags);
 123                 if (!x) {
 124                         __kmem_cache_free_bulk(s, i, p);
 125                         return 0;
 126                 }
 127         }
 128         return i;
 129 }
 130 
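/*
 * A minimal usage sketch of the bulk interface whose fallbacks are
 * implemented above; the caller, batch size and cache are hypothetical.
 */
static void example_bulk_user(struct kmem_cache *cache)
{
	void *objs[16];
	size_t n;

	/* Returns the number of objects allocated, or 0 on failure. */
	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!n)
		return;

	/* ... use objs[0..n-1] ... */

	kmem_cache_free_bulk(cache, n, objs);
}
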
 131 #ifdef CONFIG_MEMCG_KMEM
 132 
 133 LIST_HEAD(slab_root_caches);
 134 static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
 135 
 136 static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);
 137 
 138 void slab_init_memcg_params(struct kmem_cache *s)
 139 {
 140         s->memcg_params.root_cache = NULL;
 141         RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
 142         INIT_LIST_HEAD(&s->memcg_params.children);
 143         s->memcg_params.dying = false;
 144 }
 145 
 146 static int init_memcg_params(struct kmem_cache *s,
 147                              struct kmem_cache *root_cache)
 148 {
 149         struct memcg_cache_array *arr;
 150 
 151         if (root_cache) {
 152                 int ret = percpu_ref_init(&s->memcg_params.refcnt,
 153                                           kmemcg_cache_shutdown,
 154                                           0, GFP_KERNEL);
 155                 if (ret)
 156                         return ret;
 157 
 158                 s->memcg_params.root_cache = root_cache;
 159                 INIT_LIST_HEAD(&s->memcg_params.children_node);
 160                 INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
 161                 return 0;
 162         }
 163 
 164         slab_init_memcg_params(s);
 165 
 166         if (!memcg_nr_cache_ids)
 167                 return 0;
 168 
 169         arr = kvzalloc(sizeof(struct memcg_cache_array) +
 170                        memcg_nr_cache_ids * sizeof(void *),
 171                        GFP_KERNEL);
 172         if (!arr)
 173                 return -ENOMEM;
 174 
 175         RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
 176         return 0;
 177 }
 178 
 179 static void destroy_memcg_params(struct kmem_cache *s)
 180 {
 181         if (is_root_cache(s)) {
 182                 kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
 183         } else {
 184                 mem_cgroup_put(s->memcg_params.memcg);
 185                 WRITE_ONCE(s->memcg_params.memcg, NULL);
 186                 percpu_ref_exit(&s->memcg_params.refcnt);
 187         }
 188 }
 189 
 190 static void free_memcg_params(struct rcu_head *rcu)
 191 {
 192         struct memcg_cache_array *old;
 193 
 194         old = container_of(rcu, struct memcg_cache_array, rcu);
 195         kvfree(old);
 196 }
 197 
 198 static int update_memcg_params(struct kmem_cache *s, int new_array_size)
 199 {
 200         struct memcg_cache_array *old, *new;
 201 
 202         new = kvzalloc(sizeof(struct memcg_cache_array) +
 203                        new_array_size * sizeof(void *), GFP_KERNEL);
 204         if (!new)
 205                 return -ENOMEM;
 206 
 207         old = rcu_dereference_protected(s->memcg_params.memcg_caches,
 208                                         lockdep_is_held(&slab_mutex));
 209         if (old)
 210                 memcpy(new->entries, old->entries,
 211                        memcg_nr_cache_ids * sizeof(void *));
 212 
 213         rcu_assign_pointer(s->memcg_params.memcg_caches, new);
 214         if (old)
 215                 call_rcu(&old->rcu, free_memcg_params);
 216         return 0;
 217 }
 218 
 219 int memcg_update_all_caches(int num_memcgs)
 220 {
 221         struct kmem_cache *s;
 222         int ret = 0;
 223 
 224         mutex_lock(&slab_mutex);
 225         list_for_each_entry(s, &slab_root_caches, root_caches_node) {
 226                 ret = update_memcg_params(s, num_memcgs);
 227                 /*
 228                  * Instead of freeing the memory, we'll just leave the caches
 229                  * up to this point in an updated state.
 230                  */
 231                 if (ret)
 232                         break;
 233         }
 234         mutex_unlock(&slab_mutex);
 235         return ret;
 236 }
 237 
 238 void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
 239 {
 240         if (is_root_cache(s)) {
 241                 list_add(&s->root_caches_node, &slab_root_caches);
 242         } else {
 243                 css_get(&memcg->css);
 244                 s->memcg_params.memcg = memcg;
 245                 list_add(&s->memcg_params.children_node,
 246                          &s->memcg_params.root_cache->memcg_params.children);
 247                 list_add(&s->memcg_params.kmem_caches_node,
 248                          &s->memcg_params.memcg->kmem_caches);
 249         }
 250 }
 251 
 252 static void memcg_unlink_cache(struct kmem_cache *s)
 253 {
 254         if (is_root_cache(s)) {
 255                 list_del(&s->root_caches_node);
 256         } else {
 257                 list_del(&s->memcg_params.children_node);
 258                 list_del(&s->memcg_params.kmem_caches_node);
 259         }
 260 }
 261 #else
 262 static inline int init_memcg_params(struct kmem_cache *s,
 263                                     struct kmem_cache *root_cache)
 264 {
 265         return 0;
 266 }
 267 
 268 static inline void destroy_memcg_params(struct kmem_cache *s)
 269 {
 270 }
 271 
 272 static inline void memcg_unlink_cache(struct kmem_cache *s)
 273 {
 274 }
 275 #endif /* CONFIG_MEMCG_KMEM */
 276 
 277 /*
 278  * Figure out what the alignment of the objects will be given a set of
 279  * flags, a user specified alignment and the size of the objects.
 280  */
 281 static unsigned int calculate_alignment(slab_flags_t flags,
 282                 unsigned int align, unsigned int size)
 283 {
 284         /*
 285          * If the user wants hardware cache aligned objects then follow that
 286          * suggestion if the object is sufficiently large.
 287          *
 288          * The hardware cache alignment cannot override the specified
  289  * alignment though. If that is greater, then use it.
 290          */
 291         if (flags & SLAB_HWCACHE_ALIGN) {
 292                 unsigned int ralign;
 293 
 294                 ralign = cache_line_size();
 295                 while (size <= ralign / 2)
 296                         ralign /= 2;
 297                 align = max(align, ralign);
 298         }
 299 
 300         if (align < ARCH_SLAB_MINALIGN)
 301                 align = ARCH_SLAB_MINALIGN;
 302 
 303         return ALIGN(align, sizeof(void *));
 304 }
 305 
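/*
 * Worked example for calculate_alignment(), assuming a 64-byte cache
 * line: for a 20-byte object with SLAB_HWCACHE_ALIGN, ralign halves
 * from 64 to 32 (20 <= 32) and then stops (20 > 16), so the object is
 * aligned to 32 bytes and two objects share each cache line instead of
 * one object wasting most of it.
 */
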
 306 /*
 307  * Find a mergeable slab cache
 308  */
 309 int slab_unmergeable(struct kmem_cache *s)
 310 {
 311         if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
 312                 return 1;
 313 
 314         if (!is_root_cache(s))
 315                 return 1;
 316 
 317         if (s->ctor)
 318                 return 1;
 319 
 320         if (s->usersize)
 321                 return 1;
 322 
 323         /*
 324          * We may have set a slab to be unmergeable during bootstrap.
 325          */
 326         if (s->refcount < 0)
 327                 return 1;
 328 
 329         return 0;
 330 }
 331 
 332 struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
 333                 slab_flags_t flags, const char *name, void (*ctor)(void *))
 334 {
 335         struct kmem_cache *s;
 336 
 337         if (slab_nomerge)
 338                 return NULL;
 339 
 340         if (ctor)
 341                 return NULL;
 342 
 343         size = ALIGN(size, sizeof(void *));
 344         align = calculate_alignment(flags, align, size);
 345         size = ALIGN(size, align);
 346         flags = kmem_cache_flags(size, flags, name, NULL);
 347 
 348         if (flags & SLAB_NEVER_MERGE)
 349                 return NULL;
 350 
 351         list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
 352                 if (slab_unmergeable(s))
 353                         continue;
 354 
 355                 if (size > s->size)
 356                         continue;
 357 
 358                 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
 359                         continue;
 360                 /*
 361                  * Check if alignment is compatible.
 362                  * Courtesy of Adrian Drzewiecki
 363                  */
 364                 if ((s->size & ~(align - 1)) != s->size)
 365                         continue;
 366 
 367                 if (s->size - size >= sizeof(void *))
 368                         continue;
 369 
 370                 if (IS_ENABLED(CONFIG_SLAB) && align &&
 371                         (align > s->align || s->align % align))
 372                         continue;
 373 
 374                 return s;
 375         }
 376         return NULL;
 377 }
 378 
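/*
 * Worked example for the checks above, on a 64-bit build: a request for
 * 56-byte objects with no ctor and no usercopy region cannot reuse an
 * existing 64-byte cache, because 64 - 56 >= sizeof(void *) fails the
 * wasted-space check; only a candidate of 56..63 bytes whose
 * SLAB_MERGE_SAME bits match exactly is returned.
 */
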
 379 static struct kmem_cache *create_cache(const char *name,
 380                 unsigned int object_size, unsigned int align,
 381                 slab_flags_t flags, unsigned int useroffset,
 382                 unsigned int usersize, void (*ctor)(void *),
 383                 struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 384 {
 385         struct kmem_cache *s;
 386         int err;
 387 
 388         if (WARN_ON(useroffset + usersize > object_size))
 389                 useroffset = usersize = 0;
 390 
 391         err = -ENOMEM;
 392         s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 393         if (!s)
 394                 goto out;
 395 
 396         s->name = name;
 397         s->size = s->object_size = object_size;
 398         s->align = align;
 399         s->ctor = ctor;
 400         s->useroffset = useroffset;
 401         s->usersize = usersize;
 402 
 403         err = init_memcg_params(s, root_cache);
 404         if (err)
 405                 goto out_free_cache;
 406 
 407         err = __kmem_cache_create(s, flags);
 408         if (err)
 409                 goto out_free_cache;
 410 
 411         s->refcount = 1;
 412         list_add(&s->list, &slab_caches);
 413         memcg_link_cache(s, memcg);
 414 out:
 415         if (err)
 416                 return ERR_PTR(err);
 417         return s;
 418 
 419 out_free_cache:
 420         destroy_memcg_params(s);
 421         kmem_cache_free(kmem_cache, s);
 422         goto out;
 423 }
 424 
 425 /**
 426  * kmem_cache_create_usercopy - Create a cache with a region suitable
 427  * for copying to userspace
 428  * @name: A string which is used in /proc/slabinfo to identify this cache.
 429  * @size: The size of objects to be created in this cache.
 430  * @align: The required alignment for the objects.
 431  * @flags: SLAB flags
 432  * @useroffset: Usercopy region offset
 433  * @usersize: Usercopy region size
 434  * @ctor: A constructor for the objects.
 435  *
  436  * Cannot be called within an interrupt, but can be interrupted.
 437  * The @ctor is run when new pages are allocated by the cache.
 438  *
 439  * The flags are
 440  *
 441  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 442  * to catch references to uninitialised memory.
 443  *
 444  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 445  * for buffer overruns.
 446  *
 447  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 448  * cacheline.  This can be beneficial if you're counting cycles as closely
 449  * as davem.
 450  *
 451  * Return: a pointer to the cache on success, NULL on failure.
 452  */
 453 struct kmem_cache *
 454 kmem_cache_create_usercopy(const char *name,
 455                   unsigned int size, unsigned int align,
 456                   slab_flags_t flags,
 457                   unsigned int useroffset, unsigned int usersize,
 458                   void (*ctor)(void *))
 459 {
 460         struct kmem_cache *s = NULL;
 461         const char *cache_name;
 462         int err;
 463 
 464         get_online_cpus();
 465         get_online_mems();
 466         memcg_get_cache_ids();
 467 
 468         mutex_lock(&slab_mutex);
 469 
 470         err = kmem_cache_sanity_check(name, size);
 471         if (err) {
 472                 goto out_unlock;
 473         }
 474 
 475         /* Refuse requests with allocator specific flags */
 476         if (flags & ~SLAB_FLAGS_PERMITTED) {
 477                 err = -EINVAL;
 478                 goto out_unlock;
 479         }
 480 
 481         /*
  482          * Some allocators will constrain the set of valid flags to a subset
 483          * of all flags. We expect them to define CACHE_CREATE_MASK in this
 484          * case, and we'll just provide them with a sanitized version of the
 485          * passed flags.
 486          */
 487         flags &= CACHE_CREATE_MASK;
 488 
  489         /* Fail closed on bad usersize or useroffset values. */
 490         if (WARN_ON(!usersize && useroffset) ||
 491             WARN_ON(size < usersize || size - usersize < useroffset))
 492                 usersize = useroffset = 0;
 493 
 494         if (!usersize)
 495                 s = __kmem_cache_alias(name, size, align, flags, ctor);
 496         if (s)
 497                 goto out_unlock;
 498 
 499         cache_name = kstrdup_const(name, GFP_KERNEL);
 500         if (!cache_name) {
 501                 err = -ENOMEM;
 502                 goto out_unlock;
 503         }
 504 
 505         s = create_cache(cache_name, size,
 506                          calculate_alignment(flags, align, size),
 507                          flags, useroffset, usersize, ctor, NULL, NULL);
 508         if (IS_ERR(s)) {
 509                 err = PTR_ERR(s);
 510                 kfree_const(cache_name);
 511         }
 512 
 513 out_unlock:
 514         mutex_unlock(&slab_mutex);
 515 
 516         memcg_put_cache_ids();
 517         put_online_mems();
 518         put_online_cpus();
 519 
 520         if (err) {
 521                 if (flags & SLAB_PANIC)
 522                         panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
 523                                 name, err);
 524                 else {
 525                         pr_warn("kmem_cache_create(%s) failed with error %d\n",
 526                                 name, err);
 527                         dump_stack();
 528                 }
 529                 return NULL;
 530         }
 531         return s;
 532 }
 533 EXPORT_SYMBOL(kmem_cache_create_usercopy);
 534 
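/*
 * A minimal sketch of a whitelisted cache; "example_req" and its fields
 * are hypothetical. Hardened usercopy will only permit copies that fall
 * inside the "buf" region of each object.
 */
struct example_req {
	u64 seq;
	char buf[64];
	void *priv;
};

static struct kmem_cache *example_req_cachep;

static void example_req_caches_init(void)
{
	example_req_cachep = kmem_cache_create_usercopy("example_req",
				sizeof(struct example_req), 0, 0,
				offsetof(struct example_req, buf),
				sizeof_field(struct example_req, buf),
				NULL);
}
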
 535 /**
 536  * kmem_cache_create - Create a cache.
 537  * @name: A string which is used in /proc/slabinfo to identify this cache.
 538  * @size: The size of objects to be created in this cache.
 539  * @align: The required alignment for the objects.
 540  * @flags: SLAB flags
 541  * @ctor: A constructor for the objects.
 542  *
  543  * Cannot be called within an interrupt, but can be interrupted.
 544  * The @ctor is run when new pages are allocated by the cache.
 545  *
 546  * The flags are
 547  *
 548  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 549  * to catch references to uninitialised memory.
 550  *
 551  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 552  * for buffer overruns.
 553  *
 554  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 555  * cacheline.  This can be beneficial if you're counting cycles as closely
 556  * as davem.
 557  *
 558  * Return: a pointer to the cache on success, NULL on failure.
 559  */
 560 struct kmem_cache *
 561 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 562                 slab_flags_t flags, void (*ctor)(void *))
 563 {
 564         return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
 565                                           ctor);
 566 }
 567 EXPORT_SYMBOL(kmem_cache_create);
 568 
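/*
 * A minimal usage sketch for kmem_cache_create(); "example_obj" and its
 * constructor are hypothetical. The ctor runs once per object when the
 * cache populates a new slab page, not on every kmem_cache_alloc().
 */
struct example_obj {
	int id;
	struct list_head link;
};

static void example_obj_ctor(void *addr)
{
	struct example_obj *obj = addr;

	obj->id = 0;
	INIT_LIST_HEAD(&obj->link);
}

static int example_cache_demo(void)
{
	struct kmem_cache *cachep;
	struct example_obj *obj;

	cachep = kmem_cache_create("example_obj", sizeof(struct example_obj),
				   0, SLAB_HWCACHE_ALIGN, example_obj_ctor);
	if (!cachep)
		return -ENOMEM;

	obj = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (obj)
		kmem_cache_free(cachep, obj);

	kmem_cache_destroy(cachep);
	return 0;
}
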
 569 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 570 {
 571         LIST_HEAD(to_destroy);
 572         struct kmem_cache *s, *s2;
 573 
 574         /*
 575          * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
 576          * @slab_caches_to_rcu_destroy list.  The slab pages are freed
  577  * through RCU and the associated kmem_cache is dereferenced
 578          * while freeing the pages, so the kmem_caches should be freed only
 579          * after the pending RCU operations are finished.  As rcu_barrier()
 580          * is a pretty slow operation, we batch all pending destructions
 581          * asynchronously.
 582          */
 583         mutex_lock(&slab_mutex);
 584         list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
 585         mutex_unlock(&slab_mutex);
 586 
 587         if (list_empty(&to_destroy))
 588                 return;
 589 
 590         rcu_barrier();
 591 
 592         list_for_each_entry_safe(s, s2, &to_destroy, list) {
 593 #ifdef SLAB_SUPPORTS_SYSFS
 594                 sysfs_slab_release(s);
 595 #else
 596                 slab_kmem_cache_release(s);
 597 #endif
 598         }
 599 }
 600 
 601 static int shutdown_cache(struct kmem_cache *s)
 602 {
 603         /* free asan quarantined objects */
 604         kasan_cache_shutdown(s);
 605 
 606         if (__kmem_cache_shutdown(s) != 0)
 607                 return -EBUSY;
 608 
 609         memcg_unlink_cache(s);
 610         list_del(&s->list);
 611 
 612         if (s->flags & SLAB_TYPESAFE_BY_RCU) {
 613 #ifdef SLAB_SUPPORTS_SYSFS
 614                 sysfs_slab_unlink(s);
 615 #endif
 616                 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 617                 schedule_work(&slab_caches_to_rcu_destroy_work);
 618         } else {
 619 #ifdef SLAB_SUPPORTS_SYSFS
 620                 sysfs_slab_unlink(s);
 621                 sysfs_slab_release(s);
 622 #else
 623                 slab_kmem_cache_release(s);
 624 #endif
 625         }
 626 
 627         return 0;
 628 }
 629 
 630 #ifdef CONFIG_MEMCG_KMEM
 631 /*
 632  * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 633  * @memcg: The memory cgroup the new cache is for.
 634  * @root_cache: The parent of the new cache.
 635  *
 636  * This function attempts to create a kmem cache that will serve allocation
 637  * requests going from @memcg to @root_cache. The new cache inherits properties
 638  * from its parent.
 639  */
 640 void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 641                              struct kmem_cache *root_cache)
 642 {
 643         static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
 644         struct cgroup_subsys_state *css = &memcg->css;
 645         struct memcg_cache_array *arr;
 646         struct kmem_cache *s = NULL;
 647         char *cache_name;
 648         int idx;
 649 
 650         get_online_cpus();
 651         get_online_mems();
 652 
 653         mutex_lock(&slab_mutex);
 654 
 655         /*
 656          * The memory cgroup could have been offlined while the cache
 657          * creation work was pending.
 658          */
 659         if (memcg->kmem_state != KMEM_ONLINE)
 660                 goto out_unlock;
 661 
 662         idx = memcg_cache_id(memcg);
 663         arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
 664                                         lockdep_is_held(&slab_mutex));
 665 
 666         /*
 667          * Since per-memcg caches are created asynchronously on first
 668          * allocation (see memcg_kmem_get_cache()), several threads can try to
 669          * create the same cache, but only one of them may succeed.
 670          */
 671         if (arr->entries[idx])
 672                 goto out_unlock;
 673 
 674         cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
 675         cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
 676                                css->serial_nr, memcg_name_buf);
 677         if (!cache_name)
 678                 goto out_unlock;
 679 
 680         s = create_cache(cache_name, root_cache->object_size,
 681                          root_cache->align,
 682                          root_cache->flags & CACHE_CREATE_MASK,
 683                          root_cache->useroffset, root_cache->usersize,
 684                          root_cache->ctor, memcg, root_cache);
 685         /*
 686          * If we could not create a memcg cache, do not complain, because
 687          * that's not critical at all as we can always proceed with the root
 688          * cache.
 689          */
 690         if (IS_ERR(s)) {
 691                 kfree(cache_name);
 692                 goto out_unlock;
 693         }
 694 
 695         /*
 696          * Since readers won't lock (see memcg_kmem_get_cache()), we need a
 697          * barrier here to ensure nobody will see the kmem_cache partially
 698          * initialized.
 699          */
 700         smp_wmb();
 701         arr->entries[idx] = s;
 702 
 703 out_unlock:
 704         mutex_unlock(&slab_mutex);
 705 
 706         put_online_mems();
 707         put_online_cpus();
 708 }
 709 
 710 static void kmemcg_workfn(struct work_struct *work)
 711 {
 712         struct kmem_cache *s = container_of(work, struct kmem_cache,
 713                                             memcg_params.work);
 714 
 715         get_online_cpus();
 716         get_online_mems();
 717 
 718         mutex_lock(&slab_mutex);
 719         s->memcg_params.work_fn(s);
 720         mutex_unlock(&slab_mutex);
 721 
 722         put_online_mems();
 723         put_online_cpus();
 724 }
 725 
 726 static void kmemcg_rcufn(struct rcu_head *head)
 727 {
 728         struct kmem_cache *s = container_of(head, struct kmem_cache,
 729                                             memcg_params.rcu_head);
 730 
 731         /*
 732          * We need to grab blocking locks.  Bounce to ->work.  The
 733          * work item shares the space with the RCU head and can't be
  734          * initialized earlier.
 735          */
 736         INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
 737         queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
 738 }
 739 
 740 static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
 741 {
 742         WARN_ON(shutdown_cache(s));
 743 }
 744 
 745 static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
 746 {
 747         struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
 748                                             memcg_params.refcnt);
 749         unsigned long flags;
 750 
 751         spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
 752         if (s->memcg_params.root_cache->memcg_params.dying)
 753                 goto unlock;
 754 
 755         s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
 756         INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
 757         queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
 758 
 759 unlock:
 760         spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
 761 }
 762 
 763 static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
 764 {
 765         __kmemcg_cache_deactivate_after_rcu(s);
 766         percpu_ref_kill(&s->memcg_params.refcnt);
 767 }
 768 
 769 static void kmemcg_cache_deactivate(struct kmem_cache *s)
 770 {
 771         if (WARN_ON_ONCE(is_root_cache(s)))
 772                 return;
 773 
 774         __kmemcg_cache_deactivate(s);
 775         s->flags |= SLAB_DEACTIVATED;
 776 
 777         /*
 778          * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
 779          * flag and make sure that no new kmem_cache deactivation tasks
  780          * are queued (see flush_memcg_workqueue()).
 781          */
 782         spin_lock_irq(&memcg_kmem_wq_lock);
 783         if (s->memcg_params.root_cache->memcg_params.dying)
 784                 goto unlock;
 785 
 786         s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
 787         call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
 788 unlock:
 789         spin_unlock_irq(&memcg_kmem_wq_lock);
 790 }
 791 
 792 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
 793                                   struct mem_cgroup *parent)
 794 {
 795         int idx;
 796         struct memcg_cache_array *arr;
 797         struct kmem_cache *s, *c;
 798         unsigned int nr_reparented;
 799 
 800         idx = memcg_cache_id(memcg);
 801 
 802         get_online_cpus();
 803         get_online_mems();
 804 
 805         mutex_lock(&slab_mutex);
 806         list_for_each_entry(s, &slab_root_caches, root_caches_node) {
 807                 arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
 808                                                 lockdep_is_held(&slab_mutex));
 809                 c = arr->entries[idx];
 810                 if (!c)
 811                         continue;
 812 
 813                 kmemcg_cache_deactivate(c);
 814                 arr->entries[idx] = NULL;
 815         }
 816         nr_reparented = 0;
 817         list_for_each_entry(s, &memcg->kmem_caches,
 818                             memcg_params.kmem_caches_node) {
 819                 WRITE_ONCE(s->memcg_params.memcg, parent);
 820                 css_put(&memcg->css);
 821                 nr_reparented++;
 822         }
 823         if (nr_reparented) {
 824                 list_splice_init(&memcg->kmem_caches,
 825                                  &parent->kmem_caches);
 826                 css_get_many(&parent->css, nr_reparented);
 827         }
 828         mutex_unlock(&slab_mutex);
 829 
 830         put_online_mems();
 831         put_online_cpus();
 832 }
 833 
 834 static int shutdown_memcg_caches(struct kmem_cache *s)
 835 {
 836         struct memcg_cache_array *arr;
 837         struct kmem_cache *c, *c2;
 838         LIST_HEAD(busy);
 839         int i;
 840 
 841         BUG_ON(!is_root_cache(s));
 842 
 843         /*
 844          * First, shutdown active caches, i.e. caches that belong to online
 845          * memory cgroups.
 846          */
 847         arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
 848                                         lockdep_is_held(&slab_mutex));
 849         for_each_memcg_cache_index(i) {
 850                 c = arr->entries[i];
 851                 if (!c)
 852                         continue;
 853                 if (shutdown_cache(c))
 854                         /*
 855                          * The cache still has objects. Move it to a temporary
 856                          * list so as not to try to destroy it for a second
 857                          * time while iterating over inactive caches below.
 858                          */
 859                         list_move(&c->memcg_params.children_node, &busy);
 860                 else
 861                         /*
 862                          * The cache is empty and will be destroyed soon. Clear
 863                          * the pointer to it in the memcg_caches array so that
 864                          * it will never be accessed even if the root cache
 865                          * stays alive.
 866                          */
 867                         arr->entries[i] = NULL;
 868         }
 869 
 870         /*
 871          * Second, shutdown all caches left from memory cgroups that are now
 872          * offline.
 873          */
 874         list_for_each_entry_safe(c, c2, &s->memcg_params.children,
 875                                  memcg_params.children_node)
 876                 shutdown_cache(c);
 877 
 878         list_splice(&busy, &s->memcg_params.children);
 879 
 880         /*
 881          * A cache being destroyed must be empty. In particular, this means
 882          * that all per memcg caches attached to it must be empty too.
 883          */
 884         if (!list_empty(&s->memcg_params.children))
 885                 return -EBUSY;
 886         return 0;
 887 }
 888 
 889 static void flush_memcg_workqueue(struct kmem_cache *s)
 890 {
 891         spin_lock_irq(&memcg_kmem_wq_lock);
 892         s->memcg_params.dying = true;
 893         spin_unlock_irq(&memcg_kmem_wq_lock);
 894 
 895         /*
 896          * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
 897          * sure all registered rcu callbacks have been invoked.
 898          */
 899         rcu_barrier();
 900 
 901         /*
 902          * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
 903          * deactivates the memcg kmem_caches through workqueue. Make sure all
 904          * previous workitems on workqueue are processed.
 905          */
 906         if (likely(memcg_kmem_cache_wq))
 907                 flush_workqueue(memcg_kmem_cache_wq);
 908 
 909         /*
 910          * If we're racing with children kmem_cache deactivation, it might
 911          * take another rcu grace period to complete their destruction.
 912          * At this moment the corresponding percpu_ref_kill() call should be
 913          * done, but it might take another rcu grace period to complete
 914          * switching to the atomic mode.
  915          * Note that we check without grabbing the slab_mutex. It's safe
 916          * because at this moment the children list can't grow.
 917          */
 918         if (!list_empty(&s->memcg_params.children))
 919                 rcu_barrier();
 920 }
 921 #else
 922 static inline int shutdown_memcg_caches(struct kmem_cache *s)
 923 {
 924         return 0;
 925 }
 926 
 927 static inline void flush_memcg_workqueue(struct kmem_cache *s)
 928 {
 929 }
 930 #endif /* CONFIG_MEMCG_KMEM */
 931 
 932 void slab_kmem_cache_release(struct kmem_cache *s)
 933 {
 934         __kmem_cache_release(s);
 935         destroy_memcg_params(s);
 936         kfree_const(s->name);
 937         kmem_cache_free(kmem_cache, s);
 938 }
 939 
 940 void kmem_cache_destroy(struct kmem_cache *s)
 941 {
 942         int err;
 943 
 944         if (unlikely(!s))
 945                 return;
 946 
 947         flush_memcg_workqueue(s);
 948 
 949         get_online_cpus();
 950         get_online_mems();
 951 
 952         mutex_lock(&slab_mutex);
 953 
 954         s->refcount--;
 955         if (s->refcount)
 956                 goto out_unlock;
 957 
 958         err = shutdown_memcg_caches(s);
 959         if (!err)
 960                 err = shutdown_cache(s);
 961 
 962         if (err) {
 963                 pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
 964                        s->name);
 965                 dump_stack();
 966         }
 967 out_unlock:
 968         mutex_unlock(&slab_mutex);
 969 
 970         put_online_mems();
 971         put_online_cpus();
 972 }
 973 EXPORT_SYMBOL(kmem_cache_destroy);
 974 
 975 /**
 976  * kmem_cache_shrink - Shrink a cache.
 977  * @cachep: The cache to shrink.
 978  *
 979  * Releases as many slabs as possible for a cache.
 980  * To help debugging, a zero exit status indicates all slabs were released.
 981  *
 982  * Return: %0 if all slabs were released, non-zero otherwise
 983  */
 984 int kmem_cache_shrink(struct kmem_cache *cachep)
 985 {
 986         int ret;
 987 
 988         get_online_cpus();
 989         get_online_mems();
 990         kasan_cache_shrink(cachep);
 991         ret = __kmem_cache_shrink(cachep);
 992         put_online_mems();
 993         put_online_cpus();
 994         return ret;
 995 }
 996 EXPORT_SYMBOL(kmem_cache_shrink);
 997 
 998 /**
 999  * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache
1000  * @s: The cache pointer
1001  */
1002 void kmem_cache_shrink_all(struct kmem_cache *s)
1003 {
1004         struct kmem_cache *c;
1005 
1006         if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
1007                 kmem_cache_shrink(s);
1008                 return;
1009         }
1010 
1011         get_online_cpus();
1012         get_online_mems();
1013         kasan_cache_shrink(s);
1014         __kmem_cache_shrink(s);
1015 
1016         /*
 1017          * We have to take the slab_mutex to protect against memcg
 1018          * list modification.
1019          */
1020         mutex_lock(&slab_mutex);
1021         for_each_memcg_cache(c, s) {
1022                 /*
1023                  * Don't need to shrink deactivated memcg caches.
1024                  */
 1025                 if (c->flags & SLAB_DEACTIVATED)
1026                         continue;
1027                 kasan_cache_shrink(c);
1028                 __kmem_cache_shrink(c);
1029         }
1030         mutex_unlock(&slab_mutex);
1031         put_online_mems();
1032         put_online_cpus();
1033 }
1034 
1035 bool slab_is_available(void)
1036 {
1037         return slab_state >= UP;
1038 }
1039 
1040 #ifndef CONFIG_SLOB
1041 /* Create a cache during boot when no slab services are available yet */
1042 void __init create_boot_cache(struct kmem_cache *s, const char *name,
1043                 unsigned int size, slab_flags_t flags,
1044                 unsigned int useroffset, unsigned int usersize)
1045 {
1046         int err;
1047         unsigned int align = ARCH_KMALLOC_MINALIGN;
1048 
1049         s->name = name;
1050         s->size = s->object_size = size;
1051 
1052         /*
1053          * For power of two sizes, guarantee natural alignment for kmalloc
1054          * caches, regardless of SL*B debugging options.
1055          */
1056         if (is_power_of_2(size))
1057                 align = max(align, size);
1058         s->align = calculate_alignment(flags, align, size);
1059 
1060         s->useroffset = useroffset;
1061         s->usersize = usersize;
1062 
1063         slab_init_memcg_params(s);
1064 
1065         err = __kmem_cache_create(s, flags);
1066 
1067         if (err)
1068                 panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
1069                                         name, size, err);
1070 
1071         s->refcount = -1;       /* Exempt from merging for now */
1072 }
1073 
1074 struct kmem_cache *__init create_kmalloc_cache(const char *name,
1075                 unsigned int size, slab_flags_t flags,
1076                 unsigned int useroffset, unsigned int usersize)
1077 {
1078         struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
1079 
1080         if (!s)
1081                 panic("Out of memory when creating slab %s\n", name);
1082 
1083         create_boot_cache(s, name, size, flags, useroffset, usersize);
1084         list_add(&s->list, &slab_caches);
1085         memcg_link_cache(s, NULL);
1086         s->refcount = 1;
1087         return s;
1088 }
1089 
1090 struct kmem_cache *
1091 kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
1092 { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
1093 EXPORT_SYMBOL(kmalloc_caches);
1094 
1095 /*
 1096  * Conversion table for small slab sizes / 8 to the index in the
1097  * kmalloc array. This is necessary for slabs < 192 since we have non power
1098  * of two cache sizes there. The size of larger slabs can be determined using
1099  * fls.
1100  */
1101 static u8 size_index[24] __ro_after_init = {
1102         3,      /* 8 */
1103         4,      /* 16 */
1104         5,      /* 24 */
1105         5,      /* 32 */
1106         6,      /* 40 */
1107         6,      /* 48 */
1108         6,      /* 56 */
1109         6,      /* 64 */
1110         1,      /* 72 */
1111         1,      /* 80 */
1112         1,      /* 88 */
1113         1,      /* 96 */
1114         7,      /* 104 */
1115         7,      /* 112 */
1116         7,      /* 120 */
1117         7,      /* 128 */
1118         2,      /* 136 */
1119         2,      /* 144 */
1120         2,      /* 152 */
1121         2,      /* 160 */
1122         2,      /* 168 */
1123         2,      /* 176 */
1124         2,      /* 184 */
1125         2       /* 192 */
1126 };
1127 
1128 static inline unsigned int size_index_elem(unsigned int bytes)
1129 {
1130         return (bytes - 1) / 8;
1131 }
1132 
1133 /*
1134  * Find the kmem_cache structure that serves a given size of
1135  * allocation
1136  */
1137 struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
1138 {
1139         unsigned int index;
1140 
1141         if (size <= 192) {
1142                 if (!size)
1143                         return ZERO_SIZE_PTR;
1144 
1145                 index = size_index[size_index_elem(size)];
1146         } else {
1147                 if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
1148                         return NULL;
1149                 index = fls(size - 1);
1150         }
1151 
1152         return kmalloc_caches[kmalloc_type(flags)][index];
1153 }
1154 
1155 /*
1156  * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
1157  * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
1158  * kmalloc-67108864.
1159  */
1160 const struct kmalloc_info_struct kmalloc_info[] __initconst = {
1161         {NULL,                      0},         {"kmalloc-96",             96},
1162         {"kmalloc-192",           192},         {"kmalloc-8",               8},
1163         {"kmalloc-16",             16},         {"kmalloc-32",             32},
1164         {"kmalloc-64",             64},         {"kmalloc-128",           128},
1165         {"kmalloc-256",           256},         {"kmalloc-512",           512},
1166         {"kmalloc-1k",           1024},         {"kmalloc-2k",           2048},
1167         {"kmalloc-4k",           4096},         {"kmalloc-8k",           8192},
1168         {"kmalloc-16k",         16384},         {"kmalloc-32k",         32768},
1169         {"kmalloc-64k",         65536},         {"kmalloc-128k",       131072},
1170         {"kmalloc-256k",       262144},         {"kmalloc-512k",       524288},
1171         {"kmalloc-1M",        1048576},         {"kmalloc-2M",        2097152},
1172         {"kmalloc-4M",        4194304},         {"kmalloc-8M",        8388608},
1173         {"kmalloc-16M",      16777216},         {"kmalloc-32M",      33554432},
1174         {"kmalloc-64M",      67108864}
1175 };
1176 
1177 /*
1178  * Patch up the size_index table if we have strange large alignment
1179  * requirements for the kmalloc array. This is only the case for
1180  * MIPS it seems. The standard arches will not generate any code here.
1181  *
1182  * Largest permitted alignment is 256 bytes due to the way we
1183  * handle the index determination for the smaller caches.
1184  *
1185  * Make sure that nothing crazy happens if someone starts tinkering
 1186  * around with ARCH_KMALLOC_MINALIGN.
1187  */
1188 void __init setup_kmalloc_cache_index_table(void)
1189 {
1190         unsigned int i;
1191 
1192         BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
1193                 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
1194 
1195         for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
1196                 unsigned int elem = size_index_elem(i);
1197 
1198                 if (elem >= ARRAY_SIZE(size_index))
1199                         break;
1200                 size_index[elem] = KMALLOC_SHIFT_LOW;
1201         }
1202 
1203         if (KMALLOC_MIN_SIZE >= 64) {
1204                 /*
 1205                  * The 96 byte sized cache is not used if the alignment
 1206                  * is 64 bytes.
1207                  */
1208                 for (i = 64 + 8; i <= 96; i += 8)
1209                         size_index[size_index_elem(i)] = 7;
1210 
1211         }
1212 
1213         if (KMALLOC_MIN_SIZE >= 128) {
1214                 /*
1215                  * The 192 byte sized cache is not used if the alignment
 1216                  * is 128 bytes. Redirect kmalloc to use the 256 byte cache
1217                  * instead.
1218                  */
1219                 for (i = 128 + 8; i <= 192; i += 8)
1220                         size_index[size_index_elem(i)] = 8;
1221         }
1222 }
1223 
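/*
 * Worked example: with KMALLOC_MIN_SIZE == 64, the first loop above
 * redirects the 8..56 byte entries to KMALLOC_SHIFT_LOW (kmalloc-64)
 * and the second redirects the 72..96 byte entries to index 7
 * (kmalloc-128), so the unaligned 96-byte cache is never selected.
 */
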
1224 static const char *
1225 kmalloc_cache_name(const char *prefix, unsigned int size)
1226 {
1227 
1228         static const char units[3] = "\0kM";
1229         int idx = 0;
1230 
1231         while (size >= 1024 && (size % 1024 == 0)) {
1232                 size /= 1024;
1233                 idx++;
1234         }
1235 
1236         return kasprintf(GFP_NOWAIT, "%s-%u%c", prefix, size, units[idx]);
1237 }
1238 
1239 static void __init
1240 new_kmalloc_cache(int idx, int type, slab_flags_t flags)
1241 {
1242         const char *name;
1243 
1244         if (type == KMALLOC_RECLAIM) {
1245                 flags |= SLAB_RECLAIM_ACCOUNT;
1246                 name = kmalloc_cache_name("kmalloc-rcl",
1247                                                 kmalloc_info[idx].size);
1248                 BUG_ON(!name);
1249         } else {
1250                 name = kmalloc_info[idx].name;
1251         }
1252 
1253         kmalloc_caches[type][idx] = create_kmalloc_cache(name,
1254                                         kmalloc_info[idx].size, flags, 0,
1255                                         kmalloc_info[idx].size);
1256 }
1257 
1258 /*
1259  * Create the kmalloc array. Some of the regular kmalloc arrays
1260  * may already have been created because they were needed to
1261  * enable allocations for slab creation.
1262  */
1263 void __init create_kmalloc_caches(slab_flags_t flags)
1264 {
1265         int i, type;
1266 
1267         for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
1268                 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
1269                         if (!kmalloc_caches[type][i])
1270                                 new_kmalloc_cache(i, type, flags);
1271 
1272                         /*
 1273                          * Caches that are not a power-of-two size have
 1274                          * to be created immediately after the earlier
 1275                          * power-of-two caches.
1276                          */
1277                         if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
1278                                         !kmalloc_caches[type][1])
1279                                 new_kmalloc_cache(1, type, flags);
1280                         if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
1281                                         !kmalloc_caches[type][2])
1282                                 new_kmalloc_cache(2, type, flags);
1283                 }
1284         }
1285 
1286         /* Kmalloc array is now usable */
1287         slab_state = UP;
1288 
1289 #ifdef CONFIG_ZONE_DMA
1290         for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
1291                 struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
1292 
1293                 if (s) {
1294                         unsigned int size = kmalloc_size(i);
1295                         const char *n = kmalloc_cache_name("dma-kmalloc", size);
1296 
1297                         BUG_ON(!n);
1298                         kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
1299                                 n, size, SLAB_CACHE_DMA | flags, 0, 0);
1300                 }
1301         }
1302 #endif
1303 }
1304 #endif /* !CONFIG_SLOB */
1305 
1306 /*
1307  * To avoid unnecessary overhead, we pass through large allocation requests
1308  * directly to the page allocator. We use __GFP_COMP, because we will need to
1309  * know the allocation order to free the pages properly in kfree.
1310  */
1311 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
1312 {
1313         void *ret = NULL;
1314         struct page *page;
1315 
1316         flags |= __GFP_COMP;
1317         page = alloc_pages(flags, order);
1318         if (likely(page)) {
1319                 ret = page_address(page);
1320                 mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
1321                                     1 << order);
1322         }
1323         ret = kasan_kmalloc_large(ret, size, flags);
1324         /* As ret might get tagged, call kmemleak hook after KASAN. */
1325         kmemleak_alloc(ret, size, 1, flags);
1326         return ret;
1327 }
1328 EXPORT_SYMBOL(kmalloc_order);
1329 
1330 #ifdef CONFIG_TRACING
1331 void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
1332 {
1333         void *ret = kmalloc_order(size, flags, order);
1334         trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
1335         return ret;
1336 }
1337 EXPORT_SYMBOL(kmalloc_order_trace);
1338 #endif
1339 
1340 #ifdef CONFIG_SLAB_FREELIST_RANDOM
1341 /* Randomize a generic freelist */
1342 static void freelist_randomize(struct rnd_state *state, unsigned int *list,
1343                                unsigned int count)
1344 {
1345         unsigned int rand;
1346         unsigned int i;
1347 
1348         for (i = 0; i < count; i++)
1349                 list[i] = i;
1350 
1351         /* Fisher-Yates shuffle */
1352         for (i = count - 1; i > 0; i--) {
1353                 rand = prandom_u32_state(state);
1354                 rand %= (i + 1);
1355                 swap(list[i], list[rand]);
1356         }
1357 }
1358 
1359 /* Create a random sequence per cache */
1360 int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
1361                                     gfp_t gfp)
1362 {
1363         struct rnd_state state;
1364 
1365         if (count < 2 || cachep->random_seq)
1366                 return 0;
1367 
1368         cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1369         if (!cachep->random_seq)
1370                 return -ENOMEM;
1371 
1372         /* Get best entropy at this stage of boot */
1373         prandom_seed_state(&state, get_random_long());
1374 
1375         freelist_randomize(&state, cachep->random_seq, count);
1376         return 0;
1377 }
1378 
1379 /* Destroy the per-cache random freelist sequence */
1380 void cache_random_seq_destroy(struct kmem_cache *cachep)
1381 {
1382         kfree(cachep->random_seq);
1383         cachep->random_seq = NULL;
1384 }
1385 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
1386 
1387 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
1388 #ifdef CONFIG_SLAB
1389 #define SLABINFO_RIGHTS (0600)
1390 #else
1391 #define SLABINFO_RIGHTS (0400)
1392 #endif
1393 
1394 static void print_slabinfo_header(struct seq_file *m)
1395 {
1396         /*
1397          * Output format version, so at least we can change it
1398          * without _too_ many complaints.
1399          */
1400 #ifdef CONFIG_DEBUG_SLAB
1401         seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1402 #else
1403         seq_puts(m, "slabinfo - version: 2.1\n");
1404 #endif
1405         seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1406         seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1407         seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1408 #ifdef CONFIG_DEBUG_SLAB
1409         seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
1410         seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1411 #endif
1412         seq_putc(m, '\n');
1413 }
1414 
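/*
 * An illustrative line in the 2.1 format printed above (all numbers are
 * made up):
 *
 * kmalloc-64         2048   2048     64   64    1 : tunables    0    0    0 : slabdata     32     32      0
 */
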
1415 void *slab_start(struct seq_file *m, loff_t *pos)
1416 {
1417         mutex_lock(&slab_mutex);
1418         return seq_list_start(&slab_root_caches, *pos);
1419 }
1420 
1421 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1422 {
1423         return seq_list_next(p, &slab_root_caches, pos);
1424 }
1425 
1426 void slab_stop(struct seq_file *m, void *p)
1427 {
1428         mutex_unlock(&slab_mutex);
1429 }
1430 
1431 static void
1432 memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
1433 {
1434         struct kmem_cache *c;
1435         struct slabinfo sinfo;
1436 
1437         if (!is_root_cache(s))
1438                 return;
1439 
1440         for_each_memcg_cache(c, s) {
1441                 memset(&sinfo, 0, sizeof(sinfo));
1442                 get_slabinfo(c, &sinfo);
1443 
1444                 info->active_slabs += sinfo.active_slabs;
1445                 info->num_slabs += sinfo.num_slabs;
1446                 info->shared_avail += sinfo.shared_avail;
1447                 info->active_objs += sinfo.active_objs;
1448                 info->num_objs += sinfo.num_objs;
1449         }
1450 }
1451 
1452 static void cache_show(struct kmem_cache *s, struct seq_file *m)
1453 {
1454         struct slabinfo sinfo;
1455 
1456         memset(&sinfo, 0, sizeof(sinfo));
1457         get_slabinfo(s, &sinfo);
1458 
1459         memcg_accumulate_slabinfo(s, &sinfo);
1460 
1461         seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1462                    cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
1463                    sinfo.objects_per_slab, (1 << sinfo.cache_order));
1464 
1465         seq_printf(m, " : tunables %4u %4u %4u",
1466                    sinfo.limit, sinfo.batchcount, sinfo.shared);
1467         seq_printf(m, " : slabdata %6lu %6lu %6lu",
1468                    sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1469         slabinfo_show_stats(m, s);
1470         seq_putc(m, '\n');
1471 }
1472 
1473 static int slab_show(struct seq_file *m, void *p)
1474 {
1475         struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
1476 
1477         if (p == slab_root_caches.next)
1478                 print_slabinfo_header(m);
1479         cache_show(s, m);
1480         return 0;
1481 }
1482 
1483 void dump_unreclaimable_slab(void)
1484 {
1485         struct kmem_cache *s, *s2;
1486         struct slabinfo sinfo;
1487 
1488         /*
 1489          * Acquiring slab_mutex here is risky since we don't want to
 1490          * sleep in the OOM path, but traversing the list without the
 1491          * mutex risks a crash.
 1492          * Use mutex_trylock to protect the traversal, and dump nothing
 1493          * if the mutex cannot be acquired.
1494          */
1495         if (!mutex_trylock(&slab_mutex)) {
1496                 pr_warn("excessive unreclaimable slab but cannot dump stats\n");
1497                 return;
1498         }
1499 
1500         pr_info("Unreclaimable slab info:\n");
1501         pr_info("Name                      Used          Total\n");
1502 
1503         list_for_each_entry_safe(s, s2, &slab_caches, list) {
1504                 if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
1505                         continue;
1506 
1507                 get_slabinfo(s, &sinfo);
1508 
1509                 if (sinfo.num_objs > 0)
1510                         pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
1511                                 (sinfo.active_objs * s->size) / 1024,
1512                                 (sinfo.num_objs * s->size) / 1024);
1513         }
1514         mutex_unlock(&slab_mutex);
1515 }
1516 
1517 #if defined(CONFIG_MEMCG)
1518 void *memcg_slab_start(struct seq_file *m, loff_t *pos)
1519 {
1520         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1521 
1522         mutex_lock(&slab_mutex);
1523         return seq_list_start(&memcg->kmem_caches, *pos);
1524 }
1525 
1526 void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
1527 {
1528         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1529 
1530         return seq_list_next(p, &memcg->kmem_caches, pos);
1531 }
1532 
1533 void memcg_slab_stop(struct seq_file *m, void *p)
1534 {
1535         mutex_unlock(&slab_mutex);
1536 }
1537 
1538 int memcg_slab_show(struct seq_file *m, void *p)
1539 {
1540         struct kmem_cache *s = list_entry(p, struct kmem_cache,
1541                                           memcg_params.kmem_caches_node);
1542         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
1543 
1544         if (p == memcg->kmem_caches.next)
1545                 print_slabinfo_header(m);
1546         cache_show(s, m);
1547         return 0;
1548 }
1549 #endif
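/*
 * Editor's note: these memcg_slab_* iterators mirror the root-cache
 * ones above but walk a single memcg's kmem_caches list; they back
 * the per-cgroup slabinfo file (memory.kmem.slabinfo in cgroup v1),
 * so each cgroup sees only its own child caches.
 */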
1550 
1551 /*
1552  * slabinfo_op - iterator that generates /proc/slabinfo
1553  *
1554  * Output layout:
1555  * cache-name
1556  * num-active-objs
1557  * total-objs
1558  * object-size
1559  * num-objs-per-slab
1560  * num-pages-per-slab
1561  * tunables and slabdata (active/total slabs) columns
1562  * + further values on SMP and with statistics enabled
1563  */
1564 static const struct seq_operations slabinfo_op = {
1565         .start = slab_start,
1566         .next = slab_next,
1567         .stop = slab_stop,
1568         .show = slab_show,
1569 };
1570 
1571 static int slabinfo_open(struct inode *inode, struct file *file)
1572 {
1573         return seq_open(file, &slabinfo_op);
1574 }
1575 
1576 static const struct file_operations proc_slabinfo_operations = {
1577         .open           = slabinfo_open,
1578         .read           = seq_read,
1579         .write          = slabinfo_write,
1580         .llseek         = seq_lseek,
1581         .release        = seq_release,
1582 };
1583 
1584 static int __init slab_proc_init(void)
1585 {
1586         proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
1587                                                 &proc_slabinfo_operations);
1588         return 0;
1589 }
1590 module_init(slab_proc_init);
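/*
 * Editor's sketch (assumptions: CONFIG_SLAB; "dentry 120 60 8" is an
 * illustrative tuple, not a recommendation): with SLAB, slabinfo_write()
 * lets root retune a cache's limit/batchcount/shared values, while the
 * SLUB stub rejects writes:
 *
 *   # grep dentry /proc/slabinfo
 *   # echo "dentry 120 60 8" > /proc/slabinfo
 */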
1591 
1592 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)
1593 /*
1594  * Display information about kmem caches that have child memcg caches.
1595  */
1596 static int memcg_slabinfo_show(struct seq_file *m, void *unused)
1597 {
1598         struct kmem_cache *s, *c;
1599         struct slabinfo sinfo;
1600 
1601         mutex_lock(&slab_mutex);
1602         seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
1603         seq_puts(m, " <active_slabs> <num_slabs>\n");
1604         list_for_each_entry(s, &slab_root_caches, root_caches_node) {
1605                 /*
1606                  * Skip kmem caches that don't have any memcg children.
1607                  */
1608                 if (list_empty(&s->memcg_params.children))
1609                         continue;
1610 
1611                 memset(&sinfo, 0, sizeof(sinfo));
1612                 get_slabinfo(s, &sinfo);
1613                 seq_printf(m, "%-17s root       %6lu %6lu %6lu %6lu\n",
1614                            cache_name(s), sinfo.active_objs, sinfo.num_objs,
1615                            sinfo.active_slabs, sinfo.num_slabs);
1616 
1617                 for_each_memcg_cache(c, s) {
1618                         struct cgroup_subsys_state *css;
1619                         char *status = "";
1620 
1621                         css = &c->memcg_params.memcg->css;
1622                         if (!(css->flags & CSS_ONLINE))
1623                                 status = ":dead";
1624                         else if (c->flags & SLAB_DEACTIVATED)
1625                                 status = ":deact";
1626 
1627                         memset(&sinfo, 0, sizeof(sinfo));
1628                         get_slabinfo(c, &sinfo);
1629                         seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n",
1630                                    cache_name(c), css->id, status,
1631                                    sinfo.active_objs, sinfo.num_objs,
1632                                    sinfo.active_slabs, sinfo.num_slabs);
1633                 }
1634         }
1635         mutex_unlock(&slab_mutex);
1636         return 0;
1637 }
1638 DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo);
1639 
1640 static int __init memcg_slabinfo_init(void)
1641 {
1642         debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO,
1643                             NULL, NULL, &memcg_slabinfo_fops);
1644         return 0;
1645 }
1646 
1647 late_initcall(memcg_slabinfo_init);
1648 #endif /* CONFIG_DEBUG_FS && CONFIG_MEMCG_KMEM */
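/*
 * Sample debugfs output assembled from the format strings above
 * (editor's illustrative values):
 *
 *   # <name> <css_id[:dead|deact]> <active_objs> <num_objs> <active_slabs> <num_slabs>
 *   kmalloc-64        root         10128  10200    240    240
 *   kmalloc-64           4          1020   1050     25     25
 *   kmalloc-64           7:dead        10    128      2      3
 */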
1649 #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
1650 
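/*
 * Editor's note: common helper for __krealloc() and krealloc() below.
 * If the existing allocation is already large enough (ksize() reports
 * the rounded-up allocated size, which may exceed what the caller
 * originally requested), the object is reused in place and no copy is
 * made; otherwise a new object is allocated and the old contents copied.
 */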
1651 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
1652                                            gfp_t flags)
1653 {
1654         void *ret;
1655         size_t ks = 0;
1656 
1657         if (p)
1658                 ks = ksize(p);
1659 
1660         if (ks >= new_size) {
1661                 p = kasan_krealloc((void *)p, new_size, flags);
1662                 return (void *)p;
1663         }
1664 
1665         ret = kmalloc_track_caller(new_size, flags);
1666         if (ret && p)
1667                 memcpy(ret, p, ks);
1668 
1669         return ret;
1670 }
1671 
1672 /**
1673  * __krealloc - like krealloc() but don't free @p.
1674  * @p: object to reallocate memory for.
1675  * @new_size: how many bytes of memory are required.
1676  * @flags: the type of memory to allocate.
1677  *
1678  * This function is like krealloc() except it never frees the originally
1679  * allocated buffer. Use this if you cannot free the old buffer immediately,
1680  * for example because RCU readers may still hold references to it.
1681  *
1682  * Return: pointer to the allocated memory or %NULL in case of error
1683  */
1684 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
1685 {
1686         if (unlikely(!new_size))
1687                 return ZERO_SIZE_PTR;
1688 
1689         return __do_krealloc(p, new_size, flags);
1691 }
1692 EXPORT_SYMBOL(__krealloc);
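/*
 * Editor's sketch of the RCU-style use hinted at above; my_table,
 * table_ptr and table_lock are hypothetical names:
 *
 *   struct my_table *old, *new;
 *
 *   old = rcu_dereference_protected(table_ptr,
 *                                   lockdep_is_held(&table_lock));
 *   new = __krealloc(old, new_size, GFP_KERNEL);
 *   if (!new)
 *           return -ENOMEM;
 *   if (new != old) {
 *           rcu_assign_pointer(table_ptr, new);
 *           synchronize_rcu();      // wait out readers of 'old'
 *           kfree(old);             // safe: __krealloc() left it alone
 *   }
 */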
1693 
1694 /**
1695  * krealloc - reallocate memory. The contents will remain unchanged.
1696  * @p: object to reallocate memory for.
1697  * @new_size: how many bytes of memory are required.
1698  * @flags: the type of memory to allocate.
1699  *
1700  * The contents of the object pointed to are preserved up to the
1701  * lesser of the new and old sizes.  If @p is %NULL, krealloc()
1702  * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
1703  * %NULL pointer, the object pointed to is freed.
1704  *
1705  * Return: pointer to the allocated memory or %NULL in case of error
1706  */
1707 void *krealloc(const void *p, size_t new_size, gfp_t flags)
1708 {
1709         void *ret;
1710 
1711         if (unlikely(!new_size)) {
1712                 kfree(p);
1713                 return ZERO_SIZE_PTR;
1714         }
1715 
1716         ret = __do_krealloc(p, new_size, flags);
1717         if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1718                 kfree(p);
1719 
1720         return ret;
1721 }
1722 EXPORT_SYMBOL(krealloc);
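/*
 * Usage sketch (editor's example): on failure krealloc() returns NULL
 * and leaves the original allocation intact, so the caller still owns
 * it and must free it:
 *
 *   int *arr = kmalloc_array(8, sizeof(*arr), GFP_KERNEL);
 *   int *bigger;
 *   ...
 *   bigger = krealloc(arr, 16 * sizeof(*arr), GFP_KERNEL);
 *   if (!bigger) {
 *           kfree(arr);             // the old buffer was not freed
 *           return -ENOMEM;
 *   }
 *   arr = bigger;
 */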
1723 
1724 /**
1725  * kzfree - like kfree but zero memory
1726  * @p: object to free memory of
1727  *
1728  * The memory the object @p points to is zeroed before it is freed.
1729  * If @p is %NULL, kzfree() does nothing.
1730  *
1731  * Note: this function zeroes the whole allocated buffer, which can be a
1732  * good deal bigger than the requested size passed to kmalloc(). Be
1733  * careful when using it in performance-sensitive code.
1734  */
1735 void kzfree(const void *p)
1736 {
1737         size_t ks;
1738         void *mem = (void *)p;
1739 
1740         if (unlikely(ZERO_OR_NULL_PTR(mem)))
1741                 return;
1742         ks = ksize(mem);
1743         memset(mem, 0, ks);
1744         kfree(mem);
1745 }
1746 EXPORT_SYMBOL(kzfree);
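/*
 * Usage sketch (editor's example): kzfree() suits buffers that held
 * secrets, since the whole rounded-up allocation is cleared:
 *
 *   u8 *key = kmalloc(key_len, GFP_KERNEL);
 *   if (!key)
 *           return -ENOMEM;
 *   ... use the key material ...
 *   kzfree(key);    // zeroes all ksize(key) bytes, then frees
 */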
1747 
1748 /**
1749  * ksize - get the actual amount of memory allocated for a given object
1750  * @objp: Pointer to the object
1751  *
1752  * kmalloc may internally round up allocations and return more memory
1753  * than requested. ksize() can be used to determine the actual amount of
1754  * memory allocated. The caller may use this additional memory, even though
1755  * a smaller amount of memory was initially specified with the kmalloc call.
1756  * The caller must guarantee that objp points to a valid object previously
1757  * allocated with either kmalloc() or kmem_cache_alloc(). The object
1758  * must not be freed during the duration of the call.
1759  *
1760  * Return: size of the actual memory used by @objp in bytes
1761  */
1762 size_t ksize(const void *objp)
1763 {
1764         size_t size;
1765 
1766         if (WARN_ON_ONCE(!objp))
1767                 return 0;
1768         /*
1769          * We need to check that the pointed to object is valid, and only then
1770          * unpoison the shadow memory below. We use __kasan_check_read() to
1771          * generate a more useful report at the time ksize() is called (rather
1772          * than later where behaviour is undefined due to potential
1773          * use-after-free or double-free).
1774          *
1775          * If the pointed to memory is invalid we return 0, to avoid users of
1776          * ksize() writing to and potentially corrupting the memory region.
1777          *
1778          * We want to perform the check before __ksize(), to avoid potentially
1779          * crashing in __ksize() due to accessing invalid metadata.
1780          */
1781         if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1))
1782                 return 0;
1783 
1784         size = __ksize(objp);
1785         /*
1786          * We assume that ksize callers could use the whole allocated
1787          * area, so we need to unpoison it.
1788          */
1789         kasan_unpoison_shadow(objp, size);
1790         return size;
1791 }
1792 EXPORT_SYMBOL(ksize);
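/*
 * Usage sketch (editor's example): a caller may legitimately use the
 * slack space that kmalloc() rounded the allocation up to:
 *
 *   char *buf = kmalloc(100, GFP_KERNEL);  // likely a kmalloc-128 object
 *   size_t cap = buf ? ksize(buf) : 0;     // e.g. 128, not 100
 *   // all 'cap' bytes of 'buf' may be read and written
 */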
1793 
1794 /* Tracepoints definitions. */
1795 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1796 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1797 EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
1798 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
1799 EXPORT_TRACEPOINT_SYMBOL(kfree);
1800 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1801 
1802 int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
1803 {
1804         if (__should_failslab(s, gfpflags))
1805                 return -ENOMEM;
1806         return 0;
1807 }
1808 ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
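/*
 * Editor's note: ALLOW_ERROR_INJECTION() whitelists should_failslab()
 * for the error-injection framework, so with CONFIG_FUNCTION_ERROR_INJECTION
 * and CONFIG_BPF_KPROBE_OVERRIDE a BPF program attached here can call
 * bpf_override_return() to force -ENOMEM. Independently, CONFIG_FAILSLAB
 * exposes probabilistic knobs under debugfs, e.g.:
 *
 *   # echo 10 > /sys/kernel/debug/failslab/probability
 *   # echo 100 > /sys/kernel/debug/failslab/interval
 *   # echo -1 > /sys/kernel/debug/failslab/times
 */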
